Refactoring the image_pool for Android, and adding some common utils for camera configuration. Also experimenting with optimization: grayscale preview is way faster than color right now.

pull/13383/head
Ethan Rublee 15 years ago
parent 077dd77757
commit 3a932b0f6c
Changed files, with the number of changed lines for each:

  1. android/android-jni/AndroidManifest.xml (4)
  2. android/android-jni/Makefile (8)
  3. android/android-jni/jni/Application.mk (2)
  4. android/android-jni/jni/Calibration.cpp (377)
  5. android/android-jni/jni/Calibration.h (39)
  6. android/android-jni/jni/gl_code.cpp (477)
  7. android/android-jni/jni/glcamera.h (47)
  8. android/android-jni/jni/image_pool.cpp (117)
  9. android/android-jni/jni/image_pool.h (84)
  10. android/android-jni/jni/image_pool.i (6)
  11. android/android-jni/jni/yuv420sp2rgb.c (136)
  12. android/android-jni/res/layout/calibrationviewer.xml (11)
  13. android/android-jni/res/layout/camerasettings.xml (40)
  14. android/android-jni/res/layout/chesssizer.xml (40)
  15. android/android-jni/res/values/attrs.xml (11)
  16. android/android-jni/res/values/chessnumbers.xml (20)
  17. android/android-jni/res/values/settingnumbers.xml (20)
  18. android/android-jni/res/values/strings.xml (19)
  19. android/android-jni/sample.local.env.mk (1)
  20. android/android-jni/src/com/opencv/calibration/CalibrationViewer.java (47)
  21. android/android-jni/src/com/opencv/calibration/ChessBoardChooser.java (75)
  22. android/android-jni/src/com/opencv/calibration/services/CalibrationService.java (166)
  23. android/android-jni/src/com/opencv/camera/CameraConfig.java (166)
  24. android/android-jni/src/com/opencv/camera/NativePreviewer.java (414)
  25. android/android-jni/src/com/opencv/camera/NativeProcessor.java (300)

@@ -11,6 +11,10 @@
		regular Android project.
	-->
	<activity android:name="com.opencv.OpenCV" />
	<activity android:name="com.opencv.calibration.ChessBoardChooser"/>
	<activity android:name="com.opencv.camera.CameraConfig"/>
	<activity android:name="com.opencv.calibration.CalibrationViewer"/>
	<service android:name="com.opencv.calibration.services.CalibrationService"/>
</application>
<!-- set the opengl version
<uses-feature android:glEsVersion="0x00020000" />-->

@@ -12,6 +12,10 @@ $(info gedit $(LOCAL_ENV_MK))
$(error Please setup the $(LOCAL_ENV_MK) - the default was just created)
endif

ifndef ARM_TARGETS
ARM_TARGETS=armeabi armeabi-v7a
endif

ANDROID_NDK_BASE = $(ANDROID_NDK_ROOT)

$(info OPENCV_CONFIG = $(OPENCV_CONFIG))

@@ -44,7 +48,7 @@ all: $(LIB) nogdb
#calls the ndk-build script, passing it OPENCV_ROOT and OPENCV_LIBS_DIR
$(LIB): $(SWIG_C_OUT) $(SOURCES) $(HEADERS) $(ANDROID_MKS)
	$(ANDROID_NDK_BASE)/ndk-build OPENCV_CONFIG=$(OPENCV_CONFIG) \
	PROJECT_PATH=$(PROJECT_PATH) ARM_TARGETS=$(ARM_TARGETS) V=$(V) $(NDK_FLAGS)

#this creates the swig wrappers

@@ -70,5 +74,5 @@ clean-swig:
#does clean-swig and then uses the ndk-build clean
clean: clean-swig
	$(ANDROID_NDK_BASE)/ndk-build OPENCV_CONFIG=$(OPENCV_CONFIG) \
	PROJECT_PATH=$(PROJECT_PATH) clean ARM_TARGETS=$(ARM_TARGETS) V=$(V) $(NDK_FLAGS)

@@ -1,2 +1,2 @@
APP_ABI := $(ARM_TARGETS)
APP_MODULES := android-opencv

@@ -7,255 +7,240 @@
#include "Calibration.h"
#include <sys/stat.h>

using namespace cv;

Calibration::Calibration() :
  patternsize(6, 8)
{
}

Calibration::~Calibration()
{
}

namespace
{
double computeReprojectionErrors(const vector<vector<Point3f> >& objectPoints,
                                 const vector<vector<Point2f> >& imagePoints, const vector<Mat>& rvecs,
                                 const vector<Mat>& tvecs, const Mat& cameraMatrix, const Mat& distCoeffs,
                                 vector<float>& perViewErrors)
{
  vector<Point2f> imagePoints2;
  int i, totalPoints = 0;
  double totalErr = 0, err;
  perViewErrors.resize(objectPoints.size());

  for (i = 0; i < (int)objectPoints.size(); i++)
  {
    projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix, distCoeffs, imagePoints2);
    err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L1);
    int n = (int)objectPoints[i].size();
    perViewErrors[i] = err / n;
    totalErr += err;
    totalPoints += n;
  }

  return totalErr / totalPoints;
}

void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
{
  corners.resize(0);

  for (int i = 0; i < boardSize.height; i++)
    for (int j = 0; j < boardSize.width; j++)
      corners.push_back(Point3f(float(j * squareSize), float(i * squareSize), 0));
}

/**from opencv/samples/cpp/calibration.cpp
 *
 */
bool runCalibration(vector<vector<Point2f> > imagePoints, Size imageSize, Size boardSize, float squareSize,
                    float aspectRatio, int flags, Mat& cameraMatrix, Mat& distCoeffs, vector<Mat>& rvecs,
                    vector<Mat>& tvecs, vector<float>& reprojErrs, double& totalAvgErr)
{
  cameraMatrix = Mat::eye(3, 3, CV_64F);
  if (flags & CV_CALIB_FIX_ASPECT_RATIO)
    cameraMatrix.at<double> (0, 0) = aspectRatio;

  distCoeffs = Mat::zeros(4, 1, CV_64F);

  vector<vector<Point3f> > objectPoints(1);
  calcChessboardCorners(boardSize, squareSize, objectPoints[0]);
  for (size_t i = 1; i < imagePoints.size(); i++)
    objectPoints.push_back(objectPoints[0]);

  calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, flags);

  bool ok = checkRange(cameraMatrix, CV_CHECK_QUIET) && checkRange(distCoeffs, CV_CHECK_QUIET);

  totalAvgErr
      = computeReprojectionErrors(objectPoints, imagePoints, rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);

  return ok;
}

void saveCameraParams(const string& filename, Size imageSize, Size boardSize, float squareSize, float aspectRatio,
                      int flags, const Mat& cameraMatrix, const Mat& distCoeffs, const vector<Mat>& rvecs,
                      const vector<Mat>& tvecs, const vector<float>& reprojErrs,
                      const vector<vector<Point2f> >& imagePoints, double totalAvgErr)
{
  FileStorage fs(filename, FileStorage::WRITE);

  time_t t;
  time(&t);
  struct tm *t2 = localtime(&t);
  char buf[1024];
  strftime(buf, sizeof(buf) - 1, "%c", t2);

  fs << "calibration_time" << buf;

  if (!rvecs.empty() || !reprojErrs.empty())
    fs << "nframes" << (int)std::max(rvecs.size(), reprojErrs.size());
  fs << "image_width" << imageSize.width;
  fs << "image_height" << imageSize.height;
  fs << "board_width" << boardSize.width;
  fs << "board_height" << boardSize.height;
  fs << "squareSize" << squareSize;

  if (flags & CV_CALIB_FIX_ASPECT_RATIO)
    fs << "aspectRatio" << aspectRatio;

  if (flags != 0)
  {
    sprintf(buf, "flags: %s%s%s%s",
            flags & CV_CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "",
            flags & CV_CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "",
            flags & CV_CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "",
            flags & CV_CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "");
    cvWriteComment(*fs, buf, 0);
  }

  fs << "flags" << flags;

  fs << "camera_matrix" << cameraMatrix;
  fs << "distortion_coefficients" << distCoeffs;

  fs << "avg_reprojection_error" << totalAvgErr;
  if (!reprojErrs.empty())
    fs << "per_view_reprojection_errors" << Mat(reprojErrs);

  if (!rvecs.empty() && !tvecs.empty())
  {
    Mat bigmat(rvecs.size(), 6, CV_32F);
    for (size_t i = 0; i < rvecs.size(); i++)
    {
      Mat r = bigmat(Range(i, i + 1), Range(0, 3));
      Mat t = bigmat(Range(i, i + 1), Range(3, 6));
      rvecs[i].copyTo(r);
      tvecs[i].copyTo(t);
    }
    cvWriteComment(*fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0);
    fs << "extrinsic_parameters" << bigmat;
  }

  if (!imagePoints.empty())
  {
    Mat imagePtMat(imagePoints.size(), imagePoints[0].size(), CV_32FC2);
    for (size_t i = 0; i < imagePoints.size(); i++)
    {
      Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
      Mat(imagePoints[i]).copyTo(r);
    }
    fs << "image_points" << imagePtMat;
  }
}
}//anon namespace

bool Calibration::detectAndDrawChessboard(int idx, image_pool* pool)
{
  Mat grey = pool->getGrey(idx);
  if (grey.empty())
    return false;
  vector<Point2f> corners;

  IplImage iplgrey = grey;
  if (!cvCheckChessboard(&iplgrey, patternsize))
    return false;
  bool patternfound = findChessboardCorners(grey, patternsize, corners);

  Mat img = pool->getImage(idx);

  if (corners.size() < 1)
    return false;

  cornerSubPix(grey, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

  if (patternfound)
    imagepoints.push_back(corners);

  drawChessboardCorners(img, patternsize, Mat(corners), patternfound);

  imgsize = grey.size();

  return patternfound;
}

void Calibration::drawText(int i, image_pool* pool, const char* ctext)
{
  // Use "y" to show that the baseLine is about
  string text = ctext;
  int fontFace = FONT_HERSHEY_COMPLEX_SMALL;
  double fontScale = .8;
  int thickness = 1;

  Mat img = pool->getImage(i);

  int baseline = 0;
  Size textSize = getTextSize(text, fontFace, fontScale, thickness, &baseline);
  baseline += thickness;

  // center the text
  Point textOrg((img.cols - textSize.width) / 2, (img.rows - textSize.height * 2));

  // draw the box
  rectangle(img, textOrg + Point(0, baseline), textOrg + Point(textSize.width, -textSize.height), Scalar(0, 0, 255),
            CV_FILLED);
  // ... and the baseline first
  line(img, textOrg + Point(0, thickness), textOrg + Point(textSize.width, thickness), Scalar(0, 0, 255));

  // then put the text itself
  putText(img, text, textOrg, fontFace, fontScale, Scalar::all(255), thickness, 8);
}

void Calibration::resetChess()
{
  imagepoints.clear();
}

void Calibration::calibrate(const char* filename)
{
  vector<Mat> rvecs, tvecs;
  vector<float> reprojErrs;
  double totalAvgErr = 0;
  int flags = 0;
  flags |= CV_CALIB_FIX_PRINCIPAL_POINT | CV_CALIB_FIX_ASPECT_RATIO;
  bool writeExtrinsics = true;
  bool writePoints = true;

  bool ok = runCalibration(imagepoints, imgsize, patternsize, 1.f, 1.f, flags, K, distortion, rvecs, tvecs, reprojErrs,
                           totalAvgErr);

  if (ok)
  {
    saveCameraParams(filename, imgsize, patternsize, 1.f, 1.f, flags, K, distortion,
                     writeExtrinsics ? rvecs : vector<Mat> (),
                     writeExtrinsics ? tvecs : vector<Mat> (),
                     writeExtrinsics ? reprojErrs : vector<float> (),
                     writePoints ? imagepoints : vector<vector<Point2f> > (), totalAvgErr);
  }
}

int Calibration::getNumberDetectedChessboards()
{
  return imagepoints.size();
}

@@ -14,8 +14,6 @@
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

#include "image_pool.h"

@@ -24,36 +22,33 @@
#define DETECT_STAR 1
#define DETECT_SURF 2

class Calibration
{
public:
  Calibration();
  virtual ~Calibration();

  bool detectAndDrawChessboard(int idx, image_pool* pool);
  void resetChess();
  int getNumberDetectedChessboards();
  void calibrate(const char* filename);
  void drawText(int idx, image_pool* pool, const char* text);

  cv::Size patternsize;

private:
  std::vector<cv::KeyPoint> keypoints;
  std::vector<std::vector<cv::Point2f> > imagepoints;
  cv::Mat K;
  cv::Mat distortion;
  cv::Size imgsize;
};

#endif /* PROCESSOR_H_ */

@@ -37,273 +37,286 @@ using namespace cv;
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)

static void printGLString(const char *name, GLenum s)
{
  const char *v = (const char *)glGetString(s);
  LOGI("GL %s = %s\n", name, v);
}

static void checkGlError(const char* op)
{
  for (GLint error = glGetError(); error; error = glGetError())
  {
    LOGI("after %s() glError (0x%x)\n", op, error);
  }
}

static const char gVertexShader[] = "attribute vec4 a_position; \n"
  "attribute vec2 a_texCoord; \n"
  "varying vec2 v_texCoord; \n"
  "void main() \n"
  "{ \n"
  " gl_Position = a_position; \n"
  " v_texCoord = a_texCoord; \n"
  "} \n";

static const char gFragmentShader[] = "precision mediump float; \n"
  "varying vec2 v_texCoord; \n"
  "uniform sampler2D s_texture; \n"
  "void main() \n"
  "{ \n"
  " gl_FragColor = texture2D( s_texture, v_texCoord );\n"
  "} \n";

const GLfloat gTriangleVertices[] = {0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f};

GLubyte testpixels[4 * 3] = {255, 0, 0, // Red
                             0, 255, 0, // Green
                             0, 0, 255, // Blue
                             255, 255, 0 // Yellow
                            };

GLuint glcamera::createSimpleTexture2D(GLuint _textureid, GLubyte* pixels, int width, int height, int channels)
{
  // Bind the texture
  glActiveTexture(GL_TEXTURE0);
  checkGlError("glActiveTexture");
  // Bind the texture object
  glBindTexture(GL_TEXTURE_2D, _textureid);
  checkGlError("glBindTexture");

  GLenum format;
  switch (channels)
  {
    case 3:
      format = GL_RGB;
      break;
    case 1:
      format = GL_LUMINANCE;
      break;
    case 4:
      format = GL_RGBA;
      break;
  }

  // Load the texture
  glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, pixels);
  checkGlError("glTexImage2D");

  // Set the filtering mode
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

  return _textureid;
}

GLuint glcamera::loadShader(GLenum shaderType, const char* pSource)
{
  GLuint shader = glCreateShader(shaderType);
  if (shader)
  {
    glShaderSource(shader, 1, &pSource, NULL);
    glCompileShader(shader);
    GLint compiled = 0;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
    if (!compiled)
    {
      GLint infoLen = 0;
      glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
      if (infoLen)
      {
        char* buf = (char*)malloc(infoLen);
        if (buf)
        {
          glGetShaderInfoLog(shader, infoLen, NULL, buf);
          LOGE("Could not compile shader %d:\n%s\n", shaderType, buf);
          free(buf);
        }
        glDeleteShader(shader);
        shader = 0;
      }
    }
  }
  return shader;
}

GLuint glcamera::createProgram(const char* pVertexSource, const char* pFragmentSource)
{
  GLuint vertexShader = loadShader(GL_VERTEX_SHADER, pVertexSource);
  if (!vertexShader)
  {
    return 0;
  }

  GLuint pixelShader = loadShader(GL_FRAGMENT_SHADER, pFragmentSource);
  if (!pixelShader)
  {
    return 0;
  }

  GLuint program = glCreateProgram();
  if (program)
  {
    glAttachShader(program, vertexShader);
    checkGlError("glAttachShader");
    glAttachShader(program, pixelShader);
    checkGlError("glAttachShader");
    glLinkProgram(program);
    GLint linkStatus = GL_FALSE;
    glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
    if (linkStatus != GL_TRUE)
    {
      GLint bufLength = 0;
      glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
      if (bufLength)
      {
        char* buf = (char*)malloc(bufLength);
        if (buf)
        {
          glGetProgramInfoLog(program, bufLength, NULL, buf);
          LOGE("Could not link program:\n%s\n", buf);
          free(buf);
        }
      }
      glDeleteProgram(program);
      program = 0;
    }
  }
  return program;
}

//GLuint textureID;

bool glcamera::setupGraphics(int w, int h)
{
  printGLString("Version", GL_VERSION);
  printGLString("Vendor", GL_VENDOR);
  printGLString("Renderer", GL_RENDERER);
  printGLString("Extensions", GL_EXTENSIONS);

  LOGI("setupGraphics(%d, %d)", w, h);
  gProgram = createProgram(gVertexShader, gFragmentShader);
  if (!gProgram)
  {
    LOGE("Could not create program.");
    return false;
  }
  gvPositionHandle = glGetAttribLocation(gProgram, "a_position");
  gvTexCoordHandle = glGetAttribLocation(gProgram, "a_texCoord");
  gvSamplerHandle = glGetUniformLocation(gProgram, "s_texture");

  // Use tightly packed data
  glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

  // Generate a texture object
  glGenTextures(1, &textureID);
  textureID = createSimpleTexture2D(textureID, testpixels, 2, 2, 3);

  checkGlError("glGetAttribLocation");
  LOGI("glGetAttribLocation(\"vPosition\") = %d\n", gvPositionHandle);

  glViewport(0, 0, w, h);
  checkGlError("glViewport");
  return true;
}

void glcamera::renderFrame()
{
  GLfloat vVertices[] = {-1.0f, 1.0f, 0.0f, // Position 0
                         0.0f, 0.0f, // TexCoord 0
                         -1.0f, -1.0f, 0.0f, // Position 1
                         0.0f, 1.0f, // TexCoord 1
                         1.0f, -1.0f, 0.0f, // Position 2
                         1.0f, 1.0f, // TexCoord 2
                         1.0f, 1.0f, 0.0f, // Position 3
                         1.0f, 0.0f // TexCoord 3
                        };
  GLushort indices[] = {0, 1, 2, 0, 2, 3};
  GLsizei stride = 5 * sizeof(GLfloat); // 3 for position, 2 for texture

  glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
  checkGlError("glClearColor");

  glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
  checkGlError("glClear");

  glUseProgram(gProgram);
  checkGlError("glUseProgram");

  // Load the vertex position
  glVertexAttribPointer(gvPositionHandle, 3, GL_FLOAT, GL_FALSE, stride, vVertices);
  // Load the texture coordinate
  glVertexAttribPointer(gvTexCoordHandle, 2, GL_FLOAT, GL_FALSE, stride, &vVertices[3]);

  glEnableVertexAttribArray(gvPositionHandle);
  glEnableVertexAttribArray(gvTexCoordHandle);

  // Bind the texture
  glActiveTexture(GL_TEXTURE0);
  glBindTexture(GL_TEXTURE_2D, textureID);

  // Set the sampler texture unit to 0
  glUniform1i(gvSamplerHandle, 0);

  glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices);

  //checkGlError("glVertexAttribPointer");
  //glEnableVertexAttribArray(gvPositionHandle);
  //checkGlError("glEnableVertexAttribArray");
  //glDrawArrays(GL_TRIANGLES, 0, 3);
  //checkGlError("glDrawArrays");
}

void glcamera::init(int width, int height)
{
  newimage = false;
  nimg = Mat();
  setupGraphics(width, height);
}

void glcamera::step()
{
  if (newimage && !nimg.empty())
  {
    textureID = createSimpleTexture2D(textureID, nimg.ptr<unsigned char> (0), nimg.rows, nimg.cols, nimg.channels());
    newimage = false;
  }
  renderFrame();
}

#define NEAREST_POW2(x)((int)(0.5 + std::log(x)/0.69315) )

void glcamera::setTextureImage(const Mat& img)
{
  Size size(256, 256);
  resize(img, nimg, size, 0, 0, cv::INTER_NEAREST);
  newimage = true;
}

void glcamera::drawMatToGL(int idx, image_pool* pool)
{
  Mat img = pool->getImage(idx);

  if (img.empty())
    return; //no image at input_idx!

  setTextureImage(img);
}

glcamera::glcamera() :
  newimage(false)
{
  LOGI("glcamera constructor");
}

glcamera::~glcamera()
{
  LOGI("glcamera destructor");
}

@@ -6,35 +6,34 @@
#include <GLES2/gl2ext.h>

#include "image_pool.h"

class glcamera
{
public:
  glcamera();
  ~glcamera();
  void init(int width, int height);
  void step();

  void drawMatToGL(int idx, image_pool* pool);
  void setTextureImage(const cv::Mat& img);

private:
  GLuint createSimpleTexture2D(GLuint _textureid, GLubyte* pixels, int width, int height, int channels);
  GLuint loadShader(GLenum shaderType, const char* pSource);
  GLuint createProgram(const char* pVertexSource, const char* pFragmentSource);
  bool setupGraphics(int w, int h);
  void renderFrame();

  cv::Mat nimg;
  bool newimage;
  GLuint textureID;
  GLuint gProgram;
  GLuint gvPositionHandle;
  GLuint gvTexCoordHandle;
  GLuint gvSamplerHandle;
};
#endif

@@ -5,92 +5,97 @@
#include <android/log.h>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

#define LOG_TAG "libandroid-opencv"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)

JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved)
{
  JNIEnv *env;
  LOGI("JNI_OnLoad called for opencv");
  return JNI_VERSION_1_4;
}

JNIEXPORT void JNICALL Java_com_opencv_jni_opencvJNI_addYUVtoPool(JNIEnv * env, jclass thiz, jlong ppool,
                                                                  jobject _jpool, jbyteArray jbuffer, jint jidx,
                                                                  jint jwidth, jint jheight, jboolean jgrey)
{
  int buff_height = jheight + (jheight / 2);
  Size buff_size(jwidth, buff_height);
  image_pool *pool = (image_pool *)ppool;

  Mat mat = pool->getYUV(jidx);

  if (mat.empty() || mat.size() != buff_size)
  {
    mat.create(buff_size, CV_8UC1);
  }

  jsize sz = env->GetArrayLength(jbuffer);
  uchar* buff = mat.ptr<uchar> (0);

  env->GetByteArrayRegion(jbuffer, 0, sz, (jbyte*)buff);

  pool->addYUVMat(jidx, mat);

  Mat color = pool->getImage(jidx);

  if (!jgrey)
  {
    if (color.cols != jwidth || color.rows != jheight || color.channels() != 3)
    {
      color.create(jheight, jwidth, CV_8UC3);
    }
    //doesn't work unfortunately..
    //TODO cvtColor(mat,color, CV_YCrCb2RGB);
    color_convert_common(buff, buff + jwidth * jheight, jwidth, jheight, color.ptr<uchar> (0), false);
  }

  if (jgrey)
  {
    Mat grey = pool->getGrey(jidx);
    color = grey;
  }

  pool->addImage(jidx, color);
}

image_pool::image_pool()
{
}

image_pool::~image_pool()
{
  __android_log_print(ANDROID_LOG_INFO, "image_pool", "destructor called");
}

Mat image_pool::getImage(int i)
{
  return imagesmap[i];
}

Mat image_pool::getGrey(int i)
{
  Mat tm = yuvImagesMap[i];
  if (tm.empty())
    return tm;
  return tm(Range(0, tm.rows * (2.0f / 3)), Range::all());
}

Mat image_pool::getYUV(int i)
{
  return yuvImagesMap[i];
}

void image_pool::addYUVMat(int i, Mat mat)
{
  yuvImagesMap[i] = mat;
}

void image_pool::addImage(int i, Mat mat)
{
  imagesmap[i] = mat;
}

@@ -1,12 +1,14 @@
#ifndef IMAGE_POOL_H_ANDROID_KDJFKJ
#define IMAGE_POOL_H_ANDROID_KDJFKJ

#include <opencv2/core/core.hpp>
#include <map>

#if ANDROID
#include <jni.h>

#ifdef __cplusplus
extern "C"
{
#endif

JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved);

@@ -15,48 +17,48 @@ JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved);
// JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_);

JNIEXPORT void JNICALL Java_com_opencv_jni_opencvJNI_addYUVtoPool(JNIEnv *, jclass, jlong, jobject, jbyteArray, jint,
                                                                  jint, jint, jboolean);

#ifdef __cplusplus
}
#endif
#endif

class image_pool
{
public:
  image_pool();
  ~image_pool();
  cv::Mat getImage(int i);
  cv::Mat getGrey(int i);
  cv::Mat getYUV(int i);

  int getCount()
  {
    return imagesmap.size();
  }

  /** Adds a mat at the given index - will not do a deep copy, just images[i] = mat
   *
   */
  void addImage(int i, cv::Mat mat);

  /** this function stores the given matrix in the yuvImagesMap. Also,
   * after this call getGrey will work, as the grey image is just the
   * Y plane, the top two-thirds of the YUV mat.
   *
   * \param i index to store yuv image at
   * \param mat the yuv matrix to store
   */
  void addYUVMat(int i, cv::Mat mat);

  // int addYUV(uchar* buffer, int size, int width, int height, bool grey,int idx);
  //
  // void getBitmap(int * outintarray, int size, int idx);
private:
  std::map<int, cv::Mat> imagesmap;
  std::map<int, cv::Mat> yuvImagesMap;
};
#endif

@@ -46,10 +46,8 @@ public:
~image_pool();

Mat getImage(int i);

void addImage(int i, Mat mat);

@@ -1,98 +1,80 @@
#include <string.h>
#include <jni.h>
#include <yuv420sp2rgb.h>

/*
 YUV 4:2:0 image with a plane of 8 bit Y samples followed by an interleaved
 U/V plane containing 8 bit 2x2 subsampled chroma samples.
 except the interleave order of U and V is reversed.

 H V
 Y Sample Period 1 1
 U (Cb) Sample Period 2 2
 V (Cr) Sample Period 2 2
 */

/*
 size of a char:
 find . -name limits.h -exec grep CHAR_BIT {} \;
 */

#ifndef max
#define max(a,b) ({typeof(a) _a = (a); typeof(b) _b = (b); _a > _b ? _a : _b; })
#define min(a,b) ({typeof(a) _a = (a); typeof(b) _b = (b); _a < _b ? _a : _b; })
#endif

const int bytes_per_pixel = 2;

void color_convert_common(unsigned char *pY, unsigned char *pUV, int width, int height, unsigned char *buffer,
                          int grey)
{
  int i, j;
  int nR, nG, nB;
  int nY, nU, nV;
  unsigned char *out = buffer;
  int offset = 0;

  if (grey)
  {
    memcpy(out, pY, width * height * sizeof(unsigned char));
  }
  else
    // YUV 4:2:0
    for (i = 0; i < height; i++)
    {
      for (j = 0; j < width; j++)
      {
        nY = *(pY + i * width + j);
        nV = *(pUV + (i / 2) * width + bytes_per_pixel * (j / 2));
        nU = *(pUV + (i / 2) * width + bytes_per_pixel * (j / 2) + 1);

        // Yuv Convert
        nY -= 16;
        nU -= 128;
        nV -= 128;

        if (nY < 0)
          nY = 0;

        // nR = (int)(1.164 * nY + 2.018 * nU);
        // nG = (int)(1.164 * nY - 0.813 * nV - 0.391 * nU);
        // nB = (int)(1.164 * nY + 1.596 * nV);

        nB = (int)(1192 * nY + 2066 * nU);
        nG = (int)(1192 * nY - 833 * nV - 400 * nU);
        nR = (int)(1192 * nY + 1634 * nV);

        nR = min(262143, max(0, nR));
        nG = min(262143, max(0, nG));
        nB = min(262143, max(0, nB));

        nR >>= 10;
        nR &= 0xff;
        nG >>= 10;
        nG &= 0xff;
        nB >>= 10;
        nB &= 0xff;

        out[offset++] = (unsigned char)nR;
        out[offset++] = (unsigned char)nG;
        out[offset++] = (unsigned char)nB;
      }
    }
}
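
The integer coefficients are the usual BT.601 constants scaled by 1024 (1.164*1024 is roughly 1192, 1.596*1024 roughly 1634, and so on, matching the commented-out floating-point version). A minimal Java transliteration of the per-pixel math, handy for sanity-checking the C loop:

public final class Yuv420spPixel {
	/** Converts one YUV sample triple to packed 0xRRGGBB using the same
	 *  10-bit fixed-point coefficients as color_convert_common above. */
	public static int toRgb(int y, int u, int v) {
		int nY = Math.max(0, y - 16);
		int nU = u - 128;
		int nV = v - 128;
		int r = clamp(1192 * nY + 1634 * nV);
		int g = clamp(1192 * nY - 833 * nV - 400 * nU);
		int b = clamp(1192 * nY + 2066 * nU);
		// shift out the 10 fractional bits and pack the three channels
		return ((r >> 10) & 0xff) << 16 | ((g >> 10) & 0xff) << 8 | ((b >> 10) & 0xff);
	}

	private static int clamp(int x) {
		// 262143 == (255 << 10) | 1023, the max value before the >> 10
		return Math.min(262143, Math.max(0, x));
	}
}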

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout
xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:orientation="vertical"
android:gravity="center_vertical|center_horizontal">
<TextView android:scrollbars="vertical" android:id="@+id/calibtext" android:text="" android:layout_width="wrap_content"
android:layout_height="wrap_content" android:padding="20dip"/>
</LinearLayout>

@ -0,0 +1,40 @@
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout
xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:orientation="vertical"
android:gravity="center_vertical|center_horizontal">
<TextView android:text="@string/settings_text" android:autoLink="web" android:layout_width="wrap_content"
android:layout_height="wrap_content" android:padding="20dip"/>
<LinearLayout android:id="@+id/LinearLayout01"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:gravity="center_vertical">
<TextView android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="@string/image_size_prompt"/>
<Spinner android:id="@+id/image_size"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:saveEnabled="true"
android:prompt="@string/image_size_prompt"
android:entries="@array/image_sizes">
</Spinner>
</LinearLayout>
<LinearLayout android:id="@+id/LinearLayout02"
android:layout_width="wrap_content" android:layout_height="wrap_content"
android:gravity="center_vertical">
<TextView android:layout_width="wrap_content"
android:layout_height="wrap_content" android:text="@string/camera_mode_prompt"/>
<Spinner android:id="@+id/camera_mode"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:saveEnabled="true"
android:prompt="@string/camera_mode_prompt"
android:entries="@array/camera_mode">
</Spinner>
</LinearLayout>
</LinearLayout>

@ -0,0 +1,40 @@
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout
xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:orientation="vertical"
android:gravity="center_vertical|center_horizontal">
<TextView android:text="@string/patterntext" android:autoLink="web" android:layout_width="wrap_content"
android:layout_height="wrap_content" android:padding="20dip"/>
<LinearLayout android:id="@+id/LinearLayout01"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:gravity="center_vertical">
<TextView android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="Corners in width direction:"/>
<Spinner android:id="@+id/rows"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:saveEnabled="true"
android:prompt="@string/chesspromptx"
android:entries="@array/chesssizes">
</Spinner>
</LinearLayout>
<LinearLayout android:id="@+id/LinearLayout02"
android:layout_width="wrap_content" android:layout_height="wrap_content"
android:gravity="center_vertical">
<TextView android:layout_width="wrap_content"
android:layout_height="wrap_content" android:text="Corners in height direction:"/>
<Spinner android:id="@+id/cols"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:saveEnabled="true"
android:prompt="@string/chessprompty"
android:entries="@array/chesssizes">
</Spinner>
</LinearLayout>
</LinearLayout>

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<declare-styleable name="CameraParams">
<attr name="preview_width" format="integer"/>
<attr name="preview_height" format="integer"/>
</declare-styleable>
</resources>

@ -0,0 +1,20 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string-array name="chesssizes">
<item>3</item>
<item>4</item>
<item>5</item>
<item>6</item>
<item>7</item>
<item>8</item>
<item>9</item>
<item>10</item>
<item>11</item>
<item>12</item>
<item>13</item>
</string-array>
<string name="chesspromptx">
Choose the width:</string>
<string name="chessprompty">
Choose the height:</string>
</resources>

@ -0,0 +1,20 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string-array name="image_sizes">
<item>320x240</item>
<item>400x300</item>
<item>600x400</item>
<item>800x600</item>
<item>1000x800</item>
</string-array>
<string-array name="camera_mode">
<item>color</item>
<item>BW</item>
</string-array>
<string name="image_size_prompt">
Image Size:\n(may not be exact)
</string>
<string name="camera_mode_prompt">
Camera Mode:
</string>
</resources>

@ -0,0 +1,19 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="app_name">Calibration</string>
<string name="patternsize">Pattern Size</string>
<string name="patterntext">Please choose the width and height (number of inside corners) of the checker
board pattern you will be using for calibration. Default is 6 by 8 corners. You may find a checkerboard pattern at
http://opencv.willowgarage.com/pattern</string>
<string name="patternlink">http://opencv.willowgarage.com/pattern</string>
<string name="camera_settings_label">Camera Settings</string>
<string name="settings_text">Change the camera settings</string>
<string name="calibration_service_started">Calibration calculations have started...</string>
<string name="calibration_service_stopped">Calibration calculations has stopped.</string>
<string name="calibration_service_finished">Calibration finished, you camera is calibrated.</string>
<string name="calibration_service_label">Calibration</string>
<string name="calibration_not_enough">Please capture atleast 10 images of the pattern!</string>
</resources>

@ -6,3 +6,4 @@ OPENCV_CONFIG=../build/android-opencv.mk
#you can download the ndk from http://www.crystax.net/android/ndk-r4.php #you can download the ndk from http://www.crystax.net/android/ndk-r4.php
ANDROID_NDK_ROOT=$(HOME)/android-ndk-r4-crystax ANDROID_NDK_ROOT=$(HOME)/android-ndk-r4-crystax
ARM_TARGETS=armeabi armeabi-v7a

@ -0,0 +1,47 @@
package com.opencv.calibration;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import android.app.Activity;
import android.os.Bundle;
import android.text.method.ScrollingMovementMethod;
import android.util.Log;
import android.widget.TextView;
import com.opencv.R;
public class CalibrationViewer extends Activity {

	@Override
	protected void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);
		setContentView(R.layout.calibrationviewer);

		Bundle extras = getIntent().getExtras();
		String filename = extras.getString("calibfile");
		if (filename != null) {
			TextView text = (TextView) findViewById(R.id.calibtext);
			text.setMovementMethod(new ScrollingMovementMethod());
			try {
				BufferedReader reader = new BufferedReader(new FileReader(filename));
				while (reader.ready()) {
					text.append(reader.readLine() + "\n");
				}
				reader.close();
			} catch (FileNotFoundException e) {
				Log.e("opencv", "could not open calibration file at: " + filename);
			} catch (IOException e) {
				Log.e("opencv", "error reading file: " + filename);
			}
		}
	}
}
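
The activity expects the file path in a "calibfile" string extra, exactly as CalibrationService.doneNotification() below supplies it. A minimal launch sketch (the path argument is a hypothetical example):

import android.content.Context;
import android.content.Intent;
import com.opencv.calibration.CalibrationViewer;

public class ViewerLauncher {
	/** Opens CalibrationViewer on a saved calibration file; "calibfile"
	 *  is the extra key that onCreate() above reads. */
	public static void show(Context ctx, String calibPath) {
		Intent intent = new Intent(ctx, CalibrationViewer.class);
		intent.putExtra("calibfile", calibPath);
		ctx.startActivity(intent);
	}
}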

@ -0,0 +1,75 @@
package com.opencv.calibration;
import com.opencv.R;
import com.opencv.jni.Size;
import android.app.Activity;
import android.content.Context;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.os.Bundle;
import android.view.View;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemSelectedListener;
import android.widget.Spinner;
public class ChessBoardChooser extends Activity {
public static final String CHESS_SIZE = "chess_size";
public static final int DEFAULT_WIDTH = 6;
public static final int DEFAULT_HEIGHT = 8;
public static final int LOWEST = 3;
class DimChooser implements OnItemSelectedListener {
private String dim;
public DimChooser(String dim) {
this.dim = dim;
}
@Override
public void onItemSelected(AdapterView<?> arg0, View arg1, int pos,
long arg3) {
SharedPreferences settings = getSharedPreferences(CHESS_SIZE, 0);
Editor editor = settings.edit();
editor.putInt(dim, pos + LOWEST);
editor.commit();
}
@Override
public void onNothingSelected(AdapterView<?> arg0) {
}
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.chesssizer);
// Restore preferences
SharedPreferences settings = getSharedPreferences(CHESS_SIZE, 0);
int width = settings.getInt("width", DEFAULT_WIDTH);
int height = settings.getInt("height", DEFAULT_HEIGHT);
Spinner wspin, hspin;
wspin = (Spinner) findViewById(R.id.rows);
hspin = (Spinner) findViewById(R.id.cols);
wspin.setSelection(width - LOWEST);
hspin.setSelection(height - LOWEST);
wspin.setOnItemSelectedListener(new DimChooser("width"));
hspin.setOnItemSelectedListener(new DimChooser("height"));
}
public static Size getPatternSize(Context ctx) {
SharedPreferences settings = ctx.getSharedPreferences(CHESS_SIZE, 0);
int width = settings.getInt("width", DEFAULT_WIDTH);
int height = settings.getInt("height", DEFAULT_HEIGHT);
return new Size(width, height);
}
}
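
A usage sketch for getPatternSize(); the getWidth()/getHeight() accessors on the SWIG-generated com.opencv.jni.Size are assumed here, following the usual SWIG convention for public member fields:

import android.content.Context;
import com.opencv.calibration.ChessBoardChooser;
import com.opencv.jni.Size;

public class PatternSizeExample {
	/** Reads the board size the user picked in ChessBoardChooser; falls
	 *  back to the 6x8 default when nothing has been chosen yet. */
	public static int cornerCount(Context ctx) {
		Size pattern = ChessBoardChooser.getPatternSize(ctx);
		return pattern.getWidth() * pattern.getHeight();
	}
}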

@ -0,0 +1,166 @@
package com.opencv.calibration.services;
import java.io.File;
import java.io.IOException;
import android.app.Notification;
import android.app.NotificationManager;
import android.app.PendingIntent;
import android.app.Service;
import android.content.Intent;
import android.os.Binder;
import android.os.IBinder;
import android.util.Log;
import android.widget.Toast;
import com.opencv.R;
import com.opencv.calibration.CalibrationViewer;
import com.opencv.calibration.Calibrator;
import com.opencv.calibration.Calibrator.CalibrationCallback;
public class CalibrationService extends Service implements CalibrationCallback {
Class<?> activity;
int icon;
File calibration_file;
public void startCalibrating(Class<?> activitycaller,int icon_id, Calibrator calibrator, File calibration_file)
throws IOException {
activity = activitycaller;
icon = icon_id;
// Display a notification about us starting. We put an icon in the
// status bar.
showNotification();
this.calibration_file = calibration_file;
calibrator.setCallback(this);
calibrator.calibrate(calibration_file);
}
private NotificationManager mNM;
/**
* Class for clients to access. Because we know this service always runs in
* the same process as its clients, we don't need to deal with IPC.
*/
public class CalibrationServiceBinder extends Binder {
public CalibrationService getService() {
return CalibrationService.this;
}
}
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
Log.i("LocalService", "Received start id " + startId + ": " + intent);
// We want this service to continue running until it is explicitly
// stopped, so return sticky.
return START_NOT_STICKY;
}
@Override
public void onCreate() {
mNM = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
}
@Override
public void onDestroy() {
// Cancel the persistent notification.
// mNM.cancel(R.string.calibration_service_started);
// Tell the user we stopped.
Toast.makeText(this, R.string.calibration_service_finished,
Toast.LENGTH_SHORT).show();
}
private final IBinder mBinder = new CalibrationServiceBinder();
@Override
public IBinder onBind(Intent intent) {
return mBinder;
}
/**
* Show a notification while this service is running.
*/
private void showNotification() {
// In this sample, we'll use the same text for the ticker and the
// expanded notification
CharSequence text = getText(R.string.calibration_service_started);
// Set the icon, scrolling text and timestamp
Notification notification = new Notification(icon, text,
System.currentTimeMillis());
// The PendingIntent to launch our activity if the user selects this
// notification
PendingIntent contentIntent = PendingIntent.getActivity(this, 0,
new Intent(this, activity), 0);
// Set the info for the views that show in the notification panel.
notification.setLatestEventInfo(this,
getText(R.string.calibration_service_label), text,
contentIntent);
notification.defaults |= Notification.DEFAULT_SOUND;
// Send the notification.
// We use a layout id because it is a unique number. We use it later to
// cancel.
mNM.notify(R.string.calibration_service_started, notification);
}
/**
* Show a notification while this service is running.
*/
private void doneNotification() {
// In this sample, we'll use the same text for the ticker and the
// expanded notification
CharSequence text = getText(R.string.calibration_service_finished);
// Set the icon, scrolling text and timestamp
Notification notification = new Notification(icon, text,
System.currentTimeMillis());
Intent intent = new Intent(this,CalibrationViewer.class);
intent.putExtra("calibfile", calibration_file.getAbsolutePath());
// The PendingIntent to launch our activity if the user selects this
// notification
PendingIntent contentIntent = PendingIntent.getActivity(this, 0,
intent, 0);
// Set the info for the views that show in the notification panel.
notification.setLatestEventInfo(this,
getText(R.string.calibration_service_label), text,
contentIntent);
notification.defaults |= Notification.DEFAULT_SOUND;
// Send the notification.
// We use a layout id because it is a unique number. We use it later to
// cancel.
mNM.notify(R.string.calibration_service_started, notification);
}
@Override
public void onFoundChessboard(Calibrator calibrator) {
// TODO Auto-generated method stub
}
@Override
public void onDoneCalibration(Calibrator calibration, File calibfile) {
doneNotification();
stopSelf();
}
@Override
public void onFailedChessboard(Calibrator calibrator) {
// TODO Auto-generated method stub
}
}
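
Since the binder above is a local one, an activity can bind to CalibrationService and kick off calibration directly. A minimal sketch under stated assumptions: the Calibrator instance, the R.drawable.icon resource, and the output file name are illustrative, not part of this commit.

import java.io.File;
import java.io.IOException;

import android.app.Activity;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.ServiceConnection;
import android.os.Bundle;
import android.os.IBinder;
import android.util.Log;

public class CalibrationStarter extends Activity {
	private Calibrator calibrator; // assumed to be constructed elsewhere

	private final ServiceConnection connection = new ServiceConnection() {
		public void onServiceConnected(ComponentName name, IBinder binder) {
			CalibrationService service = ((CalibrationService.CalibrationServiceBinder) binder)
					.getService();
			try {
				// R.drawable.icon is a placeholder resource id
				service.startCalibrating(CalibrationStarter.class,
						R.drawable.icon, calibrator,
						new File(getFilesDir(), "calibration.yml"));
			} catch (IOException e) {
				Log.e("CalibrationStarter", "could not start calibrating", e);
			}
		}

		public void onServiceDisconnected(ComponentName name) {
		}
	};

	@Override
	protected void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);
		// start the service as well, so it outlives the binding
		startService(new Intent(this, CalibrationService.class));
		bindService(new Intent(this, CalibrationService.class), connection,
				Context.BIND_AUTO_CREATE);
	}
}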

@ -0,0 +1,166 @@
package com.opencv.camera;
import com.opencv.R;
import android.app.Activity;
import android.content.Context;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.os.Bundle;
import android.view.View;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemSelectedListener;
import android.widget.Spinner;
public class CameraConfig extends Activity {
public static final String CAMERA_SETTINGS = "CAMERA_SETTINGS";
public static final String CAMERA_MODE = "camera_mode";
public static final String IMAGE_WIDTH = "IMAGE_WIDTH";
public static final String IMAGE_HEIGHT = "IMAGE_HEIGHT";
public static final int CAMERA_MODE_BW = 0;
public static final int CAMERA_MODE_COLOR = 1;
public static int readCameraMode(Context ctx) {
// Restore preferences
SharedPreferences settings = ctx.getSharedPreferences(CAMERA_SETTINGS,
0);
int mode = settings.getInt(CAMERA_MODE, CAMERA_MODE_BW);
return mode;
}
	static public void setCameraMode(Context context, String mode) {
		int m = 0;
		if (mode.equals("BW")) {
			m = CAMERA_MODE_BW;
		} else if (mode.equals("color")) {
			m = CAMERA_MODE_COLOR;
		}
		setCameraMode(context, m);
	}
private static String sizeToString(int[] size) {
return size[0] + "x" + size[1];
}
private static void parseStrToSize(String ssize, int[] size) {
String sz[] = ssize.split("x");
size[0] = Integer.valueOf(sz[0]);
size[1] = Integer.valueOf(sz[1]);
}
public static void readImageSize(Context ctx, int[] size) {
// Restore preferences
SharedPreferences settings = ctx.getSharedPreferences(CAMERA_SETTINGS,
0);
size[0] = settings.getInt(IMAGE_WIDTH, 600);
size[1] = settings.getInt(IMAGE_HEIGHT, 600);
}
public static void setCameraMode(Context ctx, int mode) {
// Restore preferences
SharedPreferences settings = ctx.getSharedPreferences(CAMERA_SETTINGS,
0);
Editor editor = settings.edit();
editor.putInt(CAMERA_MODE, mode);
editor.commit();
}
public static void setImageSize(Context ctx, String strsize) {
int size[] = { 0, 0 };
parseStrToSize(strsize, size);
setImageSize(ctx, size[0], size[1]);
}
public static void setImageSize(Context ctx, int width, int height) {
// Restore preferences
SharedPreferences settings = ctx.getSharedPreferences(CAMERA_SETTINGS,
0);
Editor editor = settings.edit();
editor.putInt(IMAGE_WIDTH, width);
editor.putInt(IMAGE_HEIGHT, height);
editor.commit();
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.camerasettings);
int mode = readCameraMode(this);
int size[] = { 0, 0 };
readImageSize(this, size);
final Spinner size_spinner;
final Spinner mode_spinner;
size_spinner = (Spinner) findViewById(R.id.image_size);
mode_spinner = (Spinner) findViewById(R.id.camera_mode);
String strsize = sizeToString(size);
String strmode = modeToString(mode);
		String sizes[] = getResources().getStringArray(R.array.image_sizes);
		for (int i = 0; i < sizes.length; i++) {
			if (sizes[i].equals(strsize)) {
				size_spinner.setSelection(i);
				break;
			}
		}

		String modes[] = getResources().getStringArray(R.array.camera_mode);
		for (int i = 0; i < modes.length; i++) {
			if (modes[i].equals(strmode)) {
				mode_spinner.setSelection(i);
				break;
			}
		}
size_spinner.setOnItemSelectedListener(new OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> arg0, View spinner,
int position, long arg3) {
Object o = size_spinner.getItemAtPosition(position);
if (o != null)
setImageSize(spinner.getContext(), (String) o);
}
@Override
public void onNothingSelected(AdapterView<?> arg0) {
}
});
mode_spinner.setOnItemSelectedListener(new OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> arg0, View spinner,
int position, long arg3) {
Object o = mode_spinner.getItemAtPosition(position);
if (o != null)
setCameraMode(spinner.getContext(), (String) o);
}
@Override
public void onNothingSelected(AdapterView<?> arg0) {
}
});
}
private String modeToString(int mode) {
switch (mode) {
case CAMERA_MODE_BW:
return "BW";
case CAMERA_MODE_COLOR:
return "color";
default:
return "";
}
}
}
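
The settings above are plain SharedPreferences, so any component in the process can read them back without going through this activity. A short sketch of the round trip (the 640x480 values are just an example):

// write: persist a 640x480, black-and-white preview configuration
CameraConfig.setImageSize(context, 640, 480);
CameraConfig.setCameraMode(context, CameraConfig.CAMERA_MODE_BW);

// read: essentially what NativePreviewer.setParamsFromPrefs does internally
int size[] = { 0, 0 };
CameraConfig.readImageSize(context, size);
int mode = CameraConfig.readCameraMode(context);
boolean grayscale = (mode == CameraConfig.CAMERA_MODE_BW);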

@ -22,35 +22,44 @@ import com.opencv.camera.NativeProcessor.PoolCallback;

public class NativePreviewer extends SurfaceView implements
		SurfaceHolder.Callback, Camera.PreviewCallback, NativeProcessorCallback {

	/**
	 * Constructor useful for defining a NativePreviewer in android layout xml
	 *
	 * @param context
	 * @param attributes
	 */
	public NativePreviewer(Context context, AttributeSet attributes) {
		super(context, attributes);
		listAllCameraMethods();
		// Install a SurfaceHolder.Callback so we get notified when the
		// underlying surface is created and destroyed.
		mHolder = getHolder();
		mHolder.addCallback(this);
		mHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);

		/* TODO get this working! Can't figure out how to define these in xml
		 */
		preview_width = attributes.getAttributeIntValue("opencv",
				"preview_width", 600);
		preview_height = attributes.getAttributeIntValue("opencv",
				"preview_height", 600);

		Log.d("NativePreviewer", "Trying to use preview size of "
				+ preview_width + " " + preview_height);

		processor = new NativeProcessor();
		setZOrderMediaOverlay(false);
	}

	/**
	 *
	 * @param context
	 * @param preview_width the desired camera preview width - will attempt
	 *            to get as close to this as possible
	 * @param preview_height the desired camera preview height
	 */
	public NativePreviewer(Context context, int preview_width,
			int preview_height) {
		super(context);
		listAllCameraMethods();
		// Install a SurfaceHolder.Callback so we get notified when the

@ -63,62 +72,38 @@ public class NativePreviewer extends SurfaceView implements
		this.preview_height = preview_height;
		processor = new NativeProcessor();
		setZOrderMediaOverlay(false);
	}

	/**
	 * Only call in the onCreate function of the instantiating activity
	 *
	 * @param width desired width
	 * @param height desired height
	 */
	public void setPreviewSize(int width, int height) {
		preview_width = width;
		preview_height = height;

		Log.d("NativePreviewer", "Trying to use preview size of "
				+ preview_width + " " + preview_height);
	}

	public void setParamsFromPrefs(Context ctx) {
		int size[] = { 0, 0 };
		CameraConfig.readImageSize(ctx, size);
		int mode = CameraConfig.readCameraMode(ctx);
		setPreviewSize(size[0], size[1]);
		setGrayscale(mode == CameraConfig.CAMERA_MODE_BW ? true : false);
	}

	public void surfaceCreated(SurfaceHolder holder) {
	}

	public void surfaceDestroyed(SurfaceHolder holder) {
		releaseCamera();
	}

	public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) {
		try {

@ -128,44 +113,48 @@ public class NativePreviewer extends SurfaceView implements
			e.printStackTrace();
			return;
		}

		// Now that the size is known, set up the camera parameters and begin
		// the preview.
		Camera.Parameters parameters = mCamera.getParameters();
		List<Camera.Size> pvsizes = mCamera.getParameters()
				.getSupportedPreviewSizes();
		int best_width = 1000000;
		int best_height = 1000000;
		int bdist = 100000;
		for (Size x : pvsizes) {
			if (Math.abs(x.width - preview_width) < bdist) {
				bdist = Math.abs(x.width - preview_width);
				best_width = x.width;
				best_height = x.height;
			}
		}
		preview_width = best_width;
		preview_height = best_height;

		Log.d("NativePreviewer", "Determined compatible preview size is: ("
				+ preview_width + "," + preview_height + ")");

		List<String> fmodes = mCamera.getParameters().getSupportedFocusModes();
		int idx = fmodes.indexOf(Camera.Parameters.FOCUS_MODE_INFINITY);
		if (idx != -1) {
			parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_INFINITY);
		} else if (fmodes.indexOf(Camera.Parameters.FOCUS_MODE_FIXED) != -1) {
			parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_FIXED);
		}

		if (fmodes.indexOf(Camera.Parameters.FOCUS_MODE_AUTO) != -1) {
			hasAutoFocus = true;
		}

		List<String> scenemodes = mCamera.getParameters()
				.getSupportedSceneModes();
		if (scenemodes != null)
			if (scenemodes.indexOf(Camera.Parameters.SCENE_MODE_STEADYPHOTO) != -1) {
				parameters
						.setSceneMode(Camera.Parameters.SCENE_MODE_STEADYPHOTO);
			}

		parameters.setPreviewSize(preview_width, preview_height);

@ -194,68 +183,83 @@ public class NativePreviewer extends SurfaceView implements
		mCamera.startPreview();
	}

	public void postautofocus(int delay) {
		if (hasAutoFocus)
			handler.postDelayed(autofocusrunner, delay);
	}

	/**
	 * Demonstration of how to use onPreviewFrame. In this case I'm not
	 * processing the data, I'm just adding the buffer back to the buffer queue
	 * for re-use
	 */
	public void onPreviewFrame(byte[] data, Camera camera) {
		if (start == null) {
			start = new Date();
		}

		processor.post(data, preview_width, preview_height, pixelformat,
				System.nanoTime(), this);

		fcount++;
		if (fcount % 100 == 0) {
			double ms = (new Date()).getTime() - start.getTime();
			Log.i("NativePreviewer", "fps:" + fcount / (ms / 1000.0));
			start = new Date();
			fcount = 0;
		}
	}

	@Override
	public void onDoneNativeProcessing(byte[] buffer) {
		addCallbackBuffer(buffer);
	}

	public void addCallbackStack(LinkedList<PoolCallback> callbackstack) {
		processor.addCallbackStack(callbackstack);
	}

	/**
	 * This must be called when the activity pauses, in Activity.onPause.
	 * This has the side effect of clearing the callback stack.
	 */
	public void onPause() {
		releaseCamera();
		addCallbackStack(null);
		processor.stop();
	}

	public void onResume() {
		processor.start();
	}

	private Method mPCWB;

	private void initForPCWB() {
		try {
			mPCWB = Class.forName("android.hardware.Camera").getMethod(
					"setPreviewCallbackWithBuffer", PreviewCallback.class);
		} catch (Exception e) {
			Log.e("NativePreviewer",
					"Problem setting up for setPreviewCallbackWithBuffer: "
							+ e.toString());
		}
	}

	/**

@ -274,27 +278,10 @@ public class NativePreviewer extends SurfaceView implements
		try {
			mAcb.invoke(mCamera, b);
		} catch (Exception e) {
			Log.e("NativePreviewer",
					"invoking addCallbackBuffer failed: " + e.toString());
		}
	}

	/**

@ -321,7 +308,8 @@ public class NativePreviewer extends SurfaceView implements
		}
	}

	@SuppressWarnings("unused")
	private void clearPreviewCallbackWithBuffer() {
		// mCamera.setPreviewCallback(this);
		// return;
		try {

@ -341,69 +329,117 @@ public class NativePreviewer extends SurfaceView implements
		}
	}

	/**
	 * These variables are re-used over and over by addCallbackBuffer
	 */
	private Method mAcb;

	private void initForACB() {
		try {
			mAcb = Class.forName("android.hardware.Camera").getMethod(
					"addCallbackBuffer", byte[].class);
		} catch (Exception e) {
			Log.e("NativePreviewer",
					"Problem setting up for addCallbackBuffer: " + e.toString());
		}
	}

	private Runnable autofocusrunner = new Runnable() {
		@Override
		public void run() {
			mCamera.autoFocus(autocallback);
		}
	};

	private Camera.AutoFocusCallback autocallback = new Camera.AutoFocusCallback() {
		@Override
		public void onAutoFocus(boolean success, Camera camera) {
			if (!success)
				postautofocus(1000);
		}
	};

	/**
	 * This method will list all methods of the android.hardware.Camera class,
	 * even the hidden ones. With the information it provides, you can use the
	 * same approach I took below to expose methods that were written but
	 * hidden in eclair
	 */
	private void listAllCameraMethods() {
		try {
			Class<?> c = Class.forName("android.hardware.Camera");
			Method[] m = c.getMethods();
			for (int i = 0; i < m.length; i++) {
				Log.d("NativePreviewer", " method:" + m[i].toString());
			}
		} catch (Exception e) {
			Log.e("NativePreviewer", e.toString());
		}
	}

	private void initCamera(SurfaceHolder holder) throws InterruptedException {
		if (mCamera == null) {
			// The Surface has been created, acquire the camera and tell it
			// where to draw.
			int i = 0;
			while (i++ < 5) {
				try {
					mCamera = Camera.open();
					break;
				} catch (RuntimeException e) {
					Thread.sleep(200);
				}
			}
			try {
				mCamera.setPreviewDisplay(holder);
			} catch (IOException exception) {
				mCamera.release();
				mCamera = null;
			} catch (RuntimeException e) {
				Log.e("camera", "stacktrace", e);
			}
		}
	}

	private void releaseCamera() {
		if (mCamera != null) {
			// Surface will be destroyed when we return, so stop the preview.
			// Because the CameraDevice object is not a shared resource, it's
			// very important to release it when the activity is paused.
			mCamera.stopPreview();
			mCamera.release();
		}
		// processor = null;
		mCamera = null;
		mAcb = null;
		mPCWB = null;
	}

	private Handler handler = new Handler();
	private Date start;
	private int fcount = 0;
	private boolean hasAutoFocus = false;
	private SurfaceHolder mHolder;
	private Camera mCamera;
	private NativeProcessor processor;
	private int preview_width, preview_height;
	private int pixelformat;
	private PixelFormat pixelinfo;

	public void setGrayscale(boolean b) {
		processor.setGrayscale(b);
	}
}
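
From the hosting activity's point of view, the lifecycle contract above boils down to forwarding onPause/onResume and installing a callback stack. A minimal sketch, assuming an activity that owns the preview; DrawCallback is a hypothetical PoolCallback implementation, not part of this commit.

import java.util.LinkedList;

import android.app.Activity;
import android.os.Bundle;

import com.opencv.camera.NativeProcessor.PoolCallback;

public class PreviewActivity extends Activity {
	private NativePreviewer preview;

	@Override
	protected void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);
		preview = new NativePreviewer(getApplication(), 640, 480);
		setContentView(preview);
	}

	@Override
	protected void onResume() {
		super.onResume();
		preview.setParamsFromPrefs(this); // pick up CameraConfig settings
		preview.onResume(); // restarts the NativeProcessor thread
		LinkedList<PoolCallback> stack = new LinkedList<PoolCallback>();
		stack.add(new DrawCallback()); // hypothetical callback
		preview.addCallbackStack(stack);
	}

	@Override
	protected void onPause() {
		super.onPause();
		preview.onPause(); // releases the camera and clears the callback stack
	}
}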

@ -11,8 +11,153 @@ import android.util.Log;
import com.opencv.jni.image_pool;
import com.opencv.jni.opencv;
/**
 * The NativeProcessor is a native processing stack engine.
 *
 * What this means is that the NativeProcessor handles loading live camera
 * frames into native memory space, i.e. the image_pool, and then calls a
 * stack of PoolCallback's, passing them the image_pool.
 *
 * The image_pool index 0 is populated with the live video image.
 *
 * Any modifications to the pool are made in place, so you may pass on
 * changes to the pool to the next PoolCallback in the stack.
 */
public class NativeProcessor {
	/**
	 * Users that would like access to live video frames should implement a
	 * PoolCallback. The idx and pool arguments locate the images;
	 * specifically, idx == 0 is the live video frame.
	 */
static public interface PoolCallback {
void process(int idx, image_pool pool, long timestamp,
NativeProcessor nativeProcessor);
}
	/**
	 * At every frame, each PoolCallback is called in order and is passed
	 * the same pool and index.
	 *
	 * @param stack A list of PoolCallback objects, that will be called in order
	 */
public void addCallbackStack(LinkedList<PoolCallback> stack) {
try {
while (!stacklock.tryLock(10, TimeUnit.MILLISECONDS)) {
}
try {
nextStack = stack;
} finally {
stacklock.unlock();
}
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
/**
* Create a NativeProcessor. The processor will not start running until
* start is called, at which point it will operate in its own thread and
* sleep until a post is called. The processor should not be started until
	 * an onSurfaceChange event, and should be shut down when the surface is
	 * destroyed, by calling stop().
*
*/
public NativeProcessor() {
gray_scale_only = false;
}
	/**
	 * Grayscale only is much faster because the yuv does not get decoded, and
	 * grayscale is only one byte per pixel - giving fast opengl texture
	 * loading.
	 *
	 * You still have access to the whole yuv image, but only grayscale is
	 * immediately available to use without further effort.
	 *
	 * Suggestion - use grayscale only, and save your yuv images to disk if
	 * you would like color images.
	 *
	 * Also, in grayscale mode the images in the pool are single channel, so
	 * please keep this in mind when accessing the images - check
	 * cv::Mat::channels() or cv::Mat::type() if you're working with color
	 * channels.
	 *
	 * @param grayscale true if you want to only process grayscale images
	 */
public void setGrayscale(boolean grayscale){
gray_scale_only = grayscale;
}
/**
* A callback that allows the NativeProcessor to pass back the buffer when
* it has completed processing a frame.
*/
static protected interface NativeProcessorCallback {
/**
		 * Called after processing, meant to be received by the NativePreviewer,
		 * which reuses the byte buffer for the camera preview...
*
* @param buffer
* the buffer passed to the NativeProcessor with post.
*/
void onDoneNativeProcessing(byte[] buffer);
}
protected void stop() {
mthread.interrupt();
try {
mthread.join();
} catch (InterruptedException e) {
Log.w("NativeProcessor",
"interupted while stoping " + e.getMessage());
}
mthread = null;
}
protected void start() {
mthread = new ProcessorThread();
mthread.start();
}
	/**
	 * post is used to notify the processor that a preview frame is ready; it
	 * returns almost immediately. The frame is queued and handled on the
	 * processor thread.
	 *
	 * @param buffer
	 *            a preview frame from the Android Camera onPreviewFrame
	 *            callback
	 * @param width
	 *            of preview frame
	 * @param height
	 *            of preview frame
	 * @param format
	 *            of preview frame
	 * @return true once the frame has been queued (the current implementation
	 *         always accepts the post)
	 */
protected boolean post(byte[] buffer, int width, int height, int format,
long timestamp, NativeProcessorCallback callback) {
lock.lock();
try {
NPPostObject pobj = new NPPostObject(buffer, width, height, format,
timestamp, callback);
postobjects.addFirst(pobj);
} finally {
lock.unlock();
}
return true;
}
	private class ProcessorThread extends Thread {

		private void process(NPPostObject pobj) throws Exception {

@ -20,7 +165,7 @@ public class NativeProcessor {
			if (pobj.format == PixelFormat.YCbCr_420_SP) {
				// add as color image, because we know how to decode this
				opencv.addYUVtoPool(pool, pobj.buffer, 0, pobj.width,
						pobj.height, gray_scale_only);
			} else if (pobj.format == PixelFormat.YCbCr_422_SP) {
				// add as gray image, because this format is not coded

@ -31,7 +176,6 @@ public class NativeProcessor {
			} else
				throw new Exception("bad pixel format!");

			for (PoolCallback x : stack) {
				if (interrupted()) {
					throw new InterruptedException(

@ -39,11 +183,9 @@ public class NativeProcessor {
				}
				x.process(0, pool, pobj.timestamp, NativeProcessor.this);
			}

			pobj.done(); // tell the postobject that we're done doing
			// all the processing.
		}

@ -53,8 +195,8 @@ public class NativeProcessor {
			try {
				while (true) {
					yield();
					while (!stacklock.tryLock(5, TimeUnit.MILLISECONDS)) {
					}
					try {
						if (nextStack != null) {

@ -64,25 +206,26 @@ public class NativeProcessor {
					} finally {
						stacklock.unlock();
					}

					NPPostObject pobj = null;

					while (!lock.tryLock(5, TimeUnit.MILLISECONDS)) {
					}
					try {
						if (postobjects.isEmpty())
							continue;
						pobj = postobjects.removeLast();
					} finally {
						lock.unlock();
					}

					if (interrupted())
						throw new InterruptedException();

					if (stack != null && pobj != null)
						process(pobj);
				}
			} catch (InterruptedException e) {

@ -99,102 +242,10 @@ public class NativeProcessor {
			}
		}
	}

	static private class NPPostObject {
		public NPPostObject(byte[] buffer, int width, int height, int format,
				long timestamp, NativeProcessorCallback callback) {
			this.buffer = buffer;
			this.width = width;
			this.height = height;

@ -215,6 +266,7 @@ public class NativeProcessor {
		NativeProcessorCallback callback;
	}

	private LinkedList<NPPostObject> postobjects = new LinkedList<NPPostObject>();

	private image_pool pool = new image_pool();

@ -222,20 +274,12 @@ public class NativeProcessor {
	private final Lock lock = new ReentrantLock();

	private LinkedList<PoolCallback> stack = new LinkedList<PoolCallback>();

	private boolean gray_scale_only;

	private Lock stacklock = new ReentrantLock();

	private LinkedList<PoolCallback> nextStack;

	private ProcessorThread mthread;
}
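
To close the loop, a PoolCallback is the piece user code supplies: each frame it receives the pool index and timestamp. A minimal fragment that only logs frame arrival (it assumes a NativePreviewer named previewer created in onCreate, plus the usual imports; pixel access goes through the image_pool SWIG wrapper defined elsewhere in this commit, so it is not shown here):

LinkedList<NativeProcessor.PoolCallback> stack = new LinkedList<NativeProcessor.PoolCallback>();
stack.add(new NativeProcessor.PoolCallback() {
	public void process(int idx, image_pool pool, long timestamp,
			NativeProcessor nativeProcessor) {
		// idx == 0 is the live frame; in grayscale mode it is single channel
		android.util.Log.d("PoolCallbackDemo", "frame at idx " + idx
				+ ", t=" + timestamp);
	}
});
previewer.addCallbackStack(stack);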