move tutorials and samples to main repo

pull/3708/head
Authored by AleksandrPanov 1 year ago, committed by Alex
parent b042744ae4
commit a4a04c7c0f
45 changed files (figures are changed line counts; BIN = binary file):
  1. modules/aruco/include/opencv2/aruco.hpp (3)
  2. modules/aruco/include/opencv2/aruco/aruco_calib.hpp (23)
  3. modules/aruco/include/opencv2/aruco/charuco.hpp (2)
  4. modules/aruco/perf/perf_main.cpp (3)
  5. modules/aruco/perf/perf_precomp.hpp (9)
  6. modules/aruco/samples/aruco_samples_utility.hpp (48)
  7. modules/aruco/samples/calibrate_camera.cpp (230)
  8. modules/aruco/samples/calibrate_camera_charuco.cpp (250)
  9. modules/aruco/samples/create_board.cpp (132)
  10. modules/aruco/samples/create_board_charuco.cpp (132)
  11. modules/aruco/samples/create_diamond.cpp (117)
  12. modules/aruco/samples/detect_board.cpp (216)
  13. modules/aruco/samples/detect_board_charuco.cpp (223)
  14. modules/aruco/samples/detect_diamonds.cpp (233)
  15. modules/aruco/samples/detector_params.yml (30)
  16. modules/aruco/samples/tutorial_camera_charuco.yml (21)
  17. modules/aruco/samples/tutorial_camera_params.yml (14)
  18. modules/aruco/samples/tutorial_charuco_create_detect.cpp (150)
  19. modules/aruco/samples/tutorial_dict.yml (38)
  20. modules/aruco/test/test_aruco_tutorial.cpp (247)
  21. modules/aruco/test/test_aruco_utils.hpp (238)
  22. modules/aruco/test/test_main.cpp (8)
  23. modules/aruco/test/test_precomp.hpp (12)
  24. modules/aruco/tutorials/aruco_calibration/aruco_calibration.markdown (138)
  25. modules/aruco/tutorials/aruco_calibration/images/arucocalibration.png (BIN)
  26. modules/aruco/tutorials/aruco_calibration/images/charucocalibration.png (BIN)
  27. modules/aruco/tutorials/aruco_calibration/images/img_00.jpg (BIN)
  28. modules/aruco/tutorials/aruco_calibration/images/img_01.jpg (BIN)
  29. modules/aruco/tutorials/aruco_calibration/images/img_02.jpg (BIN)
  30. modules/aruco/tutorials/aruco_calibration/images/img_03.jpg (BIN)
  31. modules/aruco/tutorials/aruco_faq/aruco_faq.markdown (162)
  32. modules/aruco/tutorials/charuco_detection/charuco_detection.markdown (249)
  33. modules/aruco/tutorials/charuco_detection/images/charucoboard.png (BIN)
  34. modules/aruco/tutorials/charuco_detection/images/charucodefinition.png (BIN)
  35. modules/aruco/tutorials/charuco_detection/images/chaxis.jpg (BIN)
  36. modules/aruco/tutorials/charuco_detection/images/chcorners.jpg (BIN)
  37. modules/aruco/tutorials/charuco_detection/images/chocclusion.jpg (BIN)
  38. modules/aruco/tutorials/charuco_detection/images/chocclusion_original.jpg (BIN)
  39. modules/aruco/tutorials/charuco_detection/images/choriginal.jpg (BIN)
  40. modules/aruco/tutorials/charuco_diamond_detection/charuco_diamond_detection.markdown (182)
  41. modules/aruco/tutorials/charuco_diamond_detection/images/detecteddiamonds.png (BIN)
  42. modules/aruco/tutorials/charuco_diamond_detection/images/diamondmarker.png (BIN)
  43. modules/aruco/tutorials/charuco_diamond_detection/images/diamondmarkers.png (BIN)
  44. modules/aruco/tutorials/charuco_diamond_detection/images/diamondsaxis.jpg (BIN)
  45. modules/aruco/tutorials/table_of_content_aruco.markdown (65)

@ -52,7 +52,7 @@ CV_EXPORTS_W void getBoardObjectAndImagePoints(const Ptr<Board> &board, InputArr
InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints);
/** @deprecated Use cv::solvePnP
/** @deprecated Use Board::matchImagePoints and cv::solvePnP
*/
CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr<Board> &board,
InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec,
@ -75,6 +75,7 @@ CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, c
* This function estimates a Charuco board pose from some detected corners.
* The function checks if the input corners are enough and valid to perform pose estimation.
* If pose estimation is valid, returns true, else returns false.
* @deprecated Use CharucoBoard::matchImagePoints and cv::solvePnP
* @sa use cv::drawFrameAxes to get world coordinate system axis for object points
*/
CV_EXPORTS_W bool estimatePoseCharucoBoard(InputArray charucoCorners, InputArray charucoIds,

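A minimal sketch of the replacement flow these deprecation notes point to: detect markers, match them against the board with Board::matchImagePoints, and solve the pose with cv::solvePnP. It mirrors the pattern used in detect_board.cpp later in this diff; function and variable names here are illustrative.

```cpp
// Sketch of the suggested replacement for estimatePoseBoard()/estimatePoseCharucoBoard():
// Board::matchImagePoints + cv::solvePnP, assuming the objdetect-based aruco API.
#include <opencv2/objdetect.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>

static bool boardPoseFromImage(const cv::Mat& image,
                               const cv::aruco::ArucoDetector& detector,
                               const cv::aruco::Board& board,
                               const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                               cv::Vec3d& rvec, cv::Vec3d& tvec)
{
    std::vector<int> ids;
    std::vector<std::vector<cv::Point2f>> corners;
    detector.detectMarkers(image, corners, ids);
    if (ids.empty())
        return false;

    // Match the detected 2D corners to the board's 3D object points
    cv::Mat objPoints, imgPoints;
    board.matchImagePoints(corners, ids, objPoints, imgPoints);
    if (objPoints.empty())
        return false;

    // Standard PnP solves the board pose
    return cv::solvePnP(objPoints, imgPoints, cameraMatrix, distCoeffs, rvec, tvec);
}
```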
@ -16,7 +16,10 @@ namespace aruco {
* PatternPositionType defines center this system and axes direction.
* Axis X (red color) - first coordinate, axis Y (green color) - second coordinate,
* axis Z (blue color) - third coordinate.
* @sa estimatePoseSingleMarkers(), check tutorial_aruco_detection in aruco contrib
*
* @deprecated Use Board::matchImagePoints and cv::solvePnP
*
* @sa estimatePoseSingleMarkers()
*/
enum PatternPositionType {
/** @brief The marker coordinate system is centered on the middle of the marker.
@ -24,9 +27,6 @@ enum PatternPositionType {
* The coordinates of the four corners (CCW order) of the marker in its own coordinate system are:
* (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0),
* (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0).
*
* These pattern points define this coordinate system:
* ![Image with axes drawn](tutorials/images/singlemarkersaxes2.jpg)
*/
ARUCO_CCW_CENTER,
/** @brief The marker coordinate system is centered on the top-left corner of the marker.
@ -35,9 +35,6 @@ enum PatternPositionType {
* (0, 0, 0), (markerLength, 0, 0),
* (markerLength, markerLength, 0), (0, markerLength, 0).
*
* These pattern points define this coordinate system:
* ![Image with axes drawn](tutorials/images/singlemarkersaxes.jpg)
*
* These pattern dots are convenient to use with a chessboard/ChArUco board.
*/
ARUCO_CW_TOP_LEFT_CORNER
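The corner layouts above map directly onto object points for cv::solvePnP. A small sketch of single-marker pose estimation with the ARUCO_CCW_CENTER layout, taking the corner order and coordinates from the enum comment; the helper name and parameters are illustrative.

```cpp
// Sketch: single-marker pose with cv::solvePnP using the ARUCO_CCW_CENTER
// object-point layout documented above (marker centered at the origin).
#include <opencv2/calib3d.hpp>
#include <vector>

static void singleMarkerPose(const std::vector<cv::Point2f>& corners, float markerLength,
                             const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                             cv::Vec3d& rvec, cv::Vec3d& tvec)
{
    // Corner coordinates in the order listed in the enum comment
    std::vector<cv::Point3f> objPoints = {
        {-markerLength / 2.f,  markerLength / 2.f, 0.f},
        { markerLength / 2.f,  markerLength / 2.f, 0.f},
        { markerLength / 2.f, -markerLength / 2.f, 0.f},
        {-markerLength / 2.f, -markerLength / 2.f, 0.f}
    };
    cv::solvePnP(objPoints, corners, cameraMatrix, distCoeffs, rvec, tvec);
}
```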
@ -50,7 +47,10 @@ enum PatternPositionType {
* rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further
* optimizes them (default false).
* @param solvePnPMethod Method for solving a PnP problem: see @ref calib3d_solvePnP_flags (default SOLVEPNP_ITERATIVE).
* @sa PatternPositionType, solvePnP(), check tutorial_aruco_detection in aruco contrib
*
* @deprecated Use Board::matchImagePoints and cv::solvePnP
*
* @sa PatternPositionType, solvePnP()
*/
struct CV_EXPORTS_W_SIMPLE EstimateParameters {
CV_PROP_RW PatternPositionType pattern;
@ -95,6 +95,8 @@ struct CV_EXPORTS_W_SIMPLE EstimateParameters {
* This function calibrates a camera using an Aruco Board. The function receives a list of
* detected markers from several views of the Board. The process is similar to the chessboard
* calibration in calibrateCamera(). The function returns the final re-projection error.
*
* @deprecated Use Board::matchImagePoints and cv::solvePnP
*/
CV_EXPORTS_AS(calibrateCameraArucoExtended)
double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr<Board> &board,
@ -105,6 +107,7 @@ double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArr
/** @overload
* @brief It's the same function as #calibrateCameraAruco but without calibration error estimation.
* @deprecated Use Board::matchImagePoints and cv::solvePnP
*/
CV_EXPORTS_W double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArray counter,
const Ptr<Board> &board, Size imageSize, InputOutputArray cameraMatrix,
@ -147,6 +150,8 @@ CV_EXPORTS_W double calibrateCameraAruco(InputArrayOfArrays corners, InputArray
* This function calibrates a camera using a set of corners of a Charuco Board. The function
* receives a list of detected corners and its identifiers from several views of the Board.
* The function returns the final re-projection error.
*
* @deprecated Use CharucoBoard::matchImagePoints and cv::solvePnP
*/
CV_EXPORTS_AS(calibrateCameraCharucoExtended)
double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds,
@ -158,6 +163,8 @@ double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArr
/**
* @brief It's the same function as #calibrateCameraCharuco but without calibration error estimation.
*
* @deprecated Use CharucoBoard::matchImagePoints and cv::solvePnP
*/
CV_EXPORTS_W double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds,
const Ptr<CharucoBoard> &board, Size imageSize,

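A minimal sketch of the calibration flow these deprecation notes point to: match image points per view with Board::matchImagePoints, then run cv::calibrateCamera, as calibrate_camera.cpp does later in this diff. Names and the per-view containers are illustrative.

```cpp
// Sketch of the suggested replacement for calibrateCameraAruco()/calibrateCameraCharuco():
// accumulate matched 3D-2D correspondences per view, then calibrate.
#include <opencv2/objdetect.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>

static double calibrateFromBoardViews(
        const cv::aruco::Board& board,
        const std::vector<std::vector<std::vector<cv::Point2f>>>& allCorners, // per view
        const std::vector<std::vector<int>>& allIds,                          // per view
        cv::Size imageSize, cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
    std::vector<cv::Mat> objectPoints, imagePoints;
    for (size_t v = 0; v < allCorners.size(); v++) {
        cv::Mat objPts, imgPts;
        board.matchImagePoints(allCorners[v], allIds[v], objPts, imgPts);
        if (objPts.total() > 0 && imgPts.total() > 0) {
            objectPoints.push_back(objPts);
            imagePoints.push_back(imgPts);
        }
    }
    // Standard calibration on the matched correspondences; returns the reprojection error
    return cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                               cameraMatrix, distCoeffs, cv::noArray(), cv::noArray());
}
```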
@ -96,6 +96,8 @@ CV_EXPORTS_W void detectCharucoDiamond(InputArray image, InputArrayOfArrays mark
* @param borderBits width of the marker borders.
*
* This function return the image of a ChArUco marker, ready to be printed.
*
* @deprecated Use CharucoBoard::generateImage()
*/
CV_EXPORTS_W void drawCharucoDiamond(const Ptr<Dictionary> &dictionary, Vec4i ids, int squareLength,
int markerLength, OutputArray img, int marginSize = 0,

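A minimal sketch of generating a diamond image with the non-deprecated CharucoBoard::generateImage(), assuming the objdetect CharucoBoard constructor that accepts explicit marker ids; the ids and pixel sizes below are illustrative.

```cpp
// Sketch: a ChArUco diamond drawn as a 3x3 ChArUco board whose four markers
// carry the given ids, then rendered with CharucoBoard::generateImage().
#include <opencv2/objdetect.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main() {
    cv::aruco::Dictionary dictionary =
        cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
    std::vector<int> diamondIds = {45, 68, 28, 74};   // illustrative ids
    int squareLength = 200, markerLength = 120;       // in pixels

    cv::aruco::CharucoBoard board(cv::Size(3, 3), (float)squareLength,
                                  (float)markerLength, dictionary, diamondIds);
    cv::Mat diamondImage;
    board.generateImage(cv::Size(3 * squareLength, 3 * squareLength), diamondImage);
    cv::imwrite("diamond.png", diamondImage);
    return 0;
}
```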
@ -1,3 +0,0 @@
#include "perf_precomp.hpp"
CV_PERF_TEST_MAIN(aruco)

@ -1,9 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#endif

@ -1,48 +0,0 @@
#include <opencv2/highgui.hpp>
#include <opencv2/objdetect/aruco_detector.hpp>
#include <opencv2/calib3d.hpp>
#include <ctime>
namespace {
inline static bool readCameraParameters(std::string filename, cv::Mat &camMatrix, cv::Mat &distCoeffs) {
cv::FileStorage fs(filename, cv::FileStorage::READ);
if (!fs.isOpened())
return false;
fs["camera_matrix"] >> camMatrix;
fs["distortion_coefficients"] >> distCoeffs;
return true;
}
inline static bool saveCameraParams(const std::string &filename, cv::Size imageSize, float aspectRatio, int flags,
const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs, double totalAvgErr) {
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
if (!fs.isOpened())
return false;
time_t tt;
time(&tt);
struct tm *t2 = localtime(&tt);
char buf[1024];
strftime(buf, sizeof(buf) - 1, "%c", t2);
fs << "calibration_time" << buf;
fs << "image_width" << imageSize.width;
fs << "image_height" << imageSize.height;
if (flags & cv::CALIB_FIX_ASPECT_RATIO) fs << "aspectRatio" << aspectRatio;
if (flags != 0) {
sprintf(buf, "flags: %s%s%s%s",
flags & cv::CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "",
flags & cv::CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "",
flags & cv::CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "",
flags & cv::CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "");
}
fs << "flags" << flags;
fs << "camera_matrix" << cameraMatrix;
fs << "distortion_coefficients" << distCoeffs;
fs << "avg_reprojection_error" << totalAvgErr;
return true;
}
}

@ -1,230 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include <ctime>
#include <iostream>
#include <vector>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include "aruco_samples_utility.hpp"
using namespace std;
using namespace cv;
namespace {
const char* about =
"Calibration using a ArUco Planar Grid board\n"
" To capture a frame for calibration, press 'c',\n"
" If input comes from video, press any key for next frame\n"
" To finish capturing, press 'ESC' key and calibration starts.\n";
const char* keys =
"{w | | Number of squares in X direction }"
"{h | | Number of squares in Y direction }"
"{l | | Marker side length (in meters) }"
"{s | | Separation between two consecutive markers in the grid (in meters) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{cd | | Input file with custom dictionary }"
"{@outfile |<none> | Output file with calibrated camera parameters }"
"{v | | Input from video file, if ommited, input comes from camera }"
"{ci | 0 | Camera id if input doesnt come from video (-v) }"
"{dp | | File of marker detector parameters }"
"{rs | false | Apply refind strategy }"
"{zt | false | Assume zero tangential distortion }"
"{a | | Fix aspect ratio (fx/fy) to this value }"
"{pc | false | Fix the principal point at the center }";
}
int main(int argc, char *argv[]) {
CommandLineParser parser(argc, argv, keys);
parser.about(about);
if(argc < 6) {
parser.printMessage();
return 0;
}
int markersX = parser.get<int>("w");
int markersY = parser.get<int>("h");
float markerLength = parser.get<float>("l");
float markerSeparation = parser.get<float>("s");
string outputFile = parser.get<String>(0);
int calibrationFlags = 0;
float aspectRatio = 1;
if(parser.has("a")) {
calibrationFlags |= CALIB_FIX_ASPECT_RATIO;
aspectRatio = parser.get<float>("a");
}
if(parser.get<bool>("zt")) calibrationFlags |= CALIB_ZERO_TANGENT_DIST;
if(parser.get<bool>("pc")) calibrationFlags |= CALIB_FIX_PRINCIPAL_POINT;
aruco::DetectorParameters detectorParams;
if(parser.has("dp")) {
FileStorage fs(parser.get<string>("dp"), FileStorage::READ);
bool readOk = detectorParams.readDetectorParameters(fs.root());
if(!readOk) {
cerr << "Invalid detector parameters file" << endl;
return 0;
}
}
bool refindStrategy = parser.get<bool>("rs");
int camId = parser.get<int>("ci");
String video;
if(parser.has("v")) {
video = parser.get<String>("v");
}
if(!parser.check()) {
parser.printErrors();
return 0;
}
VideoCapture inputVideo;
int waitTime;
if(!video.empty()) {
inputVideo.open(video);
waitTime = 0;
} else {
inputVideo.open(camId);
waitTime = 10;
}
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
if (parser.has("d")) {
int dictionaryId = parser.get<int>("d");
dictionary = aruco::getPredefinedDictionary(
aruco::PredefinedDictionaryType(dictionaryId)
);
}
else if (parser.has("cd")) {
FileStorage fs(parser.get<std::string>("cd"), FileStorage::READ);
bool readOk = dictionary.aruco::Dictionary::readDictionary(fs.root());
if(!readOk) {
cerr << "Invalid dictionary file" << endl;
return 0;
}
}
else {
cerr << "Dictionary not specified" << endl;
return 0;
}
// Create board object
aruco::GridBoard gridboard(Size(markersX, markersY), markerLength, markerSeparation, dictionary);
// Collected frames for calibration
vector<vector<vector<Point2f>>> allMarkerCorners;
vector<vector<int>> allMarkerIds;
Size imageSize;
aruco::ArucoDetector detector(dictionary, detectorParams);
while(inputVideo.grab()) {
Mat image, imageCopy;
inputVideo.retrieve(image);
vector<int> markerIds;
vector<vector<Point2f>> markerCorners, rejectedMarkers;
// Detect markers
detector.detectMarkers(image, markerCorners, markerIds, rejectedMarkers);
// Refind strategy to detect more markers
if(refindStrategy) {
detector.refineDetectedMarkers(
image, gridboard, markerCorners, markerIds, rejectedMarkers
);
}
// Draw results
image.copyTo(imageCopy);
if(!markerIds.empty()) {
aruco::drawDetectedMarkers(imageCopy, markerCorners, markerIds);
}
putText(
imageCopy, "Press 'c' to add current frame. 'ESC' to finish and calibrate",
Point(10, 20), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 0, 0), 2
);
imshow("out", imageCopy);
// Wait for key pressed
char key = (char)waitKey(waitTime);
if(key == 27) {
break;
}
if(key == 'c' && !markerIds.empty()) {
cout << "Frame captured" << endl;
allMarkerCorners.push_back(markerCorners);
allMarkerIds.push_back(markerIds);
imageSize = image.size();
}
}
if(allMarkerIds.empty()) {
cerr << "Not enough captures for calibration" << endl;
return 0;
}
Mat cameraMatrix, distCoeffs;
if(calibrationFlags & CALIB_FIX_ASPECT_RATIO) {
cameraMatrix = Mat::eye(3, 3, CV_64F);
cameraMatrix.at<double>(0, 0) = aspectRatio;
}
// Prepare data for calibration
vector<Point3f> objectPoints;
vector<Point2f> imagePoints;
vector<Mat> processedObjectPoints, processedImagePoints;
size_t nFrames = allMarkerCorners.size();
for(size_t frame = 0; frame < nFrames; frame++) {
Mat currentImgPoints, currentObjPoints;
gridboard.matchImagePoints(
allMarkerCorners[frame], allMarkerIds[frame],
currentObjPoints, currentImgPoints
);
if(currentImgPoints.total() > 0 && currentObjPoints.total() > 0) {
processedImagePoints.push_back(currentImgPoints);
processedObjectPoints.push_back(currentObjPoints);
}
}
// Calibrate camera
double repError = calibrateCamera(
processedObjectPoints, processedImagePoints, imageSize,
cameraMatrix, distCoeffs, noArray(), noArray(), noArray(),
noArray(), noArray(), calibrationFlags
);
bool saveOk = saveCameraParams(
outputFile, imageSize, aspectRatio, calibrationFlags, cameraMatrix,
distCoeffs, repError
);
if(!saveOk) {
cerr << "Cannot save output file" << endl;
return 0;
}
cout << "Rep Error: " << repError << endl;
cout << "Calibration saved to " << outputFile << endl;
return 0;
}

@ -1,250 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include <iostream>
#include <vector>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include "aruco_samples_utility.hpp"
using namespace std;
using namespace cv;
namespace {
const char* about =
"Calibration using a ChArUco board\n"
" To capture a frame for calibration, press 'c',\n"
" If input comes from video, press any key for next frame\n"
" To finish capturing, press 'ESC' key and calibration starts.\n";
const char* keys =
"{w | | Number of squares in X direction }"
"{h | | Number of squares in Y direction }"
"{sl | | Square side length (in meters) }"
"{ml | | Marker side length (in meters) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{cd | | Input file with custom dictionary }"
"{@outfile |<none> | Output file with calibrated camera parameters }"
"{v | | Input from video file, if ommited, input comes from camera }"
"{ci | 0 | Camera id if input doesnt come from video (-v) }"
"{dp | | File of marker detector parameters }"
"{rs | false | Apply refind strategy }"
"{zt | false | Assume zero tangential distortion }"
"{a | | Fix aspect ratio (fx/fy) to this value }"
"{pc | false | Fix the principal point at the center }"
"{sc | false | Show detected chessboard corners after calibration }";
}
int main(int argc, char *argv[]) {
CommandLineParser parser(argc, argv, keys);
parser.about(about);
if(argc < 7) {
parser.printMessage();
return 0;
}
int squaresX = parser.get<int>("w");
int squaresY = parser.get<int>("h");
float squareLength = parser.get<float>("sl");
float markerLength = parser.get<float>("ml");
string outputFile = parser.get<string>(0);
bool showChessboardCorners = parser.get<bool>("sc");
int calibrationFlags = 0;
float aspectRatio = 1;
if(parser.has("a")) {
calibrationFlags |= CALIB_FIX_ASPECT_RATIO;
aspectRatio = parser.get<float>("a");
}
if(parser.get<bool>("zt")) calibrationFlags |= CALIB_ZERO_TANGENT_DIST;
if(parser.get<bool>("pc")) calibrationFlags |= CALIB_FIX_PRINCIPAL_POINT;
aruco::DetectorParameters detectorParams = aruco::DetectorParameters();
if(parser.has("dp")) {
FileStorage fs(parser.get<string>("dp"), FileStorage::READ);
bool readOk = detectorParams.readDetectorParameters(fs.root());
if(!readOk) {
cerr << "Invalid detector parameters file" << endl;
return 0;
}
}
bool refindStrategy = parser.get<bool>("rs");
int camId = parser.get<int>("ci");
String video;
if(parser.has("v")) {
video = parser.get<String>("v");
}
if(!parser.check()) {
parser.printErrors();
return 0;
}
VideoCapture inputVideo;
int waitTime;
if(!video.empty()) {
inputVideo.open(video);
waitTime = 0;
} else {
inputVideo.open(camId);
waitTime = 10;
}
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
if (parser.has("d")) {
int dictionaryId = parser.get<int>("d");
dictionary = aruco::getPredefinedDictionary(aruco::PredefinedDictionaryType(dictionaryId));
}
else if (parser.has("cd")) {
FileStorage fs(parser.get<std::string>("cd"), FileStorage::READ);
bool readOk = dictionary.aruco::Dictionary::readDictionary(fs.root());
if(!readOk) {
cerr << "Invalid dictionary file" << endl;
return 0;
}
}
else {
cerr << "Dictionary not specified" << endl;
return 0;
}
// Create charuco board object
aruco::CharucoBoard board(Size(squaresX, squaresY), squareLength, markerLength, dictionary);
aruco::CharucoParameters charucoParams;
if(refindStrategy) {
charucoParams.tryRefineMarkers = true;
}
aruco::CharucoDetector detector(board, charucoParams, detectorParams);
// Collect data from each frame
vector<Mat> allCharucoCorners;
vector<Mat> allCharucoIds;
vector<vector<Point2f>> allImagePoints;
vector<vector<Point3f>> allObjectPoints;
vector<Mat> allImages;
Size imageSize;
while(inputVideo.grab()) {
Mat image, imageCopy;
inputVideo.retrieve(image);
vector<int> markerIds;
vector<vector<Point2f>> markerCorners, rejectedMarkers;
Mat currentCharucoCorners;
Mat currentCharucoIds;
vector<Point3f> currentObjectPoints;
vector<Point2f> currentImagePoints;
// Detect ChArUco board
detector.detectBoard(image, currentCharucoCorners, currentCharucoIds);
// Draw results
image.copyTo(imageCopy);
if(!markerIds.empty()) {
aruco::drawDetectedMarkers(imageCopy, markerCorners);
}
if(currentCharucoCorners.total() > 3) {
aruco::drawDetectedCornersCharuco(imageCopy, currentCharucoCorners, currentCharucoIds);
}
putText(imageCopy, "Press 'c' to add current frame. 'ESC' to finish and calibrate",
Point(10, 20), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 0, 0), 2);
imshow("out", imageCopy);
// Wait for key pressed
char key = (char)waitKey(waitTime);
if(key == 27) {
break;
}
if(key == 'c' && currentCharucoCorners.total() > 3) {
// Match image points
board.matchImagePoints(currentCharucoCorners, currentCharucoIds, currentObjectPoints, currentImagePoints);
if(currentImagePoints.empty() || currentObjectPoints.empty()) {
cout << "Point matching failed, try again." << endl;
continue;
}
cout << "Frame captured" << endl;
allCharucoCorners.push_back(currentCharucoCorners);
allCharucoIds.push_back(currentCharucoIds);
allImagePoints.push_back(currentImagePoints);
allObjectPoints.push_back(currentObjectPoints);
allImages.push_back(image);
imageSize = image.size();
}
}
if(allCharucoCorners.size() < 4) {
cerr << "Not enough corners for calibration" << endl;
return 0;
}
Mat cameraMatrix, distCoeffs;
if(calibrationFlags & CALIB_FIX_ASPECT_RATIO) {
cameraMatrix = Mat::eye(3, 3, CV_64F);
cameraMatrix.at<double>(0, 0) = aspectRatio;
}
// Calibrate camera using ChArUco
double repError = calibrateCamera(
allObjectPoints, allImagePoints, imageSize,
cameraMatrix, distCoeffs, noArray(), noArray(), noArray(),
noArray(), noArray(), calibrationFlags
);
bool saveOk = saveCameraParams(
outputFile, imageSize, aspectRatio, calibrationFlags,
cameraMatrix, distCoeffs, repError
);
if(!saveOk) {
cerr << "Cannot save output file" << endl;
return 0;
}
cout << "Rep Error: " << repError << endl;
cout << "Calibration saved to " << outputFile << endl;
// Show interpolated charuco corners for debugging
if(showChessboardCorners) {
for(size_t frame = 0; frame < allImages.size(); frame++) {
Mat imageCopy = allImages[frame].clone();
if(allCharucoCorners[frame].total() > 0) {
aruco::drawDetectedCornersCharuco(
imageCopy, allCharucoCorners[frame], allCharucoIds[frame]
);
}
imshow("out", imageCopy);
char key = (char)waitKey(0);
if(key == 27) {
break;
}
}
}
return 0;
}

@ -1,132 +0,0 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#include <opencv2/highgui.hpp>
#include <opencv2/objdetect/aruco_detector.hpp>
#include <iostream>
#include "aruco_samples_utility.hpp"
using namespace cv;
namespace {
const char* about = "Create an ArUco grid board image";
const char* keys =
"{@outfile |<none> | Output image }"
"{w | | Number of markers in X direction }"
"{h | | Number of markers in Y direction }"
"{l | | Marker side length (in pixels) }"
"{s | | Separation between two consecutive markers in the grid (in pixels)}"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{cd | | Input file with custom dictionary }"
"{m | | Margins size (in pixels). Default is marker separation (-s) }"
"{bb | 1 | Number of bits in marker borders }"
"{si | false | show generated image }";
}
int main(int argc, char *argv[]) {
CommandLineParser parser(argc, argv, keys);
parser.about(about);
if(argc < 7) {
parser.printMessage();
return 0;
}
int markersX = parser.get<int>("w");
int markersY = parser.get<int>("h");
int markerLength = parser.get<int>("l");
int markerSeparation = parser.get<int>("s");
int margins = markerSeparation;
if(parser.has("m")) {
margins = parser.get<int>("m");
}
int borderBits = parser.get<int>("bb");
bool showImage = parser.get<bool>("si");
String out = parser.get<String>(0);
if(!parser.check()) {
parser.printErrors();
return 0;
}
Size imageSize;
imageSize.width = markersX * (markerLength + markerSeparation) - markerSeparation + 2 * margins;
imageSize.height =
markersY * (markerLength + markerSeparation) - markerSeparation + 2 * margins;
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
if (parser.has("d")) {
int dictionaryId = parser.get<int>("d");
dictionary = aruco::getPredefinedDictionary(aruco::PredefinedDictionaryType(dictionaryId));
}
else if (parser.has("cd")) {
FileStorage fs(parser.get<std::string>("cd"), FileStorage::READ);
bool readOk = dictionary.aruco::Dictionary::readDictionary(fs.root());
if(!readOk)
{
std::cerr << "Invalid dictionary file" << std::endl;
return 0;
}
}
else {
std::cerr << "Dictionary not specified" << std::endl;
return 0;
}
aruco::GridBoard board(Size(markersX, markersY), float(markerLength), float(markerSeparation), dictionary);
// show created board
Mat boardImage;
board.generateImage(imageSize, boardImage, margins, borderBits);
if(showImage) {
imshow("board", boardImage);
waitKey(0);
}
imwrite(out, boardImage);
return 0;
}

@ -1,132 +0,0 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#include <opencv2/highgui.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <iostream>
#include "aruco_samples_utility.hpp"
using namespace cv;
namespace {
const char* about = "Create a ChArUco board image";
//! [charuco_detect_board_keys]
const char* keys =
"{@outfile |<none> | Output image }"
"{w | | Number of squares in X direction }"
"{h | | Number of squares in Y direction }"
"{sl | | Square side length (in pixels) }"
"{ml | | Marker side length (in pixels) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{cd | | Input file with custom dictionary }"
"{m | | Margins size (in pixels). Default is (squareLength-markerLength) }"
"{bb | 1 | Number of bits in marker borders }"
"{si | false | show generated image }";
}
//! [charuco_detect_board_keys]
int main(int argc, char *argv[]) {
CommandLineParser parser(argc, argv, keys);
parser.about(about);
if(argc < 7) {
parser.printMessage();
return 0;
}
int squaresX = parser.get<int>("w");
int squaresY = parser.get<int>("h");
int squareLength = parser.get<int>("sl");
int markerLength = parser.get<int>("ml");
int margins = squareLength - markerLength;
if(parser.has("m")) {
margins = parser.get<int>("m");
}
int borderBits = parser.get<int>("bb");
bool showImage = parser.get<bool>("si");
String out = parser.get<String>(0);
if(!parser.check()) {
parser.printErrors();
return 0;
}
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
if (parser.has("d")) {
int dictionaryId = parser.get<int>("d");
dictionary = aruco::getPredefinedDictionary(aruco::PredefinedDictionaryType(dictionaryId));
}
else if (parser.has("cd")) {
FileStorage fs(parser.get<std::string>("cd"), FileStorage::READ);
bool readOk = dictionary.aruco::Dictionary::readDictionary(fs.root());
if(!readOk) {
std::cerr << "Invalid dictionary file" << std::endl;
return 0;
}
}
else {
std::cerr << "Dictionary not specified" << std::endl;
return 0;
}
Size imageSize;
imageSize.width = squaresX * squareLength + 2 * margins;
imageSize.height = squaresY * squareLength + 2 * margins;
aruco::CharucoBoard board(Size(squaresX, squaresY), (float)squareLength, (float)markerLength, dictionary);
// show created board
Mat boardImage;
board.generateImage(imageSize, boardImage, margins, borderBits);
if(showImage) {
imshow("board", boardImage);
waitKey(0);
}
imwrite(out, boardImage);
return 0;
}

@ -1,117 +0,0 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#include <opencv2/highgui.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <vector>
#include <iostream>
using namespace std;
using namespace cv;
namespace {
const char* about = "Create a ChArUco marker image";
const char* keys =
"{@outfile |<none> | Output image }"
"{sl | | Square side length (in pixels) }"
"{ml | | Marker side length (in pixels) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{ids |<none> | Four ids for the ChArUco marker: id1,id2,id3,id4 }"
"{m | 0 | Margins size (in pixels) }"
"{bb | 1 | Number of bits in marker borders }"
"{si | false | show generated image }";
}
/**
*/
int main(int argc, char *argv[]) {
CommandLineParser parser(argc, argv, keys);
parser.about(about);
if(argc < 4) {
parser.printMessage();
return 0;
}
int squareLength = parser.get<int>("sl");
int markerLength = parser.get<int>("ml");
int dictionaryId = parser.get<int>("d");
string idsString = parser.get<string>("ids");
int margins = parser.get<int>("m");
int borderBits = parser.get<int>("bb");
bool showImage = parser.get<bool>("si");
String out = parser.get<String>(0);
if(!parser.check()) {
parser.printErrors();
return 0;
}
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(aruco::PredefinedDictionaryType(dictionaryId));
istringstream ss(idsString);
vector< string > splittedIds;
string token;
while(getline(ss, token, ','))
splittedIds.push_back(token);
if(splittedIds.size() < 4) {
cerr << "Incorrect ids format" << endl;
parser.printMessage();
return 0;
}
Vec4i ids;
for(int i = 0; i < 4; i++)
ids[i] = atoi(splittedIds[i].c_str());
Mat markerImg;
aruco::drawCharucoDiamond(makePtr<aruco::Dictionary>(dictionary), ids, squareLength, markerLength, markerImg, margins,
borderBits);
if(showImage) {
imshow("board", markerImg);
waitKey(0);
}
imwrite(out, markerImg);
return 0;
}

@ -1,216 +0,0 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#include <opencv2/highgui.hpp>
#include <opencv2/aruco.hpp>
#include <vector>
#include <iostream>
#include "aruco_samples_utility.hpp"
using namespace std;
using namespace cv;
namespace {
const char* about = "Pose estimation using a ArUco Planar Grid board";
//! [aruco_detect_board_keys]
const char* keys =
"{w | | Number of squares in X direction }"
"{h | | Number of squares in Y direction }"
"{l | | Marker side length (in pixels) }"
"{s | | Separation between two consecutive markers in the grid (in pixels)}"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{cd | | Input file with custom dictionary }"
"{c | | Output file with calibrated camera parameters }"
"{v | | Input from video or image file, if omitted, input comes from camera }"
"{ci | 0 | Camera id if input doesnt come from video (-v) }"
"{dp | | File of marker detector parameters }"
"{rs | | Apply refind strategy }"
"{r | | show rejected candidates too }";
}
//! [aruco_detect_board_keys]
int main(int argc, char *argv[]) {
CommandLineParser parser(argc, argv, keys);
parser.about(about);
if(argc < 7) {
parser.printMessage();
return 0;
}
int markersX = parser.get<int>("w");
int markersY = parser.get<int>("h");
float markerLength = parser.get<float>("l");
float markerSeparation = parser.get<float>("s");
bool showRejected = parser.has("r");
bool refindStrategy = parser.has("rs");
int camId = parser.get<int>("ci");
Mat camMatrix, distCoeffs;
if(parser.has("c")) {
bool readOk = readCameraParameters(parser.get<string>("c"), camMatrix, distCoeffs);
if(!readOk) {
cerr << "Invalid camera file" << endl;
return 0;
}
}
aruco::DetectorParameters detectorParams;
if(parser.has("dp")) {
FileStorage fs(parser.get<string>("dp"), FileStorage::READ);
bool readOk = detectorParams.readDetectorParameters(fs.root());
if(!readOk) {
cerr << "Invalid detector parameters file" << endl;
return 0;
}
}
detectorParams.cornerRefinementMethod = aruco::CORNER_REFINE_SUBPIX; // do corner refinement in markers
String video;
if(parser.has("v")) {
video = parser.get<String>("v");
}
if(!parser.check()) {
parser.printErrors();
return 0;
}
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
if (parser.has("d")) {
int dictionaryId = parser.get<int>("d");
dictionary = aruco::getPredefinedDictionary(aruco::PredefinedDictionaryType(dictionaryId));
}
else if (parser.has("cd")) {
FileStorage fs(parser.get<std::string>("cd"), FileStorage::READ);
bool readOk = dictionary.aruco::Dictionary::readDictionary(fs.root());
if(!readOk) {
cerr << "Invalid dictionary file" << endl;
return 0;
}
}
else {
cerr << "Dictionary not specified" << endl;
return 0;
}
aruco::ArucoDetector detector(dictionary, detectorParams);
VideoCapture inputVideo;
int waitTime;
if(!video.empty()) {
inputVideo.open(video);
waitTime = 0;
} else {
inputVideo.open(camId);
waitTime = 10;
}
float axisLength = 0.5f * ((float)min(markersX, markersY) * (markerLength + markerSeparation) +
markerSeparation);
// create board object
Ptr<aruco::GridBoard> gridboard = new aruco::GridBoard(Size(markersX, markersY), markerLength, markerSeparation, dictionary);
Ptr<aruco::Board> board = gridboard.staticCast<aruco::Board>();
double totalTime = 0;
int totalIterations = 0;
while(inputVideo.grab()) {
Mat image, imageCopy;
inputVideo.retrieve(image);
double tick = (double)getTickCount();
vector< int > ids;
vector< vector< Point2f > > corners, rejected;
Vec3d rvec, tvec;
// detect markers
detector.detectMarkers(image, corners, ids, rejected);
// refind strategy to detect more markers
if(refindStrategy)
detector.refineDetectedMarkers(image, *board, corners, ids, rejected, camMatrix,
distCoeffs);
// estimate board pose
int markersOfBoardDetected = 0;
if(!ids.empty()) {
// Get object and image points for the solvePnP function
cv::Mat objPoints, imgPoints;
board->matchImagePoints(corners, ids, objPoints, imgPoints);
// Find pose
cv::solvePnP(objPoints, imgPoints, camMatrix, distCoeffs, rvec, tvec);
markersOfBoardDetected = (int)objPoints.total() / 4;
}
double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
totalTime += currentTime;
totalIterations++;
if(totalIterations % 30 == 0) {
cout << "Detection Time = " << currentTime * 1000 << " ms "
<< "(Mean = " << 1000 * totalTime / double(totalIterations) << " ms)" << endl;
}
// draw results
image.copyTo(imageCopy);
if(!ids.empty()) {
aruco::drawDetectedMarkers(imageCopy, corners, ids);
}
if(showRejected && !rejected.empty())
aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255));
if(markersOfBoardDetected > 0)
cv::drawFrameAxes(imageCopy, camMatrix, distCoeffs, rvec, tvec, axisLength);
imshow("out", imageCopy);
char key = (char)waitKey(waitTime);
if(key == 27) break;
}
return 0;
}

@ -1,223 +0,0 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#include <opencv2/highgui.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <vector>
#include <iostream>
#include "aruco_samples_utility.hpp"
using namespace std;
using namespace cv;
namespace {
const char* about = "Pose estimation using a ChArUco board";
const char* keys =
"{w | | Number of squares in X direction }"
"{h | | Number of squares in Y direction }"
"{sl | | Square side length (in meters) }"
"{ml | | Marker side length (in meters) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{cd | | Input file with custom dictionary }"
"{c | | Output file with calibrated camera parameters }"
"{v | | Input from video or image file, if ommited, input comes from camera }"
"{ci | 0 | Camera id if input doesnt come from video (-v) }"
"{dp | | File of marker detector parameters }"
"{rs | | Apply refind strategy }"
"{r | | show rejected candidates too }";
}
int main(int argc, char *argv[]) {
CommandLineParser parser(argc, argv, keys);
parser.about(about);
if(argc < 6) {
parser.printMessage();
return 0;
}
int squaresX = parser.get<int>("w");
int squaresY = parser.get<int>("h");
float squareLength = parser.get<float>("sl");
float markerLength = parser.get<float>("ml");
bool showRejected = parser.has("r");
bool refindStrategy = parser.has("rs");
int camId = parser.get<int>("ci");
String video;
if(parser.has("v")) {
video = parser.get<String>("v");
}
Mat camMatrix, distCoeffs;
if(parser.has("c")) {
bool readOk = readCameraParameters(parser.get<string>("c"), camMatrix, distCoeffs);
if(!readOk) {
cerr << "Invalid camera file" << endl;
return 0;
}
}
Ptr<aruco::DetectorParameters> detectorParams = makePtr<aruco::DetectorParameters>();
if(parser.has("dp")) {
FileStorage fs(parser.get<string>("dp"), FileStorage::READ);
bool readOk = detectorParams->readDetectorParameters(fs.root());
if(!readOk) {
cerr << "Invalid detector parameters file" << endl;
return 0;
}
}
if(!parser.check()) {
parser.printErrors();
return 0;
}
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
if (parser.has("d")) {
int dictionaryId = parser.get<int>("d");
dictionary = aruco::getPredefinedDictionary(aruco::PredefinedDictionaryType(dictionaryId));
}
else if (parser.has("cd")) {
FileStorage fs(parser.get<std::string>("cd"), FileStorage::READ);
bool readOk = dictionary.aruco::Dictionary::readDictionary(fs.root());
if(!readOk) {
cerr << "Invalid dictionary file" << endl;
return 0;
}
}
else {
cerr << "Dictionary not specified" << endl;
return 0;
}
VideoCapture inputVideo;
int waitTime;
if(!video.empty()) {
inputVideo.open(video);
waitTime = 0;
} else {
inputVideo.open(camId);
waitTime = 10;
}
float axisLength = 0.5f * ((float)min(squaresX, squaresY) * (squareLength));
// create charuco board object
Ptr<aruco::CharucoBoard> charucoboard = new aruco::CharucoBoard(Size(squaresX, squaresY), squareLength, markerLength, dictionary);
Ptr<aruco::Board> board = charucoboard.staticCast<aruco::Board>();
double totalTime = 0;
int totalIterations = 0;
while(inputVideo.grab()) {
Mat image, imageCopy;
inputVideo.retrieve(image);
double tick = (double)getTickCount();
vector< int > markerIds, charucoIds;
vector< vector< Point2f > > markerCorners, rejectedMarkers;
vector< Point2f > charucoCorners;
Vec3d rvec, tvec;
// detect markers
aruco::detectMarkers(image, makePtr<aruco::Dictionary>(dictionary), markerCorners, markerIds, detectorParams,
rejectedMarkers);
// refind strategy to detect more markers
if(refindStrategy)
aruco::refineDetectedMarkers(image, board, markerCorners, markerIds, rejectedMarkers,
camMatrix, distCoeffs);
// interpolate charuco corners
int interpolatedCorners = 0;
if(markerIds.size() > 0)
interpolatedCorners =
aruco::interpolateCornersCharuco(markerCorners, markerIds, image, charucoboard,
charucoCorners, charucoIds, camMatrix, distCoeffs);
// estimate charuco board pose
bool validPose = false;
if(camMatrix.total() != 0)
validPose = estimatePoseCharucoBoard(charucoCorners, charucoIds, charucoboard,
camMatrix, distCoeffs, rvec, tvec);
double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
totalTime += currentTime;
totalIterations++;
if(totalIterations % 30 == 0) {
cout << "Detection Time = " << currentTime * 1000 << " ms "
<< "(Mean = " << 1000 * totalTime / double(totalIterations) << " ms)" << endl;
}
// draw results
image.copyTo(imageCopy);
if(markerIds.size() > 0) {
aruco::drawDetectedMarkers(imageCopy, markerCorners);
}
if(showRejected && rejectedMarkers.size() > 0)
aruco::drawDetectedMarkers(imageCopy, rejectedMarkers, noArray(), Scalar(100, 0, 255));
if(interpolatedCorners > 0) {
Scalar color;
color = Scalar(255, 0, 0);
aruco::drawDetectedCornersCharuco(imageCopy, charucoCorners, charucoIds, color);
}
if(validPose)
cv::drawFrameAxes(imageCopy, camMatrix, distCoeffs, rvec, tvec, axisLength);
imshow("out", imageCopy);
char key = (char)waitKey(waitTime);
if(key == 27) break;
}
return 0;
}

@ -1,233 +0,0 @@
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#include <opencv2/highgui.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <vector>
#include <iostream>
#include "aruco_samples_utility.hpp"
using namespace std;
using namespace cv;
namespace {
const char* about = "Detect ChArUco markers";
const char* keys =
"{sl | | Square side length (in meters) }"
"{ml | | Marker side length (in meters) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{cd | | Input file with custom dictionary }"
"{c | | Output file with calibrated camera parameters }"
"{as | | Automatic scale. The provided number is multiplied by the last"
"diamond id becoming an indicator of the square length. In this case, the -sl and "
"-ml are only used to know the relative length relation between squares and markers }"
"{v | | Input from video file, if ommited, input comes from camera }"
"{ci | 0 | Camera id if input doesnt come from video (-v) }"
"{dp | | File of marker detector parameters }"
"{rs | | Apply refind strategy }"
"{refine | | Corner refinement: CORNER_REFINE_NONE=0, CORNER_REFINE_SUBPIX=1,"
"CORNER_REFINE_CONTOUR=2, CORNER_REFINE_APRILTAG=3}"
"{r | | show rejected candidates too }";
}
int main(int argc, char *argv[]) {
CommandLineParser parser(argc, argv, keys);
parser.about(about);
if(argc < 4) {
parser.printMessage();
return 0;
}
float squareLength = parser.get<float>("sl");
float markerLength = parser.get<float>("ml");
bool showRejected = parser.has("r");
bool estimatePose = parser.has("c");
bool autoScale = parser.has("as");
float autoScaleFactor = autoScale ? parser.get<float>("as") : 1.f;
Ptr<aruco::DetectorParameters> detectorParams = makePtr<aruco::DetectorParameters>();
if(parser.has("dp")) {
FileStorage fs(parser.get<string>("dp"), FileStorage::READ);
bool readOk = detectorParams->readDetectorParameters(fs.root());
if(!readOk) {
cerr << "Invalid detector parameters file" << endl;
return 0;
}
}
if (parser.has("refine")) {
//override cornerRefinementMethod read from config file
detectorParams->cornerRefinementMethod = parser.get<aruco::CornerRefineMethod>("refine");
}
std::cout << "Corner refinement method (0: None, 1: Subpixel, 2:contour, 3: AprilTag 2): " << (int)detectorParams->cornerRefinementMethod << std::endl;
int camId = parser.get<int>("ci");
String video;
if(parser.has("v")) {
video = parser.get<String>("v");
}
if(!parser.check()) {
parser.printErrors();
return 0;
}
aruco::Dictionary dictionary = aruco::getPredefinedDictionary(0);
if (parser.has("d")) {
int dictionaryId = parser.get<int>("d");
dictionary = aruco::getPredefinedDictionary(aruco::PredefinedDictionaryType(dictionaryId));
}
else if (parser.has("cd")) {
FileStorage fs(parser.get<std::string>("cd"), FileStorage::READ);
bool readOk = dictionary.aruco::Dictionary::readDictionary(fs.root());
if(!readOk) {
cerr << "Invalid dictionary file" << endl;
return 0;
}
}
else {
cerr << "Dictionary not specified" << endl;
return 0;
}
Mat camMatrix, distCoeffs;
if(estimatePose) {
bool readOk = readCameraParameters(parser.get<string>("c"), camMatrix, distCoeffs);
if(!readOk) {
cerr << "Invalid camera file" << endl;
return 0;
}
}
VideoCapture inputVideo;
int waitTime;
if(!video.empty()) {
inputVideo.open(video);
waitTime = 0;
} else {
inputVideo.open(camId);
waitTime = 10;
}
double totalTime = 0;
int totalIterations = 0;
while(inputVideo.grab()) {
Mat image, imageCopy;
inputVideo.retrieve(image);
double tick = (double)getTickCount();
vector< int > markerIds;
vector< Vec4i > diamondIds;
vector< vector< Point2f > > markerCorners, rejectedMarkers, diamondCorners;
vector< Vec3d > rvecs, tvecs;
// detect markers
aruco::detectMarkers(image, makePtr<aruco::Dictionary>(dictionary), markerCorners, markerIds, detectorParams,
rejectedMarkers);
// detect diamonds
if(markerIds.size() > 0)
aruco::detectCharucoDiamond(image, markerCorners, markerIds,
squareLength / markerLength, diamondCorners, diamondIds,
camMatrix, distCoeffs);
// estimate diamond pose
if(estimatePose && diamondIds.size() > 0) {
if(!autoScale) {
aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, camMatrix,
distCoeffs, rvecs, tvecs);
} else {
// if autoscale, extract square size from last diamond id
for(unsigned int i = 0; i < diamondCorners.size(); i++) {
float autoSquareLength = autoScaleFactor * float(diamondIds[i].val[3]);
vector< vector< Point2f > > currentCorners;
vector< Vec3d > currentRvec, currentTvec;
currentCorners.push_back(diamondCorners[i]);
aruco::estimatePoseSingleMarkers(currentCorners, autoSquareLength, camMatrix,
distCoeffs, currentRvec, currentTvec);
rvecs.push_back(currentRvec[0]);
tvecs.push_back(currentTvec[0]);
}
}
}
double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
totalTime += currentTime;
totalIterations++;
if(totalIterations % 30 == 0) {
cout << "Detection Time = " << currentTime * 1000 << " ms "
<< "(Mean = " << 1000 * totalTime / double(totalIterations) << " ms)" << endl;
}
// draw results
image.copyTo(imageCopy);
if(markerIds.size() > 0)
aruco::drawDetectedMarkers(imageCopy, markerCorners);
if(showRejected && rejectedMarkers.size() > 0)
aruco::drawDetectedMarkers(imageCopy, rejectedMarkers, noArray(), Scalar(100, 0, 255));
if(diamondIds.size() > 0) {
aruco::drawDetectedDiamonds(imageCopy, diamondCorners, diamondIds);
if(estimatePose) {
for(unsigned int i = 0; i < diamondIds.size(); i++)
cv::drawFrameAxes(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i],
squareLength * 1.1f);
}
}
imshow("out", imageCopy);
char key = (char)waitKey(waitTime);
if(key == 27) break;
}
return 0;
}

@ -1,30 +0,0 @@
%YAML:1.0
adaptiveThreshWinSizeMin: 3
adaptiveThreshWinSizeMax: 23
adaptiveThreshWinSizeStep: 10
adaptiveThreshWinSize: 21
adaptiveThreshConstant: 7
minMarkerPerimeterRate: 0.03
maxMarkerPerimeterRate: 4.0
polygonalApproxAccuracyRate: 0.05
minCornerDistanceRate: 0.05
minDistanceToBorder: 3
minMarkerDistance: 10.0
minMarkerDistanceRate: 0.05
cornerRefinementMethod: 0
cornerRefinementWinSize: 5
cornerRefinementMaxIterations: 30
cornerRefinementMinAccuracy: 0.1
markerBorderBits: 1
perspectiveRemovePixelPerCell: 8
perspectiveRemoveIgnoredMarginPerCell: 0.13
maxErroneousBitsInBorderRate: 0.04
minOtsuStdDev: 5.0
errorCorrectionRate: 0.6
# new aruco 3 functionality
useAruco3Detection: 0
minSideLengthCanonicalImg: 32 # 16, 32, 64 --> tau_c from the paper
minMarkerLengthRatioOriginalImg: 0.02 # range [0,0.2] --> tau_i from the paper
cameraMotionSpeed: 0.1 # range [0,1) --> tau_s from the paper
useGlobalThreshold: 0

@ -1,21 +0,0 @@
%YAML:1.0
---
calibration_time: "Wed 08 Dec 2021 05:13:09 PM MSK"
image_width: 640
image_height: 480
flags: 0
camera_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 4.5251072219637672e+02, 0., 3.1770297317353277e+02, 0.,
4.5676707935146891e+02, 2.7775155919135995e+02, 0., 0., 1. ]
distortion_coefficients: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ 1.2136925618707872e-01, -1.0854664722560681e+00,
1.1786843796668460e-04, -4.6240686046485508e-04,
2.9542589406810080e+00 ]
avg_reprojection_error: 1.8234905535936044e-01
info: "The camera calibration parameters were obtained by img_00.jpg-img_03.jpg from aruco/tutorials/aruco_calibration/images"

@ -1,14 +0,0 @@
%YAML:1.0
camera_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 628.158, 0., 324.099,
0., 628.156, 260.908,
0., 0., 1. ]
distortion_coefficients: !!opencv-matrix
rows: 5
cols: 1
dt: d
data: [ 0.0995485, -0.206384,
0.00754589, 0.00336531, 0 ]

@ -1,150 +0,0 @@
//! [charucohdr]
#include <opencv2/aruco/charuco.hpp>
//! [charucohdr]
#include <opencv2/highgui.hpp>
#include <iostream>
#include <string>
#include "aruco_samples_utility.hpp"
namespace {
const char* about = "A tutorial code on charuco board creation and detection of charuco board with and without camera calibration";
const char* keys = "{c | | Put value of c=1 to create charuco board;\nc=2 to detect charuco board without camera calibration;\nc=3 to detect charuco board with camera calibration and Pose Estimation}";
}
static inline void createBoard()
{
cv::aruco::Dictionary dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
//! [createBoard]
cv::aruco::CharucoBoard board(cv::Size(5, 7), 0.04f, 0.02f, dictionary);
cv::Mat boardImage;
board.generateImage(cv::Size(600, 500), boardImage, 10, 1);
//! [createBoard]
cv::imwrite("BoardImage.jpg", boardImage);
}
//! [detwcp]
static inline void detectCharucoBoardWithCalibrationPose()
{
cv::VideoCapture inputVideo;
inputVideo.open(0);
//! [matdiscoff]
cv::Mat cameraMatrix, distCoeffs;
std::string filename = "calib.txt";
bool readOk = readCameraParameters(filename, cameraMatrix, distCoeffs);
//! [matdiscoff]
if (!readOk) {
std::cerr << "Invalid camera file" << std::endl;
} else {
//! [dictboard]
cv::aruco::Dictionary dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
cv::Ptr<cv::aruco::CharucoBoard> board = new cv::aruco::CharucoBoard(cv::Size(5, 7), 0.04f, 0.02f, dictionary);
cv::Ptr<cv::aruco::DetectorParameters> params = cv::makePtr<cv::aruco::DetectorParameters>();
//! [dictboard]
while (inputVideo.grab()) {
//! [inputImg]
cv::Mat image;
//! [inputImg]
cv::Mat imageCopy;
inputVideo.retrieve(image);
image.copyTo(imageCopy);
//! [midcornerdet]
std::vector<int> markerIds;
std::vector<std::vector<cv::Point2f> > markerCorners;
cv::aruco::detectMarkers(image, cv::makePtr<cv::aruco::Dictionary>(board->getDictionary()), markerCorners, markerIds, params);
//! [midcornerdet]
// if at least one marker detected
if (markerIds.size() > 0) {
cv::aruco::drawDetectedMarkers(imageCopy, markerCorners, markerIds);
//! [charidcor]
std::vector<cv::Point2f> charucoCorners;
std::vector<int> charucoIds;
cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, image, board, charucoCorners, charucoIds, cameraMatrix, distCoeffs);
//! [charidcor]
// if at least one charuco corner detected
if (charucoIds.size() > 0) {
cv::Scalar color = cv::Scalar(255, 0, 0);
//! [detcor]
cv::aruco::drawDetectedCornersCharuco(imageCopy, charucoCorners, charucoIds, color);
//! [detcor]
cv::Vec3d rvec, tvec;
//! [pose]
bool valid = cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, board, cameraMatrix, distCoeffs, rvec, tvec);
//! [pose]
// if charuco pose is valid
if (valid)
cv::drawFrameAxes(imageCopy, cameraMatrix, distCoeffs, rvec, tvec, 0.1f);
}
}
cv::imshow("out", imageCopy);
char key = (char)cv::waitKey(30);
if (key == 27)
break;
}
}
}
//! [detwcp]
//! [detwc]
static inline void detectCharucoBoardWithoutCalibration()
{
cv::VideoCapture inputVideo;
inputVideo.open(0);
cv::aruco::Dictionary dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
cv::Ptr<cv::aruco::CharucoBoard> board = new cv::aruco::CharucoBoard(cv::Size(5, 7), 0.04f, 0.02f, dictionary);
cv::Ptr<cv::aruco::DetectorParameters> params = cv::makePtr<cv::aruco::DetectorParameters>();
params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE;
while (inputVideo.grab()) {
cv::Mat image, imageCopy;
inputVideo.retrieve(image);
image.copyTo(imageCopy);
std::vector<int> markerIds;
std::vector<std::vector<cv::Point2f> > markerCorners;
cv::aruco::detectMarkers(image, cv::makePtr<cv::aruco::Dictionary>(board->getDictionary()), markerCorners, markerIds, params);
//or
//cv::aruco::detectMarkers(image, dictionary, markerCorners, markerIds, params);
// if at least one marker detected
if (markerIds.size() > 0) {
cv::aruco::drawDetectedMarkers(imageCopy, markerCorners, markerIds);
//! [charidcorwc]
std::vector<cv::Point2f> charucoCorners;
std::vector<int> charucoIds;
cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, image, board, charucoCorners, charucoIds);
//! [charidcorwc]
// if at least one charuco corner detected
if (charucoIds.size() > 0)
cv::aruco::drawDetectedCornersCharuco(imageCopy, charucoCorners, charucoIds, cv::Scalar(255, 0, 0));
}
cv::imshow("out", imageCopy);
char key = (char)cv::waitKey(30);
if (key == 27)
break;
}
}
//! [detwc]
int main(int argc, char* argv[])
{
cv::CommandLineParser parser(argc, argv, keys);
parser.about(about);
if (argc < 2) {
parser.printMessage();
return 0;
}
int choose = parser.get<int>("c");
switch (choose) {
case 1:
createBoard();
std::cout << "An image named BoardImg.jpg is generated in folder containing this file" << std::endl;
break;
case 2:
detectCharucoBoardWithoutCalibration();
break;
case 3:
detectCharucoBoardWithCalibrationPose();
break;
default:
break;
}
return 0;
}

@ -1,38 +0,0 @@
%YAML:1.0
nmarkers: 35
markersize: 6
marker_0: "101011111011111001001001101100000000"
marker_1: "000000000010011001010011111010111000"
marker_2: "011001100000001010000101111101001101"
marker_3: "001000111111000111011001110000011111"
marker_4: "100110110100101111000000111101110011"
marker_5: "010101101110111000111010111100010111"
marker_6: "101001000110011110101001010100110100"
marker_7: "011010100100110000011101110110100010"
marker_8: "111110001000101000110001010010111101"
marker_9: "011101101100110111001100100001010100"
marker_10: "100001100001010001110001011000000111"
marker_11: "110010010010011100101111111000001111"
marker_12: "110101001001010110011111010110001101"
marker_13: "001111000001000100010001101001010001"
marker_14: "000000010010101010111110110011010011"
marker_15: "110001110111100101110011111100111010"
marker_16: "101011001110001010110011111011001110"
marker_17: "101110111101110100101101011001010111"
marker_18: "000100111000111101010011010101000101"
marker_19: "001110001110001101100101110100000011"
marker_20: "100101101100010110110110110001100011"
marker_21: "010110001001011010000100111000110110"
marker_22: "001000000000100100000000010100010010"
marker_23: "101001110010100110000111111010010000"
marker_24: "111001101010001100011010010001011100"
marker_25: "101000010001010000110100111101101001"
marker_26: "101010000001010011001010110110000001"
marker_27: "100101001000010101001000111101111110"
marker_28: "010010100110010011110001110101011100"
marker_29: "011001000101100001101111010001001111"
marker_30: "000111011100011110001101111011011001"
marker_31: "010100001011000100111101110001101010"
marker_32: "100101101001101010111111101101110100"
marker_33: "101101001010111000000100110111010101"
marker_34: "011111010000111011111110110101100101"

@ -1,247 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#include "opencv2/objdetect/aruco_detector.hpp"
namespace opencv_test { namespace {
TEST(CV_ArucoTutorial, can_find_singlemarkersoriginal)
{
string img_path = cvtest::findDataFile("aruco/singlemarkersoriginal.jpg");
Mat image = imread(img_path);
aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250));
vector<int> ids;
vector<vector<Point2f> > corners, rejected;
const size_t N = 6ull;
// corners of ArUco markers with indices goldCornersIds
const int goldCorners[N][8] = { {359,310, 404,310, 410,350, 362,350}, {427,255, 469,256, 477,289, 434,288},
{233,273, 190,273, 196,241, 237,241}, {298,185, 334,186, 335,212, 297,211},
{425,163, 430,186, 394,186, 390,162}, {195,155, 230,155, 227,178, 190,178} };
const int goldCornersIds[N] = { 40, 98, 62, 23, 124, 203};
map<int, const int*> mapGoldCorners;
for (size_t i = 0; i < N; i++)
mapGoldCorners[goldCornersIds[i]] = goldCorners[i];
detector.detectMarkers(image, corners, ids, rejected);
ASSERT_EQ(N, ids.size());
for (size_t i = 0; i < N; i++)
{
int arucoId = ids[i];
ASSERT_EQ(4ull, corners[i].size());
ASSERT_TRUE(mapGoldCorners.find(arucoId) != mapGoldCorners.end());
for (int j = 0; j < 4; j++)
{
EXPECT_NEAR(static_cast<float>(mapGoldCorners[arucoId][j * 2]), corners[i][j].x, 1.f);
EXPECT_NEAR(static_cast<float>(mapGoldCorners[arucoId][j * 2 + 1]), corners[i][j].y, 1.f);
}
}
}
TEST(CV_ArucoTutorial, can_find_gboriginal)
{
string imgPath = cvtest::findDataFile("aruco/gboriginal.jpg");
Mat image = imread(imgPath);
string dictPath = cvtest::findDataFile("aruco/tutorial_dict.yml");
aruco::Dictionary dictionary;
FileStorage fs(dictPath, FileStorage::READ);
dictionary.aruco::Dictionary::readDictionary(fs.root()); // set marker from tutorial_dict.yml
aruco::DetectorParameters detectorParams;
aruco::ArucoDetector detector(dictionary, detectorParams);
vector<int> ids;
vector<vector<Point2f> > corners, rejected;
const size_t N = 35ull;
// corners of ArUco markers with indices 0, 1, ..., 34
const int goldCorners[N][8] = { {252,74, 286,81, 274,102, 238,95}, {295,82, 330,89, 319,111, 282,104},
{338,91, 375,99, 365,121, 327,113}, {383,100, 421,107, 412,130, 374,123},
{429,109, 468,116, 461,139, 421,132}, {235,100, 270,108, 257,130, 220,122},
{279,109, 316,117, 304,140, 266,133}, {324,119, 362,126, 352,150, 313,143},
{371,128, 410,136, 400,161, 360,152}, {418,139, 459,145, 451,170, 410,163},
{216,128, 253,136, 239,161, 200,152}, {262,138, 300,146, 287,172, 248,164},
{309,148, 349,156, 337,183, 296,174}, {358,158, 398,167, 388,194, 346,185},
{407,169, 449,176, 440,205, 397,196}, {196,158, 235,168, 218,195, 179,185},
{243,170, 283,178, 269,206, 228,197}, {293,180, 334,190, 321,218, 279,209},
{343,192, 385,200, 374,230, 330,220}, {395,203, 438,211, 429,241, 384,233},
{174,192, 215,201, 197,231, 156,221}, {223,204, 265,213, 249,244, 207,234},
{275,215, 317,225, 303,257, 259,246}, {327,227, 371,238, 359,270, 313,259},
{381,240, 426,249, 416,282, 369,273}, {151,228, 193,238, 173,271, 130,260},
{202,241, 245,251, 228,285, 183,274}, {255,254, 300,264, 284,299, 238,288},
{310,267, 355,278, 342,314, 295,302}, {366,281, 413,290, 402,327, 353,317},
{125,267, 168,278, 147,314, 102,303}, {178,281, 223,293, 204,330, 157,317},
{233,296, 280,307, 263,346, 214,333}, {291,310, 338,322, 323,363, 274,349},
{349,325, 399,336, 386,378, 335,366} };
map<int, const int*> mapGoldCorners;
for (int i = 0; i < static_cast<int>(N); i++)
mapGoldCorners[i] = goldCorners[i];
detector.detectMarkers(image, corners, ids, rejected);
ASSERT_EQ(N, ids.size());
for (size_t i = 0; i < N; i++)
{
int arucoId = ids[i];
ASSERT_EQ(4ull, corners[i].size());
ASSERT_TRUE(mapGoldCorners.find(arucoId) != mapGoldCorners.end());
for (int j = 0; j < 4; j++)
{
EXPECT_NEAR(static_cast<float>(mapGoldCorners[arucoId][j*2]), corners[i][j].x, 1.f);
EXPECT_NEAR(static_cast<float>(mapGoldCorners[arucoId][j*2+1]), corners[i][j].y, 1.f);
}
}
}
TEST(CV_ArucoTutorial, can_find_choriginal)
{
string imgPath = cvtest::findDataFile("aruco/choriginal.jpg");
Mat image = imread(imgPath);
aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250));
vector< int > ids;
vector< vector< Point2f > > corners, rejected;
const size_t N = 17ull;
// corners of aruco markers with indices goldCornersIds
const int goldCorners[N][8] = { {268,77, 290,80, 286,97, 263,94}, {360,90, 382,93, 379,111, 357,108},
{211,106, 233,109, 228,127, 205,123}, {306,120, 328,124, 325,142, 302,138},
{402,135, 425,139, 423,157, 400,154}, {247,152, 271,155, 267,174, 242,171},
{347,167, 371,171, 369,191, 344,187}, {185,185, 209,189, 203,210, 178,206},
{288,201, 313,206, 309,227, 284,223}, {393,218, 418,222, 416,245, 391,241},
{223,240, 250,244, 244,268, 217,263}, {333,258, 359,262, 356,286, 329,282},
{152,281, 179,285, 171,312, 143,307}, {267,300, 294,305, 289,331, 261,327},
{383,319, 410,324, 408,351, 380,347}, {194,347, 223,352, 216,382, 186,377},
{315,368, 345,373, 341,403, 310,398} };
map<int, const int*> mapGoldCorners;
for (int i = 0; i < static_cast<int>(N); i++)
mapGoldCorners[i] = goldCorners[i];
detector.detectMarkers(image, corners, ids, rejected);
ASSERT_EQ(N, ids.size());
for (size_t i = 0; i < N; i++)
{
int arucoId = ids[i];
ASSERT_EQ(4ull, corners[i].size());
ASSERT_TRUE(mapGoldCorners.find(arucoId) != mapGoldCorners.end());
for (int j = 0; j < 4; j++)
{
EXPECT_NEAR(static_cast<float>(mapGoldCorners[arucoId][j * 2]), corners[i][j].x, 1.f);
EXPECT_NEAR(static_cast<float>(mapGoldCorners[arucoId][j * 2 + 1]), corners[i][j].y, 1.f);
}
}
}
TEST(CV_ArucoTutorial, can_find_chocclusion)
{
string imgPath = cvtest::findDataFile("aruco/chocclusion_original.jpg");
Mat image = imread(imgPath);
aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250));
vector< int > ids;
vector< vector< Point2f > > corners, rejected;
const size_t N = 13ull;
// corners of aruco markers with indices goldCornersIds
const int goldCorners[N][8] = { {301,57, 322,62, 317,79, 295,73}, {391,80, 413,85, 408,103, 386,97},
{242,79, 264,85, 256,102, 234,96}, {334,103, 357,109, 352,126, 329,121},
{428,129, 451,134, 448,152, 425,146}, {274,128, 296,134, 290,153, 266,147},
{371,154, 394,160, 390,180, 366,174}, {208,155, 232,161, 223,181, 199,175},
{309,182, 333,188, 327,209, 302,203}, {411,210, 436,216, 432,238, 407,231},
{241,212, 267,219, 258,242, 232,235}, {167,244, 194,252, 183,277, 156,269},
{202,314, 230,322, 220,349, 191,341} };
map<int, const int*> mapGoldCorners;
const int goldCornersIds[N] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 15};
for (int i = 0; i < static_cast<int>(N); i++)
mapGoldCorners[goldCornersIds[i]] = goldCorners[i];
detector.detectMarkers(image, corners, ids, rejected);
ASSERT_EQ(N, ids.size());
for (size_t i = 0; i < N; i++)
{
int arucoId = ids[i];
ASSERT_EQ(4ull, corners[i].size());
ASSERT_TRUE(mapGoldCorners.find(arucoId) != mapGoldCorners.end());
for (int j = 0; j < 4; j++)
{
EXPECT_NEAR(static_cast<float>(mapGoldCorners[arucoId][j * 2]), corners[i][j].x, 1.f);
EXPECT_NEAR(static_cast<float>(mapGoldCorners[arucoId][j * 2 + 1]), corners[i][j].y, 1.f);
}
}
}
TEST(CV_ArucoTutorial, can_find_diamondmarkers)
{
string imgPath = cvtest::findDataFile("aruco/diamondmarkers.jpg");
Mat image = imread(imgPath);
string dictPath = cvtest::findDataFile("aruco/tutorial_dict.yml");
aruco::Dictionary dictionary;
FileStorage fs(dictPath, FileStorage::READ);
dictionary.aruco::Dictionary::readDictionary(fs.root()); // set marker from tutorial_dict.yml
string detectorPath = cvtest::findDataFile("aruco/detector_params.yml");
fs = FileStorage(detectorPath, FileStorage::READ);
aruco::DetectorParameters detectorParams;
detectorParams.readDetectorParameters(fs.root());
detectorParams.cornerRefinementMethod = aruco::CORNER_REFINE_APRILTAG;
aruco::CharucoBoard charucoBoard(Size(3, 3), 0.4f, 0.25f, dictionary);
aruco::CharucoDetector detector(charucoBoard, aruco::CharucoParameters(), detectorParams);
vector<int> ids;
vector<vector<Point2f> > corners, diamondCorners;
vector<Vec4i> diamondIds;
const size_t N = 12ull;
// corner indices of ArUco markers
const int goldCornersIds[N] = { 4, 12, 11, 3, 12, 10, 12, 10, 10, 11, 2, 11 };
map<int, int> counterGoldCornersIds;
for (int i = 0; i < static_cast<int>(N); i++)
counterGoldCornersIds[goldCornersIds[i]]++;
const size_t diamondsN = 3;
// corners of diamonds with Vec4i indices
const float goldDiamondCorners[diamondsN][8] = {{195.6f,150.9f, 213.5f,201.2f, 136.4f,215.3f, 122.4f,163.5f},
{501.1f,171.3f, 501.9f,208.5f, 446.2f,199.8f, 447.8f,163.3f},
{343.4f,361.2f, 359.7f,328.7f, 400.8f,344.6f, 385.7f,378.4f}};
auto comp = [](const Vec4i& a, const Vec4i& b) {
// lexicographic comparison, which gives the strict weak ordering required by std::map
for (int i = 0; i < 4; i++) {
if (a[i] != b[i]) return a[i] < b[i];
}
return false;
};
map<Vec4i, const float*, decltype(comp)> goldDiamonds(comp);
goldDiamonds[Vec4i(10, 4, 11, 12)] = goldDiamondCorners[0];
goldDiamonds[Vec4i(10, 3, 11, 12)] = goldDiamondCorners[1];
goldDiamonds[Vec4i(10, 2, 11, 12)] = goldDiamondCorners[2];
detector.detectDiamonds(image, diamondCorners, diamondIds, corners, ids);
map<int, int> counterRes;
ASSERT_EQ(N, ids.size());
for (size_t i = 0; i < N; i++)
{
int arucoId = ids[i];
counterRes[arucoId]++;
}
ASSERT_EQ(counterGoldCornersIds, counterRes); // check the number of ArUco markers
ASSERT_EQ(goldDiamonds.size(), diamondIds.size()); // check the number of diamonds
for (size_t i = 0; i < goldDiamonds.size(); i++)
{
Vec4i diamondId = diamondIds[i];
ASSERT_TRUE(goldDiamonds.find(diamondId) != goldDiamonds.end());
for (int j = 0; j < 4; j++)
{
EXPECT_NEAR(goldDiamonds[diamondId][j * 2], diamondCorners[i][j].x, 0.5f);
EXPECT_NEAR(goldDiamonds[diamondId][j * 2 + 1], diamondCorners[i][j].y, 0.5f);
}
}
}
}} // namespace

@ -1,238 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test {
namespace {
static inline vector<Point2f> getAxis(InputArray _cameraMatrix, InputArray _distCoeffs, InputArray _rvec,
InputArray _tvec, float length, const float offset = 0.f)
{
vector<Point3f> axis;
axis.push_back(Point3f(offset, offset, 0.f));
axis.push_back(Point3f(length+offset, offset, 0.f));
axis.push_back(Point3f(offset, length+offset, 0.f));
axis.push_back(Point3f(offset, offset, length));
vector<Point2f> axis_to_img;
projectPoints(axis, _rvec, _tvec, _cameraMatrix, _distCoeffs, axis_to_img);
return axis_to_img;
}
static inline vector<Point2f> getMarkerById(int id, const vector<vector<Point2f> >& corners, const vector<int>& ids)
{
for (size_t i = 0ull; i < ids.size(); i++)
if (ids[i] == id)
return corners[i];
return vector<Point2f>();
}
static inline double deg2rad(double deg) { return deg * CV_PI / 180.; }
/**
* @brief Get rvec and tvec from yaw, pitch and distance
*/
static inline void getSyntheticRT(double yaw, double pitch, double distance, Mat& rvec, Mat& tvec) {
rvec = Mat::zeros(3, 1, CV_64FC1);
tvec = Mat::zeros(3, 1, CV_64FC1);
// rotate "scene" in pitch axis (x-axis)
Mat rotPitch(3, 1, CV_64FC1);
rotPitch.at<double>(0) = -pitch;
rotPitch.at<double>(1) = 0;
rotPitch.at<double>(2) = 0;
// rotate "scene" in yaw (y-axis)
Mat rotYaw(3, 1, CV_64FC1);
rotYaw.at<double>(0) = 0;
rotYaw.at<double>(1) = yaw;
rotYaw.at<double>(2) = 0;
// compose both rotations
composeRT(rotPitch, Mat(3, 1, CV_64FC1, Scalar::all(0)), rotYaw,
Mat(3, 1, CV_64FC1, Scalar::all(0)), rvec, tvec);
// Tvec, just move in z (camera) direction the specific distance
tvec.at<double>(0) = 0.;
tvec.at<double>(1) = 0.;
tvec.at<double>(2) = distance;
}
/**
* @brief Project a synthetic marker
*/
static inline void projectMarker(Mat& img, Ptr<aruco::Board> board, int markerIndex, Mat cameraMatrix, Mat rvec, Mat tvec,
int markerBorder) {
// canonical image
Mat markerImg;
const int markerSizePixels = 100;
aruco::generateImageMarker(board->getDictionary(), board->getIds()[markerIndex], markerSizePixels, markerImg, markerBorder);
// projected corners
Mat distCoeffs(5, 1, CV_64FC1, Scalar::all(0));
vector<Point2f> corners;
// get max coordinate of board
Point3f maxCoord = board->getRightBottomCorner();
// copy objPoints
vector<Point3f> objPoints = board->getObjPoints()[markerIndex];
// move the marker to the origin
for (size_t i = 0; i < objPoints.size(); i++)
objPoints[i] -= (maxCoord / 2.f);
projectPoints(objPoints, rvec, tvec, cameraMatrix, distCoeffs, corners);
// get perspective transform
vector<Point2f> originalCorners;
originalCorners.push_back(Point2f(0, 0));
originalCorners.push_back(Point2f((float)markerSizePixels, 0));
originalCorners.push_back(Point2f((float)markerSizePixels, (float)markerSizePixels));
originalCorners.push_back(Point2f(0, (float)markerSizePixels));
Mat transformation = getPerspectiveTransform(originalCorners, corners);
// apply transformation
Mat aux;
const char borderValue = 127;
warpPerspective(markerImg, aux, transformation, img.size(), INTER_NEAREST, BORDER_CONSTANT,
Scalar::all(borderValue));
// copy only not-border pixels
for (int y = 0; y < aux.rows; y++) {
for (int x = 0; x < aux.cols; x++) {
if (aux.at< unsigned char >(y, x) == borderValue) continue;
img.at< unsigned char >(y, x) = aux.at< unsigned char >(y, x);
}
}
}
/**
* @brief Get a synthetic image of GridBoard in perspective
*/
static inline Mat projectBoard(Ptr<aruco::GridBoard>& board, Mat cameraMatrix, double yaw, double pitch,
double distance, Size imageSize, int markerBorder) {
Mat rvec, tvec;
getSyntheticRT(yaw, pitch, distance, rvec, tvec);
Mat img = Mat(imageSize, CV_8UC1, Scalar::all(255));
for (unsigned int index = 0; index < board->getIds().size(); index++) {
projectMarker(img, board.staticCast<aruco::Board>(), index, cameraMatrix, rvec, tvec, markerBorder);
}
return img;
}
int getBoardPose(InputArrayOfArrays corners, InputArray ids, const Ptr<aruco::Board> &board,
InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec,
InputOutputArray tvec, bool useExtrinsicGuess = false) {
CV_Assert(corners.total() == ids.total());
Mat objPoints, imgPoints; // get object and image points for the solvePnP function
board->matchImagePoints(corners, ids, objPoints, imgPoints);
CV_Assert(imgPoints.total() == objPoints.total());
if(objPoints.total() == 0) // none of the detected markers belong to the board
return 0;
solvePnP(objPoints, imgPoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess);
// divide by four since all the four corners are concatenated in the array for each marker
return (int)objPoints.total() / 4;
}
/** Check if a set of 3d points are enough for calibration. Z coordinate is ignored. Only axis parallel lines are considered */
static bool _arePointsEnoughForPoseEstimation(const std::vector<Point3f> &points) {
if(points.size() < 4) return false;
std::vector<double> sameXValue; // different x values in points
std::vector<int> sameXCounter; // number of points with the x value in sameXValue
for(unsigned int i = 0; i < points.size(); i++) {
bool found = false;
for(unsigned int j = 0; j < sameXValue.size(); j++) {
if(sameXValue[j] == points[i].x) {
found = true;
sameXCounter[j]++;
}
}
if(!found) {
sameXValue.push_back(points[i].x);
sameXCounter.push_back(1);
}
}
// count how many x values have at least 2 points
int moreThan2 = 0;
for(unsigned int i = 0; i < sameXCounter.size(); i++) {
if(sameXCounter[i] >= 2) moreThan2++;
}
// if at least two x values have two or more points, the points are not collinear and pose estimation is possible
if(moreThan2 > 1)
return true;
return false;
}
bool getCharucoBoardPose(InputArray charucoCorners, InputArray charucoIds, const Ptr<aruco::CharucoBoard> &board,
InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec, InputOutputArray tvec,
bool useExtrinsicGuess = false) {
CV_Assert((charucoCorners.getMat().total() == charucoIds.getMat().total()));
if(charucoIds.getMat().total() < 4) return false; // need, at least, 4 corners
std::vector<Point3f> objPoints;
objPoints.reserve(charucoIds.getMat().total());
for(unsigned int i = 0; i < charucoIds.getMat().total(); i++) {
int currId = charucoIds.getMat().at< int >(i);
CV_Assert(currId >= 0 && currId < (int)board->getChessboardCorners().size());
objPoints.push_back(board->getChessboardCorners()[currId]);
}
// points need to be in different lines, check if detected points are enough
if(!_arePointsEnoughForPoseEstimation(objPoints)) return false;
solvePnP(objPoints, charucoCorners, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess);
return true;
}
/**
* @brief Return object points for the system centered in a middle (by default) or in a top left corner of single
* marker, given the marker length
*/
static Mat _getSingleMarkerObjectPoints(float markerLength, bool use_aruco_ccw_center) {
CV_Assert(markerLength > 0);
Mat objPoints(4, 1, CV_32FC3);
// set the coordinate system either in the marker center (aruco CCW convention) or in the top-left corner, with Z pointing out
if (use_aruco_ccw_center) {
objPoints.ptr<Vec3f>(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0);
objPoints.ptr<Vec3f>(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0);
objPoints.ptr<Vec3f>(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0);
objPoints.ptr<Vec3f>(0)[3] = Vec3f(-markerLength/2.f, -markerLength/2.f, 0);
}
else {
objPoints.ptr<Vec3f>(0)[0] = Vec3f(0.f, 0.f, 0);
objPoints.ptr<Vec3f>(0)[1] = Vec3f(markerLength, 0.f, 0);
objPoints.ptr<Vec3f>(0)[2] = Vec3f(markerLength, markerLength, 0);
objPoints.ptr<Vec3f>(0)[3] = Vec3f(0.f, markerLength, 0);
}
return objPoints;
}
void getMarkersPoses(InputArrayOfArrays corners, float markerLength, InputArray cameraMatrix, InputArray distCoeffs,
OutputArray _rvecs, OutputArray _tvecs, OutputArray objPoints = noArray(),
bool use_aruco_ccw_center = true, SolvePnPMethod solvePnPMethod = SolvePnPMethod::SOLVEPNP_ITERATIVE) {
CV_Assert(markerLength > 0);
Mat markerObjPoints = _getSingleMarkerObjectPoints(markerLength, use_aruco_ccw_center);
int nMarkers = (int)corners.total();
_rvecs.create(nMarkers, 1, CV_64FC3);
_tvecs.create(nMarkers, 1, CV_64FC3);
Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat();
for (int i = 0; i < nMarkers; i++)
solvePnP(markerObjPoints, corners.getMat(i), cameraMatrix, distCoeffs, rvecs.at<Vec3d>(i), tvecs.at<Vec3d>(i),
solvePnPMethod);
if(objPoints.needed())
markerObjPoints.convertTo(objPoints, -1);
}
}
}

@ -1,8 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
CV_TEST_MAIN("cv",
cvtest::addDataSearchSubDirectory("contrib/aruco")
)

@ -1,12 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include <opencv2/objdetect/charuco_detector.hpp>
#endif

@ -1,138 +0,0 @@
Calibration with ArUco and ChArUco {#tutorial_aruco_calibration}
==============================
@prev_tutorial{tutorial_charuco_diamond_detection}
@next_tutorial{tutorial_aruco_faq}
The ArUco module can also be used to calibrate a camera. Camera calibration consists of obtaining the
camera intrinsic parameters and distortion coefficients. These parameters remain fixed unless the camera
optics are modified, so camera calibration only needs to be done once.
Camera calibration is usually performed using the OpenCV ```calibrateCamera()``` function. This function
requires some correspondences between environment points and their projection in the camera image from
different viewpoints. In general, these correspondences are obtained from the corners of chessboard
patterns. See ```calibrateCamera()``` function documentation or the OpenCV calibration tutorial for
more detailed information.
Using the ArUco module, calibration can be performed based on ArUco marker corners or ChArUco corners.
Calibrating using ArUco is much more versatile than using traditional chessboard patterns, since it
allows occlusions and partial views.
Calibration can therefore be done using either marker corners or ChArUco corners. However,
it is highly recommended to use the ChArUco corners approach, since the provided corners are much
more accurate than marker corners. Calibration using a standard board should only be
employed in those scenarios where ChArUco boards cannot be used because of some kind of restriction.
Calibration with ChArUco Boards
------
To calibrate using a ChArUco board, it is necessary to detect the board from different viewpoints, in the
same way that the standard calibration does with the traditional chessboard pattern. However, due to the
benefits of using ChArUco, occlusions and partial views are allowed, and not all the corners need to be
visible in all the viewpoints.
![ChArUco calibration viewpoints](images/charucocalibration.png)
The function to calibrate is `cv::calibrateCamera()`. Example:
@code{.cpp}
Ptr<aruco::CharucoBoard> board = ... // create charuco board
Size imageSize = ... // camera image size
vector<vector<Point2f>> allCharucoCorners;
vector<vector<int>> allCharucoIds;
vector<vector<Point2f>> allImagePoints;
vector<vector<Point3f>> allObjectPoints;
// Detect charuco board from several viewpoints and fill
// allCharucoCorners, allCharucoIds, allImagePoints and allObjectPoints
while(inputVideo.grab()) {
detector.detectBoard(
image, currentCharucoCorners, currentCharucoIds
);
board.matchImagePoints(
currentCharucoCorners, currentCharucoIds,
currentObjectPoints, currentImagePoints
);
...
}
// After capturing in several viewpoints, start calibration
Mat cameraMatrix, distCoeffs;
vector<Mat> rvecs, tvecs;
// Set calibration flags (same as in the calibrateCamera() function)
int calibrationFlags = ...
double repError = calibrateCamera(
allObjectPoints, allImagePoints, imageSize,
cameraMatrix, distCoeffs, rvecs, tvecs, noArray(),
noArray(), noArray(), calibrationFlags
);
@endcode
The ChArUco corners and ChArUco identifiers captured on each viewpoint are stored in the vectors ```allCharucoCorners``` and ```allCharucoIds```, one element per viewpoint.
The `calibrateCamera()` function will fill the `cameraMatrix` and `distCoeffs` arrays with the camera calibration parameters. It will return the reprojection
error obtained from the calibration. The elements in `rvecs` and `tvecs` will be filled with the estimated pose of the camera (with respect to the ChArUco board)
in each of the viewpoints.
Finally, the `calibrationFlags` parameter determines some of the options for the calibration.
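For reference, the obtained parameters can then be written to a YAML file with the same layout as `samples/tutorial_camera_charuco.yml`, so that they can later be loaded by the `readCameraParameters()` helper used in the samples. A minimal sketch (the output file name is illustrative):
@code{.cpp}
FileStorage fs("camera_calibration.yml", FileStorage::WRITE);
fs << "image_width" << imageSize.width;
fs << "image_height" << imageSize.height;
fs << "camera_matrix" << cameraMatrix;
fs << "distortion_coefficients" << distCoeffs;
fs << "avg_reprojection_error" << repError;
fs.release();
@endcode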
A full working example is included in `calibrate_camera_charuco.cpp` inside the `samples` folder.
Note: The samples now take input via the command line using the [OpenCV CommandLineParser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like:
@code{.cpp}
"camera_calib.txt" -w=5 -h=7 -sl=0.04 -ml=0.02 -d=10
-v="path_aruco/tutorials/aruco_calibration/images/img_%02d.jpg
-c=path_aruco/samples/tutorial_camera_params.yml
@endcode
The camera calibration parameters in `samples/tutorial_camera_charuco.yml` were obtained using the images `img_00.jpg`-`img_03.jpg` from `aruco_calibration/images`.
Calibration with ArUco Boards
------
As already stated, the use of ChArUco boards instead of ArUco boards is recommended for camera calibration, since
ChArUco corners are more accurate than marker corners. However, in some special cases calibration
based on ArUco boards may be required. As in the previous case, it requires detecting an ArUco board from different viewpoints.
![ArUco calibration viewpoints](images/arucocalibration.png)
Example of camera calibration using an ArUco board:
@code{.cpp}
Ptr<aruco::Board> board = ... // create aruco board
Size imageSize = ... // camera image size
vector<vector<vector<Point2f>>> allMarkerCorners;
vector<vector<int>> allMarkerIds;
vector<vector<Point2f>> allImagePoints;
vector<vector<Point3f>> allObjectPoints;
// Detect aruco board from several viewpoints and fill allMarkerCorners, allMarkerIds
detector.detectMarkers(image, markerCorners, markerIds, rejectedMarkers);
...
// After capturing in several viewpoints, match image points and start calibration
for(size_t frame = 0; frame < allMarkerCorners.size(); frame++) {
    vector<Point3f> currentObjPoints;
    vector<Point2f> currentImgPoints;
    board->matchImagePoints(
        allMarkerCorners[frame], allMarkerIds[frame],
        currentObjPoints, currentImgPoints
    );
    allObjectPoints.push_back(currentObjPoints);
    allImagePoints.push_back(currentImgPoints);
}
Mat cameraMatrix, distCoeffs;
vector<Mat> rvecs, tvecs;
int calibrationFlags = ... // Set calibration flags (same as in the calibrateCamera() function)
double repError = calibrateCamera(
    allObjectPoints, allImagePoints, imageSize,
    cameraMatrix, distCoeffs, rvecs, tvecs, noArray(),
    noArray(), noArray(), calibrationFlags
);
@endcode
A full working example is included in `calibrate_camera.cpp` inside the `samples` folder.
Note: The samples now take input via the command line using the [OpenCV CommandLineParser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like:
@code{.cpp}
"camera_calib.txt" -w=5 -h=7 -l=100 -s=10 -d=10
@endcode

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.

@ -1,162 +0,0 @@
Aruco module FAQ {#tutorial_aruco_faq}
==============================
@prev_tutorial{tutorial_aruco_calibration}
This is a compilation of questions that can be useful for those that want to use the aruco module.
- I only want to label some objects, what should I use?
In this case, you only need single ArUco markers. You can place one or several markers with different ids on each of the objects you want to identify.
- Which algorithm is used for marker detection?
The aruco module is based on the original ArUco library. A full description of the detection process can be found in:
> S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014.
> "Automatic generation and detection of highly reliable fiducial markers under occlusion".
> Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005
- My markers are not being detected correctly, what can I do?
There can be many factors that prevent the correct detection of markers. You probably need to adjust some of the parameters
in the ```DetectorParameters``` object. The first thing you can do is to check whether your markers are returned
as rejected candidates by the ```detectMarkers()``` function. Depending on this, you should try to modify different parameters.
If you are using an ArUco board, you can also try the ```refineDetectedMarkers()``` function.
If you are [using big markers](https://github.com/opencv/opencv_contrib/issues/2811) (400x400 pixels and more), try increasing the ```adaptiveThreshWinSizeMax``` value.
Also avoid [narrow borders](https://github.com/opencv/opencv_contrib/issues/2492) (5% or less of the marker perimeter, adjusted by ```minMarkerDistanceRate```) around the markers. A minimal tuning sketch is shown below.
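As a rough illustration (parameter values here are examples, not recommendations), tuning usually means creating a ```DetectorParameters``` object, adjusting the relevant fields and inspecting the rejected candidates returned by ```detectMarkers()```:
@code{.cpp}
cv::Ptr<cv::aruco::Dictionary> dictionary =
    cv::makePtr<cv::aruco::Dictionary>(cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250));
cv::Ptr<cv::aruco::DetectorParameters> params = cv::makePtr<cv::aruco::DetectorParameters>();
params->adaptiveThreshWinSizeMax = 51; // larger window for big markers (example value)
std::vector<int> markerIds;
std::vector<std::vector<cv::Point2f>> markerCorners, rejectedCandidates;
// image is the input cv::Mat in which the markers should be found
cv::aruco::detectMarkers(image, dictionary, markerCorners, markerIds, params, rejectedCandidates);
// inspect rejectedCandidates to understand why your markers are being discarded
@endcode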
- What are the benefits of ArUco boards? What are the drawbacks?
Using a board of markers you can obtain the camera pose from a set of markers, instead of a single one. This way,
the detection is able to handle occlusions and partial views of the board, since only one marker is necessary to obtain the pose.
Furthermore, as in most cases you are using more corners for pose estimation, it will be more accurate than using a single marker.
The main drawback is that a Board is not as versatile as a single marker.
- What are the benefits of ChArUco boards over ArUco boards? And the drawbacks?
ChArUco boards combine chessboards with ArUco boards. Thanks to this, the corners provided by ChArUco boards are more accurate than those provided by ArUco boards (or single markers).
The main drawback is that ChArUco boards are not as versatile as ArUco boards. For instance, a ChArUco board is a planar board with a specific marker layout, while ArUco boards
can have any layout, even in 3d. Furthermore, the markers in a ChArUco board are usually smaller and more difficult to detect.
- I do not need pose estimation, should I use ChArUco boards?
No. The main goal of ChArUco boards is to provide highly accurate corners for pose estimation or camera calibration.
- Should all the markers in an ArUco board be placed in the same plane?
No, the marker corners in an ArUco board can be placed anywhere in its 3d coordinate system.
- Should all the markers in a ChArUco board be placed in the same plane?
Yes, all the markers in a ChArUco board need to be in the same plane and their layout is fixed by the chessboard shape.
- What is the difference between a ```Board``` object and a ```GridBoard``` object?
The ```GridBoard``` class is a specific type of board that inherits from the ```Board``` class. A ```GridBoard``` object is a board whose markers are placed in the same
plane and in a grid layout.
- What are Diamond markers?
Diamond markers are very similar to a ChArUco board of 3x3 squares. However, contrary to ChArUco boards, the detection of diamonds is based on the relative position of the markers.
They are useful when you want to provide a conceptual meaning to any (or all) of the markers in the diamond. An example is using one of the markers to provide the diamond scale.
- Do I need to detect markers before board detection, ChArUco board detection or Diamond detection?
Yes, the detection of single markers is a basic tool in the aruco module. It is done using the ```detectMarkers()``` function. The rest of the functionalities receive
a list of detected markers from this function.
- I want to calibrate my camera, can I use this module?
Yes, the aruco module provides functionalities to calibrate the camera using both ArUco boards and ChArUco boards.
- Should I calibrate using a ChArUco board or an ArUco board?
Calibration using a ChArUco board is highly recommended due to its higher accuracy.
- Should I use a predefined dictionary or generate my own dictionary?
In general, it is easier to use one of the predefined dictionaries. However, if you need a bigger dictionary (in terms of number of markers or number of bits)
you should generate your own dictionary. Dictionary generation is also useful if you want to maximize the inter-marker distance to achieve a better error
correction during the identification step.
- I am generating my own dictionary but it takes too long
Dictionary generation should only be done once at the beginning of your application and it should take some seconds. If you are
generating the dictionary on each iteration of your detection loop, you are doing it wrong.
Furthermore, it is advisable to save the dictionary to a file with ```cv::aruco::Dictionary::writeDictionary()``` and read it back with ```cv::aruco::Dictionary::readDictionary()``` on every execution, so that you do not need to regenerate it (see the sketch below).
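A minimal sketch of saving a generated dictionary and loading it back with ```cv::FileStorage``` (the file name is illustrative, and the exact ```writeDictionary()``` overload may differ between OpenCV versions):
@code{.cpp}
// write the generated dictionary once
cv::FileStorage fsWrite("my_dictionary.yml", cv::FileStorage::WRITE);
dictionary.writeDictionary(fsWrite);
fsWrite.release();

// read it back on every execution instead of regenerating it
cv::aruco::Dictionary loadedDictionary;
cv::FileStorage fsRead("my_dictionary.yml", cv::FileStorage::READ);
bool readOk = loadedDictionary.readDictionary(fsRead.root());
@endcode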
- I would like to use some markers of the original ArUco library that I have already printed, can I use them?
Yes, one of the predefined dictionaries is ```DICT_ARUCO_ORIGINAL```, which detects the markers of the original ArUco library with the same identifiers.
- Can I use the Board configuration file of the original ArUco library in this module?
Not directly, you will need to adapt the information of the ArUco file to the aruco module Board format.
- Can I use this module to detect the markers of other libraries based on binary fiducial markers?
Probably yes, however you will need to port the dictionary of the original library to the aruco module format.
- Do I need to store the Dictionary information in a file so I can use it in different executions?
If you are using one of the predefined dictionaries, it is not necessary. Otherwise, it is recommendable that you save it to file.
- Do I need to store the Board information in a file so I can use it in different executions?
If you are using a ```GridBoard``` or a ```ChArUco``` board you only need to store the board measurements that are provided to the ```GridBoard::create()``` function or to the `CharucoBoard` constructor.
If you manually modify the marker ids of the boards, or if you use a different type of board, you should save your board object to a file.
- Does the aruco module provide functions to save the Dictionary or Board to file?
You can use ```cv::aruco::Dictionary::writeDictionary()``` and ```cv::aruco::Dictionary::readDictionary()``` for ```cv::aruco::Dictionary```. The data members of the board classes are public and can be easily stored.
- Alright, but how can I render a 3d model to create an augmented reality application?
To do so, you will need to use an external rendering engine library, such as OpenGL. The aruco module only provides the functionality to
obtain the camera pose, i.e. the rotation and translation vectors, which is necessary to create the augmented reality effect.
However, you will need to adapt the rotation and translation vectors from the OpenCV format to the format accepted by your 3d rendering library.
The original ArUco library contains examples of how to do it for OpenGL and Ogre3D.
- I have used this module in my research work, how can I cite it?
You can cite the original ArUco library:
> S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014.
> "Automatic generation and detection of highly reliable fiducial markers under occlusion".
> Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005
- Pose estimation markers are not being detected correctly, what can I do?
It is important to remark that the estimation of the pose using only 4 coplanar points is subject to ambiguity.
In general, the ambiguity can be solved if the camera is near to the marker.
However, as the marker becomes smaller, the errors in the corner estimation grow and ambiguity becomes a problem.
Try increasing the size of the marker you're using, and you can also try non-symmetrical markers (see aruco_dict_utils.cpp) to avoid collisions.
Use multiple markers (ArUco/ChArUco/Diamond boards) and pose estimation with solvePnP() with the ```SOLVEPNP_IPPE_SQUARE``` option (a minimal sketch is shown below).
More in [this issue](https://github.com/opencv/opencv/issues/8813).
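A minimal sketch of single-marker pose estimation with ```solvePnP()``` and ```SOLVEPNP_IPPE_SQUARE``` (the marker length is an example value; ```markerCorners```, ```cameraMatrix``` and ```distCoeffs``` are assumed to be already available from detection and calibration; the object points use the marker-centered convention, with Z pointing out of the marker plane):
@code{.cpp}
float markerLength = 0.05f; // marker side length in meters (example value)
std::vector<cv::Point3f> objPoints = {
    {-markerLength/2.f,  markerLength/2.f, 0.f},
    { markerLength/2.f,  markerLength/2.f, 0.f},
    { markerLength/2.f, -markerLength/2.f, 0.f},
    {-markerLength/2.f, -markerLength/2.f, 0.f}
};
// markerCorners[i] holds the 4 detected image corners of one marker
cv::Vec3d rvec, tvec;
cv::solvePnP(objPoints, markerCorners[i], cameraMatrix, distCoeffs, rvec, tvec,
             false, cv::SOLVEPNP_IPPE_SQUARE);
@endcode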

@ -1,249 +0,0 @@
Detection of ChArUco Boards {#tutorial_charuco_detection}
==============================
@prev_tutorial{tutorial_aruco_board_detection}
@next_tutorial{tutorial_charuco_diamond_detection}
ArUco markers and boards are very useful due to their fast detection and their versatility.
However, one of the problems of ArUco markers is that the accuracy of their corner positions is not too high,
even after applying subpixel refinement.
On the contrary, the corners of chessboard patterns can be refined more accurately since each corner is
surrounded by two black squares. However, finding a chessboard pattern is not as versatile as finding an ArUco board:
it has to be completely visible and occlusions are not permitted.
A ChArUco board tries to combine the benefits of these two approaches:
![Charuco definition](images/charucodefinition.png)
The ArUco part is used to interpolate the position of the chessboard corners, so that it has the versatility of marker
boards, since it allows occlusions or partial views. Moreover, since the interpolated corners belong to a chessboard,
they are very accurate in terms of subpixel accuracy.
When high precision is necessary, such as in camera calibration, ChArUco boards are a better option than standard
ArUco boards.
Goal
----
In this tutorial you will learn:
- How to create a ChArUco board?
- How to detect the ChArUco corners without performing camera calibration?
- How to detect the ChArUco corners with camera calibration and pose estimation?
Source code
-----------
You can find this code in `opencv_contrib/modules/aruco/samples/tutorial_charuco_create_detect.cpp`
Here is a sample code showing how to achieve everything enumerated in the goal list.
@include samples/tutorial_charuco_create_detect.cpp
ChArUco Board Creation
------
The aruco module provides the ```cv::aruco::CharucoBoard``` class that represents a ChArUco board and which inherits from the ```Board``` class.
This class, as the rest of the ChArUco functionalities, is defined in:
@snippet samples/tutorial_charuco_create_detect.cpp charucohdr
To define a ```CharucoBoard```, it is necessary to provide:
- Number of chessboard squares in X direction.
- Number of chessboard squares in Y direction.
- Length of square side.
- Length of marker side.
- The dictionary of the markers.
- Ids of all the markers.
As for the ```GridBoard``` objects, the aruco module makes it easy to create ```CharucoBoard``` objects. Consistently with the
sample included above, the board can be constructed directly from its dimensions and dictionary:
@code{.cpp}
cv::aruco::CharucoBoard board(cv::Size(5, 7), 0.04f, 0.02f, dictionary);
@endcode
- The first parameter is the number of chessboard squares in X and Y direction.
- The second and third parameters are the length of the squares and of the markers respectively. They can be provided
in any unit, having in mind that the estimated pose for this board would be measured in the same units (usually meters are used).
- Finally, the dictionary of the markers is provided.
The ids of each of the markers are assigned by default in ascending order starting at 0, like in ```GridBoard```.
This can be easily customized by providing your own list of marker ids when creating the board.
Once we have our ```CharucoBoard``` object, we can create an image to print it. This can be done with the
<code>CharucoBoard::generateImage()</code> method:
@snippet samples/tutorial_charuco_create_detect.cpp createBoard
- The first parameter is the size of the output image in pixels. In this case 600x500 pixels. If this is not proportional
to the board dimensions, it will be centered on the image.
- ```boardImage```: the output image with the board.
- The third parameter is the (optional) margin in pixels, so none of the markers are touching the image border.
In this case the margin is 10.
- Finally, the size of the marker border, similar to the ```generateImageMarker()``` function. The default value is 1.
The output image will be something like this:
![](images/charucoboard.png)
A full working example is included in `create_board_charuco.cpp` inside the `modules/aruco/samples/` folder.
Note: `create_board_charuco.cpp` now takes input via the command line using the [OpenCV CommandLineParser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like:
@code{.cpp}
"_output_path_/chboard.png" -w=5 -h=7 -sl=200 -ml=120 -d=10
@endcode
ChArUco Board Detection
------
When you detect a ChArUco board, what you are actually detecting is each of the chessboard corners of the board.
Each corner on a ChArUco board has a unique identifier (id) assigned. These ids go from 0 to the total number of corners in the board minus 1.
The detection of a ChArUco board can be broken down into the following steps:
- **Taking input Image**
@snippet samples/tutorial_charuco_create_detect.cpp inputImg
The original image where the markers are to be detected. The image is necessary to perform subpixel refinement in the ChArUco corners.
- **Reading the camera calibration Parameters(only for detection with camera calibration)**
@snippet samples/tutorial_charuco_create_detect.cpp matdiscoff
The parameters of readCameraParameters are:
- filename - the path to the camera calibration file (calib.txt in the sample), which is the output file generated by calibrate_camera_charuco.cpp
- cameraMatrix and distCoeffs - the optional camera calibration parameters
This function takes these parameters as input and returns a boolean indicating whether the camera calibration parameters are valid. For detection of corners without calibration, this step is not required.
- **Detecting the markers**
@snippet samples/tutorial_charuco_create_detect.cpp dictboard
@snippet samples/tutorial_charuco_create_detect.cpp midcornerdet
The parameters of detectMarkers are:
- image - Input image.
- dictionary - Pointer to the Dictionary/Set of Markers that will be searched.
- markerCorners - vector of detected marker corners.
- markerIds - vector of identifiers of the detected markers
- params - marker detection parameters
The detection of the ChArUco corners is based on the previously detected markers. So, first the markers are detected, and then the ChArUco corners are interpolated from them.
- **Interpolation of charuco corners from markers**
For detection with calibration
@snippet samples/tutorial_charuco_create_detect.cpp charidcor
For detection without calibration
@snippet samples/tutorial_charuco_create_detect.cpp charidcorwc
The function that detects the ChArUco corners is cv::aruco::interpolateCornersCharuco(). This function returns the number of interpolated ChArUco corners.
- ```std::vector<cv::Point2f> charucoCorners``` : list of image positions of the detected corners.
- ```std::vector<int> charucoIds``` : ids for each of the detected corners in ```charucoCorners```.
If calibration parameters are provided, the ChArUco corners are interpolated by, first, estimating a rough pose from the ArUco markers
and, then, reprojecting the ChArUco corners back to the image.
On the other hand, if calibration parameters are not provided, the ChArUco corners are interpolated by calculating the
corresponding homography between the ChArUco plane and the ChArUco image projection.
The main problem of using homography is that the interpolation is more sensitive to image distortion. Actually, the homography is only performed
using the closest markers of each ChArUco corner to reduce the effect of distortion.
When detecting markers for ChArUco boards, and especially when using homography, it is recommended to disable the corner refinement of markers. The reason for this
is that, due to the proximity of the chessboard squares, the subpixel process can produce significant
deviations in the corner positions, and these deviations are propagated to the ChArUco corner interpolation,
producing poor results.
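In code, this simply means leaving the corner refinement disabled in the detector parameters that are passed to ```detectMarkers()```, as the included sample does:
@code{.cpp}
cv::Ptr<cv::aruco::DetectorParameters> params = cv::makePtr<cv::aruco::DetectorParameters>();
params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE;
@endcode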
Furthermore, only those corners whose two surrounding markers have been found are returned. If any of the two surrounding markers has
not been detected, this usually means that there is some occlusion or the image quality is not good in that zone. In any case, it is
preferable not to consider that corner, since what we want is to be sure that the interpolated ChArUco corners are very accurate.
After the ChArUco corners have been interpolated, a subpixel refinement is performed.
Once we have interpolated the ChArUco corners, we would probably want to draw them to see if their detections are correct.
This can be easily done using the ```drawDetectedCornersCharuco()``` function:
@snippet samples/tutorial_charuco_create_detect.cpp detcor
- ```imageCopy``` is the image where the corners will be drawn (it will normally be the same image where the corners were detected).
- The corners are drawn directly on ```imageCopy```, so it also serves as the output image.
- ```charucoCorners``` and ```charucoIds``` are the detected Charuco corners from the ```interpolateCornersCharuco()``` function.
- Finally, the last parameter is the (optional) color we want to draw the corners with, of type ```cv::Scalar```.
For this image:
![Image with Charuco board](images/choriginal.jpg)
The result will be:
![Charuco board detected](images/chcorners.jpg)
In the presence of occlusion, like in the following image, although some corners are clearly visible, not all their surrounding markers have been detected due to occlusion and, thus, they are not interpolated:
![Charuco detection with occlusion](images/chocclusion.jpg)
Finally, this is a full example of ChArUco detection (without using calibration parameters):
@snippet samples/tutorial_charuco_create_detect.cpp detwc
Sample video:
@htmlonly
<iframe width="420" height="315" src="https://www.youtube.com/embed/Nj44m_N_9FY" frameborder="0" allowfullscreen></iframe>
@endhtmlonly
A full working example is included in `detect_board_charuco.cpp` inside the `modules/aruco/samples/` folder.
Note: The samples now take input via the command line using the [OpenCV CommandLineParser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like:
@code{.cpp}
-w=5 -h=7 -sl=0.04 -ml=0.02 -d=10
-v=/path_to_aruco_tutorials/charuco_detection/images/choriginal.jpg
@endcode
ChArUco Pose Estimation
------
The final goal of the ChArUco boards is finding corners very accurately for a high precision calibration or pose estimation.
The aruco module provides a function to perform ChArUco pose estimation easily. As in the ```GridBoard```, the coordinate system
of the ```CharucoBoard``` is placed in the board plane with the Z axis pointing out, and centered in the bottom left corner of the board.
The function for pose estimation is ```estimatePoseCharucoBoard()```:
@snippet samples/tutorial_charuco_create_detect.cpp pose
- The ```charucoCorners``` and ```charucoIds``` parameters are the detected charuco corners from the ```interpolateCornersCharuco()```
function.
- The third parameter is the ```CharucoBoard``` object.
- The ```cameraMatrix``` and ```distCoeffs``` are the camera calibration parameters which are necessary for pose estimation.
- Finally, the ```rvec``` and ```tvec``` parameters are the output pose of the Charuco Board.
- The function returns true if the pose was correctly estimated and false otherwise. The main reason for failure is that there are
not enough corners for pose estimation or that they are all in the same line.
The axes can be drawn using ```drawFrameAxes()``` to check that the pose is correctly estimated. The result would be: (X: red, Y: green, Z: blue)
![Charuco Board Axis](images/chaxis.jpg)
A full example of ChArUco detection with pose estimation:
@snippet samples/tutorial_charuco_create_detect.cpp detwcp
A full working example is included in `detect_board_charuco.cpp` inside the `modules/aruco/samples/` folder.
Note: The samples now take input via the command line using the [OpenCV CommandLineParser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like:
@code{.cpp}
-w=5 -h=7 -sl=0.04 -ml=0.02 -d=10 -dp="_path_/detector_params.yml"
-v=/path_to_aruco_tutorials/charuco_detection/images/choriginal.jpg
-c=/path_to_aruco_samples/tutorial_camera_charuco.yml
@endcode

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.

@ -1,182 +0,0 @@
Detection of Diamond Markers {#tutorial_charuco_diamond_detection}
==============================
@prev_tutorial{tutorial_charuco_detection}
@next_tutorial{tutorial_aruco_calibration}
A ChArUco diamond marker (or simply diamond marker) is a chessboard composed of 3x3 squares and 4 ArUco markers inside the white squares.
It is similar to a ChArUco board in appearance; however, they are conceptually different.
![Diamond marker examples](images/diamondmarkers.png)
In both ChArUco boards and diamond markers, detection is based on the previously detected ArUco
markers. In the ChArUco case, the markers to use are selected by looking directly at their identifiers. This means
that if a marker (included in the board) is found in an image, it is automatically assumed to belong to the board. Furthermore,
if a board marker is found more than once in the image, it produces an ambiguity since the system won't
be able to know which one should be used for the board.
On the other hand, the detection of diamond markers is not based on the identifiers. Instead, their detection
is based on the relative position of the markers. As a consequence, marker identifiers can be repeated in the
same diamond or among different diamonds, and they can be detected simultaneously without ambiguity. However,
due to the complexity of finding markers based on their relative position, diamond markers are limited to
a size of 3x3 squares and 4 markers.
As in a single ArUco marker, each diamond marker is composed of 4 corners and an identifier. The four corners
correspond to the 4 chessboard corners in the marker and the identifier is actually an array of 4 numbers, which are
the identifiers of the four ArUco markers inside the diamond.
Diamond markers are useful in those scenarios where repeated markers should be allowed. For instance:
- To increase the number of identifiers of single markers by using diamond markers for labeling. They allow
up to N^4 different ids, N being the number of markers in the used dictionary.
- To give each of the four markers a conceptual meaning. For instance, one of the four marker ids could be
used to indicate the scale of the marker (i.e. the size of the square), so that the same diamond can be found
in the environment with different sizes just by changing one of the four markers and the user does not need
to manually indicate the scale of each of them. This case is included in the ```detect_diamonds.cpp``` file inside
the samples folder of the module.
Furthermore, as its corners are chessboard corners, they can be used for accurate pose estimation.
The diamond functionalities are included in ```<opencv2/aruco/charuco.hpp>```
ChArUco Diamond Creation
------
The image of a diamond marker can be easily created using the ```drawCharucoDiamond()``` function.
For instance:
@code{.cpp}
cv::Mat diamondImage;
cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
cv::aruco::drawCharucoDiamond(dictionary, cv::Vec4i(45,68,28,74), 200, 120, diamondImage);
@endcode
This will create a diamond marker image with a square size of 200 pixels and a marker size of 120 pixels.
The marker ids are given in the second parameter as a ```Vec4i``` object. The order of the marker ids
in the diamond layout is the same as in a standard ChArUco board, i.e. top, left, right and bottom.
The image produced will be:
![Diamond marker](images/diamondmarker.png)
A full working example is included in `create_diamond.cpp` inside `modules/aruco/samples/`.
Note: The samples now take input from the command line using the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
@code{.cpp}
"_path_/mydiamond.png" -sl=200 -ml=120 -d=10 -ids=45,68,28,74
@endcode
ChArUco Diamond Detection
------
As in most cases, the detection of diamond markers requires a previous detection of ArUco markers.
After detecting the markers, diamonds are detected using the ```detectCharucoDiamond()``` function:
@code{.cpp}
cv::Mat inputImage;
float squareLength = 0.40;
float markerLength = 0.25;
...
std::vector<int> markerIds;
std::vector<std::vector< cv::Point2f>> markerCorners;
// detect ArUco markers
cv::aruco::detectMarkers(inputImage, dictionary, markerCorners, markerIds);
std::vector<cv::Vec4i> diamondIds;
std::vector<std::vector<cv::Point2f>> diamondCorners;
// detect diamond markers
cv::aruco::detectCharucoDiamond(inputImage, markerCorners, markerIds, squareLength / markerLength, diamondCorners, diamondIds);
@endcode
The ```detectCharucoDiamond()``` function receives the original image and the previously detected marker corners and ids.
The input image is necessary to perform subpixel refinement of the ChArUco corners.
It also receives the ratio between the square size and the marker size, which is required both for detecting the diamond
from the relative positions of the markers and for interpolating the ChArUco corners.
The function returns the detected diamonds in two parameters. The first parameter, ```diamondCorners```, is an array containing
the four corners of each detected diamond. Its format is similar to the corners detected by the ```detectMarkers()```
function and, for each diamond, the corners are represented in the same order as in the ArUco markers, i.e. clockwise order
starting with the top-left corner. The second returned parameter, ```diamondIds```, contains all the ids of the returned
diamond corners in ```diamondCorners```. Each id is actually an array of 4 integers that can be represented with ```Vec4i```.
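For illustration only (reusing the variable names assumed in the snippet above), each entry of ```diamondIds``` can be unpacked into its four ArUco ids like this:
@code{.cpp}
// Sketch only: print the four ArUco ids that form each detected diamond (requires <iostream>)
for (size_t i = 0; i < diamondIds.size(); i++) {
    const cv::Vec4i &id = diamondIds[i];
    std::cout << "diamond " << i << ": " << id[0] << ", " << id[1]
              << ", " << id[2] << ", " << id[3] << std::endl;
}
@endcode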
The detected diamond can be visualized using the function ```drawDetectedDiamonds()``` which simply receives the image and the diamond
corners and ids:
@code{.cpp}
...
std::vector<cv::Vec4i> diamondIds;
std::vector<std::vector<cv::Point2f>> diamondCorners;
cv::aruco::detectCharucoDiamond(inputImage, markerCorners, markerIds, squareLength / markerLength, diamondCorners, diamondIds);
cv::aruco::drawDetectedDiamonds(inputImage, diamondCorners, diamondIds);
@endcode
The result is the same as the one produced by ```drawDetectedMarkers()```, but printing the four ids of the diamond:
![Detected diamond markers](images/detecteddiamonds.png)
A full working example is included in `detect_diamonds.cpp` inside `modules/aruco/samples/`.
Note: The samples now take input from the command line using the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
@code{.cpp}
-dp="path_aruco/samples/detector_params.yml" -sl=0.04 -ml=0.012 -refine=3
-v="path_aruco/tutorials/charuco_diamond_detection/images/diamondmarkers.png"
-cd="path_aruco/samples/tutorial_dict.yml
@endcode
ChArUco Diamond Pose Estimation
------
Since a ChArUco diamond is represented by its four corners, its pose can be estimated in the same way as for a single ArUco marker,
i.e. using the ```estimatePoseSingleMarkers()``` function. For instance:
@code{.cpp}
...
std::vector<cv::Vec4i> diamondIds;
std::vector<std::vector<cv::Point2f>> diamondCorners;
// detect diamond markers
cv::aruco::detectCharucoDiamond(inputImage, markerCorners, markerIds, squareLength / markerLength, diamondCorners, diamondIds);
// estimate poses
std::vector<cv::Vec3d> rvecs, tvecs;
cv::aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, camMatrix, distCoeffs, rvecs, tvecs);
// draw axis
for(unsigned int i=0; i<rvecs.size(); i++)
cv::drawFrameAxes(inputImage, camMatrix, distCoeffs, rvecs[i], tvecs[i], axisLength);
@endcode
The function will obtain the rotation and translation vector for each of the diamond markers and store them
in ```rvecs``` and ```tvecs```. Note that the diamond corners are chessboard square corners and, thus, the square length
has to be provided for pose estimation, not the marker length. Camera calibration parameters are also required.
Finally, the axes can be drawn using ```drawFrameAxes()``` to check that the estimated pose is correct:
![Detected diamond axis](images/diamondsaxis.jpg)
The coordinate system of the diamond pose will be in the center of the marker with the Z axis pointing out,
as in a simple ArUco marker pose estimation.
Sample video:
@htmlonly
<iframe width="420" height="315" src="https://www.youtube.com/embed/OqKpBnglH7k" frameborder="0" allowfullscreen></iframe>
@endhtmlonly
A full working example is included in `detect_diamonds.cpp` inside `modules/aruco/samples/`.
Note: The samples now take input from the command line using the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
@code{.cpp}
-dp="path_aruco/samples/detector_params.yml" -sl=0.04 -ml=0.012 -refine=3
-v="path_aruco/tutorials/charuco_diamond_detection/images/diamondmarkers.png"
-cd="path_aruco/samples/tutorial_dict.yml
-c="path_aruco/samples/tutorial_camera_params.yml"
@endcode

@ -1,65 +0,0 @@
ArUco marker detection (aruco module) {#tutorial_table_of_content_aruco}
==========================================================
ArUco markers are binary square fiducial markers that can be used for camera pose estimation.
Their main benefit is that their detection is robust, fast and simple.
The aruco module includes the detection of these types of markers and the tools to employ them
for pose estimation and camera calibration.
Also, the ChArUco functionalities combine ArUco markers with traditional chessboards to allow
an easy and versatile corner detection. The module also includes the functions to detect
ChArUco corners and use them for pose estimation and camera calibration.
If you are going to print out the markers, a useful script/GUI tool is placed at
opencv_contrib/modules/aruco/misc/pattern_generator/ that can generate vector graphics
of ArUco, ArUcoGrid and ChArUco boards. It can help you print out the pattern at real size
and without artifacts.
- @subpage tutorial_aruco_detection
*Compatibility:* \> OpenCV 3.0
*Author:* Sergio Garrido, Steve Nicholson
Basic detection and pose estimation from single ArUco markers.
- @subpage tutorial_aruco_board_detection
*Compatibility:* \> OpenCV 3.0
*Author:* Sergio Garrido
Detection and pose estimation using a Board of markers
- @subpage tutorial_charuco_detection
*Compatibility:* \> OpenCV 3.0
*Author:* Sergio Garrido
Basic detection using ChArUco corners
- @subpage tutorial_charuco_diamond_detection
*Compatibility:* \> OpenCV 3.0
*Author:* Sergio Garrido
Detection and pose estimation using ChArUco markers
- @subpage tutorial_aruco_calibration
*Compatibility:* \> OpenCV 3.0
*Author:* Sergio Garrido
Camera Calibration using ArUco and ChArUco boards
- @subpage tutorial_aruco_faq
*Compatibility:* \> OpenCV 3.0
*Author:* Sergio Garrido
General and useful questions about the aruco module