Merge branch 4.x

pull/25041/head
Alexander Smorkalov 10 months ago
commit f084a229b4
1. 3rdparty/ippicv/ippicv.cmake (18 changed lines)
2. 3rdparty/tbb/CMakeLists.txt (2)
3. modules/3d/include/opencv2/3d.hpp (39)
4. modules/3d/misc/objc/gen_dict.json (5)
5. modules/3d/src/fisheye.cpp (11)
6. modules/3d/src/usac/utils.cpp (3)
7. modules/3d/test/test_fisheye.cpp (28)
8. modules/calib/misc/objc/gen_dict.json (3)
9. modules/calib/src/calibinit.cpp (12)
10. modules/calib/src/fisheye.cpp (2)
11. modules/calib/test/test_fisheye.cpp (17)
12. modules/dnn/src/init.cpp (1)
13. modules/dnn/src/int8layers/elementwise_layers.cpp (2)
14. modules/dnn/src/tflite/tflite_importer.cpp (2)
15. modules/flann/include/opencv2/flann/dist.h (2)
16. modules/gapi/src/backends/ov/govbackend.cpp (23)
17. modules/gapi/test/infer/gapi_infer_ov_tests.cpp (181)
18. modules/imgcodecs/test/test_tiff.cpp (48)
19. modules/objdetect/include/opencv2/objdetect/aruco_detector.hpp (2)
20. modules/objdetect/perf/perf_barcode.cpp (6)
21. modules/objdetect/src/barcode.cpp (4)
22. modules/objdetect/src/qrcode.cpp (36)
23. modules/objdetect/test/test_barcode.cpp (7)
24. modules/objdetect/test/test_qrcode.cpp (65)
25. modules/python/src2/typing_stubs_generation/predefined_types.py (2)
26. platforms/android/build_sdk.py (9)

@@ -2,7 +2,7 @@ function(download_ippicv root_var)
 set(${root_var} "" PARENT_SCOPE)
 # Commit SHA in the opencv_3rdparty repo
-set(IPPICV_COMMIT "0cc4aa06bf2bef4b05d237c69a5a96b9cd0cb85a")
+set(IPPICV_COMMIT "c7c6d527dde5fee7cb914ee9e4e20f7436aab3a1")
 # Define actual ICV versions
 if(APPLE)
 set(OPENCV_ICV_PLATFORM "macosx")
@@ -13,21 +13,21 @@ function(download_ippicv root_var)
 set(OPENCV_ICV_PLATFORM "linux")
 set(OPENCV_ICV_PACKAGE_SUBDIR "ippicv_lnx")
 if(X86_64)
-set(OPENCV_ICV_NAME "ippicv_2021.10.0_lnx_intel64_20230919_general.tgz")
-set(OPENCV_ICV_HASH "606a19b207ebedfe42d59fd916cc4850")
+set(OPENCV_ICV_NAME "ippicv_2021.10.1_lnx_intel64_20231206_general.tgz")
+set(OPENCV_ICV_HASH "90884d3b9508f31f6a154165591b8b0b")
 else()
-set(OPENCV_ICV_NAME "ippicv_2021.10.0_lnx_ia32_20230919_general.tgz")
-set(OPENCV_ICV_HASH "ea08487b810baad2f68aca87b74a2db9")
+set(OPENCV_ICV_NAME "ippicv_2021.10.1_lnx_ia32_20231206_general.tgz")
+set(OPENCV_ICV_HASH "d9510f3ce08f6074aac472a5c19a3b53")
 endif()
 elseif(WIN32 AND NOT ARM)
 set(OPENCV_ICV_PLATFORM "windows")
 set(OPENCV_ICV_PACKAGE_SUBDIR "ippicv_win")
 if(X86_64)
-set(OPENCV_ICV_NAME "ippicv_2021.10.0_win_intel64_20230919_general.zip")
-set(OPENCV_ICV_HASH "538a819ec84193a9c9f3c0f8df0be8b7")
+set(OPENCV_ICV_NAME "ippicv_2021.10.1_win_intel64_20231206_general.zip")
+set(OPENCV_ICV_HASH "2d5f137d4dd8a5205cc1edb5616fb3da")
 else()
-set(OPENCV_ICV_NAME "ippicv_2021.10.0_win_ia32_20230919_general.zip")
-set(OPENCV_ICV_HASH "8ff93c69415ab0835cc1e94dc5660f5d")
+set(OPENCV_ICV_NAME "ippicv_2021.10.1_win_ia32_20231206_general.zip")
+set(OPENCV_ICV_HASH "63c41a943e93ca87541b71ab67f207b5")
 endif()
 else()
 return()

@@ -100,6 +100,8 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS
 -Wimplicit-fallthrough # TBB 2018 under GCC 7+
 -Wmissing-prototypes # MacOSX, Android/Clang
 -Wundef -Wmissing-declarations # TBB 2019
+-Wnon-virtual-dtor # oneTBB-2020.2 Android
+-Wunused-but-set-variable # oneTBB-2020.2 Android
 )
 set(TBB_SOURCE_FILES ${lib_srcs} ${lib_hdrs})

@@ -2603,6 +2603,45 @@ of image, we can notice that on image a) these points are distorted.
 CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted,
 InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size());
+/**
+@brief Finds an object pose from 3D-2D point correspondences for the fisheye camera model.
+@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
+1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
+@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
+where N is the number of points. vector\<Point2d\> can be also passed here.
+@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
+@param distCoeffs Input vector of distortion coefficients (4x1/1x4).
+@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
+the model coordinate system to the camera coordinate system.
+@param tvec Output translation vector.
+@param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
+the provided rvec and tvec values as initial approximations of the rotation and translation
+vectors, respectively, and further optimizes them.
+@param flags Method for solving a PnP problem: see @ref calib3d_solvePnP_flags
+This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
+coordinate frame to the camera coordinate frame, using different methods:
+- P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): need 4 input points to return a unique solution.
+- @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
+- @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
+Number of input points must be 4. Object points must be defined in the following order:
+  - point 0: [-squareLength / 2, squareLength / 2, 0]
+  - point 1: [ squareLength / 2, squareLength / 2, 0]
+  - point 2: [ squareLength / 2, -squareLength / 2, 0]
+  - point 3: [-squareLength / 2, -squareLength / 2, 0]
+- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
+@param criteria Termination criteria for the internal undistortPoints call.
+The function internally undistorts points with @ref undistortPoints and calls @ref cv::solvePnP,
+so the inputs are very similar. Check there, and @ref calib3d_solvePnP where the Perspective-n-Point
+problem is described, for more information.
+*/
+CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
+InputArray cameraMatrix, InputArray distCoeffs,
+OutputArray rvec, OutputArray tvec,
+bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE,
+TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 1e-8)
+);
 } // namespace fisheye
 /** @brief Octree for 3D vision.
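For reference, a minimal sketch of how the new fisheye overload can be called. The points, intrinsics, and distortion values below are made-up placeholders, not from this patch:

```cpp
#include <opencv2/3d.hpp>
#include <vector>

int main() {
    // Hypothetical coplanar target in the object frame.
    std::vector<cv::Point3d> obj = {{0, 0, 0}, {1, 0, 0}, {1, 1, 0}, {0, 1, 0}};
    // Corresponding (already detected) projections in the fisheye image.
    std::vector<cv::Point2d> img = {{320, 240}, {420, 242}, {418, 338}, {322, 336}};

    cv::Matx33d K(500, 0, 320,
                  0, 500, 240,
                  0, 0, 1);                // pinhole part of the fisheye model
    cv::Vec4d D(0.01, -0.005, 0.001, 0.0); // fisheye distortion k1..k4

    cv::Mat rvec, tvec;
    bool ok = cv::fisheye::solvePnP(obj, img, K, D, rvec, tvec);
    // On success, rvec/tvec bring object-frame points into the camera frame.
    return ok ? 0 : 1;
}
```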

@@ -0,0 +1,5 @@
+{
+"namespaces_dict": {
+"cv.fisheye": "fisheye"
+}
+}

@@ -593,4 +593,15 @@ void cv::fisheye::undistortImage(InputArray distorted, OutputArray undistorted,
 cv::remap(distorted, undistorted, map1, map2, INTER_LINEAR, BORDER_CONSTANT);
 }
+bool cv::fisheye::solvePnP( InputArray opoints, InputArray ipoints,
+InputArray cameraMatrix, InputArray distCoeffs,
+OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess,
+int flags, TermCriteria criteria)
+{
+Mat imagePointsNormalized;
+cv::fisheye::undistortPoints(ipoints, imagePointsNormalized, cameraMatrix, distCoeffs, noArray(), cameraMatrix, criteria);
+return cv::solvePnP(opoints, imagePointsNormalized, cameraMatrix, noArray(), rvec, tvec, useExtrinsicGuess, flags);
+}
 } // namespace cv

@@ -169,6 +169,9 @@ public:
 int getRealRoots (const std::vector<double> &coeffs, std::vector<double> &real_roots) override {
 if (coeffs.empty())
 return 0;
+for (auto c : coeffs)
+if (cvIsNaN(c) || cvIsInf(c))
+return 0;
 Poly input(coeffs);
 if (input.degree() < 1)
 return 0;
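The guard above encodes a simple invariant: a polynomial with a NaN or Inf coefficient cannot be evaluated meaningfully, so root finding must bail out before touching it. A standalone illustration of the same check, using std::isnan/std::isinf in place of OpenCV's cvIsNaN/cvIsInf:

```cpp
#include <cassert>
#include <cmath>
#include <limits>
#include <vector>

// Returns false when any coefficient is NaN or infinite,
// mirroring the early-out added to getRealRoots() above.
static bool coeffsAreFinite(const std::vector<double>& coeffs) {
    for (double c : coeffs)
        if (std::isnan(c) || std::isinf(c))
            return false;
    return true;
}

int main() {
    assert(coeffsAreFinite({1.0, -3.0, 2.0}));  // x^2 - 3x + 2: fine
    assert(!coeffsAreFinite({1.0, std::numeric_limits<double>::quiet_NaN()}));
    assert(!coeffsAreFinite({1.0, std::numeric_limits<double>::infinity()}));
}
```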

@@ -129,6 +129,34 @@ TEST_F(fisheyeTest, distortUndistortPoints)
 }
 }
+TEST_F(fisheyeTest, solvePnP)
+{
+const int n = 16;
+const cv::Matx33d R_mat ( 9.9756700084424932e-01, 6.9698277640183867e-02, 1.4929569991321144e-03,
+-6.9711825162322980e-02, 9.9748249845531767e-01, 1.2997180766418455e-02,
+-5.8331736398316541e-04,-1.3069635393884985e-02, 9.9991441852366736e-01);
+const cv::Vec3d T(-9.9217369356044638e-02, 3.1741831972356663e-03, 1.8551007952921010e-04);
+cv::Mat obj_points(1, n, CV_64FC3);
+theRNG().fill(obj_points, cv::RNG::NORMAL, 2, 1);
+obj_points = cv::abs(obj_points) * 10;
+cv::Mat R;
+cv::Rodrigues(R_mat, R);
+cv::Mat img_points;
+cv::fisheye::projectPoints(obj_points, img_points, R, T, this->K, this->D);
+cv::Mat rvec_pred;
+cv::Mat tvec_pred;
+bool converged = cv::fisheye::solvePnP(obj_points, img_points, this->K, this->D, rvec_pred, tvec_pred);
+EXPECT_MAT_NEAR(R, rvec_pred, 1e-6);
+EXPECT_MAT_NEAR(T, tvec_pred, 1e-6);
+ASSERT_TRUE(converged);
+}
 TEST_F(fisheyeTest, undistortImage)
 {
 // we use it to reduce patch size for images in testdata

@@ -1,4 +1,7 @@
 {
+"namespaces_dict": {
+"cv.fisheye": "fisheye"
+},
 "func_arg_fix" : {
 "Calib" : {
 "findCirclesGrid" : { "blobDetector" : {"defval" : "cv::SimpleBlobDetector::create()"} }

@@ -531,14 +531,14 @@ bool findChessboardCorners(InputArray image_, Size pattern_size,
 const int min_dilations = 0;
 const int max_dilations = is_plain ? 0 : 7;
-// Try our standard "1" dilation, but if the pattern is not found, iterate the whole procedure with higher dilations.
-// This is necessary because some squares simply do not separate properly with a single dilation. However,
+// Try our standard "0" and "1" dilations, but if the pattern is not found, iterate the whole procedure with higher dilations.
+// This is necessary because some squares simply do not separate properly with zero or a single dilation. However,
 // we want to use the minimum number of dilations possible since dilations cause the squares to become smaller,
 // making it difficult to detect smaller squares.
 for (int dilations = min_dilations; dilations <= max_dilations; dilations++)
 {
 //USE BINARY IMAGE COMPUTED USING icvBinarizationHistogramBased METHOD
-if(!is_plain)
+if(!is_plain && dilations > 0)
 dilate( thresh_img_new, thresh_img_new, Mat(), Point(-1, -1), 1 );
 // So we can find rectangles that go to the edge, we draw a white line around the image edge.
@@ -596,13 +596,13 @@ bool findChessboardCorners(InputArray image_, Size pattern_size,
 block_size = block_size | 1;
 // convert to binary
 adaptiveThreshold( img, thresh_img, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, block_size, (k/2)*5 );
-if (dilations > 0)
-dilate( thresh_img, thresh_img, Mat(), Point(-1, -1), dilations-1 );
+dilate( thresh_img, thresh_img, Mat(), Point(-1, -1), dilations );
 }
 else
 {
-dilate( thresh_img, thresh_img, Mat(), Point(-1, -1), 1 );
+if (dilations > 0)
+dilate( thresh_img, thresh_img, Mat(), Point(-1, -1), 1 );
 }
 SHOW("Old binarization", thresh_img);
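Read as a schedule, the change means pass 0 now works on the untouched binarized image, and each later pass has seen exactly `dilations` cumulative 3x3 dilation steps. A schematic sketch of that loop shape (the pattern search itself is elided; findPattern is a hypothetical stand-in):

```cpp
#include <opencv2/imgproc.hpp>

// Sketch of the cumulative-dilation retry loop used by the detector:
// pass d operates on an image dilated d times in total (pass 0: none).
bool findWithDilations(cv::Mat thresh, int max_dilations) {
    for (int d = 0; d <= max_dilations; d++) {
        if (d > 0)
            cv::dilate(thresh, thresh, cv::Mat(), cv::Point(-1, -1), 1);
        // if (findPattern(thresh)) return true;  // hypothetical detection step
    }
    return false;
}
```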

@@ -110,7 +110,7 @@ double cv::fisheye::calibrate(InputArrayOfArrays objectPoints, InputArrayOfArray
 }
 else
 {
-finalParam.Init(Vec2d(max(image_size.width, image_size.height) / CV_PI, max(image_size.width, image_size.height) / CV_PI),
+finalParam.Init(Vec2d(max(image_size.width, image_size.height) / 2., max(image_size.width, image_size.height) / 2.),
 Vec2d(image_size.width / 2.0 - 0.5, image_size.height / 2.0 - 0.5));
 }

@@ -100,6 +100,11 @@ TEST_F(fisheyeTest, Calibration)
 {
 const int n_images = 34;
+const cv::Matx33d goldK(558.4780870585967, 0, 620.4585053962692,
+0, 560.5067667343917, 381.9394122875291,
+0, 0, 1);
+const cv::Vec4d goldD(-0.00146136, -0.00329847, 0.00605742, -0.00374201);
 std::vector<std::vector<cv::Point2d> > imagePoints(n_images);
 std::vector<std::vector<cv::Point3d> > objectPoints(n_images);
@@ -127,8 +132,8 @@ TEST_F(fisheyeTest, Calibration)
 cv::fisheye::calibrate(objectPoints, imagePoints, imageSize, theK, theD,
 cv::noArray(), cv::noArray(), flag, cv::TermCriteria(3, 20, 1e-6));
-EXPECT_MAT_NEAR(theK, this->K, 1e-10);
-EXPECT_MAT_NEAR(theD, this->D, 1e-10);
+EXPECT_MAT_NEAR(theK, goldK, 1e-8);
+EXPECT_MAT_NEAR(theD, goldD, 1e-8);
 }
 TEST_F(fisheyeTest, CalibrationWithFixedFocalLength)
@@ -287,10 +292,10 @@ TEST_F(fisheyeTest, EstimateUncertainties)
 cv::internal::EstimateUncertainties(objectPoints, imagePoints, param, rvec, tvec,
 errors, err_std, thresh_cond, check_cond, rms);
-EXPECT_MAT_NEAR(errors.f, cv::Vec2d(1.34250246865020720, 1.36037536429654530), 1e-10);
-EXPECT_MAT_NEAR(errors.c, cv::Vec2d(0.92070526160049848, 0.84383585812851514), 1e-10);
-EXPECT_MAT_NEAR(errors.k, cv::Vec4d(0.0053379581373996041, 0.017389792901700545, 0.022036256089491224, 0.0094714594258908952), 1e-10);
-EXPECT_MAT_NEAR(err_std, cv::Vec2d(0.187475975266883, 0.185678953263995), 1e-10);
+EXPECT_MAT_NEAR(errors.f, cv::Vec2d(1.34250246865020720, 1.36037536429654530), 1e-6);
+EXPECT_MAT_NEAR(errors.c, cv::Vec2d(0.92070526160049848, 0.84383585812851514), 1e-6);
+EXPECT_MAT_NEAR(errors.k, cv::Vec4d(0.0053379581373996041, 0.017389792901700545, 0.022036256089491224, 0.0094714594258908952), 1e-7);
+EXPECT_MAT_NEAR(err_std, cv::Vec2d(0.187475975266883, 0.185678953263995), 1e-7);
 CV_Assert(fabs(rms - 0.263782587133546) < 1e-10);
 CV_Assert(errors.alpha == 0);
 }

@@ -212,6 +212,7 @@ void initializeLayerFactory()
 CV_DNN_REGISTER_LAYER_CLASS(SigmoidInt8, ActivationLayerInt8);
 CV_DNN_REGISTER_LAYER_CLASS(TanHInt8, ActivationLayerInt8);
 CV_DNN_REGISTER_LAYER_CLASS(SwishInt8, ActivationLayerInt8);
+CV_DNN_REGISTER_LAYER_CLASS(HardSwishInt8, ActivationLayerInt8);
 CV_DNN_REGISTER_LAYER_CLASS(MishInt8, ActivationLayerInt8);
 CV_DNN_REGISTER_LAYER_CLASS(ELUInt8, ActivationLayerInt8);
 CV_DNN_REGISTER_LAYER_CLASS(BNLLInt8, ActivationLayerInt8);

@@ -267,6 +267,8 @@ public:
 res = std::make_shared<ngraph::op::Elu>(input, 1.0f);
 } else if (type == "MishInt8") {
 res = std::make_shared<ngraph::op::v4::Mish>(input);
+} else if (type == "HardSwishInt8") {
+res = std::make_shared<ngraph::op::v4::HSwish>(input);
 } else if (type == "AbsValInt8") {
 res = std::make_shared<ngraph::op::Abs>(input);
 } else if (type == "SigmoidInt8") {

@@ -939,6 +939,8 @@ void TFLiteImporter::parseActivation(const Operator& op, const std::string& opco
 y = std::min(std::max(x, 0.f), 6.f);
 else if (opcode == "LOGISTIC")
 y = 1.0f / (1.0f + std::exp(-x));
+else if (opcode == "HARD_SWISH")
+y = x * max(0.f, min(1.f, x / 6.f + 0.5f));
 else
 CV_Error(Error::StsNotImplemented, "Lookup table for " + opcode);
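The added entry is the standard hard-swish definition, y = x * clip(x/6 + 1/2, 0, 1). A quick standalone check of its three regimes (a sketch, not the importer code):

```cpp
#include <algorithm>
#include <cassert>
#include <cmath>

// Reference hard-swish: x times a hard sigmoid, matching the table entry above.
static float hard_swish(float x) {
    return x * std::max(0.f, std::min(1.f, x / 6.f + 0.5f));
}

int main() {
    assert(hard_swish(-4.f) == 0.f);  // gate saturates at 0 for x <= -3
    assert(hard_swish(4.f) == 4.f);   // gate saturates at 1 for x >= 3
    assert(std::fabs(hard_swish(1.f) - (1.f / 6.f + 0.5f)) < 1e-6f); // linear region
}
```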

@@ -1,4 +1,4 @@
-/***********************************************************************
+/***********************************************************************
 * Software License Agreement (BSD License)
 *
 * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.

@@ -794,13 +794,24 @@ public:
 }
 }
-void cfgScaleMean(const std::string &input_name) {
+void cfgScaleMean(const std::string &input_name,
+const GMetaArg &input_meta) {
 auto &input_info = m_ppp.input(input_name);
 const auto mean_vec = lookUp(m_mean_values, input_name);
+const auto scale_vec = lookUp(m_scale_values, input_name);
+if (mean_vec || scale_vec) {
+GAPI_Assert(cv::util::holds_alternative<cv::GMatDesc>(input_meta));
+const auto depth = cv::util::get<cv::GMatDesc>(input_meta).depth;
+const bool depth_is_real = (depth == CV_32F) || (depth == CV_16F);
+if (!depth_is_real) {
+input_info.preprocess().convert_element_type(toOV(CV_32F));
+}
+}
 if (mean_vec) {
 input_info.preprocess().mean(*mean_vec);
 }
-const auto scale_vec = lookUp(m_scale_values, input_name);
 if (scale_vec) {
 input_info.preprocess().scale(*scale_vec);
 }
@@ -974,7 +985,7 @@ struct Infer: public cv::detail::KernelTag {
 ppp.cfgLayouts(input_name);
 ppp.cfgPreProcessing(input_name, mm);
-ppp.cfgScaleMean(input_name);
+ppp.cfgScaleMean(input_name, mm);
 }
 ppp.cfgPostProcessing();
 ppp.finalize();
@@ -1062,7 +1073,7 @@ struct InferROI: public cv::detail::KernelTag {
 ppp.cfgLayouts(input_name);
 ppp.cfgPreProcessing(input_name, mm, true /*disable_img_resize*/);
-ppp.cfgScaleMean(input_name);
+ppp.cfgScaleMean(input_name, mm);
 ppp.cfgPostProcessing();
 ppp.finalize();
 }
@@ -1148,7 +1159,7 @@ struct InferList: public cv::detail::KernelTag {
 ppp.cfgLayouts(input_name);
 ppp.cfgPreProcessing(input_name, mm, true /*disable_img_resize*/);
-ppp.cfgScaleMean(input_name);
+ppp.cfgScaleMean(input_name, mm);
 }
 ppp.cfgPostProcessing();
 ppp.finalize();
@@ -1267,7 +1278,7 @@ struct InferList2: public cv::detail::KernelTag {
 GAPI_Assert(op.k.inKinds[idx] == cv::detail::OpaqueKind::CV_MAT);
 }
-ppp.cfgScaleMean(input_name);
+ppp.cfgScaleMean(input_name, mm_0);
 idx++; // NB: Never forget to increment the counter
 }
 ppp.cfgPostProcessing();

@@ -657,6 +657,187 @@ TEST_F(TestAgeGenderListOV, InferList2Generic_Image) {
 validate();
 }
+static ov::element::Type toOV(int depth) {
+switch (depth) {
+case CV_8U: return ov::element::u8;
+case CV_32S: return ov::element::i32;
+case CV_32F: return ov::element::f32;
+case CV_16F: return ov::element::f16;
+default: GAPI_Error("OV Backend: Unsupported data type");
+}
+return ov::element::undefined;
+}
+struct TestMeanScaleOV : public ::testing::TestWithParam<int>{
+G_API_NET(IdentityNet, <cv::GMat(cv::GMat)>, "test-identity-net");
+static cv::GComputation create() {
+cv::GMat in;
+cv::GMat out;
+out = cv::gapi::infer<IdentityNet>(in);
+return cv::GComputation{cv::GIn(in), cv::GOut(out)};
+}
+using Params = cv::gapi::ov::Params<IdentityNet>;
+static Params params(const std::string &xml_path,
+const std::string &bin_path,
+const std::string &device) {
+return Params {
+xml_path, bin_path, device
+}.cfgInputModelLayout("NHWC")
+.cfgOutputLayers({ "output" });
+}
+TestMeanScaleOV() {
+initDLDTDataPath();
+m_model_path = findDataFile("gapi/ov/identity_net_100x100.xml");
+m_weights_path = findDataFile("gapi/ov/identity_net_100x100.bin");
+m_device_id = "CPU";
+m_ov_model = cv::gapi::ov::wrap::getCore()
+.read_model(m_model_path, m_weights_path);
+auto input_depth = GetParam();
+auto input = cv::imread(findDataFile("gapi/gapi_logo.jpg"));
+input.convertTo(m_in_mat, input_depth);
+}
+void addPreprocToOV(
+std::function<void(ov::preprocess::PrePostProcessor&)> f) {
+auto input_depth = GetParam();
+ov::preprocess::PrePostProcessor ppp(m_ov_model);
+ppp.input().tensor().set_layout(ov::Layout("NHWC"))
+.set_element_type(toOV(input_depth))
+.set_shape({ 1, 100, 100, 3 });
+ppp.input().model().set_layout(ov::Layout("NHWC"));
+f(ppp);
+m_ov_model = ppp.build();
+}
+void runOV() {
+auto compiled_model = cv::gapi::ov::wrap::getCore()
+.compile_model(m_ov_model, m_device_id);
+auto infer_request = compiled_model.create_infer_request();
+auto input_tensor = infer_request.get_input_tensor();
+cv::gapi::ov::util::to_ov(m_in_mat, input_tensor);
+infer_request.infer();
+auto out_tensor = infer_request.get_tensor("output");
+m_out_mat_ov.create(cv::gapi::ov::util::to_ocv(out_tensor.get_shape()),
+cv::gapi::ov::util::to_ocv(out_tensor.get_element_type()));
+cv::gapi::ov::util::to_ocv(out_tensor, m_out_mat_ov);
+}
+std::string m_model_path;
+std::string m_weights_path;
+std::string m_device_id;
+std::shared_ptr<ov::Model> m_ov_model;
+cv::Mat m_in_mat;
+cv::Mat m_out_mat_gapi;
+cv::Mat m_out_mat_ov;
+};
+TEST_P(TestMeanScaleOV, Mean)
+{
+int input_depth = GetParam();
+std::vector<float> mean_values{ 220.1779, 218.9857, 217.8986 };
+// Run OV reference pipeline:
+{
+addPreprocToOV([&](ov::preprocess::PrePostProcessor& ppp) {
+if (input_depth == CV_8U || input_depth == CV_32S) {
+ppp.input().preprocess().convert_element_type(ov::element::f32);
+}
+ppp.input().preprocess().mean(mean_values);
+});
+runOV();
+}
+// Run G-API
+GComputation comp = create();
+auto pp = params(m_model_path, m_weights_path, m_device_id);
+pp.cfgMean(mean_values);
+comp.apply(cv::gin(m_in_mat), cv::gout(m_out_mat_gapi),
+cv::compile_args(cv::gapi::networks(pp)));
+// Validate OV results against G-API ones:
+normAssert(m_out_mat_ov, m_out_mat_gapi, "Test output");
+}
+TEST_P(TestMeanScaleOV, Scale)
+{
+int input_depth = GetParam();
+std::vector<float> scale_values{ 2., 2., 2. };
+// Run OV reference pipeline:
+{
+addPreprocToOV([&](ov::preprocess::PrePostProcessor& ppp) {
+if (input_depth == CV_8U || input_depth == CV_32S) {
+ppp.input().preprocess().convert_element_type(ov::element::f32);
+}
+ppp.input().preprocess().scale(scale_values);
+});
+runOV();
+}
+// Run G-API
+GComputation comp = create();
+auto pp = params(m_model_path, m_weights_path, m_device_id);
+pp.cfgScale(scale_values);
+comp.apply(cv::gin(m_in_mat), cv::gout(m_out_mat_gapi),
+cv::compile_args(cv::gapi::networks(pp)));
+// Validate OV results against G-API ones:
+normAssert(m_out_mat_ov, m_out_mat_gapi, "Test output");
+}
+TEST_P(TestMeanScaleOV, MeanAndScale)
+{
+int input_depth = GetParam();
+std::vector<float> mean_values{ 220.1779, 218.9857, 217.8986 };
+std::vector<float> scale_values{ 2., 2., 2. };
+// Run OV reference pipeline:
+{
+addPreprocToOV([&](ov::preprocess::PrePostProcessor& ppp) {
+if (input_depth == CV_8U || input_depth == CV_32S) {
+ppp.input().preprocess().convert_element_type(ov::element::f32);
+}
+ppp.input().preprocess().mean(mean_values);
+ppp.input().preprocess().scale(scale_values);
+});
+runOV();
+}
+// Run G-API
+GComputation comp = create();
+auto pp = params(m_model_path, m_weights_path, m_device_id);
+pp.cfgMean(mean_values);
+pp.cfgScale(scale_values);
+comp.apply(cv::gin(m_in_mat), cv::gout(m_out_mat_gapi),
+cv::compile_args(cv::gapi::networks(pp)));
+// Validate OV results against G-API ones:
+normAssert(m_out_mat_ov, m_out_mat_gapi, "Test output");
+}
+INSTANTIATE_TEST_CASE_P(Instantiation, TestMeanScaleOV,
+Values(CV_8U, CV_32S, CV_16F, CV_32F));
 } // namespace opencv_test
 #endif // HAVE_INF_ENGINE && INF_ENGINE_RELEASE >= 2022010000

@@ -14,38 +14,6 @@ namespace opencv_test { namespace {
 #define int64 int64_hack_
 #include "tiff.h"
-// Re-define Mat type as enum for showing on Google Test.
-enum CV_ddtCn{
-_CV_8UC1 = CV_8UC1, _CV_8UC3 = CV_8UC3, _CV_8UC4 = CV_8UC4,
-_CV_8SC1 = CV_8SC1, _CV_8SC3 = CV_8SC3, _CV_8SC4 = CV_8SC4,
-_CV_16UC1 = CV_16UC1, _CV_16UC3 = CV_16UC3, _CV_16UC4 = CV_16UC4,
-_CV_16SC1 = CV_16SC1, _CV_16SC3 = CV_16SC3, _CV_16SC4 = CV_16SC4,
-_CV_32SC1 = CV_32SC1, _CV_32SC3 = CV_32SC3, _CV_32SC4 = CV_32SC4,
-_CV_16FC1 = CV_16FC1, _CV_16FC3 = CV_16FC3, _CV_16FC4 = CV_16FC4,
-_CV_32FC1 = CV_32FC1, _CV_32FC3 = CV_32FC3, _CV_32FC4 = CV_32FC4,
-_CV_64FC1 = CV_64FC1, _CV_64FC3 = CV_64FC3, _CV_64FC4 = CV_64FC4,
-};
-static inline
-void PrintTo(const CV_ddtCn& val, std::ostream* os)
-{
-const int val_type = static_cast<int>(val);
-switch ( CV_MAT_DEPTH(val_type) )
-{
-case CV_8U : *os << "CV_8U" ; break;
-case CV_16U : *os << "CV_16U" ; break;
-case CV_8S : *os << "CV_8S" ; break;
-case CV_16S : *os << "CV_16S" ; break;
-case CV_32S : *os << "CV_32S" ; break;
-case CV_16F : *os << "CV_16F" ; break;
-case CV_32F : *os << "CV_32F" ; break;
-case CV_64F : *os << "CV_64F" ; break;
-default : *os << "CV_???" ; break;
-}
-*os << "C" << CV_MAT_CN(val_type);
-}
 #ifdef __ANDROID__
 // Test disabled as it uses a lot of memory.
 // It is killed with SIGKILL by out of memory killer.
@@ -874,7 +842,7 @@ TEST(Imgcodecs_Tiff, readWrite_predictor)
 // See https://github.com/opencv/opencv/issues/23416
-typedef std::pair<CV_ddtCn,bool> Imgcodes_Tiff_TypeAndComp;
+typedef std::pair<perf::MatType,bool> Imgcodes_Tiff_TypeAndComp;
 typedef testing::TestWithParam< Imgcodes_Tiff_TypeAndComp > Imgcodecs_Tiff_Types;
 TEST_P(Imgcodecs_Tiff_Types, readWrite_alltypes)
@@ -925,13 +893,13 @@ TEST_P(Imgcodecs_Tiff_Types, readWrite_alltypes)
 }
 Imgcodes_Tiff_TypeAndComp all_types[] = {
-{ _CV_8UC1, true }, { _CV_8UC3, true }, { _CV_8UC4, true },
-{ _CV_8SC1, true }, { _CV_8SC3, true }, { _CV_8SC4, true },
-{ _CV_16UC1, true }, { _CV_16UC3, true }, { _CV_16UC4, true },
-{ _CV_16SC1, true }, { _CV_16SC3, true }, { _CV_16SC4, true },
-{ _CV_32SC1, true }, { _CV_32SC3, true }, { _CV_32SC4, true },
-{ _CV_32FC1, false }, { _CV_32FC3, false }, { _CV_32FC4, false }, // No compression
-{ _CV_64FC1, false }, { _CV_64FC3, false }, { _CV_64FC4, false } // No compression
+{ CV_8UC1, true }, { CV_8UC3, true }, { CV_8UC4, true },
+{ CV_8SC1, true }, { CV_8SC3, true }, { CV_8SC4, true },
+{ CV_16UC1, true }, { CV_16UC3, true }, { CV_16UC4, true },
+{ CV_16SC1, true }, { CV_16SC3, true }, { CV_16SC4, true },
+{ CV_32SC1, true }, { CV_32SC3, true }, { CV_32SC4, true },
+{ CV_32FC1, false }, { CV_32FC3, false }, { CV_32FC4, false }, // No compression
+{ CV_64FC1, false }, { CV_64FC3, false }, { CV_64FC4, false } // No compression
 };
 INSTANTIATE_TEST_CASE_P(AllTypes, Imgcodecs_Tiff_Types, testing::ValuesIn(all_types));

@@ -252,7 +252,7 @@ struct CV_EXPORTS_W_SIMPLE RefineParameters {
 */
 CV_PROP_RW float minRepDistance;
-/** @brief minRepDistance rate of allowed erroneous bits respect to the error correction capability of the used dictionary.
+/** @brief errorCorrectionRate rate of allowed erroneous bits with respect to the error correction capability of the used dictionary.
 *
 * -1 ignores the error correction step.
 */

@@ -30,6 +30,7 @@ PERF_TEST_P_(Perf_Barcode_multi, detect)
 }
 SANITY_CHECK_NOTHING();
 ASSERT_TRUE(res);
+ASSERT_EQ(16ull, corners.size());
 }
 PERF_TEST_P_(Perf_Barcode_multi, detect_decode)
@@ -54,6 +55,8 @@ PERF_TEST_P_(Perf_Barcode_multi, detect_decode)
 }
 SANITY_CHECK_NOTHING();
 ASSERT_TRUE(res);
+ASSERT_EQ(16ull, corners.size());
+ASSERT_EQ(4ull, decoded_info.size());
 }
 PERF_TEST_P_(Perf_Barcode_single, detect)
@@ -76,6 +79,7 @@ PERF_TEST_P_(Perf_Barcode_single, detect)
 }
 SANITY_CHECK_NOTHING();
 ASSERT_TRUE(res);
+ASSERT_EQ(4ull, corners.size());
 }
 PERF_TEST_P_(Perf_Barcode_single, detect_decode)
@@ -100,6 +104,8 @@ PERF_TEST_P_(Perf_Barcode_single, detect_decode)
 }
 SANITY_CHECK_NOTHING();
 ASSERT_TRUE(res);
+ASSERT_EQ(4ull, corners.size());
+ASSERT_EQ(1ull, decoded_info.size());
 }
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Perf_Barcode_multi,

@@ -302,13 +302,13 @@ string BarcodeImpl::detectAndDecode(InputArray img, OutputArray points, OutputAr
 CV_UNUSED(straight_code);
 vector<string> decoded_info;
 vector<string> decoded_type;
-vector<Point> points_;
+vector<Point2f> points_;
 if (!detectAndDecodeWithType(img, decoded_info, decoded_type, points_))
 return string();
 if (points_.size() < 4 || decoded_info.size() < 1)
 return string();
 points_.resize(4);
-points.setTo(points_);
+updatePointsResult(points, points_);
 return decoded_info[0];
 }

@@ -467,16 +467,25 @@ bool QRDetect::localization()
 CV_TRACE_FUNCTION();
 Point2f begin, end;
 vector<Vec3d> list_lines_x = searchHorizontalLines();
-if( list_lines_x.empty() ) { return false; }
-vector<Point2f> list_lines_y = separateVerticalLines(list_lines_x);
-if( list_lines_y.empty() ) { return false; }
+vector<Point2f> list_lines_y;
 Mat labels;
-kmeans(list_lines_y, 3, labels,
-TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1),
-3, KMEANS_PP_CENTERS, localization_points);
+if (!list_lines_x.empty())
+{
+list_lines_y = separateVerticalLines(list_lines_x);
+if (!list_lines_y.empty())
+{
+kmeans(list_lines_y, 3, labels,
+TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1),
+3, KMEANS_PP_CENTERS, localization_points);
+fixationPoints(localization_points);
+}
+}
-fixationPoints(localization_points);
+if (labels.empty())
+{
+localization_points.clear();
+}
 bool square_flag = false, local_points_flag = false;
 double triangle_sides[3];
@@ -1564,9 +1573,9 @@ Point QRDecode::findClosestZeroPoint(Point2f original_point)
 Point zero_point;
 const int step = 2;
-for (int i = orig_x - step; i >= 0 && i <= orig_x + step; i++)
+for (int i = std::max(orig_x - step, 0); i >= 0 && i <= std::min(orig_x + step, bin_barcode.cols - 1); i++)
 {
-for (int j = orig_y - step; j >= 0 && j <= orig_y + step; j++)
+for (int j = std::max(orig_y - step, 0); j >= 0 && j <= std::min(orig_y + step, bin_barcode.rows - 1); j++)
 {
 Point p(i, j);
 value = bin_barcode.at<uint8_t>(p);
@@ -1944,7 +1953,7 @@ vector<vector<float> > QRDecode::computeSpline(const vector<int> &x_arr, const v
 }
 for (int i = 0; i < n - 1; i++)
 {
-h[i] = static_cast<float>(y_arr[i + 1] - y_arr[i]);
+h[i] = static_cast<float>(y_arr[i + 1] - y_arr[i]) + std::numeric_limits<float>::epsilon();
 }
 for (int i = 1; i < n - 1; i++)
 {
@@ -3071,7 +3080,10 @@ protected:
 {
 bool operator()(const Point2f& a, const Point2f& b) const
 {
-return a.y < b.y;
+if (a.y != b.y)
+return a.y < b.y;
+else
+return a.x < b.x;
 }
 };
 struct compareSquare
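The tie-break matters because std::sort requires a strict weak ordering: with only a.y < b.y, two points sharing a y coordinate compare as equivalent and their relative order is unspecified, which made the QR point ordering unstable. Comparing (y, then x) lexicographically restores a deterministic order; a minimal sketch:

```cpp
#include <algorithm>
#include <vector>
#include <opencv2/core.hpp>

// Lexicographic (y, x) comparison: a strict weak ordering even when
// several points share the same y coordinate.
struct CompareByYThenX {
    bool operator()(const cv::Point2f& a, const cv::Point2f& b) const {
        if (a.y != b.y)
            return a.y < b.y;
        return a.x < b.x;
    }
};

int main() {
    std::vector<cv::Point2f> pts = {{2.f, 1.f}, {1.f, 1.f}, {0.f, 0.f}};
    std::sort(pts.begin(), pts.end(), CompareByYThenX{});
    // pts is now {(0,0), (1,1), (2,1)}: equal-y points ordered by x.
}
```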

@@ -95,6 +95,13 @@ TEST_P(BarcodeDetector_main, interface)
 EXPECT_EQ(1u, expected_lines.count(res));
 }
+{
+string res = det.detectAndDecode(img, points);
+ASSERT_FALSE(res.empty());
+EXPECT_EQ(1u, expected_lines.count(res));
+EXPECT_EQ(4u, points.size());
+}
 // common interface (multi)
 {
 bool res = det.detectMulti(img, points);

@@ -614,4 +614,69 @@ TEST(Objdetect_QRCode_detectAndDecode, utf8_output)
 EXPECT_NE(decoded_info.find("M\xc3\xbcllheimstrasse"), std::string::npos);
 }
+TEST_P(Objdetect_QRCode_detectAndDecodeMulti, detect_regression_24679)
+{
+const std::string name_current_image = "issue_24679.png";
+const std::string root = "qrcode/";
+std::string image_path = findDataFile(root + name_current_image);
+Mat img = imread(image_path);
+const std::string method = GetParam();
+GraphicalCodeDetector qrcode = QRCodeDetector();
+if (method == "aruco_based") {
+qrcode = QRCodeDetectorAruco();
+}
+std::vector<cv::String> decoded_info;
+ASSERT_TRUE(qrcode.detectAndDecodeMulti(img, decoded_info));
+EXPECT_EQ(decoded_info.size(), 4U);
+}
+TEST_P(Objdetect_QRCode_detectAndDecodeMulti, detect_regression_24011)
+{
+const std::string name_current_image = "issue_24011.jpg";
+const std::string root = "qrcode/";
+std::string image_path = findDataFile(root + name_current_image);
+Mat img = imread(image_path);
+const std::string method = GetParam();
+GraphicalCodeDetector qrcode = QRCodeDetector();
+if (method == "aruco_based") {
+qrcode = QRCodeDetectorAruco();
+}
+std::vector<cv::String> decoded_info;
+ASSERT_TRUE(qrcode.detectAndDecodeMulti(img, decoded_info));
+EXPECT_EQ(decoded_info.size(), 2U);
+}
+TEST(Objdetect_QRCode_detect, detect_regression_24450)
+{
+const std::string name_current_image = "issue_24450.png";
+const std::string root = "qrcode/";
+std::string image_path = findDataFile(root + name_current_image);
+Mat img = imread(image_path);
+GraphicalCodeDetector qrcode = QRCodeDetector();
+std::vector<Point2f> points;
+ASSERT_TRUE(qrcode.detect(img, points));
+EXPECT_EQ(points.size(), 4U);
+img.at<Vec3b>(img.rows - 1, 296) = {};
+ASSERT_TRUE(qrcode.detect(img, points));
+EXPECT_EQ(points.size(), 4U);
+}
+TEST(Objdetect_QRCode_detect, detect_regression_22892)
+{
+const std::string name_current_image = "issue_22892.png";
+const std::string root = "qrcode/";
+std::string image_path = findDataFile(root + name_current_image);
+Mat img = imread(image_path);
+QRCodeDetector qrcode;
+std::vector<Point> corners;
+Mat straight_code;
+qrcode.detectAndDecodeCurved(img, corners, straight_code);
+EXPECT_EQ(corners.size(), 4U);
+}
 }} // namespace

@@ -79,7 +79,7 @@ _PREDEFINED_TYPES = (
 PrimitiveTypeNode.float_("Moments::value")),
 AliasTypeNode.tuple_("RotatedRect",
 items=(AliasRefTypeNode("Point2f"),
-AliasRefTypeNode("Size"),
+AliasRefTypeNode("Size2f"),
 PrimitiveTypeNode.float_()),
 doc="Any type providing sequence protocol is supported"),
 AliasTypeNode.tuple_("TermCriteria",

@@ -250,15 +250,16 @@ class Builder:
 cmake_vars['BUILD_SHARED_LIBS'] = "ON"
 if self.config.modules_list is not None:
 cmd.append("-DBUILD_LIST='%s'" % self.config.modules_list)
+cmake_vars['BUILD_LIST'] = '%s' % self.config.modules_list
 if self.config.extra_modules_path is not None:
 cmd.append("-DOPENCV_EXTRA_MODULES_PATH='%s'" % self.config.extra_modules_path)
 cmake_vars['OPENCV_EXTRA_MODULES_PATH'] = '%s' % self.config.extra_modules_path
 if self.use_ccache == True:
 cmd.append("-DNDK_CCACHE=ccache")
 cmake_vars['NDK_CCACHE'] = 'ccache'
 if do_install:
 cmd.extend(["-DBUILD_TESTS=ON", "-DINSTALL_TESTS=ON"])
 cmake_vars['BUILD_TESTS'] = "ON"
 cmake_vars['INSTALL_TESTS'] = "ON"
 if no_media_ndk:
 cmake_vars['WITH_ANDROID_MEDIANDK'] = "OFF"
