Merge pull request #3734 from asmorkalov:as/std_move_warning

Fixed Wredundant-move produced by GCC 13.2 (Ubuntu 24.04). #3734

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [ ] The feature is well documented and sample code can be built with the project CMake
pull/3737/head
Alexander Smorkalov 1 year ago committed by GitHub
parent 6b1faf0d9b
commit cb08ac6b2e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 32
      modules/face/src/facemarkLBF.cpp
  2. 4
      modules/face/src/mace.cpp
  3. 6
      modules/mcc/test/test_mcc.cpp
  4. 6
      modules/rgbd/perf/perf_tsdf.cpp
  5. 2
      modules/rgbd/test/ocl/test_tsdf.cpp
  6. 20
      modules/rgbd/test/test_colored_kinfu.cpp
  7. 4
      modules/rgbd/test/test_kinfu.cpp
  8. 2
      modules/rgbd/test/test_tsdf.cpp
  9. 81
      modules/videostab/src/global_motion.cpp
  10. 14
      modules/ximgproc/test/test_sparse_match_interpolator.cpp

@ -661,24 +661,22 @@ FacemarkLBFImpl::BBox::BBox(double _x, double _y, double w, double h) {
// Project absolute shape to relative shape binding to this bbox
Mat FacemarkLBFImpl::BBox::project(const Mat &shape) const {
Mat_<double> res(shape.rows, shape.cols);
const Mat_<double> &shape_ = (Mat_<double>)shape;
Mat res(shape.rows, shape.cols, CV_64FC1);
for (int i = 0; i < shape.rows; i++) {
res(i, 0) = (shape_(i, 0) - x_center) / x_scale;
res(i, 1) = (shape_(i, 1) - y_center) / y_scale;
res.at<double>(i, 0) = (shape.at<double>(i, 0) - x_center) / x_scale;
res.at<double>(i, 1) = (shape.at<double>(i, 1) - y_center) / y_scale;
}
return std::move(res);
return res;
}
// Project relative shape to absolute shape binding to this bbox
Mat FacemarkLBFImpl::BBox::reproject(const Mat &shape) const {
Mat_<double> res(shape.rows, shape.cols);
const Mat_<double> &shape_ = (Mat_<double>)shape;
Mat res(shape.rows, shape.cols, CV_64FC1);
for (int i = 0; i < shape.rows; i++) {
res(i, 0) = shape_(i, 0)*x_scale + x_center;
res(i, 1) = shape_(i, 1)*y_scale + y_center;
res.at<double>(i, 0) = shape.at<double>(i, 0)*x_scale + x_center;
res.at<double>(i, 1) = shape.at<double>(i, 1)*y_scale + y_center;
}
return std::move(res);
return res;
}
Mat FacemarkLBFImpl::getMeanShape(std::vector<Mat> &gt_shapes, std::vector<BBox> &bboxes) {
@ -997,7 +995,7 @@ void FacemarkLBFImpl::RandomForest::train(std::vector<Mat> &imgs, std::vector<Ma
}
Mat FacemarkLBFImpl::RandomForest::generateLBF(Mat &img, Mat &current_shape, BBox &bbox, Mat &mean_shape) {
Mat_<int> lbf_feat(1, landmark_n*trees_n);
Mat lbf_feat(1, landmark_n*trees_n, CV_32SC1);
double scale;
Mat_<double> rotate;
calcSimilarityTransform(bbox.project(current_shape), mean_shape, scale, rotate);
@ -1036,10 +1034,10 @@ Mat FacemarkLBFImpl::RandomForest::generateLBF(Mat &img, Mat &current_shape, BBo
idx = 2 * idx + 1;
}
}
lbf_feat(i*trees_n + j) = (i*trees_n + j)*base + code;
lbf_feat.at<int>(i*trees_n + j) = (i*trees_n + j)*base + code;
}
}
return std::move(lbf_feat);
return lbf_feat;
}
void FacemarkLBFImpl::RandomForest::write(FileStorage fs, int k) {
@ -1365,7 +1363,7 @@ Mat FacemarkLBFImpl::Regressor::supportVectorRegression(
Mat FacemarkLBFImpl::Regressor::globalRegressionPredict(const Mat &lbf, int stage) {
const Mat_<double> &weight = (Mat_<double>)gl_regression_weights[stage];
Mat_<double> delta_shape(weight.rows / 2, 2);
Mat delta_shape(weight.rows / 2, 2, CV_64FC1);
const double *w_ptr = NULL;
const int *lbf_ptr = lbf.ptr<int>(0);
@ -1374,14 +1372,14 @@ Mat FacemarkLBFImpl::Regressor::globalRegressionPredict(const Mat &lbf, int stag
w_ptr = weight.ptr<double>(2 * i);
double y = 0;
for (int j = 0; j < lbf.cols; j++) y += w_ptr[lbf_ptr[j]];
delta_shape(i, 0) = y;
delta_shape.at<double>(i, 0) = y;
w_ptr = weight.ptr<double>(2 * i + 1);
y = 0;
for (int j = 0; j < lbf.cols; j++) y += w_ptr[lbf_ptr[j]];
delta_shape(i, 1) = y;
delta_shape.at<double>(i, 1) = y;
}
return std::move(delta_shape);
return delta_shape;
} // Regressor::globalRegressionPredict
Mat FacemarkLBFImpl::Regressor::predict(Mat &img, BBox &bbox) {

@ -102,11 +102,11 @@ struct MACEImpl CV_FINAL : MACE {
Mat complexInput;
merge(input, 2, complexInput);
Mat_<Vec2d> dftImg(IMGSIZE*2, IMGSIZE*2, 0.0);
Mat dftImg(IMGSIZE*2, IMGSIZE*2, CV_64FC2, 0.0);
complexInput.copyTo(dftImg(Rect(0,0,IMGSIZE,IMGSIZE)));
dft(dftImg, dftImg);
return std::move(dftImg);
return dftImg;
}

@ -102,7 +102,9 @@ TEST(CV_mcc_ccm_test, detect_Macbeth)
// check Macbeth corners
vector<Point2f> corners = checker->getBox();
EXPECT_MAT_NEAR(gold_corners, corners, 3.6); // diff 3.57385 in ARM only
// diff 3.57385 corresponds to ARM v8
// diff 4.37915 correspnds to Ubuntu 24.04 x86_64 configuration
EXPECT_MAT_NEAR(gold_corners, corners, 4.38);
// read gold chartsRGB
node = fs["chartsRGB"];
@ -112,7 +114,7 @@ TEST(CV_mcc_ccm_test, detect_Macbeth)
// check chartsRGB
Mat chartsRGB = checker->getChartsRGB();
EXPECT_MAT_NEAR(goldChartsRGB.col(1), chartsRGB.col(1), 0.25); // diff 0.240634 in ARM only
EXPECT_MAT_NEAR(goldChartsRGB.col(1), chartsRGB.col(1), 0.3); // diff 0.292077 on Ubuntu 20.04 ARM64
}
TEST(CV_mcc_ccm_test, compute_ccm)

@ -91,7 +91,7 @@ struct Scene
{
virtual ~Scene() {}
static Ptr<Scene> create(Size sz, Matx33f _intr, float _depthFactor, bool onlySemisphere);
virtual Mat depth(Affine3f pose) = 0;
virtual Mat_<float> depth(const Affine3f& pose) = 0;
virtual std::vector<Affine3f> getPoses() = 0;
};
@ -131,7 +131,7 @@ struct SemisphereScene : Scene
return res;
}
Mat depth(Affine3f pose) override
Mat_<float> depth(const Affine3f& pose) override
{
Mat_<float> frame(frameSize);
Reprojector reproj(intr);
@ -139,7 +139,7 @@ struct SemisphereScene : Scene
Range range(0, frame.rows);
parallel_for_(range, RenderInvoker<SemisphereScene>(frame, pose, reproj, depthFactor, onlySemisphere));
return std::move(frame);
return frame;
}
std::vector<Affine3f> getPoses() override

@ -143,7 +143,7 @@ struct SemisphereScene : Scene
Range range(0, frame.rows);
parallel_for_(range, RenderInvoker<SemisphereScene>(frame, pose, reproj, depthFactor, onlySemisphere));
return std::move(frame);
return static_cast<Mat>(frame);
}
std::vector<Affine3f> getPoses() override

@ -158,8 +158,8 @@ struct Scene
{
virtual ~Scene() {}
static Ptr<Scene> create(int nScene, Size sz, Matx33f _intr, float _depthFactor);
virtual Mat depth(Affine3f pose) = 0;
virtual Mat rgb(Affine3f pose) = 0;
virtual Mat_<float> depth(const Affine3f& pose) = 0;
virtual Mat_<Vec3f> rgb(const Affine3f& pose) = 0;
virtual std::vector<Affine3f> getPoses() = 0;
};
@ -198,7 +198,7 @@ struct CubeSpheresScene : Scene
return res;
}
Mat depth(Affine3f pose) override
Mat_<float> depth(const Affine3f& pose) override
{
Mat_<float> frame(frameSize);
Reprojector reproj(intr);
@ -206,10 +206,10 @@ struct CubeSpheresScene : Scene
Range range(0, frame.rows);
parallel_for_(range, RenderInvoker<CubeSpheresScene>(frame, pose, reproj, depthFactor));
return std::move(frame);
return frame;
}
Mat rgb(Affine3f pose) override
Mat_<Vec3f> rgb(const Affine3f& pose) override
{
Mat_<Vec3f> frame(frameSize);
Reprojector reproj(intr);
@ -217,7 +217,7 @@ struct CubeSpheresScene : Scene
Range range(0, frame.rows);
parallel_for_(range, RenderColorInvoker<CubeSpheresScene>(frame, pose, reproj, depthFactor));
return std::move(frame);
return frame;
}
std::vector<Affine3f> getPoses() override
@ -305,7 +305,7 @@ struct RotatingScene : Scene
return res;
}
Mat depth(Affine3f pose) override
Mat_<float> depth(const Affine3f& pose) override
{
Mat_<float> frame(frameSize);
Reprojector reproj(intr);
@ -313,10 +313,10 @@ struct RotatingScene : Scene
Range range(0, frame.rows);
parallel_for_(range, RenderInvoker<RotatingScene>(frame, pose, reproj, depthFactor));
return std::move(frame);
return frame;
}
Mat rgb(Affine3f pose) override
Mat_<Vec3f> rgb(const Affine3f& pose) override
{
Mat_<Vec3f> frame(frameSize);
Reprojector reproj(intr);
@ -324,7 +324,7 @@ struct RotatingScene : Scene
Range range(0, frame.rows);
parallel_for_(range, RenderColorInvoker<RotatingScene>(frame, pose, reproj, depthFactor));
return std::move(frame);
return frame;
}
std::vector<Affine3f> getPoses() override

@ -141,7 +141,7 @@ struct CubeSpheresScene : Scene
Range range(0, frame.rows);
parallel_for_(range, RenderInvoker<CubeSpheresScene>(frame, pose, reproj, depthFactor));
return std::move(frame);
return static_cast<Mat>(frame);
}
std::vector<Affine3f> getPoses() override
@ -237,7 +237,7 @@ struct RotatingScene : Scene
Range range(0, frame.rows);
parallel_for_(range, RenderInvoker<RotatingScene>(frame, pose, reproj, depthFactor));
return std::move(frame);
return static_cast<Mat>(frame);
}
std::vector<Affine3f> getPoses() override

@ -140,7 +140,7 @@ struct SemisphereScene : Scene
Range range(0, frame.rows);
parallel_for_(range, RenderInvoker<SemisphereScene>(frame, pose, reproj, depthFactor, onlySemisphere));
return std::move(frame);
return static_cast<Mat>(frame);
}
std::vector<Affine3f> getPoses() override

@ -82,7 +82,7 @@ namespace videostab
{
// does isotropic normalization
static Mat normalizePoints(int npoints, Point2f *points)
static Mat_<float> normalizePoints(int npoints, Point2f *points)
{
float cx = 0.f, cy = 0.f;
for (int i = 0; i < npoints; ++i)
@ -113,32 +113,32 @@ static Mat normalizePoints(int npoints, Point2f *points)
T(0,0) = T(1,1) = s;
T(0,2) = -cx*s;
T(1,2) = -cy*s;
return std::move(T);
return T;
}
static Mat estimateGlobMotionLeastSquaresTranslation(
int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
Mat_<float> M = Mat::eye(3, 3, CV_32F);
Mat M = Mat::eye(3, 3, CV_32FC1);
for (int i = 0; i < npoints; ++i)
{
M(0,2) += points1[i].x - points0[i].x;
M(1,2) += points1[i].y - points0[i].y;
M.at<float>(0,2) += points1[i].x - points0[i].x;
M.at<float>(1,2) += points1[i].y - points0[i].y;
}
M(0,2) /= npoints;
M(1,2) /= npoints;
M.at<float>(0,2) /= npoints;
M.at<float>(1,2) /= npoints;
if (rmse)
{
*rmse = 0;
for (int i = 0; i < npoints; ++i)
*rmse += sqr(points1[i].x - points0[i].x - M(0,2)) +
sqr(points1[i].y - points0[i].y - M(1,2));
*rmse += sqr(points1[i].x - points0[i].x - M.at<float>(0,2)) +
sqr(points1[i].y - points0[i].y - M.at<float>(1,2));
*rmse = std::sqrt(*rmse / npoints);
}
return std::move(M);
return M;
}
@ -194,16 +194,16 @@ static Mat estimateGlobMotionLeastSquaresRotation(
// A*sin(alpha) + B*cos(alpha) = 0
float C = std::sqrt(A*A + B*B);
Mat_<float> M = Mat::eye(3, 3, CV_32F);
Mat M = Mat::eye(3, 3, CV_32F);
if ( C != 0 )
{
float sinAlpha = - B / C;
float cosAlpha = A / C;
M(0,0) = cosAlpha;
M(1,1) = M(0,0);
M(0,1) = sinAlpha;
M(1,0) = - M(0,1);
M.at<float>(0,0) = cosAlpha;
M.at<float>(1,1) = cosAlpha;
M.at<float>(0,1) = sinAlpha;
M.at<float>(1,0) = - sinAlpha;
}
if (rmse)
@ -213,13 +213,13 @@ static Mat estimateGlobMotionLeastSquaresRotation(
{
p0 = points0[i];
p1 = points1[i];
*rmse += sqr(p1.x - M(0,0)*p0.x - M(0,1)*p0.y) +
sqr(p1.y - M(1,0)*p0.x - M(1,1)*p0.y);
*rmse += sqr(p1.x - M.at<float>(0,0)*p0.x - M.at<float>(0,1)*p0.y) +
sqr(p1.y - M.at<float>(1,0)*p0.x - M.at<float>(1,1)*p0.y);
}
*rmse = std::sqrt(*rmse / npoints);
}
return std::move(M);
return M;
}
static Mat estimateGlobMotionLeastSquaresRigid(
@ -250,15 +250,15 @@ static Mat estimateGlobMotionLeastSquaresRigid(
A(1,1) += pt1.y * pt0.y;
}
Mat_<float> M = Mat::eye(3, 3, CV_32F);
Mat M = Mat::eye(3, 3, CV_32FC1);
SVD svd(A);
Mat_<float> R = svd.u * svd.vt;
Mat tmp(M(Rect(0,0,2,2)));
R.copyTo(tmp);
M(0,2) = mean1.x - R(0,0)*mean0.x - R(0,1)*mean0.y;
M(1,2) = mean1.y - R(1,0)*mean0.x - R(1,1)*mean0.y;
M.at<float>(0,2) = mean1.x - R(0,0)*mean0.x - R(0,1)*mean0.y;
M.at<float>(1,2) = mean1.y - R(1,0)*mean0.x - R(1,1)*mean0.y;
if (rmse)
{
@ -267,13 +267,13 @@ static Mat estimateGlobMotionLeastSquaresRigid(
{
pt0 = points0[i];
pt1 = points1[i];
*rmse += sqr(pt1.x - M(0,0)*pt0.x - M(0,1)*pt0.y - M(0,2)) +
sqr(pt1.y - M(1,0)*pt0.x - M(1,1)*pt0.y - M(1,2));
*rmse += sqr(pt1.x - M.at<float>(0,0)*pt0.x - M.at<float>(0,1)*pt0.y - M.at<float>(0,2)) +
sqr(pt1.y - M.at<float>(1,0)*pt0.x - M.at<float>(1,1)*pt0.y - M.at<float>(1,2));
}
*rmse = std::sqrt(*rmse / npoints);
}
return std::move(M);
return M;
}
@ -404,7 +404,7 @@ Mat estimateGlobalMotionRansac(
// best hypothesis
std::vector<int> bestIndices(params.size);
Mat_<float> bestM;
Mat bestM;
int ninliersMax = -1;
RNG rng(0);
@ -469,8 +469,8 @@ Mat estimateGlobalMotionRansac(
{
p0 = points0_[i];
p1 = points1_[i];
x = bestM(0,0)*p0.x + bestM(0,1)*p0.y + bestM(0,2);
y = bestM(1,0)*p0.x + bestM(1,1)*p0.y + bestM(1,2);
x = bestM.at<float>(0,0)*p0.x + bestM.at<float>(0,1)*p0.y + bestM.at<float>(0,2);
y = bestM.at<float>(1,0)*p0.x + bestM.at<float>(1,1)*p0.y + bestM.at<float>(1,2);
if (sqr(x - p1.x) + sqr(y - p1.y) < params.thresh * params.thresh)
{
subset0[j] = p0;
@ -484,7 +484,7 @@ Mat estimateGlobalMotionRansac(
if (ninliers)
*ninliers = ninliersMax;
return std::move(bestM);
return bestM;
}
@ -505,7 +505,7 @@ Mat MotionEstimatorRansacL2::estimate(InputArray points0, InputArray points1, bo
// find motion
int ninliers = 0;
Mat_<float> M;
Mat M;
if (motionModel() != MM_HOMOGRAPHY)
M = estimateGlobalMotionRansac(
@ -527,7 +527,7 @@ Mat MotionEstimatorRansacL2::estimate(InputArray points0, InputArray points1, bo
if (ok) *ok = false;
}
return std::move(M);
return M;
}
@ -675,13 +675,13 @@ FromFileMotionReader::FromFileMotionReader(const String &path)
Mat FromFileMotionReader::estimate(const Mat &/*frame0*/, const Mat &/*frame1*/, bool *ok)
{
Mat_<float> M(3, 3);
Mat M(3, 3, CV_32FC1);
bool ok_;
file_ >> M(0,0) >> M(0,1) >> M(0,2)
>> M(1,0) >> M(1,1) >> M(1,2)
>> M(2,0) >> M(2,1) >> M(2,2) >> ok_;
file_ >> M.at<float>(0,0) >> M.at<float>(0,1) >> M.at<float>(0,2)
>> M.at<float>(1,0) >> M.at<float>(1,1) >> M.at<float>(1,2)
>> M.at<float>(2,0) >> M.at<float>(2,1) >> M.at<float>(2,2) >> ok_;
if (ok) *ok = ok_;
return std::move(M);
return M;
}
@ -696,12 +696,13 @@ ToFileMotionWriter::ToFileMotionWriter(const String &path, Ptr<ImageMotionEstima
Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
bool ok_;
Mat_<float> M = motionEstimator_->estimate(frame0, frame1, &ok_);
file_ << M(0,0) << " " << M(0,1) << " " << M(0,2) << " "
<< M(1,0) << " " << M(1,1) << " " << M(1,2) << " "
<< M(2,0) << " " << M(2,1) << " " << M(2,2) << " " << ok_ << std::endl;
Mat M = motionEstimator_->estimate(frame0, frame1, &ok_);
file_ << M.at<float>(0,0) << " " << M.at<float>(0,1) << " " << M.at<float>(0,2) << " "
<< M.at<float>(1,0) << " " << M.at<float>(1,1) << " " << M.at<float>(1,2) << " "
<< M.at<float>(2,0) << " " << M.at<float>(2,1) << " " << M.at<float>(2,2) << " "
<< ok_ << std::endl;
if (ok) *ok = ok_;
return std::move(M);
return M;
}

@ -17,22 +17,22 @@ Mat readOpticalFlow( const String& path )
// CV_Assert(sizeof(float) == 4);
//FIXME: ensure right sizes of int and float - here and in writeOpticalFlow()
Mat_<Point2f> flow;
Mat flow;
std::ifstream file(path.c_str(), std::ios_base::binary);
if ( !file.good() )
return std::move(flow); // no file - return empty matrix
return flow; // no file - return empty matrix
float tag;
file.read((char*) &tag, sizeof(float));
if ( tag != FLOW_TAG_FLOAT )
return std::move(flow);
return flow;
int width, height;
file.read((char*) &width, 4);
file.read((char*) &height, 4);
flow.create(height, width);
flow.create(height, width, CV_32FC2);
for ( int i = 0; i < flow.rows; ++i )
{
@ -44,14 +44,14 @@ Mat readOpticalFlow( const String& path )
if ( !file.good() )
{
flow.release();
return std::move(flow);
return flow;
}
flow(i, j) = u;
flow.at<Point2f>(i, j) = u;
}
}
file.close();
return std::move(flow);
return flow;
}
CV_ENUM(GuideTypes, CV_8UC1, CV_8UC3)

Loading…
Cancel
Save