Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/19722/head
Alexander Alekhin 4 years ago
commit b19f860384
  1. doc/js_tutorials/js_setup/js_setup/js_setup.markdown (7 changes)
  2. modules/calib3d/src/solvepnp.cpp (34 changes)
  3. modules/calib3d/src/stereobm.cpp (6 changes)
  4. modules/calib3d/src/stereosgbm.cpp (12 changes)
  5. modules/calib3d/test/test_solvepnp_ransac.cpp (37 changes)
  6. modules/core/include/opencv2/core/fast_math.hpp (3 changes)
  7. modules/core/include/opencv2/core/vsx_utils.hpp (4 changes)
  8. modules/core/src/system.cpp (32 changes)
  9. modules/core/test/ocl/test_opencl.cpp (5 changes)
  10. modules/dnn/src/layers/detection_output_layer.cpp (11 changes)
  11. modules/dnn/src/layers/mvn_layer.cpp (8 changes)
  12. modules/dnn/src/layers/resize_layer.cpp (32 changes)
  13. modules/dnn/src/nms.inl.hpp (11 changes)
  14. modules/js/generator/embindgen.py (7 changes)
  15. modules/js/test/test_objdetect.js (41 changes)
  16. platforms/js/build_js.py (2 changes)
  17. platforms/js/opencv_js.config.py (3 changes)
  18. samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp (11 changes)
  19. samples/dnn/virtual_try_on.py (5 changes)

@@ -145,6 +145,7 @@ Building OpenCV.js from Source
python ./platforms/js/build_js.py build_js --cmake_option="-DOPENCV_EXTRA_MODULES_PATH=opencv_contrib/modules"
@endcode
Running OpenCV.js Tests
---------------------------------------
@@ -310,6 +311,12 @@ The example uses latest version of emscripten. If the build fails you should try
docker run --rm -v $(pwd):/src -u $(id -u):$(id -g) emscripten/emsdk:2.0.10 emcmake python3 ./platforms/js/build_js.py build_js
@endcode
On Windows, use the following PowerShell command:
@code{.bash}
docker run --rm --workdir /src -v "$(get-location):/src" "emscripten/emsdk:2.0.10" emcmake python3 ./platforms/js/build_js.py build_js
@endcode
### Building the documentation with Docker
To build the documentation `doxygen` needs to be installed. Create a file named `Dockerfile` with the following content:

@@ -334,18 +334,42 @@ bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
opoints_inliers.resize(npoints1);
ipoints_inliers.resize(npoints1);
result = solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix,
distCoeffs, rvec, tvec, useExtrinsicGuess,
(flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P) ? SOLVEPNP_EPNP : flags) ? 1 : -1;
try
{
result = solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix,
distCoeffs, rvec, tvec, useExtrinsicGuess,
(flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P) ? SOLVEPNP_EPNP : flags) ? 1 : -1;
}
catch (const cv::Exception& e)
{
if (flags == SOLVEPNP_ITERATIVE &&
npoints1 == 5 &&
e.what() &&
std::string(e.what()).find("DLT algorithm needs at least 6 points") != std::string::npos
)
{
CV_LOG_INFO(NULL, "solvePnPRansac(): solvePnP stage to compute the final pose using points "
"in the consensus set raised DLT 6 points exception, use result from MSS (Minimal Sample Sets) stage instead.");
rvec = _local_model.col(0); // output rotation vector
tvec = _local_model.col(1); // output translation vector
result = 1;
}
else
{
// raise other exceptions
throw;
}
}
if( result <= 0 )
if (result <= 0)
{
_rvec.assign(_local_model.col(0)); // output rotation vector
_tvec.assign(_local_model.col(1)); // output translation vector
if( _inliers.needed() )
if (_inliers.needed())
_inliers.release();
CV_LOG_DEBUG(NULL, "solvePnPRansac(): solvePnP stage to compute the final pose using points in the consensus set failed. Return false");
return false;
}
else
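
Seen from the caller, the fallback above only changes the degenerate consensus-set case: instead of propagating the DLT exception, solvePnPRansac() now returns true with the pose from the MSS stage. A minimal caller-side sketch (pts3d, pts2d and cameraMatrix are assumed to be filled in; the real regression data is in the test added below):
@code{.cpp}
cv::Mat rvec, tvec;
std::vector<int> inliers;
bool ok = cv::solvePnPRansac(pts3d, pts2d, cameraMatrix, cv::noArray(),
                             rvec, tvec, false /*useExtrinsicGuess*/,
                             100 /*iterationsCount*/, 8.f /*reprojectionError*/,
                             0.99 /*confidence*/, inliers, cv::SOLVEPNP_ITERATIVE);
// ok is now true even when the consensus set has only 5 points and the
// final SOLVEPNP_ITERATIVE refinement raised the "DLT needs 6 points" error
@endcode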

@@ -1148,13 +1148,15 @@ class StereoBMImpl CV_FINAL : public StereoBM
{
public:
StereoBMImpl()
: params()
{
params = StereoBMParams();
// nothing
}
StereoBMImpl( int _numDisparities, int _SADWindowSize )
: params(_numDisparities, _SADWindowSize)
{
params = StereoBMParams(_numDisparities, _SADWindowSize);
// nothing
}
void compute( InputArray leftarr, InputArray rightarr, OutputArray disparr ) CV_OVERRIDE

@@ -2186,19 +2186,21 @@ class StereoSGBMImpl CV_FINAL : public StereoSGBM
{
public:
StereoSGBMImpl()
: params()
{
params = StereoSGBMParams();
// nothing
}
StereoSGBMImpl( int _minDisparity, int _numDisparities, int _SADWindowSize,
int _P1, int _P2, int _disp12MaxDiff, int _preFilterCap,
int _uniquenessRatio, int _speckleWindowSize, int _speckleRange,
int _mode )
: params(_minDisparity, _numDisparities, _SADWindowSize,
_P1, _P2, _disp12MaxDiff, _preFilterCap,
_uniquenessRatio, _speckleWindowSize, _speckleRange,
_mode)
{
params = StereoSGBMParams( _minDisparity, _numDisparities, _SADWindowSize,
_P1, _P2, _disp12MaxDiff, _preFilterCap,
_uniquenessRatio, _speckleWindowSize, _speckleRange,
_mode );
// nothing
}
void compute( InputArray leftarr, InputArray rightarr, OutputArray disparr ) CV_OVERRIDE
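
Both stereo matcher changes apply the same C++ idiom, sketched here with a hypothetical Params aggregate: initializing the member in the constructor initializer list constructs it once in place, instead of default-constructing it and then overwriting it with a temporary in the body.
@code{.cpp}
struct Params   // stands in for StereoBMParams / StereoSGBMParams
{
    Params() : numDisparities(0), blockSize(21) {}
    Params(int nd, int bs) : numDisparities(nd), blockSize(bs) {}
    int numDisparities, blockSize;
};

class MatcherImpl
{
public:
    // before: params was default-constructed, then assigned in the body;
    // after: a single direct construction in the initializer list
    MatcherImpl(int nd, int bs) : params(nd, bs) { /* nothing */ }
private:
    Params params;
};
@endcode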

@@ -837,6 +837,43 @@ TEST(Calib3d_SolvePnPRansac, double_support)
EXPECT_LE(cvtest::norm(t, Mat_<double>(tF), NORM_INF), 1e-3);
}
TEST(Calib3d_SolvePnPRansac, bad_input_points_19253)
{
// with this specific data
// when computing the final pose using points in the consensus set with SOLVEPNP_ITERATIVE and solvePnP()
// an exception is thrown from solvePnP because there are 5 non-coplanar 3D points and the DLT algorithm needs at least 6 non-coplanar 3D points
// with PR #19253 we choose to return true, with the pose estimated from the MSS stage instead of throwing the exception
float pts2d_[] = {
-5.38358629e-01f, -5.09638414e-02f,
-5.07192254e-01f, -2.20743284e-01f,
-5.43107152e-01f, -4.90474701e-02f,
-5.54325163e-01f, -1.86715424e-01f,
-5.59334219e-01f, -4.01909500e-02f,
-5.43504596e-01f, -4.61776406e-02f
};
Mat pts2d(6, 2, CV_32FC1, pts2d_);
float pts3d_[] = {
-3.01153604e-02f, -1.55665115e-01f, 4.50000018e-01f,
4.27827090e-01f, 4.28645730e-01f, 1.08600008e+00f,
-3.14165242e-02f, -1.52656138e-01f, 4.50000018e-01f,
-1.46217480e-01f, 5.57961613e-02f, 7.17000008e-01f,
-4.89348806e-02f, -1.38795510e-01f, 4.47000027e-01f,
-3.13065052e-02f, -1.52636901e-01f, 4.51000035e-01f
};
Mat pts3d(6, 3, CV_32FC1, pts3d_);
Mat camera_mat = Mat::eye(3, 3, CV_64FC1);
Mat rvec, tvec;
vector<int> inliers;
// solvePnPRansac will return true with 5 inliers, which means the result comes from the MSS stage.
bool result = solvePnPRansac(pts3d, pts2d, camera_mat, noArray(), rvec, tvec, false, 100, 4.f / 460.f, 0.99, inliers);
EXPECT_EQ(inliers.size(), size_t(5));
EXPECT_TRUE(result);
}
TEST(Calib3d_SolvePnP, input_type)
{
Matx33d intrinsics(5.4794130238156129e+002, 0., 2.9835545700043139e+002, 0.,

@@ -76,6 +76,9 @@
#if defined __PPC64__ && defined __GNUC__ && defined _ARCH_PWR8 \
&& !defined(OPENCV_SKIP_INCLUDE_ALTIVEC_H)
#include <altivec.h>
#undef vector
#undef bool
#undef pixel
#endif
#if defined(CV_INLINE_ROUND_FLT)
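
The three #undef lines matter because, with GCC on PowerPC, altivec.h defines vector, bool and pixel as macros, which breaks ordinary C++ in every header included afterwards. A sketch of the failure mode (assuming the classic macro-based altivec.h):
@code{.cpp}
#include <altivec.h>  // defines vector/bool/pixel as macros on GCC/PowerPC
// std::vector<int> v;  // would not compile here: 'vector' expands to '__vector'
#undef vector
#undef bool
#undef pixel
#include <vector>
std::vector<int> v;     // compiles again once the macros are gone
bool flag = true;
@endcode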

@@ -497,11 +497,13 @@ VSX_IMPL_CONV_EVEN_2_4(vec_uint4, vec_double2, vec_ctu, vec_ctuo)
VSX_FINLINE(rt) fnm(const rg& a, int only_truncate) \
{ \
assert(only_truncate == 0); \
CV_UNUSED(only_truncate); \
CV_UNUSED(only_truncate); \
return fn2(a); \
}
VSX_IMPL_CONV_2VARIANT(vec_int4, vec_float4, vec_cts, vec_cts)
VSX_IMPL_CONV_2VARIANT(vec_uint4, vec_float4, vec_ctu, vec_ctu)
VSX_IMPL_CONV_2VARIANT(vec_float4, vec_int4, vec_ctf, vec_ctf)
VSX_IMPL_CONV_2VARIANT(vec_float4, vec_uint4, vec_ctf, vec_ctf)
// define vec_cts for converting double precision to signed doubleword
// which isn't compatible with xlc but it's okay since Eigen only uses it for gcc
VSX_IMPL_CONV_2VARIANT(vec_dword2, vec_double2, vec_cts, vec_ctsl)

@@ -128,11 +128,14 @@ void* allocSingletonNewBuffer(size_t size) { return malloc(size); }
#endif
#if CV_VSX && defined __linux__
#if (defined __ppc64__ || defined __PPC64__) && defined __linux__
# include "sys/auxv.h"
# ifndef AT_HWCAP2
# define AT_HWCAP2 26
# endif
# ifndef PPC_FEATURE2_ARCH_2_07
# define PPC_FEATURE2_ARCH_2_07 0x80000000
# endif
# ifndef PPC_FEATURE2_ARCH_3_00
# define PPC_FEATURE2_ARCH_3_00 0x00800000
# endif
@@ -588,14 +591,25 @@ struct HWFeatures
#ifdef __mips_msa
have[CV_CPU_MSA] = true;
#endif
// there's no need to check VSX availability in runtime since it's always available on ppc64le CPUs
have[CV_CPU_VSX] = (CV_VSX);
// TODO: Check VSX3 availability in runtime for other platforms
#if CV_VSX && defined __linux__
uint64 hwcap2 = getauxval(AT_HWCAP2);
have[CV_CPU_VSX3] = (hwcap2 & PPC_FEATURE2_ARCH_3_00);
#if (defined __ppc64__ || defined __PPC64__) && defined __linux__
unsigned int hwcap = getauxval(AT_HWCAP);
if (hwcap & PPC_FEATURE_HAS_VSX) {
hwcap = getauxval(AT_HWCAP2);
if (hwcap & PPC_FEATURE2_ARCH_3_00) {
have[CV_CPU_VSX] = have[CV_CPU_VSX3] = true;
} else {
have[CV_CPU_VSX] = (hwcap & PPC_FEATURE2_ARCH_2_07) != 0;
}
}
#else
have[CV_CPU_VSX3] = (CV_VSX3);
// TODO: AIX, FreeBSD
#if CV_VSX || defined _ARCH_PWR8 || defined __POWER9_VECTOR__
have[CV_CPU_VSX] = true;
#endif
#if CV_VSX3 || defined __POWER9_VECTOR__
have[CV_CPU_VSX3] = true;
#endif
#endif
#if defined __riscv && defined __riscv_vector
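
The runtime probe above can be exercised standalone on ppc64le Linux. A minimal sketch; the PPC_FEATURE* constants come from the kernel/glibc headers, which is why the hunk above provides fallback definitions for older toolchains:
@code{.cpp}
#include <sys/auxv.h>
#include <cstdio>

int main()
{
    bool vsx = false, vsx3 = false;
    unsigned long hwcap = getauxval(AT_HWCAP);
    if (hwcap & PPC_FEATURE_HAS_VSX)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);
        if (hwcap2 & PPC_FEATURE2_ARCH_3_00)       // POWER9 or newer
            vsx = vsx3 = true;
        else                                       // POWER8
            vsx = (hwcap2 & PPC_FEATURE2_ARCH_2_07) != 0;
    }
    std::printf("VSX=%d VSX3=%d\n", (int)vsx, (int)vsx3);
    return 0;
}
@endcode
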
@@ -1861,7 +1875,7 @@ class ParseError
{
std::string bad_value;
public:
ParseError(const std::string bad_value_) :bad_value(bad_value_) {}
ParseError(const std::string &bad_value_) :bad_value(bad_value_) {}
std::string toString(const std::string &param) const
{
std::ostringstream out;

@@ -120,6 +120,11 @@ TEST(OpenCL, support_SPIR_programs)
cv::ocl::ProgramSource src = cv::ocl::ProgramSource::fromSPIR(module_name, "simple_spir", (uchar*)&program_binary_code[0], program_binary_code.size(), "");
cv::String errmsg;
cv::ocl::Program program(src, "", errmsg);
if (program.ptr() == NULL && device.isAMD())
{
// https://community.amd.com/t5/opencl/spir-support-in-new-drivers-lost/td-p/170165
throw cvtest::SkipTestException("Bypass AMD OpenCL runtime bug: 'cl_khr_spir' extension is declared, but it doesn't really work");
}
ASSERT_TRUE(program.ptr() != NULL);
k.create("test_kernel", program);
}

@@ -138,6 +138,12 @@ public:
typedef std::map<int, std::vector<util::NormalizedBBox> > LabelBBox;
inline int getNumOfTargetClasses() {
unsigned numBackground =
(_backgroundLabelId >= 0 && _backgroundLabelId < _numClasses) ? 1 : 0;
return (_numClasses - numBackground);
}
bool getParameterDict(const LayerParams &params,
const std::string &parameterName,
DictValue& result)
@@ -590,12 +596,13 @@ public:
LabelBBox::const_iterator label_bboxes = decodeBBoxes.find(label);
if (label_bboxes == decodeBBoxes.end())
CV_Error_(cv::Error::StsError, ("Could not find location predictions for label %d", label));
int limit = (getNumOfTargetClasses() == 1) ? _keepTopK : std::numeric_limits<int>::max();
if (_bboxesNormalized)
NMSFast_(label_bboxes->second, scores, _confidenceThreshold, _nmsThreshold, 1.0, _topK,
indices[c], util::caffe_norm_box_overlap);
indices[c], util::caffe_norm_box_overlap, limit);
else
NMSFast_(label_bboxes->second, scores, _confidenceThreshold, _nmsThreshold, 1.0, _topK,
indices[c], util::caffe_box_overlap);
indices[c], util::caffe_box_overlap, limit);
numDetections += indices[c].size();
}
if (_keepTopK > -1 && numDetections > (size_t)_keepTopK)
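
A worked example of the new limit (illustrative values): with a single target class the per-class NMS can stop as soon as _keepTopK boxes are kept, because the later cross-class top-K filtering cannot change the result.
@code{.cpp}
#include <limits>

int numClasses = 2, backgroundLabelId = 0, keepTopK = 200;
int numBackground = (backgroundLabelId >= 0 && backgroundLabelId < numClasses) ? 1 : 0;
int numTarget = numClasses - numBackground;  // == 1
int limit = (numTarget == 1) ? keepTopK      // NMS may terminate early
                             : std::numeric_limits<int>::max();
@endcode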

@@ -403,7 +403,15 @@ public:
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2021_2)
auto mvn = std::make_shared<ngraph::op::MVN>(ieInpNode, acrossChannels, normVariance, eps);
#else
int64_t start_axis = acrossChannels ? 1 : 2;
std::vector<int64_t> axes_v(ieInpNode->get_shape().size() - start_axis);
std::iota(axes_v.begin(), axes_v.end(), start_axis);
auto axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes_v.size()}, axes_v.data());
auto mvn = std::make_shared<ngraph::op::v6::MVN>(ieInpNode, axes, normVariance, eps, ngraph::op::MVNEpsMode::INSIDE_SQRT);
#endif
return Ptr<BackendNode>(new InfEngineNgraphNode(mvn));
}
#endif // HAVE_DNN_NGRAPH
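
For the usual rank-4 NCHW input, the axes constructed above work out as follows; only the reduction axes differ between the two modes.
@code{.cpp}
#include <numeric>  // std::iota

bool acrossChannels = false;                   // layer attribute
int64_t start_axis = acrossChannels ? 1 : 2;
std::vector<int64_t> axes_v(4 - start_axis);   // rank-4 input
std::iota(axes_v.begin(), axes_v.end(), start_axis);
// acrossChannels == true  -> axes_v = {1, 2, 3}  (C, H, W)
// acrossChannels == false -> axes_v = {2, 3}     (H, W)
@endcode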

@@ -267,6 +267,7 @@ public:
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2021_2)
ngraph::op::InterpolateAttrs attrs;
attrs.pads_begin.push_back(0);
attrs.pads_end.push_back(0);
@@ -285,6 +286,37 @@ public:
std::vector<int64_t> shape = {outHeight, outWidth};
auto out_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shape.data());
auto interp = std::make_shared<ngraph::op::Interpolate>(ieInpNode, out_shape, attrs);
#else
ngraph::op::v4::Interpolate::InterpolateAttrs attrs;
if (interpolation == "nearest") {
attrs.mode = ngraph::op::v4::Interpolate::InterpolateMode::nearest;
attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::half_pixel;
} else if (interpolation == "bilinear") {
attrs.mode = ngraph::op::v4::Interpolate::InterpolateMode::linear_onnx;
attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::asymmetric;
} else {
CV_Error(Error::StsNotImplemented, format("Unsupported interpolation: %s", interpolation.c_str()));
}
attrs.shape_calculation_mode = ngraph::op::v4::Interpolate::ShapeCalcMode::sizes;
if (alignCorners) {
attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::align_corners;
}
attrs.nearest_mode = ngraph::op::v4::Interpolate::NearestMode::round_prefer_floor;
std::vector<int64_t> shape = {outHeight, outWidth};
auto out_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shape.data());
auto& input_shape = ieInpNode->get_shape();
CV_Assert_N(input_shape[2] != 0, input_shape[3] != 0);
std::vector<float> scales = {static_cast<float>(outHeight) / input_shape[2], static_cast<float>(outWidth) / input_shape[3]};
auto scales_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{2}, scales.data());
auto axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{2, 3});
auto interp = std::make_shared<ngraph::op::v4::Interpolate>(ieInpNode, out_shape, scales_shape, axes, attrs);
#endif
return Ptr<BackendNode>(new InfEngineNgraphNode(interp));
}
#endif // HAVE_DNN_NGRAPH
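
Although shape_calculation_mode is set to `sizes`, the v4 Interpolate constructor still takes a scales input; a worked example of the values computed above:
@code{.cpp}
// input 1x3x32x32 resized to 64x48 (NCHW; axes {2, 3} are H and W)
std::vector<size_t> input_shape = {1, 3, 32, 32};
int outHeight = 64, outWidth = 48;
std::vector<float> scales = {
    static_cast<float>(outHeight) / input_shape[2],   // 2.0f
    static_cast<float>(outWidth)  / input_shape[3]    // 1.5f
};
@endcode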

@@ -62,12 +62,15 @@ inline void GetMaxScoreIndex(const std::vector<float>& scores, const float thres
// score_threshold: a threshold used to filter detection results.
// nms_threshold: a threshold used in non maximum suppression.
// top_k: if not > 0, keep at most top_k picked indices.
// limit: early terminate once the # of picked indices has reached it.
// indices: the kept indices of bboxes after nms.
template <typename BoxType>
inline void NMSFast_(const std::vector<BoxType>& bboxes,
const std::vector<float>& scores, const float score_threshold,
const float nms_threshold, const float eta, const int top_k,
std::vector<int>& indices, float (*computeOverlap)(const BoxType&, const BoxType&))
std::vector<int>& indices,
float (*computeOverlap)(const BoxType&, const BoxType&),
int limit = std::numeric_limits<int>::max())
{
CV_Assert(bboxes.size() == scores.size());
@@ -86,8 +89,12 @@ inline void NMSFast_(const std::vector<BoxType>& bboxes,
float overlap = computeOverlap(bboxes[idx], bboxes[kept_idx]);
keep = overlap <= adaptive_threshold;
}
if (keep)
if (keep) {
indices.push_back(idx);
if (indices.size() >= limit) {
break;
}
}
if (keep && eta < 1 && adaptive_threshold > 0.5) {
adaptive_threshold *= eta;
}
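
A minimal usage sketch of the extended NMSFast_ signature (the IoU callback is hypothetical; in this tree, only the detection output layer passes a finite limit):
@code{.cpp}
// hypothetical overlap callback: intersection-over-union for cv::Rect
static float rectIoU(const cv::Rect& a, const cv::Rect& b)
{
    float inter = (float)(a & b).area();
    return inter / (float)(a.area() + b.area() - inter);
}

std::vector<cv::Rect> boxes;    // candidate boxes, filled elsewhere
std::vector<float> scores;      // one score per box
std::vector<int> kept;
NMSFast_(boxes, scores, 0.5f /*score_threshold*/, 0.45f /*nms_threshold*/,
         1.f /*eta*/, 0 /*top_k: 0 keeps all*/, kept, rectIoU,
         100 /*limit: stop once 100 indices are kept*/);
@endcode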

@@ -119,6 +119,7 @@ type_dict = {
'InputOutputArray': 'cv::Mat&',
'InputArrayOfArrays': 'const std::vector<cv::Mat>&',
'OutputArrayOfArrays': 'std::vector<cv::Mat>&',
'string': 'std::string',
'String': 'std::string',
'const String&':'const std::string&'
}
@@ -462,8 +463,7 @@ class JSWrapperGenerator(object):
ret_type = type_dict[ptr_type]
for key in type_dict:
if key in ret_type:
ret_type = ret_type.replace(key, type_dict[key])
ret_type = re.sub('(^|[^\w])' + key + '($|[^\w])', type_dict[key], ret_type)
arg_types = []
unwrapped_arg_types = []
for arg in variant.args:
@@ -567,7 +567,7 @@ class JSWrapperGenerator(object):
# consider the default parameter variants
args_num = len(variant.args) - j
if args_num in class_info.constructor_arg_num:
# FIXME: workaournd for constructor overload with same args number
# FIXME: workaround for constructor overload with same args number
# e.g. DescriptorMatcher
continue
class_info.constructor_arg_num.add(args_num)
@@ -627,7 +627,6 @@ class JSWrapperGenerator(object):
ret_type = 'void' if variant.rettype.strip() == '' else variant.rettype
ret_type = ret_type.strip()
if ret_type.startswith('Ptr'): #smart pointer
ptr_type = ret_type.replace('Ptr<', '').replace('>', '')
if ptr_type in type_dict:

@@ -159,3 +159,44 @@ QUnit.test('Cascade classification', function(assert) {
locations.delete();
}
});
QUnit.test('QR code detect and decode', function (assert) {
{
const detector = new cv.QRCodeDetector();
let mat = cv.Mat.ones(800, 600, cv.CV_8U);
assert.ok(mat);
// test detect
let points = new cv.Mat();
let qrCodeFound = detector.detect(mat, points);
assert.equal(points.rows, 0)
assert.equal(points.cols, 0)
assert.equal(qrCodeFound, false);
// test detectMulti
qrCodeFound = detector.detectMulti(mat, points);
assert.equal(points.rows, 0)
assert.equal(points.cols, 0)
assert.equal(qrCodeFound, false);
// test decode (with random numbers)
let decodeTestPoints = cv.matFromArray(1, 4, cv.CV_32FC2, [10, 20, 30, 40, 60, 80, 90, 100]);
let qrCodeContent = detector.decode(mat, decodeTestPoints);
assert.equal(typeof qrCodeContent, 'string');
assert.equal(qrCodeContent, '');
//test detectAndDecode
qrCodeContent = detector.detectAndDecode(mat);
assert.equal(typeof qrCodeContent, 'string');
assert.equal(qrCodeContent, '');
// test decodeCurved
qrCodeContent = detector.decodeCurved(mat, decodeTestPoints);
assert.equal(typeof qrCodeContent, 'string');
assert.equal(qrCodeContent, '');
decodeTestPoints.delete();
points.delete();
mat.delete();
}
});
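
For comparison, the same smoke test against the C++ API: a blank image contains no QR code, so detection reports false and the decode calls yield empty strings.
@code{.cpp}
cv::QRCodeDetector detector;
cv::Mat img = cv::Mat::ones(800, 600, CV_8U);

std::vector<cv::Point2f> points;
bool found = detector.detect(img, points);            // expected: false

std::string payload = detector.detectAndDecode(img);  // expected: ""
CV_Assert(!found && payload.empty());
@endcode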

@@ -115,7 +115,7 @@ class Builder:
"-DWITH_GPHOTO2=OFF",
"-DWITH_LAPACK=OFF",
"-DWITH_ITT=OFF",
"-DWITH_QUIRC=OFF",
"-DWITH_QUIRC=ON",
"-DBUILD_ZLIB=ON",
"-DBUILD_opencv_apps=OFF",
"-DBUILD_opencv_calib3d=ON",

@@ -36,7 +36,8 @@ imgproc = {'': ['Canny', 'GaussianBlur', 'Laplacian', 'HoughLines', 'HoughLinesP',
objdetect = {'': ['groupRectangles'],
'HOGDescriptor': ['load', 'HOGDescriptor', 'getDefaultPeopleDetector', 'getDaimlerPeopleDetector', 'setSVMDetector', 'detectMultiScale'],
'CascadeClassifier': ['load', 'detectMultiScale2', 'CascadeClassifier', 'detectMultiScale3', 'empty', 'detectMultiScale']}
'CascadeClassifier': ['load', 'detectMultiScale2', 'CascadeClassifier', 'detectMultiScale3', 'empty', 'detectMultiScale'],
'QRCodeDetector': ['QRCodeDetector', 'decode', 'decodeCurved', 'detect', 'detectAndDecode', 'detectMulti', 'setEpsX', 'setEpsY']}
video = {'': ['CamShift', 'calcOpticalFlowFarneback', 'calcOpticalFlowPyrLK', 'createBackgroundSubtractorMOG2', \
'findTransformECC', 'meanShift'],

@@ -418,7 +418,12 @@ int main(int argc, char* argv[])
{
Mat temp = view.clone();
if (s.useFisheye)
cv::fisheye::undistortImage(temp, view, cameraMatrix, distCoeffs);
{
Mat newCamMat;
fisheye::estimateNewCameraMatrixForUndistortRectify(cameraMatrix, distCoeffs, imageSize,
Matx33d::eye(), newCamMat, 1);
cv::fisheye::undistortImage(temp, view, cameraMatrix, distCoeffs, newCamMat);
}
else
undistort(temp, view, cameraMatrix, distCoeffs);
}
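
The fisheye fix in isolation: estimateNewCameraMatrixForUndistortRectify() with balance=1 produces a new camera matrix that preserves the full field of view, which undistortImage() then uses instead of the original (overly tight) calibration matrix. A minimal sketch, assuming cameraMatrix/distCoeffs come from a fisheye calibration:
@code{.cpp}
cv::Mat newCamMat, undistorted;
cv::fisheye::estimateNewCameraMatrixForUndistortRectify(
    cameraMatrix, distCoeffs, imageSize, cv::Matx33d::eye(), newCamMat,
    1 /*balance: 1 keeps the whole FOV, 0 crops to mostly valid pixels*/);
cv::fisheye::undistortImage(view, undistorted, cameraMatrix, distCoeffs, newCamMat);
@endcode
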
@@ -547,7 +552,7 @@ static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat
{
//! [fixed_aspect]
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( s.flag & CALIB_FIX_ASPECT_RATIO )
if( !s.useFisheye && s.flag & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = s.aspectRatio;
//! [fixed_aspect]
if (s.useFisheye) {
@@ -630,7 +635,7 @@ static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, M
fs << "board_height" << s.boardSize.height;
fs << "square_size" << s.squareSize;
if( s.flag & CALIB_FIX_ASPECT_RATIO )
if( !s.useFisheye && s.flag & CALIB_FIX_ASPECT_RATIO )
fs << "fix_aspect_ratio" << s.aspectRatio;
if (s.flag)

@@ -113,7 +113,7 @@ class BilinearFilter(object):
out[yy] = np.round(np.sum(img[ymin : ymin + ymax, 0:out.shape[1]] * k[:, np.newaxis], axis=0))
def imaging_resample(self, img, xsize, ysize):
height, width, *args = img.shape
height, width = img.shape[0:2]
bounds_horiz, kk_horiz, ksize_horiz = self._precompute_coeffs(width, xsize)
bounds_vert, kk_vert, ksize_vert = self._precompute_coeffs(height, ysize)
@@ -233,7 +233,6 @@ class CpVton(object):
return Li
def _prepare_to_transform(self, out_h=256, out_w=192, grid_size=5):
grid = np.zeros([out_h, out_w, 3], dtype=np.float32)
grid_X, grid_Y = np.meshgrid(np.linspace(-1, 1, out_w), np.linspace(-1, 1, out_h))
grid_X = np.expand_dims(np.expand_dims(grid_X, axis=0), axis=3)
grid_Y = np.expand_dims(np.expand_dims(grid_Y, axis=0), axis=3)
@@ -398,7 +397,7 @@ class CorrelationLayer(object):
def getMemoryShapes(self, inputs):
fetureAShape = inputs[0]
b, c, h, w = fetureAShape
b, _, h, w = fetureAShape
return [[b, h * w, h, w]]
def forward(self, inputs):
