Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/20798/head
Alexander Alekhin 3 years ago
commit 37c3f0d8a0
Changed files (change counts in parentheses):
  1. doc/tutorials/app/video_input_psnr_ssim.markdown (2)
  2. modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp (28)
  3. modules/dnn/src/opencl/conv_layer_spatial.cl (23)
  4. modules/imgproc/include/opencv2/imgproc.hpp (15)
  5. modules/imgproc/src/hough.cpp (6)
  6. modules/imgproc/src/lsd.cpp (987)
  7. modules/imgproc/src/smooth.simd.hpp (5)
  8. modules/imgproc/test/test_lsd.cpp (4)
  9. modules/imgproc/test/test_smooth_bitexact.cpp (11)
  10. modules/java/android_sdk/build.gradle.in (2)
  11. modules/python/src2/hdr_parser.py (8)
  12. modules/videoio/src/cap_gstreamer.cpp (6)
  13. samples/cpp/lsd_lines.cpp (73)

@ -188,7 +188,7 @@ implementation below.
This will return a similarity index for each channel of the image. This value is between zero and
one, where one corresponds to a perfect fit. Unfortunately, the repeated Gaussian blurring is quite
costly, so while the PSNR may work in a real-time-like environment (24 frame per second) this will
costly, so while the PSNR may work in a real-time-like environment (24 frames per second) this will
take significantly longer to accomplish similar results.
Therefore, the source code presented at the start of the tutorial will perform the PSNR measurement
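For context, a minimal sketch of the PSNR computation the tutorial text refers to is shown below. It is not the tutorial's exact getPSNR helper; the function name and the hard-coded 8-bit maximum are illustrative assumptions.

    #include <opencv2/core.hpp>
    #include <cmath>

    // Illustrative PSNR sketch: 10 * log10(MAX_I^2 / MSE) over all channels.
    static double computePSNR(const cv::Mat& I1, const cv::Mat& I2)
    {
        cv::Mat diff;
        cv::absdiff(I1, I2, diff);          // |I1 - I2|, saturated per channel
        diff.convertTo(diff, CV_32F);       // widen before squaring to avoid overflow
        diff = diff.mul(diff);              // element-wise square
        cv::Scalar s = cv::sum(diff);       // per-channel sums of squared error
        double sse = s[0] + s[1] + s[2];
        if (sse <= 1e-10)                   // effectively identical images
            return 0.0;
        double mse = sse / ((double)I1.channels() * I1.total());
        return 10.0 * std::log10(255.0 * 255.0 / mse); // assumes 8-bit images
    }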

@ -1034,12 +1034,15 @@ bool OCL4DNNConvSpatial<float>::convolve(const UMat &bottom, UMat &top,
kernel.set(argIdx++, (uint16_t)output_w_);
kernel.set(argIdx++, (uint16_t)output_h_);
size_t global_size[3];
global_size[0] = output_w_;
global_size[1] = output_h_;
global_size[2] = num_output_ * num_;
if (!kernel.run_(3, global_size, NULL, false))
size_t wgs = kernel.workGroupSize();
if (!wgs)
{
CV_LOG_ERROR(NULL, "DNN/OpenCL: Can't query workGroupSize of DWCONV kernel");
return false;
}
size_t lws[1] = { wgs };
size_t gws[1] = { roundUp((size_t)output_w_ * output_h_ * num_output_ * num_, (unsigned)lws[0]) };
if (!kernel.run_(1, gws, lws, false))
{
CV_LOG_ERROR(NULL, "DNN/OpenCL: DWCONV kernel run failed");
return false;
@ -1081,9 +1084,16 @@ bool OCL4DNNConvSpatial<float>::convolve(const UMat &bottom, UMat &top,
kernel.set(argIdx++, (uint16_t)output_h_);
kernel.set(argIdx++, (uint16_t)pad_w_);
kernel.set(argIdx++, (uint16_t)pad_h_);
if (!kernel.run_(3, config->global_work_size,
(config->use_null_local) ? NULL : config->local_work_size,
false))
size_t wgs = kernel.workGroupSize();
if (!wgs)
{
CV_LOG_ERROR(NULL, "DNN/OpenCL: Can't query workGroupSize of Basic kernel");
return false;
}
size_t lws[1] = { wgs };
size_t gws[1] = { roundUp((size_t)output_w_ * output_h_ * M_, (unsigned)lws[0]) };
if (!kernel.run_(1, gws, lws, false))
{
CV_LOG_ERROR(NULL, "DNN/OpenCL: Basic kernel run failed");
return false;
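Both hunks above switch the launch from a 3-D grid with a NULL local size to a 1-D grid whose local size is the kernel's reported work-group size; the total work-item count is rounded up so the global size is a whole multiple of the local size, and the padded items are discarded inside the kernel. A small stand-alone sketch of that rounding arithmetic follows (the helper name is illustrative, not OpenCV's internal roundUp):

    #include <cstddef>
    #include <cstdio>

    // Round value up to the nearest multiple of grain (grain > 0), so an explicit
    // 1-D local work size evenly divides the global work size.
    static size_t roundUpToMultiple(size_t value, size_t grain)
    {
        size_t rem = value % grain;
        return rem == 0 ? value : value + (grain - rem);
    }

    int main()
    {
        // e.g. 128x128 outputs, 32 output channels, batch 1, work-group size 192
        size_t items = 128 * 128 * 32 * 1;            // 524288 work items
        size_t gws   = roundUpToMultiple(items, 192); // 524352, slight padding
        std::printf("global=%zu local=%zu\n", gws, (size_t)192);
        return 0;
    }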

@ -158,10 +158,14 @@ __kernel void ConvolveBasic(
)
{
__global Dtype* convolved_image = convolved_image_base + convolved_image_base_offset;
const int outputX = get_global_id(0);
const int outputY = get_global_id(1);
const int kernelNum = get_global_id(2) * ZPAR;
if (outputX < output_width && outputY < output_height)
const int out_idx = get_global_id(0); // 1D task layout: [output_width * output_height * OUTPUT_Z]
const int plane_size = output_width * output_height;
const int out_plane_idx = out_idx % plane_size;
const int outputZ = out_idx / plane_size;
const int outputY = out_plane_idx / output_width;
const int outputX = out_plane_idx % output_width;
const int kernelNum = outputZ * ZPAR;
if (kernelNum < OUTPUT_Z)
{
Dtype sum[ZPAR];
for (int kern = 0; kern < ZPAR; kern++)
@ -1846,10 +1850,13 @@ __kernel void DWCONV(
const ushort output_width,
const ushort output_height) {
__global Dtype* convolved_image = convolved_image_base + convolved_image_offset;
const int outputX = get_global_id(0);
const int outputY = get_global_id(1);
const int outputZ = get_global_id(2);
if(outputX < output_width && outputY < output_height)
const int out_idx = get_global_id(0); // 1D task layout: [output_width * output_height * OUTPUT_Z]
const int plane_size = output_width * output_height;
const int out_plane_idx = out_idx % plane_size;
const int outputZ = out_idx / plane_size;
const int outputY = out_plane_idx / output_width;
const int outputX = out_plane_idx % output_width;
if (outputZ < OUTPUT_Z)
{
Dtype sum = 0.;
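The kernel-side counterpart of that change flattens (x, y, z) into one global id and decodes it with a div/mod pair; work items added by the round-up decode to an out-of-range z (or kernelNum) and are rejected by the new guard. Below is a tiny host-side sketch verifying that the decode is a lossless round-trip (sizes are illustrative):

    #include <cassert>
    #include <cstdio>

    int main()
    {
        const int output_width = 5, output_height = 3, output_z = 4;
        const int plane_size = output_width * output_height;
        // Decode every flattened index the same way the kernels do and check
        // that it maps back to the index it came from.
        for (int out_idx = 0; out_idx < plane_size * output_z; ++out_idx)
        {
            int out_plane_idx = out_idx % plane_size;
            int z = out_idx / plane_size;
            int y = out_plane_idx / output_width;
            int x = out_plane_idx % output_width;
            assert(z * plane_size + y * output_width + x == out_idx);
        }
        std::printf("decode verified for %d work items\n", plane_size * output_z);
        return 0;
    }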

@ -1259,12 +1259,17 @@ protected:
//! @addtogroup imgproc_feature
//! @{
/** @example samples/cpp/lsd_lines.cpp
An example using the LineSegmentDetector
\image html building_lsd.png "Sample output image" width=434 height=300
*/
/** @brief Line segment detector class
following the algorithm described at @cite Rafael12 .
@note Implementation has been removed due original code license conflict
@note The implementation was removed in OpenCV versions 3.4.6 to 3.4.15 and 4.1.0 to 4.5.3 due to an original code license conflict.
It was restored after the [Computation of a NFA](https://github.com/rafael-grompone-von-gioi/binomial_nfa) code was published under the MIT license.
*/
class CV_EXPORTS_W LineSegmentDetector : public Algorithm
{
@ -1278,8 +1283,8 @@ public:
@param image A grayscale (CV_8UC1) input image. If only a roi needs to be selected, use:
`lsd_ptr-\>detect(image(roi), lines, ...); lines += Scalar(roi.x, roi.y, roi.x, roi.y);`
@param lines A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line. Where
Vec4i/Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly
@param lines A vector of Vec4f elements specifying the beginning and ending point of a line. Where
Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly
oriented depending on the gradient.
@param width Vector of widths of the regions, where the lines are found. E.g. Width of line.
@param prec Vector of precisions with which the lines are found.
@ -1327,8 +1332,6 @@ to edit those, as to tailor it for their own application.
@param log_eps Detection threshold: -log10(NFA) \> log_eps. Used only when advanced refinement is chosen.
@param density_th Minimal density of aligned region points in the enclosing rectangle.
@param n_bins Number of bins in pseudo-ordering of gradient modulus.
@note Implementation has been removed due original code license conflict
*/
CV_EXPORTS_W Ptr<LineSegmentDetector> createLineSegmentDetector(
int refine = LSD_REFINE_STD, double scale = 0.8,
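As a quick usage sketch of the detect() API documented above, including the ROI-offset trick from its docstring (file name and ROI are illustrative; the endpoints are shifted element by element rather than with the Scalar shorthand):

    #include <opencv2/imgproc.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <vector>

    int main()
    {
        cv::Mat gray = cv::imread("building.jpg", cv::IMREAD_GRAYSCALE);
        if (gray.empty())
            return 1;
        cv::Ptr<cv::LineSegmentDetector> lsd = cv::createLineSegmentDetector(cv::LSD_REFINE_STD);
        // Detect only inside a region of interest, then shift the endpoints back
        // into full-image coordinates, as the detect() documentation suggests.
        cv::Rect roi(0, 0, gray.cols / 2, gray.rows / 2);
        std::vector<cv::Vec4f> lines;
        lsd->detect(gray(roi), lines);
        for (cv::Vec4f& l : lines)
            l = cv::Vec4f(l[0] + roi.x, l[1] + roi.y, l[2] + roi.x, l[3] + roi.y);
        return 0;
    }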

@ -435,12 +435,14 @@ HoughLinesSDiv( InputArray image, OutputArray lines, int type,
}
}
int pos = (int)(lst.size() - 1);
if( pos >= 0 && lst[pos].rho < 0 )
lst.pop_back();
lines.create((int)lst.size(), 1, type);
Mat _lines = lines.getMat();
for( size_t idx = 0; idx < lst.size(); idx++ )
{
if( lst[idx].rho < 0 )
continue;
if (type == CV_32FC2)
{
_lines.at<Vec2f>((int)idx) = Vec2f(lst[idx].rho, lst[idx].theta);

File diff for modules/imgproc/src/lsd.cpp (987 changes) suppressed because it is too large.

@ -1958,7 +1958,10 @@ public:
}
else if (kxlen % 2 == 1)
{
hlineSmoothFunc = hlineSmoothONa_yzy_a;
if (kx[(kxlen - 1)/ 2] == FT::one())
hlineSmoothFunc = hlineSmooth1N1;
else
hlineSmoothFunc = hlineSmoothONa_yzy_a;
for (int i = 0; i < kxlen / 2; i++)
if (!(kx[i] == kx[kxlen - 1 - i]))
{

@ -5,8 +5,6 @@
namespace opencv_test { namespace {
#if 0 // LSD implementation has been removed due original code license issues
const Size img_size(640, 480);
const int LSD_TEST_SEED = 0x134679;
const int EPOCHS = 20;
@ -404,6 +402,4 @@ TEST_F(Imgproc_LSD_Common, compareSegmentsVec4i)
ASSERT_EQ(result2, 11);
}
#endif
}} // namespace

@ -249,4 +249,15 @@ TEST(GaussianBlur_Bitexact, regression_9863)
checkGaussianBlur_8Uvs32F(src8u, src32f, 151, 30);
}
TEST(GaussianBlur_Bitexact, overflow_20792)
{
Mat src(128, 128, CV_16UC1, Scalar(255));
Mat dst;
double sigma = theRNG().uniform(0.0, 0.2); // a peaky kernel
GaussianBlur(src, dst, Size(7, 7), sigma, 0.9);
int count = (int)countNonZero(dst);
int nintyPercent = (int)(src.rows*src.cols * 0.9);
EXPECT_GT(count, nintyPercent);
}
}} // namespace

@ -58,7 +58,7 @@
//
// - Use find_package() in app/CMakeLists.txt:
//
// find_package(OpenCV 3.4 REQUIRED java)
// find_package(OpenCV @OPENCV_VERSION_MAJOR@.@OPENCV_VERSION_MINOR@ REQUIRED java)
// ...
// target_link_libraries(native-lib ${OpenCV_LIBRARIES})
//

@ -53,13 +53,13 @@ class CppHeaderParser(object):
def get_macro_arg(self, arg_str, npos):
npos2 = npos3 = arg_str.find("(", npos)
if npos2 < 0:
print("Error: no arguments for the macro at %d" % (self.lineno,))
print("Error: no arguments for the macro at %s:%d" % (self.hname, self.lineno))
sys.exit(-1)
balance = 1
while 1:
t, npos3 = self.find_next_token(arg_str, ['(', ')'], npos3+1)
if npos3 < 0:
print("Error: no matching ')' in the macro call at %d" % (self.lineno,))
print("Error: no matching ')' in the macro call at %s:%d" % (self.hname, self.lineno))
sys.exit(-1)
if t == '(':
balance += 1
@ -166,7 +166,7 @@ class CppHeaderParser(object):
angle_stack.append(0)
elif w == "," or w == '>':
if not angle_stack:
print("Error at %d: argument contains ',' or '>' not within template arguments" % (self.lineno,))
print("Error at %s:%d: argument contains ',' or '>' not within template arguments" % (self.hname, self.lineno))
sys.exit(-1)
if w == ",":
arg_type += "_and_"
@ -196,7 +196,7 @@ class CppHeaderParser(object):
p1 = arg_name.find("[")
p2 = arg_name.find("]",p1+1)
if p2 < 0:
print("Error at %d: no closing ]" % (self.lineno,))
print("Error at %s:%d: no closing ]" % (self.hname, self.lineno))
sys.exit(-1)
counter_str = arg_name[p1+1:p2].strip()
if counter_str == "":

@ -221,10 +221,10 @@ private:
start_loop = utils::getConfigurationParameterBool("OPENCV_VIDEOIO_GSTREAMER_START_MAINLOOP", false);
GSafePtr<GError> err;
gst_init_check(NULL, NULL, err.getRef());
if (err)
gboolean gst_init_res = gst_init_check(NULL, NULL, err.getRef());
if (!gst_init_res)
{
CV_WARN("Can't initialize GStreamer: " << err->message);
CV_WARN("Can't initialize GStreamer: " << (err ? err->message : "<unknown reason>"));
isFailed = true;
return;
}
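The fix above tests the gboolean returned by gst_init_check() instead of the GError pointer, and only dereferences the error if it was actually set. A minimal stand-alone sketch of the same pattern with the plain GLib/GStreamer API, outside OpenCV's GSafePtr wrapper:

    #include <gst/gst.h>
    #include <cstdio>

    int main()
    {
        GError* err = NULL;
        // gst_init_check() returns FALSE on failure; the GError may or may not be filled.
        gboolean ok = gst_init_check(NULL, NULL, &err);
        if (!ok)
        {
            std::fprintf(stderr, "Can't initialize GStreamer: %s\n",
                         err ? err->message : "<unknown reason>");
            if (err)
                g_error_free(err);
            return 1;
        }
        gst_deinit();
        return 0;
    }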

@ -0,0 +1,73 @@
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv,
"{input i|building.jpg|input image}"
"{refine r|false|if true use LSD_REFINE_STD method, if false use LSD_REFINE_NONE method}"
"{canny c|false|use Canny edge detector}"
"{overlay o|false|show result on input image}"
"{help h|false|show help message}");
if (parser.get<bool>("help"))
{
parser.printMessage();
return 0;
}
parser.printMessage();
String filename = samples::findFile(parser.get<String>("input"));
bool useRefine = parser.get<bool>("refine");
bool useCanny = parser.get<bool>("canny");
bool overlay = parser.get<bool>("overlay");
Mat image = imread(filename, IMREAD_GRAYSCALE);
if( image.empty() )
{
cout << "Unable to load " << filename;
return 1;
}
imshow("Source Image", image);
if (useCanny)
{
Canny(image, image, 50, 200, 3); // Apply Canny edge detector
}
// Create an LSD detector with standard or no refinement.
Ptr<LineSegmentDetector> ls = useRefine ? createLineSegmentDetector(LSD_REFINE_STD) : createLineSegmentDetector(LSD_REFINE_NONE);
double start = double(getTickCount());
vector<Vec4f> lines_std;
// Detect the lines
ls->detect(image, lines_std);
double duration_ms = (double(getTickCount()) - start) * 1000 / getTickFrequency();
std::cout << "It took " << duration_ms << " ms." << std::endl;
// Show found lines
if (!overlay || useCanny)
{
image = Scalar(0, 0, 0);
}
ls->drawSegments(image, lines_std);
String window_name = useRefine ? "Result - standard refinement" : "Result - no refinement";
window_name += useCanny ? " - Canny edge detector used" : "";
imshow(window_name, image);
waitKey();
return 0;
}