improve code quality

- eliminate rand() calls
- uninitialized members/variables
- unused return values
- missing/useless NULL checks
pull/11567/head
Alexander Alekhin 7 years ago
parent e1b96b6d82
commit 471c17321f
  1. 4
      modules/dnn/misc/python/pyopencv_dnn.hpp
  2. 6
      modules/dnn/src/dnn.cpp
  3. 3
      modules/dnn/src/layers/recurrent_layers.cpp
  4. 2
      modules/dnn/src/ocl4dnn/src/math_functions.cpp
  5. 7
      modules/dnn/test/test_halide_layers.cpp
  6. 3
      modules/dnn/test/test_tf_importer.cpp
  7. 3
      modules/imgproc/src/color.hpp
  8. 16
      modules/imgproc/test/test_intersection.cpp

@ -142,7 +142,7 @@ public:
PyGILState_Release(gstate);
if (!res)
CV_Error(Error::StsNotImplemented, "Failed to call \"getMemoryShapes\" method");
pyopencv_to_generic_vec(res, outputs, ArgInfo("", 0));
CV_Assert(pyopencv_to_generic_vec(res, outputs, ArgInfo("", 0)));
return false;
}
@ -163,7 +163,7 @@ public:
CV_Error(Error::StsNotImplemented, "Failed to call \"forward\" method");
std::vector<Mat> pyOutputs;
pyopencv_to(res, pyOutputs, ArgInfo("", 0));
CV_Assert(pyopencv_to(res, pyOutputs, ArgInfo("", 0)));
CV_Assert(pyOutputs.size() == outputs.size());
for (size_t i = 0; i < outputs.size(); ++i)

@ -1530,10 +1530,12 @@ struct Net::Impl
LayerData *eltwiseData = nextData;
// go down from the second input and find the first non-skipped layer.
LayerData *downLayerData = &layers[eltwiseData->inputBlobsId[1].lid];
CV_Assert(downLayerData);
while (downLayerData->skip)
{
downLayerData = &layers[downLayerData->inputBlobsId[0].lid];
}
CV_Assert(downLayerData);
// second input layer is current layer.
if ( ld.id == downLayerData->id )
@ -1548,9 +1550,7 @@ struct Net::Impl
downLayerData = &layers[downLayerData->inputBlobsId[0].lid];
}
Ptr<ConvolutionLayer> convLayer;
if( downLayerData )
convLayer = downLayerData->layerInstance.dynamicCast<ConvolutionLayer>();
Ptr<ConvolutionLayer> convLayer = downLayerData->layerInstance.dynamicCast<ConvolutionLayer>();
// first input layer is convolution layer
if( !convLayer.empty() && eltwiseData->consumers.size() == 1 )

@ -119,9 +119,10 @@ public:
if (blobs.size() > 3)
{
CV_Assert(blobs.size() == 6);
const int N = Wh.cols;
for (int i = 3; i < 6; ++i)
{
CV_Assert(blobs[i].rows == Wh.cols && blobs[i].cols == Wh.cols);
CV_Assert(blobs[i].rows == N && blobs[i].cols == N);
CV_Assert(blobs[i].type() == bias.type());
}
}

@ -504,7 +504,7 @@ static bool ocl4dnnFastBufferGEMM(const CBLAS_TRANSPOSE TransA,
oclk_gemm_float.set(arg_idx++, (float)alpha);
oclk_gemm_float.set(arg_idx++, (float)beta);
bool ret;
bool ret = true;
if (TransB == CblasNoTrans || TransA != CblasNoTrans) {
int stride = 256;
for (int start_index = 0; start_index < K; start_index += stride) {

@ -40,17 +40,18 @@ TEST(Padding_Halide, Accuracy)
{
static const int kNumRuns = 10;
std::vector<int> paddings(8);
cv::RNG& rng = cv::theRNG();
for (int t = 0; t < kNumRuns; ++t)
{
for (int i = 0; i < paddings.size(); ++i)
paddings[i] = rand() % 5;
paddings[i] = rng(5);
LayerParams lp;
lp.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
lp.type = "Padding";
lp.name = "testLayer";
Mat input({1 + rand() % 10, 1 + rand() % 10, 1 + rand() % 10, 1 + rand() % 10}, CV_32F);
Mat input({1 + rng(10), 1 + rng(10), 1 + rng(10), 1 + rng(10)}, CV_32F);
test(lp, input);
}
}
@ -633,7 +634,7 @@ TEST_P(Eltwise, Accuracy)
eltwiseParam.set("operation", op);
if (op == "sum" && weighted)
{
RNG rng = cv::theRNG();
RNG& rng = cv::theRNG();
std::vector<float> coeff(1 + numConv);
for (int i = 0; i < coeff.size(); ++i)
{

@ -376,7 +376,8 @@ TEST(Test_TensorFlow, memory_read)
class ResizeBilinearLayer CV_FINAL : public Layer
{
public:
ResizeBilinearLayer(const LayerParams &params) : Layer(params)
ResizeBilinearLayer(const LayerParams &params) : Layer(params),
outWidth(0), outHeight(0), factorWidth(1), factorHeight(1)
{
CV_Assert(!params.get<bool>("align_corners", false));
CV_Assert(!blobs.empty());

@ -285,7 +285,8 @@ struct CvtHelper
template< typename VScn, typename VDcn, typename VDepth, SizePolicy sizePolicy = NONE >
struct OclHelper
{
OclHelper( InputArray _src, OutputArray _dst, int dcn)
OclHelper( InputArray _src, OutputArray _dst, int dcn) :
nArgs(0)
{
src = _src.getUMat();
Size sz = src.size(), dstSz;

@ -357,20 +357,22 @@ void CV_RotatedRectangleIntersectionTest::test13()
void CV_RotatedRectangleIntersectionTest::test14()
{
const int kNumTests = 100;
const int kWidth = 5;
const int kHeight = 5;
const float kWidth = 5;
const float kHeight = 5;
RotatedRect rects[2];
std::vector<Point2f> inter;
cv::RNG& rng = cv::theRNG();
for (int i = 0; i < kNumTests; ++i)
{
for (int j = 0; j < 2; ++j)
{
rects[j].center = Point2f((float)(rand() % kWidth), (float)(rand() % kHeight));
rects[j].size = Size2f(rand() % kWidth + 1.0f, rand() % kHeight + 1.0f);
rects[j].angle = (float)(rand() % 360);
rects[j].center = Point2f(rng.uniform(0.0f, kWidth), rng.uniform(0.0f, kHeight));
rects[j].size = Size2f(rng.uniform(1.0f, kWidth), rng.uniform(1.0f, kHeight));
rects[j].angle = rng.uniform(0.0f, 360.0f);
}
rotatedRectangleIntersection(rects[0], rects[1], inter);
ASSERT_TRUE(inter.size() < 4 || isContourConvex(inter));
int res = rotatedRectangleIntersection(rects[0], rects[1], inter);
EXPECT_TRUE(res == INTERSECT_NONE || res == INTERSECT_PARTIAL || res == INTERSECT_FULL) << res;
ASSERT_TRUE(inter.size() < 4 || isContourConvex(inter)) << inter;
}
}

Loading…
Cancel
Save