diff --git a/modules/dnn/include/opencv2/dnn/blob.hpp b/modules/dnn/include/opencv2/dnn/blob.hpp
index d830f3c78..6a02fd42b 100644
--- a/modules/dnn/include/opencv2/dnn/blob.hpp
+++ b/modules/dnn/include/opencv2/dnn/blob.hpp
@@ -51,6 +51,9 @@ namespace dnn
 {
 public:
     explicit Blob();
+
+    explicit Blob(const BlobShape &shape, int type = CV_32F);
+
     /** @brief constructs 4-dimensional blob from input
      * @param in 2-dimensional or 3-dimensional single-channel image (or a vector of them)
      * @param dstCn if specified, forces the size of the output blob channel-dimension
diff --git a/modules/dnn/include/opencv2/dnn/blob.inl.hpp b/modules/dnn/include/opencv2/dnn/blob.inl.hpp
index a3e340c18..7cae032de 100644
--- a/modules/dnn/include/opencv2/dnn/blob.inl.hpp
+++ b/modules/dnn/include/opencv2/dnn/blob.inl.hpp
@@ -7,15 +7,16 @@ namespace cv
 namespace dnn
 {
 
-inline BlobShape::BlobShape(int ndims, int fill) : sz( (size_t)std::max(ndims, 1) )
+inline BlobShape::BlobShape(int ndims, int fill) : sz( (size_t)std::max(ndims, 0) )
 {
+    CV_Assert(ndims >= 0);
     for (int i = 0; i < ndims; i++)
         sz[i] = fill;
 }
 
-inline BlobShape::BlobShape(int ndims, const int *sizes) : sz( (size_t)std::max(ndims, 1) )
+inline BlobShape::BlobShape(int ndims, const int *sizes) : sz( (size_t)std::max(ndims, 0) )
 {
-    CV_Assert(ndims > 0);
+    CV_Assert(ndims >= 0);
     for (int i = 0; i < ndims; i++)
         sz[i] = sizes[i];
 }
@@ -30,7 +31,6 @@ inline BlobShape::BlobShape(int num, int cn, int rows, int cols) : sz(4)
 
 inline BlobShape::BlobShape(const std::vector<int> &sizes) : sz( sizes.size() )
 {
-    CV_Assert(sizes.size() > 0);
     for (int i = 0; i < (int)sizes.size(); i++)
         sz[i] = sizes[i];
 }
@@ -81,7 +81,8 @@ inline int &BlobShape::operator[] (int axis)
 
 inline ptrdiff_t BlobShape::total()
 {
-    CV_Assert(dims() >= 1);
+    if (dims() == 0)
+        return 0;
 
     ptrdiff_t res = 1;
     for (int i = 0; i < dims(); i++)
@@ -89,7 +90,6 @@ inline ptrdiff_t BlobShape::total()
     return res;
 }
 
-
 inline const int *BlobShape::ptr() const
 {
     return sz;
@@ -119,12 +119,7 @@ inline bool operator== (const BlobShape &l, const BlobShape &r)
 inline int Blob::canonicalAxis(int axis) const
 {
     CV_Assert(-dims() <= axis && axis < dims());
-
-    if (axis < 0)
-    {
-        return dims() + axis;
-    }
-    return axis;
+    return (axis < 0) ? axis + dims() : axis;
 }
 
 inline int Blob::dims() const
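
The new constructor and the relaxed zero-dimension handling combine as follows. A minimal usage sketch, assuming only the Blob/BlobShape API visible in the hunks above (the header path matches this file):

    #include <opencv2/dnn/blob.hpp>

    using namespace cv::dnn;

    int main()
    {
        // New: allocate a 4-D float blob directly from a shape.
        Blob blob(BlobShape(1, 3, 224, 224), CV_32F);

        // New: an empty shape is legal; total() now reports 0 elements
        // instead of failing the old CV_Assert(dims() >= 1).
        BlobShape empty(0);
        CV_Assert(empty.dims() == 0 && empty.total() == 0);

        // canonicalAxis() still maps negative indices onto the tail:
        // -1 is the last of the four axes.
        CV_Assert(blob.canonicalAxis(-1) == 3);
        return 0;
    }
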
diff --git a/modules/dnn/src/blob.cpp b/modules/dnn/src/blob.cpp
index b23ec2863..d64e7fa30 100644
--- a/modules/dnn/src/blob.cpp
+++ b/modules/dnn/src/blob.cpp
@@ -114,6 +114,11 @@ namespace dnn
         }
     }
 
+    Blob::Blob(const BlobShape &shape, int type)
+    {
+        this->create(shape, type);
+    }
+
     void Blob::fill(const BlobShape &shape, int type, void *data, bool deepCopy)
     {
         CV_Assert(type == CV_32F || type == CV_64F);
@@ -129,14 +134,6 @@ namespace dnn
         }
     }
 
-    void Blob::fill(InputArray in)
-    {
-        CV_Assert(in.isMat() || in.isMatVector());
-
-        //TODO
-        *this = Blob(in);
-    }
-
     void Blob::create(const BlobShape &shape, int type)
     {
         CV_Assert(type == CV_32F || type == CV_64F);
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index cbf046533..6eac0849a 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -309,7 +309,7 @@ struct Net::Impl
         {
             LayerPin from = ld.inputBlobsId[i];
             CV_Assert(from.valid());
-            CV_Assert(layers.count(from.lid) && layers[from.lid].outputBlobs.size() > from.oid);
+            CV_DbgAssert(layers.count(from.lid) && (int)layers[from.lid].outputBlobs.size() > from.oid);
             ld.inputBlobs[i] = &layers[from.lid].outputBlobs[from.oid];
         }
diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp
index 9d1edb0c0..89f342a7a 100644
--- a/modules/dnn/src/layers/reshape_layer.cpp
+++ b/modules/dnn/src/layers/reshape_layer.cpp
@@ -25,9 +25,20 @@ protected:
 
 ReshapeLayer::ReshapeLayer(LayerParams &params)
 {
+    inAxis = params.get<int>("axis", 0);
+    inNumAxes = params.get<int>("num_axes", -1);
+    CV_Assert(inNumAxes >= -1);
+
+    autoAxisIdx = -1;
+
+    if (!params.has("dim"))
+    {
+        shapeDesc = BlobShape(0);
+        return;
+    }
+
     DictValue paramShape = params.get("dim");
     shapeDesc = BlobShape(paramShape.size());
-    autoAxisIdx = -1;
 
     for (int i = 0; i < paramShape.size(); i++)
     {
@@ -43,33 +54,31 @@ ReshapeLayer::ReshapeLayer(LayerParams &params)
         shapeDesc[i] = dim;
     }
-
-    inAxis = params.get<int>("axis", 0);
-    inNumAxes = params.get<int>("num_axes", -1);
-    CV_Assert(inNumAxes >= -1);
 }
 
 void ReshapeLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 {
-    CV_Assert(inputs.size() == 1);
-    outputs.resize(1);
+    outputs.resize(inputs.size());
 
-    Blob &inpBlob = *inputs[0];
-    Blob &outBlob = outputs[0];
-    BlobShape inpShape = inpBlob.shape();
+    for (size_t i = 0; i < inputs.size(); i++)
+    {
+        Blob &inpBlob = *inputs[i];
+        Blob &outBlob = outputs[i];
+        BlobShape inpShape = inpBlob.shape();
 
-    int startAxis = (inAxis >= 0) ? inAxis : inpShape.dims() + 1 + inAxis;
-    int endAxis = (inNumAxes == -1) ? inpShape.dims() : startAxis + inNumAxes;
-    CV_Assert(0 <= startAxis && startAxis <= inpShape.dims());
-    CV_Assert(0 <= endAxis && endAxis <= inpShape.dims());
+        int startAxis = (inAxis >= 0) ? inAxis : inpShape.dims() + 1 + inAxis;
+        int endAxis = (inNumAxes == -1) ? inpShape.dims() : startAxis + inNumAxes;
+        CV_Assert(0 <= startAxis && startAxis <= inpShape.dims());
+        CV_Assert(0 <= endAxis && endAxis <= inpShape.dims());
 
-    int newDims = inpShape.dims() - (endAxis - startAxis) + shapeDesc.dims();
-    BlobShape outShape(newDims);
+        int newDims = inpShape.dims() - (endAxis - startAxis) + shapeDesc.dims();
+        BlobShape outShape(newDims);
 
-    computeOutputShape(startAxis, endAxis, inpShape, outShape);
+        computeOutputShape(startAxis, endAxis, inpShape, outShape);
 
-    outBlob.shareFrom(inpBlob);
-    outBlob.reshape(outShape);
+        outBlob.shareFrom(inpBlob);
+        outBlob.reshape(outShape);
+    }
 }
 
void ReshapeLayer::computeOutputShape(int startAxis, int endAxis, BlobShape &inpShape, BlobShape &outShape)
@@ -84,7 +93,7 @@ void ReshapeLayer::computeOutputShape(int startAxis, int endAxis, BlobShape &inpShape, BlobShape &outShape)
         {
             int inpAxisIdx = startAxis + i;
             if (inpAxisIdx < 0 || inpShape.dims() <= inpAxisIdx)
-                CV_Error(Error::StsOutOfRange, "new shape contains a 0, but there was no corresponding bottom axis to copy");
+                CV_Error(Error::StsOutOfRange, "copy dimension (zero in the new shape) has no corresponding axis in the input blob");
             outShape[idx++] = inpShape[startAxis + i];
         }
         else
@@ -113,7 +122,7 @@ void ReshapeLayer::computeOutputShape(int startAxis, int endAxis, BlobShape &inpShape, BlobShape &outShape)
 
     if (inpShape.total() != outShape.total())
     {
-        CV_Error(Error::StsBadArg, "Mismatch between input and output blob elements count");
+        CV_Error(Error::StsUnmatchedSizes, "Mismatch between input and output blob elements count");
     }
 }
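
With the constructor change above, Reshape now also runs without a "dim" parameter (a pure axis/num_axes squeeze, exercised by the new test in test_layers.cpp further down). For the classic Caffe-style mode, a sketch of how the parameters drive computeOutputShape(); DictValue::arrayInt is an assumption here, the rest mirrors the module's own test code:

    #include <opencv2/dnn.hpp>

    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        // dim = [0, -1] with axis = 1 rewrites the whole tail: 0 copies
        // input axis 1 and -1 is inferred from the element count, so a
        // (2, 3, 4, 5) blob comes out as (2, 3, 20).
        LayerParams params;
        int dim[] = {0, -1};
        params.set("dim", DictValue::arrayInt(dim, 2));
        params.set("axis", 1);

        Blob inp(BlobShape(2, 3, 4, 5));
        std::vector<Blob*> inpVec(1, &inp);
        std::vector<Blob> outVec;

        Ptr<Layer> reshape = LayerRegister::createLayerInstance("Reshape", params);
        reshape->allocate(inpVec, outVec);
        reshape->forward(inpVec, outVec);

        // 120 elements in, 120 out: 2 * 3 * 20.
        CV_Assert(outVec[0].shape() == BlobShape(Vec3i(2, 3, 20)));
        return 0;
    }
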
diff --git a/modules/dnn/src/torch/THDiskFile.cpp b/modules/dnn/src/torch/THDiskFile.cpp
index ca8247dae..17a4d291e 100644
--- a/modules/dnn/src/torch/THDiskFile.cpp
+++ b/modules/dnn/src/torch/THDiskFile.cpp
@@ -1,3 +1,4 @@
+#if defined(ENABLE_TORCH_IMPORTER) && ENABLE_TORCH_IMPORTER
 #include "THGeneral.h"
 #include "THDiskFile.h"
 #include "THFilePrivate.h"
@@ -607,3 +608,4 @@ THFile *THPipeFile_new(const char *name, const char *mode, int isQuiet)
 }
 
 }
+#endif
diff --git a/modules/dnn/src/torch/THFile.cpp b/modules/dnn/src/torch/THFile.cpp
index db71a066d..148f17915 100644
--- a/modules/dnn/src/torch/THFile.cpp
+++ b/modules/dnn/src/torch/THFile.cpp
@@ -1,3 +1,4 @@
+#if defined(ENABLE_TORCH_IMPORTER) && ENABLE_TORCH_IMPORTER
 #include "THFile.h"
 #include "THFilePrivate.h"
 
@@ -158,4 +159,5 @@ IMPLEMENT_THFILE_STORAGE(Float, float)
 IMPLEMENT_THFILE_STORAGE(Double, double)
 */
 
-}
\ No newline at end of file
+}
+#endif
diff --git a/modules/dnn/src/torch/THGeneral.cpp b/modules/dnn/src/torch/THGeneral.cpp
index 792c5516a..3a951e7cb 100644
--- a/modules/dnn/src/torch/THGeneral.cpp
+++ b/modules/dnn/src/torch/THGeneral.cpp
@@ -1,4 +1,6 @@
+#if defined(ENABLE_TORCH_IMPORTER) && ENABLE_TORCH_IMPORTER
 #include "THGeneral.h"
+#include <opencv2/core.hpp>
 
 extern "C"
 {
@@ -15,10 +17,9 @@ extern "C"
 #endif
 
 /* Torch Error Handling */
-static void defaultTorchErrorHandlerFunction(const char *msg, void *data)
+static void defaultTorchErrorHandlerFunction(const char *msg, void*)
 {
-  printf("$ Error: %s\n", msg);
-  exit(-1);
+  CV_Error(cv::Error::StsError, cv::String("Torch Error: ") + msg);
 }
 
 static __thread void (*torchErrorHandlerFunction)(const char *msg, void *data) = defaultTorchErrorHandlerFunction;
@@ -61,13 +62,12 @@ void THSetErrorHandler( void (*torchErrorHandlerFunction_)(const char *msg, void *data), void *data )
 }
 
 /* Torch Arg Checking Handling */
-static void defaultTorchArgErrorHandlerFunction(int argNumber, const char *msg, void *data)
+static void defaultTorchArgErrorHandlerFunction(int argNumber, const char *msg, void*)
 {
   if(msg)
-    printf("$ Invalid argument %d: %s\n", argNumber, msg);
+    CV_Error(cv::Error::StsError, cv::format("Torch invalid argument %d: %s", argNumber, msg));
   else
-    printf("$ Invalid argument %d\n", argNumber);
-  exit(-1);
+    CV_Error(cv::Error::StsError, cv::format("Torch invalid argument %d", argNumber));
 }
 
 static __thread void (*torchArgErrorHandlerFunction)(int argNumber, const char *msg, void *data) = defaultTorchArgErrorHandlerFunction;
@@ -252,3 +252,4 @@ double THLog1p(const double x)
 }
 
 }
+#endif
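
Routing THError/THArgError through CV_Error means importer failures now surface as a catchable cv::Exception instead of printf plus exit(-1). A sketch of what a caller can now do (the model file name is hypothetical):

    #include <iostream>
    #include <opencv2/dnn.hpp>

    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        try
        {
            // A malformed model no longer kills the process.
            Ptr<Importer> importer = createTorchImporter("broken_net.dat", true);
            Net net;
            importer->populateNet(net);
        }
        catch (const cv::Exception &e)
        {
            // THError failures arrive as "Torch Error: <original message>".
            std::cerr << e.what() << std::endl;
        }
        return 0;
    }
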
diff --git a/modules/dnn/src/torch/torch_importer.cpp b/modules/dnn/src/torch/torch_importer.cpp
index 29f7fd02d..d94d3e6b5 100644
--- a/modules/dnn/src/torch/torch_importer.cpp
+++ b/modules/dnn/src/torch/torch_importer.cpp
@@ -8,7 +8,7 @@ namespace cv {
 namespace dnn {
 
-#if ENABLE_TORCH_IMPORTER || 1
+#if defined(ENABLE_TORCH_IMPORTER) && ENABLE_TORCH_IMPORTER
 #include "THDiskFile.h"
 
 enum LuaType
@@ -575,32 +575,44 @@ struct TorchImporter : public ::cv::dnn::Importer
             }
             return prevLayerId;
         }
-        else if (module->thName == "Parallel" || module->thName == "Concat")
+        else if (module->thName == "Concat")
         {
-            int splitId, mergeId, newId;
+            int newId, splitId, mergeId;
+            LayerParams mergeParams, splitParams;
+            mergeParams.set("axis", module->params.get<int>("dimension") - 1);
 
-            String splitType;
-            LayerParams splitParams, mergeParams;
-            if (module->thName == "Parallel")
-            {
-                splitType = "Slice";
-                splitParams.set("axis", module->params.get<int>("inputDimension") - 1);
-                mergeParams.set("axis", module->params.get<int>("outputDimension") - 1);
-            }
-            else
+            splitId = net.addLayer(generateLayerName("torchSplit"), "Split", splitParams);
+            mergeId = net.addLayer(generateLayerName("torchMerge"), "Concat", mergeParams);
+            net.connect(prevLayerId, prevOutNum, splitId, 0);
+
+            for (int i = 0; i < (int)module->modules.size(); i++)
             {
-                splitType = "Split";
-                mergeParams.set("axis", module->params.get<int>("dimension") - 1);
+                newId = fill(module->modules[i], splitId, i);
+                net.connect(newId, 0, mergeId, i);
             }
 
-            splitId = net.addLayer(generateLayerName("torchSplit"), splitType, splitParams);
+            return mergeId;
+        }
+        else if (module->thName == "Parallel")
+        {
+            int newId, splitId, mergeId, reshapeId;
+
+            LayerParams splitParams, mergeParams, reshapeParams;
+            splitParams.set("axis", module->params.get<int>("inputDimension") - 1);
+            mergeParams.set("axis", module->params.get<int>("outputDimension") - 1);
+            reshapeParams.set("axis", splitParams.get<int>("axis"));
+            reshapeParams.set("num_axes", 1);
+
+            splitId = net.addLayer(generateLayerName("torchSplit"), "Slice", splitParams);
             mergeId = net.addLayer(generateLayerName("torchMerge"), "Concat", mergeParams);
+            reshapeId = net.addLayer(generateLayerName("torchReshape"), "Reshape", reshapeParams);
             net.connect(prevLayerId, prevOutNum, splitId, 0);
 
-            for (size_t i = 0; i < module->modules.size(); i++)
+            for (int i = 0; i < (int)module->modules.size(); i++)
             {
-                newId = fill(module->modules[i], splitId, (int)i);
-                net.connect(newId, 0, mergeId, (int)i);
+                net.connect(splitId, i, reshapeId, i);
+                newId = fill(module->modules[i], reshapeId, i);
+                net.connect(newId, 0, mergeId, i);
             }
 
             return mergeId;
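
The Parallel rewrite is apparently what motivated the Reshape squeeze mode and the multi-input allocate() above: in Torch, nn.Parallel runs branch i on input:select(dim, i), which removes the selected dimension, whereas the Slice layer keeps it as a singleton. A single shared Reshape layer with num_axes = 1 squeezes that axis for every branch:

    // Subgraph now emitted for nn.Parallel (axes shown for Torch's
    // 1-based inputDimension == outputDimension == 2, i.e. axis 1 here):
    //
    //   prev
    //     -> Slice(axis=1)                 N outputs, one per branch
    //     -> Reshape(axis=1, num_axes=1)   squeezes the sliced singleton axis
    //     -> branch_i                      each imported Torch submodule
    //     -> Concat(axis=1)                merges the N branch outputs
    //
    // One Reshape layer instance serves all branches, one input/output
    // pair per branch; that is what the multi-blob ReshapeLayer::allocate()
    // above enables.
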
diff --git a/modules/dnn/test/npy_blob.hpp b/modules/dnn/test/npy_blob.hpp
index c72c51d62..14d11a1a0 100644
--- a/modules/dnn/test/npy_blob.hpp
+++ b/modules/dnn/test/npy_blob.hpp
@@ -8,7 +8,7 @@ inline cv::dnn::Blob blobFromNPY(const cv::String &path)
     cnpy::NpyArray npyBlob = cnpy::npy_load(path.c_str());
     cv::dnn::BlobShape shape((int)npyBlob.shape.size(), (int*)&npyBlob.shape[0]);
 
-    cv::dnn::Blob blob;
+    cv::dnn::Blob blob(shape);
     blob.fill(shape, CV_32F, npyBlob.data);
 
     npyBlob.destruct();
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index a2dd72998..f07b828a9 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -75,6 +75,23 @@ TEST(Layer_LRN_channels_Test, Accuracy)
     testLayer("lrn_channels.prototxt");
 }
 
+TEST(Layer_Reshape_squeeze, Accuracy)
+{
+    LayerParams params;
+    params.set("axis", 2);
+    params.set("num_axes", 1);
+
+    Blob inp(BlobShape(4, 3, 1, 2));
+    std::vector<Blob*> inpVec(1, &inp);
+    std::vector<Blob> outVec;
+
+    Ptr<Layer> rl = LayerRegister::createLayerInstance("Reshape", params);
+    rl->allocate(inpVec, outVec);
+    rl->forward(inpVec, outVec);
+
+    EXPECT_EQ(outVec[0].shape(), BlobShape(Vec3i(4, 3, 2)));
+}
+
 TEST(Layer_Reshape_Split_Slice_Test, Accuracy)
 {
     Net net;
diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp
index 32c2bd6dc..514c46d5b 100644
--- a/modules/dnn/test/test_torch_importer.cpp
+++ b/modules/dnn/test/test_torch_importer.cpp
@@ -1,5 +1,5 @@
-#if 1 || defined(ENABLE_TORCH_IMPORTER) && ENABLE_TORCH_IMPORTER
-#if 1 || defined(ENABLE_TORCH_TESTS) && ENABLE_TORCH_TESTS
+#if defined(ENABLE_TORCH_IMPORTER) && ENABLE_TORCH_IMPORTER
+#if defined(ENABLE_TORCH_TESTS) && ENABLE_TORCH_TESTS
 #include "test_precomp.hpp"
 
 namespace cvtest
@@ -31,10 +29,8 @@ static void runTorchNet(String prefix, String outLayerName, bool isBinary)
     String suffix = (isBinary) ? ".dat" : ".txt";
 
     Net net;
-    Ptr<Importer> importer;
-    ASSERT_NO_THROW( importer = createTorchImporter(_tf(prefix + "_net" + suffix), isBinary) );
+    Ptr<Importer> importer = createTorchImporter(_tf(prefix + "_net" + suffix), isBinary);
     ASSERT_TRUE(importer != NULL);
-    //ASSERT_NO_THROW( importer->populateNet(net) );
     importer->populateNet(net);
 
     Blob inp, outRef;
@@ -45,10 +43,6 @@ static void runTorchNet(String prefix, String outLayerName, bool isBinary)
     net.forward();
     Blob out = net.getBlob(outLayerName);
 
-    std::cout << "inp " << inp.shape() << "\n";
-    std::cout << "out " << out.shape() << "\n";
-    std::cout << "ref " << outRef.shape() << "\n";
-
     normAssert(outRef, out);
 }
@@ -81,8 +75,7 @@ TEST(Torch_Importer, run_linear)
 
 TEST(Torch_Importer, run_paralel)
 {
-    //TODO: fix and add Reshape
-    //runTorchNet("net_parallel", "l2_torchMerge", false);
+    runTorchNet("net_parallel", "l2_torchMerge", false);
 }
 
 TEST(Torch_Importer, run_concat)
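
End to end, the re-enabled run_paralel test now exercises Slice, the shared Reshape, and Concat together. A standalone sketch of what runTorchNet does; the file names and the ".0" input-blob name are assumptions based on the surrounding test code:

    #include <iostream>
    #include <opencv2/dnn.hpp>
    #include "npy_blob.hpp"   // the test helper shown above

    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        Net net;
        Ptr<Importer> importer = createTorchImporter("net_parallel_net.txt", /*isBinary=*/false);
        importer->populateNet(net);

        // ".0" is assumed to address the imported network's input blob.
        net.setBlob(".0", blobFromNPY("net_parallel_input.npy"));
        net.forward();

        Blob out = net.getBlob("l2_torchMerge");
        std::cout << "out " << out.shape() << std::endl;
        return 0;
    }
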