From d02bced118ade95a6bd1276d0b7135699c0c5949 Mon Sep 17 00:00:00 2001
From: Vitaliy Lyudvichenko <ludv1x@yandex.ru>
Date: Sun, 5 Jul 2015 22:52:34 +0300
Subject: [PATCH] More refactoring of Blob.

More refactoring of Blob.
Fix warnings and eliminate the zlib dependency in cnpy.h.
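
A rough sketch of the new shape API (illustration only, not part of the patch;
assumes the usual cv::dnn headers plus <iostream>):

    using namespace cv::dnn;

    BlobShape shape(1, 3, 227, 227);    // num, channels, rows, cols
    Blob blob;
    blob.create(shape, CV_32F);         // CV_32F is also the default type

    CV_Assert(blob.shape() == shape);   // shapes are now comparable
    std::cout << blob.shape() << '\n';  // prints "[1, 3, 227, 227]"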
---
 modules/dnn/include/opencv2/dnn/blob.hpp      | 204 ++++----------
 modules/dnn/include/opencv2/dnn/blob.inl.hpp  | 260 ++++++++++++++++++
 modules/dnn/src/blob.cpp                      |  77 +++---
 modules/dnn/src/caffe_importer.cpp            |  36 +--
 modules/dnn/src/dnn.cpp                       |   5 +-
 modules/dnn/src/layers/convolution_layer.cpp  |   4 +-
 .../dnn/src/layers/fully_connected_layer.cpp  |  26 +-
 modules/dnn/src/layers/pooling_layer.cpp      |   2 +-
 modules/dnn/src/layers/softmax_layer.cpp      |  10 +-
 modules/dnn/test/cnpy.h                       |  12 +-
 modules/dnn/test/npy_blob.hpp                 |  13 +-
 modules/dnn/test/test_alexnet.cpp             |   3 +-
 12 files changed, 392 insertions(+), 260 deletions(-)
 create mode 100644 modules/dnn/include/opencv2/dnn/blob.inl.hpp

diff --git a/modules/dnn/include/opencv2/dnn/blob.hpp b/modules/dnn/include/opencv2/dnn/blob.hpp
index b91114021..78cc7836e 100644
--- a/modules/dnn/include/opencv2/dnn/blob.hpp
+++ b/modules/dnn/include/opencv2/dnn/blob.hpp
@@ -2,11 +2,47 @@
 #define __OPENCV_DNN_DNN_BLOB_HPP__
 #include <opencv2/core.hpp>
 #include <vector>
+#include <ostream>
 
 namespace cv
 {
 namespace dnn
 {
+    struct BlobShape
+    {
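+        //creates an n-dim shape with every size set to \p fill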
+        explicit BlobShape(int ndims, int fill = 1);
+        BlobShape(int num, int cn, int rows, int cols);
+        BlobShape(int ndims, const int *sizes);
+        BlobShape(const std::vector<int> &sizes);
+        template<int n>
+        BlobShape(const Vec<int, n> &shape);
+
+        int dims() const;
+        int size(int axis) const;
+        int &size(int axis);
+
+        //same as size()
+        int operator[](int axis) const;
+        int &operator[](int axis);
+
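+        //same as size(), but returns 1 for an out-of-range \p axis instead of raising an error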
+        int xsize(int axis) const;
+
+        const int *ptr() const;
+
+        bool equal(const BlobShape &other) const;
+
+    private:
+
+        BlobShape();
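+        //dimension sizes; shapes with up to 4 dims are stored without heap allocation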
+        cv::AutoBuffer<int,4> sz;
+    };
+
+    bool operator== (const BlobShape &l, const BlobShape &r);
+
+    //prints shape in "[s0, s1, ...]" form; intended mainly for logging and debugging
+    CV_EXPORTS std::ostream &operator<< (std::ostream &stream, const BlobShape &shape);
+
+
     /** @brief provides convenient methods for continuous n-dimensional array processing, dedicated to convolutional neural networks.
     It's realized as a wrapper over \ref cv::Mat and \ref cv::UMat and will support methods for CPU/GPU switching
     */
@@ -16,16 +52,13 @@ namespace dnn
         explicit Blob();
         explicit Blob(InputArray in);
 
-        void create(int ndims, const int *sizes, int type = CV_32F);
-        void create(Vec4i shape, int type = CV_32F);
-        void create(int num, int cn, int rows, int cols, int type = CV_32F);
+        void create(const BlobShape &shape, int type = CV_32F);
 
         void fill(InputArray in);
-        void fill(int ndims, const int *sizes, int type, void *data, bool deepCopy = true);
+        void fill(const BlobShape &shape, int type, void *data, bool deepCopy = true);
 
         Mat& getMatRef();
         const Mat& getMatRef() const;
-        Mat getMat();
         Mat getMat(int n, int cn);
 
         //shape getters
@@ -37,14 +70,14 @@ namespace dnn
         Python-like indexing is supported, so \p axis can be negative, i.e. -1 is the last dimension.
         The size of each non-existing dimension is assumed to be 1, so the method always succeeds.
         */
-        int size(int axis) const;
+        int xsize(int axis) const;
 
         /** @brief returns the size of the specified dimension (axis)
         @param axis dimension index
         Python-like indexing is supported, so \p axis can be negative, i.e. -1 is the last dimension.
-        @note Unlike ::size, if \p axis points to non-existing dimension then an error will be generated.
+        @note Unlike ::xsize, if \p axis points to a non-existing dimension, an error is generated.
         */
-        int sizeAt(int axis) const;
+        int size(int axis) const;
         
         /** @brief returns number of elements
         @param startAxis starting axis (inverse indexing can be used)
@@ -59,7 +92,9 @@ namespace dnn
 
         /** @brief returns real shape of the blob
         */
-        std::vector<int> shape() const;
+        BlobShape shape() const;
+
+        bool equalShape(const Blob &other) const;
 
         //shape getters, oriented towards 4-dim Blob processing
         int cols() const;
@@ -85,156 +120,9 @@ namespace dnn
 
         Mat m;
     };
-
-    //////////////////////////////////////////////////////////////////////////
-
-    inline int Blob::canonicalAxis(int axis) const
-    {
-        CV_Assert(-dims() <= axis && axis < dims());
-
-        if (axis < 0)
-        {
-            return dims() + axis;
-        }
-        return axis;
-    }
-
-    inline int Blob::size(int axis) const
-    {
-        if (axis < 0)
-            axis += dims();
-        
-        if (axis < 0 || axis >= dims())
-            return 1;
-        
-        return sizes()[axis];
-    }
-
-    inline int Blob::sizeAt(int axis) const
-    {
-        CV_Assert(-dims() <= axis && axis < dims());
-
-        if (axis < 0)
-            axis += dims();
-
-        return sizes()[axis];
-    }
-
-    inline size_t Blob::total(int startAxis, int endAxis) const
-    {
-        startAxis = canonicalAxis(startAxis);
-
-        if (endAxis == -1)
-            endAxis = dims();
-
-        CV_Assert(startAxis <= endAxis && endAxis <= dims());
-
-        size_t size = 1; //assume that blob isn't empty
-        for (int i = startAxis; i < endAxis; i++)
-            size *= (size_t)sizes()[i];
-
-        return size;
-    }
-
-    inline int Blob::offset(int n, int cn, int row, int col) const
-    {
-        CV_DbgAssert(0 <= n && n < num() && 0 <= cn && cn < channels() && 0 <= row && row < rows() && 0 <= col && col < cols());
-        return ((n*channels() + cn)*rows() + row)*cols() + col;
-    }
-
-    inline float *Blob::ptrf(int n, int cn, int row, int col)
-    {
-        CV_Assert(type() == CV_32F);
-        return (float*)m.data + offset(n, cn, row, col);
-    }
-
-    inline uchar *Blob::ptrRaw(int n, int cn, int row, int col)
-    {
-        return m.data + m.elemSize() * offset(n, cn, row, col);
-    }
-
-    template<typename TFloat>
-    inline TFloat* Blob::ptr(int n, int cn, int row, int col)
-    {
-        CV_Assert(type() == cv::DataDepth<TFloat>::value);
-        return (TFloat*) ptrRaw(n, cn, row, col);
-    }
-
-    inline std::vector<int> Blob::shape() const
-    {
-        return std::vector<int>(sizes(), sizes() + dims());
-    }
-
-    inline Mat& Blob::getMatRef()
-    {
-        return m;
-    }
-
-    inline const Mat& Blob::getMatRef() const
-    {
-        return m;
-    }
-
-    inline Mat Blob::getMat()
-    {
-        return m;
-    }
-
-    inline Mat Blob::getMat(int n, int cn)
-    {
-        return Mat(rows(), cols(), m.type(), this->ptrRaw(n, cn));
-    }
-
-    inline int Blob::cols() const
-    {
-        return size(-1);
-    }
-
-    inline int Blob::rows() const
-    {
-        return size(-2);
-    }
-
-    inline Size Blob::size2() const
-    {
-        return Size(cols(), rows());
-    }
-
-    inline int Blob::channels() const
-    {
-        return size(-3);
-    }
-
-    inline int Blob::num() const
-    {
-        return size(-4);
-    }
-
-    inline int Blob::type() const
-    {
-        return m.depth();
-    }
-
-    inline bool Blob::isFloat() const
-    {
-        return (type() == CV_32F);
-    }
-
-    inline bool Blob::isDouble() const
-    {
-        return (type() == CV_32F);
-    }
-
-    inline const int * Blob::sizes() const
-    {
-        return &m.size[0];
-    }
-
-    inline int Blob::dims() const
-    {
-        return m.dims;
-    }
 }
 }
 
+#include "blob.inl.hpp"
+
 #endif
diff --git a/modules/dnn/include/opencv2/dnn/blob.inl.hpp b/modules/dnn/include/opencv2/dnn/blob.inl.hpp
new file mode 100644
index 000000000..0ee571c61
--- /dev/null
+++ b/modules/dnn/include/opencv2/dnn/blob.inl.hpp
@@ -0,0 +1,260 @@
+#ifndef __OPENCV_DNN_DNN_BLOB_INL_HPP__
+#define __OPENCV_DNN_DNN_BLOB_INL_HPP__
+#include "blob.hpp"
+
+namespace cv
+{
+namespace dnn
+{
+
+inline BlobShape::BlobShape(int ndims, int fill) : sz( (size_t)std::max(ndims, 1) )
+{
+    for (int i = 0; i < ndims; i++)
+        sz[i] = fill;
+}
+
+inline BlobShape::BlobShape(int ndims, const int *sizes) : sz( (size_t)std::max(ndims, 1) )
+{
+    CV_Assert(ndims > 0);
+    for (int i = 0; i < ndims; i++)
+        sz[i] = sizes[i];
+}
+
+inline BlobShape::BlobShape(int num, int cn, int rows, int cols) : sz(4)
+{
+    sz[0] = num;
+    sz[1] = cn;
+    sz[2] = rows;
+    sz[3] = cols;
+}
+
+inline BlobShape::BlobShape(const std::vector<int> &sizes) : sz( sizes.size() )
+{
+    CV_Assert(sizes.size() > 0);
+    for (int i = 0; i < (int)sizes.size(); i++)
+        sz[i] = sizes[i];
+}
+
+template<int n>
+inline BlobShape::BlobShape(const Vec<int, n> &shape) : sz(n)
+{
+    for (int i = 0; i < n; i++)
+        sz[i] = shape[i];
+}
+
+inline int BlobShape::dims() const
+{
+    return (int)sz.size();
+}
+
+inline int BlobShape::xsize(int axis) const
+{
+    if (axis < -dims() || axis >= dims())
+        return 1;
+
+    return sz[(axis < 0) ? axis + dims() : axis];
+}
+
+inline int BlobShape::size(int axis) const
+{
+    CV_Assert(-dims() <= axis && axis < dims());
+    return sz[(axis < 0) ? axis + dims() : axis];
+}
+
+inline int &BlobShape::size(int axis)
+{
+    CV_Assert(-dims() <= axis && axis < dims());
+    return sz[(axis < 0) ? axis + dims() : axis];
+}
+
+inline int BlobShape::operator[] (int axis) const
+{
+    CV_Assert(-dims() <= axis && axis < dims());
+    return sz[(axis < 0) ? axis + dims() : axis];
+}
+
+inline int &BlobShape::operator[] (int axis)
+{
+    CV_Assert(-dims() <= axis && axis < dims());
+    return sz[(axis < 0) ? axis + dims() : axis];
+}
+
+inline const int *BlobShape::ptr() const
+{
+    return sz;
+}
+
+inline bool BlobShape::equal(const BlobShape &other) const
+{
+    if (this->dims() != other.dims())
+        return false;
+
+    for (int i = 0; i < other.dims(); i++)
+    {
+        if (sz[i] != other.sz[i])
+            return false;
+    }
+
+    return true;
+}
+
+inline bool operator== (const BlobShape &l, const BlobShape &r)
+{
+    return l.equal(r);
+}
+
+
+
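+//maps a negative (Python-style) axis onto its non-negative equivalent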
+inline int Blob::canonicalAxis(int axis) const
+{
+    CV_Assert(-dims() <= axis && axis < dims());
+
+    if (axis < 0)
+    {
+        return dims() + axis;
+    }
+    return axis;
+}
+
+inline int Blob::dims() const
+{
+    return m.dims;
+}
+
+inline int Blob::xsize(int axis) const
+{
+    if (axis < -dims() || axis >= dims())
+        return 1;
+
+    return sizes()[(axis < 0) ? axis + dims() : axis];
+}
+
+inline int Blob::size(int axis) const
+{
+    CV_Assert(-dims() <= axis && axis < dims());
+    return sizes()[(axis < 0) ? axis + dims() : axis];
+}
+
+inline size_t Blob::total(int startAxis, int endAxis) const
+{
+    if (startAxis < 0)
+        startAxis += dims();
+
+    if (endAxis == -1)
+        endAxis = dims();
+
+    CV_Assert(0 <= startAxis && startAxis <= endAxis && endAxis <= dims());
+
+    size_t size = 1; //assumes the blob isn't empty
+    for (int i = startAxis; i < endAxis; i++)
+        size *= (size_t)sizes()[i];
+
+    return size;
+}
+
+inline int Blob::offset(int n, int cn, int row, int col) const
+{
+    CV_DbgAssert(0 <= n && n < num() && 0 <= cn && cn < channels() && 0 <= row && row < rows() && 0 <= col && col < cols());
+    return ((n*channels() + cn)*rows() + row)*cols() + col;
+}
+
+inline float *Blob::ptrf(int n, int cn, int row, int col)
+{
+    CV_Assert(type() == CV_32F);
+    return (float*)m.data + offset(n, cn, row, col);
+}
+
+inline uchar *Blob::ptrRaw(int n, int cn, int row, int col)
+{
+    return m.data + m.elemSize() * offset(n, cn, row, col);
+}
+
+template<typename TFloat>
+inline TFloat* Blob::ptr(int n, int cn, int row, int col)
+{
+    CV_Assert(type() == cv::DataDepth<TFloat>::value);
+    return (TFloat*) ptrRaw(n, cn, row, col);
+}
+
+inline BlobShape Blob::shape() const
+{
+    return BlobShape(dims(), sizes());
+}
+
+inline bool Blob::equalShape(const Blob &other) const
+{
+    if (this->dims() != other.dims())
+        return false;
+
+    for (int i = 0; i < dims(); i++)
+    {
+        if (this->sizes()[i] != other.sizes()[i])
+            return false;
+    }
+    return true;
+}
+
+inline Mat& Blob::getMatRef()
+{
+    return m;
+}
+
+inline const Mat& Blob::getMatRef() const
+{
+    return m;
+}
+
+inline Mat Blob::getMat(int n, int cn)
+{
+    return Mat(rows(), cols(), m.type(), this->ptrRaw(n, cn));
+}
+
+inline int Blob::cols() const
+{
+    return xsize(3);
+}
+
+inline int Blob::rows() const
+{
+    return xsize(2);
+}
+
+inline int Blob::channels() const
+{
+    return xsize(1);
+}
+
+inline int Blob::num() const
+{
+    return xsize(0);
+}
+
+inline Size Blob::size2() const
+{
+    return Size(cols(), rows());
+}
+
+inline int Blob::type() const
+{
+    return m.depth();
+}
+
+inline bool Blob::isFloat() const
+{
+    return (type() == CV_32F);
+}
+
+inline bool Blob::isDouble() const
+{
+    return (type() == CV_64F);
+}
+
+inline const int * Blob::sizes() const
+{
+    return &m.size[0];
+}
+
+}
+}
+
+#endif
diff --git a/modules/dnn/src/blob.cpp b/modules/dnn/src/blob.cpp
index ea16bb4d9..9f0c66660 100644
--- a/modules/dnn/src/blob.cpp
+++ b/modules/dnn/src/blob.cpp
@@ -26,8 +26,7 @@ namespace dnn
             int type = mat.type();
             int dstType = CV_MAKE_TYPE(CV_MAT_DEPTH(type), 1);
 
-            int size[3] = { cn, rows, cols };
-            this->create(3, size, dstType);
+            this->create(BlobShape(1, cn, rows, cols), dstType);
             uchar *data = m.data;
             int step = rows * cols * CV_ELEM_SIZE(dstType);
 
@@ -54,45 +53,18 @@ namespace dnn
         }
     }
 
-    inline void squeezeShape_(const int srcDims, const int *srcSizes, const int dstDims, int *dstSizes)
-    {
-        const int m = std::min(dstDims, srcDims);
-
-        //copy common(last) dimensions
-        for (int i = 0; i < m; i++)
-            dstSizes[dstDims - 1 - i] = srcSizes[srcDims - 1 - i];
-
-        //either flatten extra dimensions
-        for (int i = m; i < srcDims; i++)
-            dstSizes[0] *= srcSizes[srcDims - 1 - i];
-
-        //either fill gaps
-        for (int i = m; i < dstDims; i++)
-            dstSizes[dstDims - 1 - i] = 1;
-    }
-
-    static Vec4i squeezeShape4(const int ndims, const int *sizes)
-    {
-        Vec4i res;
-        squeezeShape_(ndims, sizes, 4, &res[0]);
-        return res;
-    }
-
-    void Blob::fill(int ndims, const int *sizes, int type, void *data, bool deepCopy)
+    void Blob::fill(const BlobShape &shape, int type, void *data, bool deepCopy)
     {
         CV_Assert(type == CV_32F || type == CV_64F);
 
-        Vec4i shape = squeezeShape4(ndims, sizes);
-
         if (deepCopy)
         {
-            m.create(4, &shape[0], type);
-            size_t dataSize = m.total() * m.elemSize();
-            memcpy(m.data, data, dataSize);
+            m.create(shape.dims(), shape.ptr(), type);
+            memcpy(m.data, data, m.total() * m.elemSize());
         }
         else
         {
-            m = Mat(shape.channels, &shape[0], type, data);
+            m = Mat(shape.dims(), shape.ptr(), type, data);
         }
     }
 
@@ -104,29 +76,44 @@ namespace dnn
         *this = Blob(in);
     }
 
-    void Blob::create(int ndims, const int *sizes, int type)
+    void Blob::create(const BlobShape &shape, int type)
     {
         CV_Assert(type == CV_32F || type == CV_64F);
-        Vec4i shape = squeezeShape4(ndims, sizes);
-        m.create(shape.channels, &shape[0], type);
+        m.create(shape.dims(), shape.ptr(), type);
     }
 
-    void Blob::create(Vec4i shape, int type)
+    inline void squeezeShape(const int srcDims, const int *srcSizes, const int dstDims, int *dstSizes)
     {
-        m.create(shape.channels, &shape[0], type);
-    }
+        const int m = std::min(dstDims, srcDims);
 
-    void Blob::create(int num, int cn, int rows, int cols, int type)
-    {
-        Vec4i shape(num, cn, rows, cols);
-        create(4, &shape[0], type);
+    //copy the common (trailing) dimensions
+        for (int i = 0; i < m; i++)
+            dstSizes[dstDims - 1 - i] = srcSizes[srcDims - 1 - i];
+
+    //if the source has more dimensions, flatten the extra ones into dstSizes[0]
+        for (int i = m; i < srcDims; i++)
+            dstSizes[0] *= srcSizes[srcDims - 1 - i];
+
+    //if the destination has more dimensions, fill the leading ones with 1
+        for (int i = m; i < dstDims; i++)
+            dstSizes[dstDims - 1 - i] = 1;
     }
 
     Vec4i Blob::shape4() const
     {
-        return squeezeShape4(dims(), sizes());
+        return Vec4i(num(), channels(), rows(), cols());
     }
 
+    std::ostream &operator<< (std::ostream &stream, const BlobShape &shape)
+    {
+        stream << "[";
+
+        for (int i = 0; i < shape.dims() - 1; i++)
+            stream << shape[i] << ", ";
+        if (shape.dims() > 0)
+            stream << shape[-1];
 
+        return stream << "]";
+    }
+}
 }
-}
\ No newline at end of file
diff --git a/modules/dnn/src/caffe_importer.cpp b/modules/dnn/src/caffe_importer.cpp
index 87c94dd65..d934610f2 100644
--- a/modules/dnn/src/caffe_importer.cpp
+++ b/modules/dnn/src/caffe_importer.cpp
@@ -128,39 +128,41 @@ namespace
             }
         }
 
-        void blobFromProto(const caffe::BlobProto &protoBlob, cv::dnn::Blob &dstBlob)
+        BlobShape blobShapeFromProto(const caffe::BlobProto &pbBlob)
         {
-            AutoBuffer<int, 4> shape;
-
-            if (protoBlob.has_num() || protoBlob.has_channels() || protoBlob.has_height() || protoBlob.has_width())
+            if (pbBlob.has_num() || pbBlob.has_channels() || pbBlob.has_height() || pbBlob.has_width())
             {
-                shape.resize(4);
-                shape[0] = protoBlob.num();
-                shape[1] = protoBlob.channels();
-                shape[2] = protoBlob.height();
-                shape[3] = protoBlob.width();
+                return BlobShape(pbBlob.num(), pbBlob.channels(), pbBlob.height(), pbBlob.width());
             }
-            else if (protoBlob.has_shape())
+            else if (pbBlob.has_shape())
             {
-                const caffe::BlobShape &_shape = protoBlob.shape();
-                shape.resize(_shape.dim_size());
+                const caffe::BlobShape &_shape = pbBlob.shape();
+                BlobShape shape(_shape.dim_size());
 
                 for (int i = 0; i < _shape.dim_size(); i++)
                     shape[i] = _shape.dim(i);
+
+                return shape;
             }
             else
             {
                 CV_Error(cv::Error::StsAssert, "Unknown shape of input blob");
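+                //unreachable (CV_Error throws), added to silence "missing return" warnings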
+                return BlobShape(-1);
             }
+        }
+
+        void blobFromProto(const caffe::BlobProto &pbBlob, cv::dnn::Blob &dstBlob)
+        {
+            BlobShape shape = blobShapeFromProto(pbBlob);
 
-            dstBlob.create(shape.size(), shape, CV_32F);
-            CV_Assert(protoBlob.data_size() == (int)dstBlob.getMatRef().total());
+            dstBlob.create(shape, CV_32F);
+            CV_Assert(pbBlob.data_size() == (int)dstBlob.getMatRef().total());
 
-            CV_DbgAssert(protoBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);
+            CV_DbgAssert(pbBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);
             float *dstData = dstBlob.getMatRef().ptr<float>();
 
-            for (int i = 0; i < protoBlob.data_size(); i++)
-                dstData[i] = protoBlob.data(i);
+            for (int i = 0; i < pbBlob.data_size(); i++)
+                dstData[i] = pbBlob.data(i);
         }
 
         void extractBinaryLayerParms(const caffe::LayerParameter& layer, LayerParams& layerParams)
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 43ef32703..4ba7f1a4e 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -309,9 +309,10 @@ struct Net::Impl
 
         //forward itself
         if (ld.layerInstance && layerId != 0)
+        {
+            //std::cout << ld.name << " shape:" << ld.outputBlobs[0].shape4() << std::endl;
             ld.layerInstance->forward(ld.inputBlobs, ld.outputBlobs);
-
-        //std::cout << ld.name << " shape:" << ld.outputBlobs[0].shape() << std::endl;
+        }
 
         ld.flag = 1;
     }
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 8bc9d4ae9..72b7b9fb0 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -73,9 +73,7 @@ namespace dnn
         for (size_t i = 0; i < inputs.size(); i++)
         {
             CV_Assert(inputs[i]->rows() == inH && inputs[i]->cols() == inW && inputs[i]->channels() == inCn);
-            int num = inputs[i]->num();
-
-            outputs[i].create(num, numOutput, outH, outW);
+            outputs[i].create(BlobShape(inputs[i]->num(), numOutput, outH, outW));
         }
 
         kerSize = kernelH * kernelW * groupCn;
diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp
index 616b6bb73..527f918ad 100644
--- a/modules/dnn/src/layers/fully_connected_layer.cpp
+++ b/modules/dnn/src/layers/fully_connected_layer.cpp
@@ -10,7 +10,7 @@ namespace dnn
     {
         bool bias;
         int numOutputs;
-        int axis;
+        int axis_, axis;
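+        //axis_ keeps the raw value from LayerParams; axis is its canonical form, resolved in allocate()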
 
         int innerSize;
 
@@ -30,9 +30,8 @@ namespace dnn
     {
         numOutputs = params.get<int>("num_output");
         bias = params.get<bool>("bias_term", true);
-        axis = params.get<int>("axis", 1);
+        axis_ = params.get<int>("axis", 1);
 
-        CV_Assert(0 <= axis && axis < 4);
         CV_Assert(params.learnedBlobs.size() >= 1);
         CV_Assert(!bias || (params.learnedBlobs.size() >= 2 && (int)params.learnedBlobs[1].total() == numOutputs));
 
@@ -48,7 +47,9 @@ namespace dnn
     {
         CV_Assert(inputs.size() > 0);
 
+        axis = inputs[0]->canonicalAxis(axis_);
         innerSize = (int)inputs[0]->total(axis);
+
         CV_Assert((size_t)innerSize * (size_t)numOutputs == learnedParams[0].total());
         CV_Assert(learnedParams[0].rows() == numOutputs && learnedParams[0].cols() == innerSize);
 
@@ -56,7 +57,7 @@ namespace dnn
         for (size_t i = 0; i < inputs.size(); i++)
         {
             if (i != 0)
-                CV_Assert(inputs[i]->total(axis) == (size_t)innerSize);
+                CV_Assert(inputs[i]->equalShape(*inputs[0]));
 
             this->reshape(*inputs[i], outputs[i]);
         }
@@ -64,12 +65,9 @@ namespace dnn
 
     void FullyConnectedLayer::reshape(const Blob &inp, Blob &out)
     {
-        Vec4i inpShape = inp.shape4();
-        Vec4i outShape = Vec4i::all(1);
-
-        for (int a = 0; a < axis; a++)
-            outShape[a] = inpShape[a];
-        outShape[3] = numOutputs;
+        BlobShape inpShape = inp.shape();
+        BlobShape outShape(axis+1, inpShape.ptr());
+        outShape[axis] = numOutputs;
 
         out.create(outShape, inp.type());
     }
@@ -82,12 +80,12 @@ namespace dnn
             int N = numOutputs;
             int K = innerSize;
 
-            Mat srcMat(M, K, CV_32F, inputs[i]->ptrf());
-            Mat weights(N, K, CV_32F, learnedParams[0].ptrf());
-            Mat dstMat(M, N, CV_32F, outputs[i].ptrf());
+            Mat srcMat(M, K, inputs[i]->type(), inputs[i]->ptrf());
+            Mat weight(N, K, learnedParams[0].type(), learnedParams[0].ptrf());
+            Mat dstMat(M, N, outputs[i].type(), outputs[i].ptrf());
 
             //important: Caffe stores weights as transposed array
-            cv::gemm(srcMat, weights, 1, noArray(), 0, dstMat, GEMM_2_T);
+            cv::gemm(srcMat, weight, 1, noArray(), 0, dstMat, GEMM_2_T);
 
             if (bias)
             {
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index 5fe154b8d..0bb899638 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -73,7 +73,7 @@ namespace dnn
         for (size_t i = 0; i < inputs.size(); i++)
         {
             CV_Assert(inputs[i]->rows() == inH && inputs[i]->cols() == inW);
-            outputs[i].create(inputs[i]->num(), inputs[i]->channels(), pooledH, pooledW);
+            outputs[i].create(BlobShape(inputs[i]->num(), inputs[i]->channels(), pooledH, pooledW));
         }
     }
 
diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp
index fe0e8bd0d..9761f9626 100644
--- a/modules/dnn/src/layers/softmax_layer.cpp
+++ b/modules/dnn/src/layers/softmax_layer.cpp
@@ -11,7 +11,7 @@ namespace dnn
     //TODO: set default axis number to 1, and add custom shape length in FullyConnected
     class SoftMaxLayer : public Layer
     {
-        int axis;
+        int axis_, axis;
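+        //axis_ is the raw value from LayerParams; canonicalized into axis during allocate()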
         Blob maxAggregator;
 
     public:
@@ -27,15 +27,15 @@ namespace dnn
     SoftMaxLayer::SoftMaxLayer(LayerParams &params)
     {
         //hotfix!!!
-        axis = params.get<int>("axis", 3);
-        CV_Assert(0 <= axis && axis < 4);
+        axis_ = params.get<int>("axis", 1);
     }
 
     void SoftMaxLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
     {
         CV_Assert(inputs.size() == 1);
+        axis = inputs[0]->canonicalAxis(axis_);
 
-        Vec4i shape = inputs[0]->shape4();
+        BlobShape shape = inputs[0]->shape();
         outputs.resize(1);
         outputs[0].create(shape);
 
@@ -87,7 +87,7 @@ namespace dnn
             }
         }
 
-        cv::exp(dst.getMat(), dst.getMat());
+        cv::exp(dst.getMatRef(), dst.getMatRef());
 
         for (size_t outerDim = 0; outerDim < outerSize; outerDim++)
         {
diff --git a/modules/dnn/test/cnpy.h b/modules/dnn/test/cnpy.h
index a071ab89f..9886ec530 100644
--- a/modules/dnn/test/cnpy.h
+++ b/modules/dnn/test/cnpy.h
@@ -13,8 +13,10 @@
 #include<typeinfo>
 #include<iostream>
 #include<cassert>
-//#include<zlib.h>
 #include<map>
+#if defined(HAVE_ZLIB) && HAVE_ZLIB
+#include<zlib.h>
+#endif
 
 namespace cnpy {
 
@@ -57,7 +59,7 @@ namespace cnpy {
     template<> std::vector<char>& operator+=(std::vector<char>& lhs, const char* rhs); 
 
 
-    template<typename T> std::string tostring(T i, int pad = 0, char padval = ' ') {
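+    //the pad/padval parameters are unused, so their names are dropped to silence -Wunused-parameter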
+    template<typename T> std::string tostring(T i, int = 0, char = ' ') {
         std::stringstream s;
         s << i;
         return s.str();
@@ -152,8 +154,12 @@ namespace cnpy {
         int nbytes = nels*sizeof(T) + npy_header.size();
 
         //get the CRC of the data to be added
+        #if defined(HAVE_ZLIB) && HAVE_ZLIB
         unsigned int crc = crc32(0L,(unsigned char*)&npy_header[0],npy_header.size());
         crc = crc32(crc,(unsigned char*)data,nels*sizeof(T));
+        #else
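+        //without zlib the CRC field stays 0; strict readers of the resulting archive may reject it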
+        unsigned int crc = 0;
+        #endif
 
         //build the local header
         std::vector<char> local_header;
@@ -204,7 +210,7 @@ namespace cnpy {
         fclose(fp);
     }
 
-    template<typename T> std::vector<char> create_npy_header(const T* data, const unsigned int* shape, const unsigned int ndims) {  
+    template<typename T> std::vector<char> create_npy_header(const T*, const unsigned int* shape, const unsigned int ndims) {
 
         std::vector<char> dict;
         dict += "{'descr': '";
diff --git a/modules/dnn/test/npy_blob.hpp b/modules/dnn/test/npy_blob.hpp
index b7a7d44b8..c72c51d62 100644
--- a/modules/dnn/test/npy_blob.hpp
+++ b/modules/dnn/test/npy_blob.hpp
@@ -1,24 +1,15 @@
 #ifndef __OPENCV_DNN_TEST_NPY_BLOB_HPP__
 #define __OPENCV_DNN_TEST_NPY_BLOB_HPP__
 #include "test_precomp.hpp"
-
-#ifdef __GNUC__
-#   pragma GCC diagnostic ignored "-Wunused-parameter"
-#   pragma GCC diagnostic push
-#endif
-
 #include "cnpy.h"
 
-#ifdef __GNUC__
-#   pragma GCC diagnostic pop
-#endif
-
 inline cv::dnn::Blob blobFromNPY(const cv::String &path)
 {
     cnpy::NpyArray npyBlob = cnpy::npy_load(path.c_str());
+    cv::dnn::BlobShape shape((int)npyBlob.shape.size(), (int*)&npyBlob.shape[0]);
 
     cv::dnn::Blob blob;
-    blob.fill((int)npyBlob.shape.size(), (int*)&npyBlob.shape[0], CV_32F, npyBlob.data);
+    blob.fill(shape, CV_32F, npyBlob.data);
 
     npyBlob.destruct();
     return blob;
diff --git a/modules/dnn/test/test_alexnet.cpp b/modules/dnn/test/test_alexnet.cpp
index aad9966d0..bee5169e6 100644
--- a/modules/dnn/test/test_alexnet.cpp
+++ b/modules/dnn/test/test_alexnet.cpp
@@ -29,8 +29,9 @@ inline void normAssert(InputArray ref, InputArray get, const char *comment = "")
     EXPECT_LE(normInf, 0.001) << comment;
 }
 
-inline void normAssert(Blob ref, Blob test, const char *comment = "")
+inline void normAssert(Blob &ref, Blob &test, const char *comment = "")
 {
+    EXPECT_EQ(ref.shape(), test.shape());
     normAssert(ref.getMatRef(), test.getMatRef(), comment);
 }