commit dd15521860
53 changed files with 3939 additions and 867 deletions
@@ -0,0 +1,142 @@
#ifndef __OPENCV_DNN_DNN_BLOB_HPP__
#define __OPENCV_DNN_DNN_BLOB_HPP__
#include <opencv2/core.hpp>
#include <vector>
#include <ostream>

namespace cv
{
namespace dnn
{
    struct BlobShape
    {
        explicit BlobShape(int ndims = 4, int fill = 1);
        BlobShape(int num, int cn, int rows, int cols);
        BlobShape(int ndims, const int *sizes);
        BlobShape(const std::vector<int> &sizes);
        template<int n>
        BlobShape(const Vec<int, n> &shape);

        int dims() const;
        int size(int axis) const;
        int &size(int axis);

        //does the same as size()
        int operator[](int axis) const;
        int &operator[](int axis);

        //same as size(), but the size of non-existing dimensions is equal to 1
        int xsize(int axis) const;

        ptrdiff_t total();

        const int *ptr() const;

        bool equal(const BlobShape &other) const;

    private:
        cv::AutoBuffer<int,4> sz;
    };

    bool operator== (const BlobShape &l, const BlobShape &r);

    //maybe useless
    CV_EXPORTS std::ostream &operator<< (std::ostream &stream, const BlobShape &shape);


    /** @brief Provides convenient methods for continuous n-dimensional array processing, dedicated to convolutional neural networks.

    It's realized as a wrapper over \ref cv::Mat and \ref cv::UMat and will support methods for CPU/GPU switching.
    */
    class CV_EXPORTS Blob
    {
    public:
        explicit Blob();
        /** @brief constructs 4-dimensional blob from input
         * @param in 2-dimensional or 3-dimensional single-channel image (or vector of such images)
         * @param dstCn if specified, forces the size of the output blob's channel dimension
         */
        explicit Blob(InputArray in, int dstCn = -1);

        void create(const BlobShape &shape, int type = CV_32F);

        void fill(InputArray in);
        void fill(const BlobShape &shape, int type, void *data, bool deepCopy = true);

        Mat& getMatRef();
        const Mat& getMatRef() const;
        Mat getMat(int n, int cn);

        //shape getters
        ///returns the real count of blob dimensions
        int dims() const;

        /** @brief returns the size of the corresponding dimension (axis)
        @param axis dimension index
        Python-like indexing is supported, so \p axis can be negative, i.e. -1 is the last dimension.
        The size of non-existing dimensions is assumed to be 1, so the method always succeeds.
        */
        int xsize(int axis) const;

        /** @brief returns the size of the corresponding dimension (axis)
        @param axis dimension index
        Python-like indexing is supported, so \p axis can be negative, i.e. -1 is the last dimension.
        @note Unlike ::xsize, if \p axis points to a non-existing dimension then an error is generated.
        */
        int size(int axis) const;

        /** @brief returns the number of elements
        @param startAxis starting axis (inverse indexing can be used)
        @param endAxis ending (excluded) axis
        @see ::canonicalAxis
        */
        size_t total(int startAxis = 0, int endAxis = -1) const;

        /** @brief converts an axis index to canonical format (where 0 <= axis < ::dims)
        */
        int canonicalAxis(int axis) const;

        /** @brief returns the real shape of the blob
        */
        BlobShape shape() const;

        bool equalShape(const Blob &other) const;

        //shape getters, oriented to 4-dim blob processing
        int cols() const;
        int rows() const;
        int channels() const;
        int num() const;
        Size size2() const;
        Vec4i shape4() const;

        //CPU data pointer functions
        int offset(int n = 0, int cn = 0, int row = 0, int col = 0) const;
        uchar *ptrRaw(int n = 0, int cn = 0, int row = 0, int col = 0);
        float *ptrf(int n = 0, int cn = 0, int row = 0, int col = 0);
        template<typename TFloat>
        TFloat *ptr(int n = 0, int cn = 0, int row = 0, int col = 0);

        /** @brief shares data with another blob
        @returns *this
        */
        Blob &shareFrom(const Blob &blob);
        /** @brief adjusts the blob shape to the required one (data is reallocated if needed)
        @returns *this
        */
        Blob &reshape(const BlobShape &shape);

        int type() const;
        bool isFloat() const;
        bool isDouble() const;

    private:
        const int *sizes() const;

        Mat m;
    };
}
}

#include "blob.inl.hpp"

#endif
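
A brief usage sketch of the interface above (the include path is an assumption; the final module layout may differ):

#include <opencv2/dnn/blob.hpp> //assumed location of the header above
#include <iostream>

int main()
{
    using namespace cv::dnn;

    BlobShape shape(10, 3, 227, 227); //10 images, 3 channels, 227x227 each
    Blob blob;
    blob.create(shape, CV_32F);

    std::cout << blob.shape() << std::endl;  //prints [10, 3, 227, 227]
    std::cout << blob.size(-1) << std::endl; //227: negative axes count from the end
    std::cout << blob.xsize(5) << std::endl; //1: xsize() tolerates non-existing axes

    float *row = blob.ptrf(0, 1, 0); //first row of channel 1 of image 0
    row[0] = 1.0f;
    return 0;
}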
@@ -0,0 +1,284 @@
#ifndef __OPENCV_DNN_DNN_BLOB_INL_HPP__
#define __OPENCV_DNN_DNN_BLOB_INL_HPP__
#include "blob.hpp"

namespace cv
{
namespace dnn
{

inline BlobShape::BlobShape(int ndims, int fill) : sz( (size_t)std::max(ndims, 1) )
{
    for (int i = 0; i < ndims; i++)
        sz[i] = fill;
}

inline BlobShape::BlobShape(int ndims, const int *sizes) : sz( (size_t)std::max(ndims, 1) )
{
    CV_Assert(ndims > 0);
    for (int i = 0; i < ndims; i++)
        sz[i] = sizes[i];
}

inline BlobShape::BlobShape(int num, int cn, int rows, int cols) : sz(4)
{
    sz[0] = num;
    sz[1] = cn;
    sz[2] = rows;
    sz[3] = cols;
}

inline BlobShape::BlobShape(const std::vector<int> &sizes) : sz( sizes.size() )
{
    CV_Assert(sizes.size() > 0);
    for (int i = 0; i < (int)sizes.size(); i++)
        sz[i] = sizes[i];
}

template<int n>
inline BlobShape::BlobShape(const Vec<int, n> &shape) : sz(n)
{
    for (int i = 0; i < n; i++)
        sz[i] = shape[i];
}

inline int BlobShape::dims() const
{
    return (int)sz.size();
}

inline int BlobShape::xsize(int axis) const
{
    if (axis < -dims() || axis >= dims())
        return 1;

    return sz[(axis < 0) ? axis + dims() : axis];
}

inline int BlobShape::size(int axis) const
{
    CV_Assert(-dims() <= axis && axis < dims());
    return sz[(axis < 0) ? axis + dims() : axis];
}

inline int &BlobShape::size(int axis)
{
    CV_Assert(-dims() <= axis && axis < dims());
    return sz[(axis < 0) ? axis + dims() : axis];
}

inline int BlobShape::operator[] (int axis) const
{
    CV_Assert(-dims() <= axis && axis < dims());
    return sz[(axis < 0) ? axis + dims() : axis];
}

inline int &BlobShape::operator[] (int axis)
{
    CV_Assert(-dims() <= axis && axis < dims());
    return sz[(axis < 0) ? axis + dims() : axis];
}

inline ptrdiff_t BlobShape::total()
{
    CV_Assert(dims() >= 1);

    ptrdiff_t res = 1;
    for (int i = 0; i < dims(); i++)
        res *= sz[i];
    return res;
}


inline const int *BlobShape::ptr() const
{
    return sz;
}

inline bool BlobShape::equal(const BlobShape &other) const
{
    if (this->dims() != other.dims())
        return false;

    for (int i = 0; i < other.dims(); i++)
    {
        if (sz[i] != other.sz[i])
            return false;
    }

    return true;
}

inline bool operator== (const BlobShape &l, const BlobShape &r)
{
    return l.equal(r);
}


inline int Blob::canonicalAxis(int axis) const
{
    CV_Assert(-dims() <= axis && axis < dims());

    if (axis < 0)
    {
        return dims() + axis;
    }
    return axis;
}

inline int Blob::dims() const
{
    return m.dims;
}

inline int Blob::xsize(int axis) const
{
    if (axis < -dims() || axis >= dims())
        return 1;

    return sizes()[(axis < 0) ? axis + dims() : axis];
}

inline int Blob::size(int axis) const
{
    CV_Assert(-dims() <= axis && axis < dims());
    return sizes()[(axis < 0) ? axis + dims() : axis];
}

inline size_t Blob::total(int startAxis, int endAxis) const
{
    if (startAxis < 0)
        startAxis += dims();

    if (endAxis == -1)
        endAxis = dims();

    CV_Assert(0 <= startAxis && startAxis <= endAxis && endAxis <= dims());

    size_t size = 1; //assume that blob isn't empty
    for (int i = startAxis; i < endAxis; i++)
        size *= (size_t)sizes()[i];

    return size;
}

inline int Blob::offset(int n, int cn, int row, int col) const
{
    CV_DbgAssert(0 <= n && n < num() && 0 <= cn && cn < channels() && 0 <= row && row < rows() && 0 <= col && col < cols());
    return ((n*channels() + cn)*rows() + row)*cols() + col;
}

inline float *Blob::ptrf(int n, int cn, int row, int col)
{
    CV_Assert(type() == CV_32F);
    return (float*)m.data + offset(n, cn, row, col);
}

inline uchar *Blob::ptrRaw(int n, int cn, int row, int col)
{
    return m.data + m.elemSize() * offset(n, cn, row, col);
}

template<typename TFloat>
inline TFloat* Blob::ptr(int n, int cn, int row, int col)
{
    CV_Assert(type() == cv::DataDepth<TFloat>::value);
    return (TFloat*) ptrRaw(n, cn, row, col);
}

inline BlobShape Blob::shape() const
{
    return BlobShape(dims(), sizes());
}

inline bool Blob::equalShape(const Blob &other) const
{
    if (this->dims() != other.dims())
        return false;

    for (int i = 0; i < dims(); i++)
    {
        if (this->sizes()[i] != other.sizes()[i])
            return false;
    }
    return true;
}

inline Mat& Blob::getMatRef()
{
    return m;
}

inline const Mat& Blob::getMatRef() const
{
    return m;
}

inline Mat Blob::getMat(int n, int cn)
{
    return Mat(rows(), cols(), m.type(), this->ptrRaw(n, cn));
}

inline int Blob::cols() const
{
    return xsize(3);
}

inline int Blob::rows() const
{
    return xsize(2);
}

inline int Blob::channels() const
{
    return xsize(1);
}

inline int Blob::num() const
{
    return xsize(0);
}

inline Size Blob::size2() const
{
    return Size(cols(), rows());
}

inline int Blob::type() const
{
    return m.depth();
}

inline bool Blob::isFloat() const
{
    return (type() == CV_32F);
}

inline bool Blob::isDouble() const
{
    return (type() == CV_64F);
}

inline const int * Blob::sizes() const
{
    return &m.size[0];
}


inline Blob &Blob::shareFrom(const Blob &blob)
{
    this->m = blob.m;
    return *this;
}

inline Blob &Blob::reshape(const BlobShape &shape)
{
    m = m.reshape(1, shape.dims(), shape.ptr());
    return *this;
}

}
}

#endif
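
To make the axis conventions concrete, a minimal sketch (same assumed include path as above) of how total() with a start axis gives per-image and per-channel element strides:

#include <opencv2/dnn/blob.hpp> //assumed location of the headers above

int main()
{
    cv::dnn::Blob blob;
    blob.create(cv::dnn::BlobShape(2, 3, 4, 5), CV_32F);

    size_t imgStride = blob.total(1); //channels*rows*cols = 60 elements per image
    size_t cnStride  = blob.total(2); //rows*cols = 20 elements per channel

    float *base = blob.ptrf();
    float *secondImage = base + imgStride; //same address as blob.ptrf(1)
    return (secondImage == blob.ptrf(1) && cnStride == 20) ? 0 : 1;
}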
File diff suppressed because it is too large
@@ -0,0 +1,79 @@
#!/usr/bin/env python
import os
import sys
import time
import urllib
import hashlib
import argparse
import json


def reporthook(count, block_size, total_size):
    """
    From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
    """
    global start_time
    global prev_duration
    if count == 0:
        start_time = time.time()
        prev_duration = -1
        return
    duration = max(1, time.time() - start_time)
    if int(duration) == int(prev_duration):
        return

    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    percent = int(count * block_size * 100 / total_size)
    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
                     (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()
    prev_duration = duration


# Function for checking SHA1.
def model_checks_out(filename, sha1):
    with open(filename, 'rb') as f:  # binary mode: the models are binary files
        return hashlib.sha1(f.read()).hexdigest() == sha1

def model_download(filename, url, sha1):
    # Check if model exists.
    if os.path.exists(filename) and model_checks_out(filename, sha1):
        print("Model {} already exists.".format(filename))
        return

    # Download and verify model.
    urllib.urlretrieve(url, filename, reporthook)
    if not model_checks_out(filename, sha1):
        print("ERROR: model {} did not download correctly!".format(url))
        sys.exit(1)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Downloading trained model binaries.")
    parser.add_argument("download_list")
    args = parser.parse_args()

    test_dir = os.environ.get("OPENCV_TEST_DATA_PATH")
    if not test_dir:
        print("ERROR: OPENCV_TEST_DATA_PATH environment variable is not specified")
        sys.exit(1)

    try:
        with open(args.download_list, 'r') as f:
            models_to_download = json.load(f)
    except:
        print("ERROR: Can't parse {}".format(args.download_list))
        sys.exit(1)

    for model_name in models_to_download:
        model = models_to_download[model_name]

        dst_dir = os.path.join(test_dir, os.path.dirname(model['file']))
        dst_file = os.path.join(test_dir, model['file'])
        if not os.path.exists(dst_dir):
            print("ERROR: Can't find module testdata path '{}'".format(dst_dir))
            sys.exit(1)

        print("Downloading model '{}' to {} from {} ...".format(model_name, dst_file, model['url']))
        model_download(dst_file, model['url'], model['sha1'])
@@ -0,0 +1,7 @@
{
    "googlenet": {
        "file": "dnn/bvlc_googlenet.caffemodel",
        "url": "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel",
        "sha1": "405fc5acd08a3bb12de8ee5e23a96bec22f08204"
    }
}
@@ -0,0 +1,180 @@
#include "precomp.hpp"

namespace cv
{
namespace dnn
{

Blob::Blob()
{
    int zeros[4] = { 0, 0, 0, 0 };
    m = Mat(4, zeros, CV_32F, NULL);
}

static inline int getMatChannels(const Mat &mat)
{
    return (mat.dims <= 2) ? mat.channels() : mat.size[0];
}

static BlobShape getBlobShape(std::vector<Mat> &vmat, int requestedCn = -1)
{
    BlobShape shape(4);
    int cnSum = 0, matCn;

    CV_Assert(vmat.size() > 0);

    for (size_t i = 0; i < vmat.size(); i++)
    {
        Mat &mat = vmat[i];
        CV_Assert(!mat.empty());
        CV_Assert((mat.dims == 3 && mat.channels() == 1) || mat.dims <= 2);

        matCn = getMatChannels(mat);
        cnSum += matCn;

        if (i == 0)
        {
            shape[-1] = mat.cols;
            shape[-2] = mat.rows;
            shape[-3] = (requestedCn <= 0) ? matCn : requestedCn;
        }
        else
        {
            if (mat.cols != shape[-1] || mat.rows != shape[-2])
                CV_Error(Error::StsError, "Each Mat.size() must be equal");

            if (requestedCn <= 0 && matCn != shape[-3])
                CV_Error(Error::StsError, "Each Mat.channels() (or number of planes) must be equal");
        }
    }

    if (cnSum % shape[-3] != 0)
        CV_Error(Error::StsError, "Total number of channels in vector is not a multiple of requested channel number");

    shape[0] = cnSum / shape[-3];
    return shape;
}

static std::vector<Mat> extractMatVector(InputArray in)
{
    if (in.isMat() || in.isUMat())
    {
        return std::vector<Mat>(1, in.getMat());
    }
    else if (in.isMatVector())
    {
        return *static_cast<const std::vector<Mat>*>(in.getObj());
    }
    else if (in.isUMatVector())
    {
        std::vector<Mat> vmat;
        in.getMatVector(vmat);
        return vmat;
    }
    else
    {
        CV_Assert(in.isMat() || in.isMatVector() || in.isUMat() || in.isUMatVector());
        return std::vector<Mat>();
    }
}

Blob::Blob(InputArray in, int dstCn)
{
    CV_Assert(dstCn == -1 || dstCn > 0);
    std::vector<Mat> inMats = extractMatVector(in);
    BlobShape dstShape = getBlobShape(inMats, dstCn);

    m.create(dstShape.dims(), dstShape.ptr(), CV_32F);

    std::vector<Mat> wrapBuf(dstShape[-3]);
    int elemSize = (int)m.elemSize();
    uchar *ptr = this->ptrRaw();
    for (size_t i = 0; i < inMats.size(); i++)
    {
        Mat inMat = inMats[i];

        if (inMat.dims <= 2)
        {
            inMat.convertTo(inMat, m.type());

            wrapBuf.resize(0);
            for (int cn = 0; cn < inMat.channels(); cn++)
            {
                wrapBuf.push_back(Mat(inMat.rows, inMat.cols, m.type(), ptr));
                ptr += elemSize * inMat.total();
            }

            cv::split(inMat, wrapBuf);
        }
        else
        {
            inMat.convertTo(Mat(inMat.dims, inMat.size, m.type(), ptr), m.type());
            ptr += elemSize * inMat.total();
        }
    }
}

void Blob::fill(const BlobShape &shape, int type, void *data, bool deepCopy)
{
    CV_Assert(type == CV_32F || type == CV_64F);

    if (deepCopy)
    {
        m.create(shape.dims(), shape.ptr(), type);
        memcpy(m.data, data, m.total() * m.elemSize());
    }
    else
    {
        m = Mat(shape.dims(), shape.ptr(), type, data);
    }
}

void Blob::fill(InputArray in)
{
    CV_Assert(in.isMat() || in.isMatVector());

    //TODO
    *this = Blob(in);
}

void Blob::create(const BlobShape &shape, int type)
{
    CV_Assert(type == CV_32F || type == CV_64F);
    m.create(shape.dims(), shape.ptr(), type);
}

inline void squeezeShape(const int srcDims, const int *srcSizes, const int dstDims, int *dstSizes)
{
    const int m = std::min(dstDims, srcDims);

    //copy common (last) dimensions
    for (int i = 0; i < m; i++)
        dstSizes[dstDims - 1 - i] = srcSizes[srcDims - 1 - i];

    //flatten extra dimensions into the first one...
    for (int i = m; i < srcDims; i++)
        dstSizes[0] *= srcSizes[srcDims - 1 - i];

    //...or fill the gaps with ones
    for (int i = m; i < dstDims; i++)
        dstSizes[dstDims - 1 - i] = 1;
}

Vec4i Blob::shape4() const
{
    return Vec4i(num(), channels(), rows(), cols());
}

std::ostream &operator<< (std::ostream &stream, const BlobShape &shape)
{
    stream << "[";

    for (int i = 0; i < shape.dims() - 1; i++)
        stream << shape[i] << ", ";
    if (shape.dims() > 0)
        stream << shape[-1];

    return stream << "]";
}
}
}
@@ -0,0 +1,73 @@
#include "../precomp.hpp"
#include "layers_common.hpp"

namespace cv
{
namespace dnn
{
    class ConcatLayer : public Layer
    {
        int axis;

    public:
        ConcatLayer(LayerParams& params);
        void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
        void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
    };


    REGISTER_LAYER_CLASS(Concat, ConcatLayer)


    ConcatLayer::ConcatLayer(LayerParams &params)
    {
        axis = params.get<int>("axis", 1);
        CV_Assert(axis >= 0);
    }

    void ConcatLayer::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
    {
        CV_Assert(inputs.size() > 0);

        int refType = inputs[0]->type();
        BlobShape refShape = inputs[0]->shape();
        CV_Assert(axis < refShape.dims());

        int axisSum = 0;
        for (size_t i = 0; i < inputs.size(); i++)
        {
            BlobShape curShape = inputs[i]->shape();

            CV_Assert(curShape.dims() == refShape.dims() && inputs[i]->type() == refType);
            for (int axisId = 0; axisId < refShape.dims(); axisId++)
            {
                if (axisId != axis && refShape[axisId] != curShape[axisId])
                    CV_Error(Error::StsBadSize, "Inconsistent shape for ConcatLayer");
            }

            axisSum += curShape[axis];
        }

        refShape[axis] = axisSum;
        outputs.resize(1);
        outputs[0].create(refShape);
    }

    void ConcatLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
    {
        Mat& outMat = outputs[0].getMatRef();
        std::vector<Range> ranges(outputs[0].dims(), Range::all());
        int sizeStart = 0;
        for (size_t i = 0; i < inputs.size(); i++)
        {
            int sizeEnd = sizeStart + inputs[i]->size(axis);
            ranges[axis] = Range(sizeStart, sizeEnd);

            Mat outSubMat = outMat(&ranges[0]);
            inputs[i]->getMatRef().copyTo(outSubMat);

            sizeStart = sizeEnd;
        }
    }
}
}
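
The forward pass above addresses a slab of the output along one axis with OpenCV's n-dimensional Range indexing; a self-contained sketch of that technique on a plain cv::Mat:

#include <opencv2/core.hpp>
#include <vector>

int main()
{
    int sizes[] = {2, 6, 4, 4}; //a 4-D array [num, channels, rows, cols]
    cv::Mat out(4, sizes, CV_32F, cv::Scalar(0));

    int partSizes[] = {2, 2, 4, 4}; //a piece covering two channels
    cv::Mat part(4, partSizes, CV_32F, cv::Scalar(1));

    //select channels [2, 4) of every image and copy the piece into that slab
    std::vector<cv::Range> ranges(4, cv::Range::all());
    ranges[1] = cv::Range(2, 4);
    cv::Mat slab = out(&ranges[0]);
    part.copyTo(slab);
    return 0;
}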
@@ -0,0 +1,74 @@
#ifndef __OPENCV_DNN_LAYERS_IM2COL_HPP__
#define __OPENCV_DNN_LAYERS_IM2COL_HPP__

namespace cv
{
namespace dnn
{

template <typename Dtype>
void im2col_cpu(const Dtype* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    Dtype* data_col)
{
    int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
    int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
    int channels_col = channels * kernel_h * kernel_w;
    for (int c = 0; c < channels_col; ++c) {
        int w_offset = c % kernel_w;
        int h_offset = (c / kernel_w) % kernel_h;
        int c_im = c / kernel_h / kernel_w;
        for (int h = 0; h < height_col; ++h) {
            for (int w = 0; w < width_col; ++w) {
                int h_pad = h * stride_h - pad_h + h_offset;
                int w_pad = w * stride_w - pad_w + w_offset;
                if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
                    data_col[(c * height_col + h) * width_col + w] =
                        data_im[(c_im * height + h_pad) * width + w_pad];
                else
                    data_col[(c * height_col + h) * width_col + w] = 0;
            }
        }
    }
}

template <typename Dtype>
void col2im_cpu(const Dtype* data_col, const int channels,
    const int height, const int width, const int patch_h, const int patch_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    Dtype* data_im)
{
    memset(data_im, 0, height * width * channels * sizeof(Dtype));

    int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
    int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
    int channels_col = channels * patch_h * patch_w;

    for (int c = 0; c < channels_col; ++c)
    {
        int w_offset = c % patch_w;
        int h_offset = (c / patch_w) % patch_h;
        int c_im = c / patch_h / patch_w;

        for (int h = 0; h < height_col; ++h)
        {
            for (int w = 0; w < width_col; ++w)
            {
                int h_pad = h * stride_h - pad_h + h_offset;
                int w_pad = w * stride_w - pad_w + w_offset;

                if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
                    data_im[(c_im * height + h_pad) * width + w_pad] +=
                        data_col[(c * height_col + h) * width_col + w];
            }
        }
    }
}

}
}

#endif
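
For intuition, a tiny sketch of im2col_cpu on a single 3x3 channel with a 2x2 kernel, stride 1 and no padding: every kernel offset becomes a row of the column matrix, so a convolution reduces to one matrix product with the flattened filter weights (the include path is assumed):

#include <cstdio>
#include "im2col.hpp" //assumed to be the header above on the include path

int main()
{
    //one 3x3 input channel, values 1..9 in row-major order
    float im[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    //height_col = width_col = (3 - 2)/1 + 1 = 2; channels_col = 1*2*2 = 4
    float col[4 * 4] = {0};

    cv::dnn::im2col_cpu(im, /*channels*/1, /*height*/3, /*width*/3,
                        /*kernel_h*/2, /*kernel_w*/2, /*pad_h*/0, /*pad_w*/0,
                        /*stride_h*/1, /*stride_w*/1, col);

    //each row holds one kernel offset gathered over all 4 output positions;
    //the first row is {1, 2, 4, 5}: the top-left tap of every 2x2 window
    for (int r = 0; r < 4; r++)
        printf("%g %g %g %g\n", col[r*4 + 0], col[r*4 + 1], col[r*4 + 2], col[r*4 + 3]);
    return 0;
}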
@@ -0,0 +1,137 @@
#include "../precomp.hpp"
#include "layers_common.hpp"

namespace cv
{
namespace dnn
{

//TODO: Extend cv::Mat::reshape method
class ReshapeLayer : public Layer
{
public:
    ReshapeLayer(LayerParams &params);

    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);

    void forward(std::vector<Blob*>&, std::vector<Blob>&) {}

protected:
    BlobShape shapeDesc;
    int inAxis, inNumAxes, autoAxisIdx;

    void computeOutputShape(int startAxis, int endAxis, BlobShape &inpShape, BlobShape &outShape);
};

ReshapeLayer::ReshapeLayer(LayerParams &params)
{
    DictValue paramShape = params.get("dim");
    shapeDesc = BlobShape(paramShape.size());
    autoAxisIdx = -1;

    for (int i = 0; i < paramShape.size(); i++)
    {
        int dim = paramShape.get<int>(i);
        CV_Assert(dim >= -1);

        if (dim == -1)
        {
            if (autoAxisIdx != -1)
                CV_Error(Error::StsBadArg, "New shape contains multiple -1 dims");
            autoAxisIdx = i;
        }

        shapeDesc[i] = dim;
    }

    inAxis = params.get<int>("axis", 0);
    inNumAxes = params.get<int>("num_axes", -1);
    CV_Assert(inNumAxes >= -1);
}

void ReshapeLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    CV_Assert(inputs.size() == 1);
    outputs.resize(1);

    Blob &inpBlob = *inputs[0];
    Blob &outBlob = outputs[0];
    BlobShape inpShape = inpBlob.shape();

    int startAxis = (inAxis >= 0) ? inAxis : inpShape.dims() + 1 + inAxis;
    int endAxis = (inNumAxes == -1) ? inpShape.dims() : startAxis + inNumAxes;
    CV_Assert(0 <= startAxis && startAxis <= inpShape.dims());
    CV_Assert(0 <= endAxis && endAxis <= inpShape.dims());

    int newDims = inpShape.dims() - (endAxis - startAxis) + shapeDesc.dims();
    BlobShape outShape(newDims);

    computeOutputShape(startAxis, endAxis, inpShape, outShape);

    outBlob.shareFrom(inpBlob);
    outBlob.reshape(outShape);
}

void ReshapeLayer::computeOutputShape(int startAxis, int endAxis, BlobShape &inpShape, BlobShape &outShape)
{
    int idx = 0;
    for (int i = 0; i < startAxis; i++)
        outShape[idx++] = inpShape[i];

    for (int i = 0; i < shapeDesc.dims(); i++)
    {
        if (shapeDesc[i] == 0)
        {
            int inpAxisIdx = startAxis + i;
            if (inpAxisIdx < 0 || inpShape.dims() <= inpAxisIdx)
                CV_Error(Error::StsOutOfRange, "new shape contains a 0, but there was no corresponding bottom axis to copy");
            outShape[idx++] = inpShape[startAxis + i];
        }
        else
        {
            outShape[idx++] = (shapeDesc[i] > 0) ? shapeDesc[i] : 1;
        }
    }

    for (int i = endAxis; i < inpShape.dims(); i++)
        outShape[idx++] = inpShape[i];

    if (autoAxisIdx >= 0)
    {
        size_t total = inpShape.total();
        size_t curTotal = 1;
        for (int i = 0; i < outShape.dims(); i++)
        {
            if (i != startAxis + autoAxisIdx)
                curTotal *= outShape[i];
        }

        CV_DbgAssert(curTotal <= total && total % curTotal == 0);

        outShape[startAxis + autoAxisIdx] = (int)(total / curTotal);
    }

    if (inpShape.total() != outShape.total())
    {
        CV_Error(Error::StsBadArg, "Mismatch between input and output blob elements count");
    }
}


Ptr<Layer> createFlattenLayer(LayerParams&)
{
    LayerParams params;

    int shapeDesc[] = {0, -1};
    params.set("dim", DictValue::arrayInt(shapeDesc, 2));

    return Ptr<Layer>(new ReshapeLayer(params));
}


REGISTER_LAYER_CLASS(Reshape, ReshapeLayer)
REGISTER_LAYER_FUNC(Flatten, createFlattenLayer)


}
}
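
For a concrete reading of the dim semantics, a hedged sketch of another factory in the style of createFlattenLayer above (the function name is illustrative and only meaningful inside this translation unit, since ReshapeLayer is file-local):

Ptr<Layer> createReshapeExample(LayerParams&)
{
    LayerParams params;

    //For an input shaped [N, C, H, W] this requests [N, C*H*W, 1]:
    //dim 0 copies the matching input axis, -1 is inferred from the
    //remaining element count, and a positive value is taken literally.
    int shapeDesc[] = {0, -1, 1};
    params.set("dim", DictValue::arrayInt(shapeDesc, 3));

    return Ptr<Layer>(new ReshapeLayer(params));
}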
@@ -0,0 +1,103 @@
#include "../precomp.hpp"
#include "layers_common.hpp"

namespace cv
{
namespace dnn
{

class SliceLayer : public Layer
{
public:
    SliceLayer(LayerParams &params);

    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);

    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);

private:
    int inAxis;
    std::vector<int> slicePoints;
};


REGISTER_LAYER_CLASS(Slice, SliceLayer)


SliceLayer::SliceLayer(LayerParams &params)
{
    inAxis = params.get<int>("axis", 1);

    const DictValue &_slicePoints = params.get("slice_point");
    slicePoints.resize(_slicePoints.size());
    for (int i = 0; i < _slicePoints.size(); i++)
    {
        slicePoints[i] = _slicePoints.get<int>(i);
        CV_Assert(slicePoints[i] > 0 && (i == 0 || slicePoints[i-1] < slicePoints[i]));
    }
}

void SliceLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    CV_Assert(inputs.size() == 1);

    const Blob &inpBlob = *inputs[0];
    int axis = inpBlob.canonicalAxis(inAxis);
    int axisSize = inpBlob.size(axis);
    BlobShape inpShape = inpBlob.shape();

    if (slicePoints.size()) //divide blob with respect to passed parameters
    {
        std::vector<int> outAxisSize;
        int prevSlice = 0;

        for (size_t i = 0; i < slicePoints.size(); i++)
        {
            CV_Assert(prevSlice < slicePoints[i] && slicePoints[i] < axisSize);
            outAxisSize.push_back(slicePoints[i] - prevSlice);
            prevSlice = slicePoints[i];
        }
        outAxisSize.push_back(axisSize - prevSlice);

        outputs.resize(outAxisSize.size());
        for (size_t i = 0; i < outAxisSize.size(); i++)
        {
            inpShape[axis] = outAxisSize[i];
            outputs[i].create(inpShape, inpBlob.type());
        }
    }
    else //divide blob with respect to count of output blobs
    {
        CV_Assert(outputs.size() > 0 && axisSize % outputs.size() == 0);
        int outAxisSize = axisSize / (int)outputs.size();

        for (size_t i = 0; i < outputs.size(); i++)
        {
            inpShape[axis] = outAxisSize;
            outputs[i].create(inpShape, inpBlob.type());
        }
    }
}

void SliceLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    Blob &inpBlob = *inputs[0];
    const int axis = inpBlob.canonicalAxis(inAxis);
    const Mat& inpMat = inpBlob.getMatRef();

    std::vector<Range> ranges(inpBlob.dims(), Range::all());
    int sizeStart = 0;
    for (size_t i = 0; i < outputs.size(); i++)
    {
        int sizeEnd = sizeStart + outputs[i].size(axis);
        ranges[axis] = Range(sizeStart, sizeEnd);

        Mat inpSubMat = inpMat(&ranges[0]);
        inpSubMat.copyTo(outputs[i].getMatRef());

        sizeStart = sizeEnd;
    }
}

}
}
@@ -0,0 +1,58 @@
#include "../precomp.hpp"
#include "layers_common.hpp"

namespace cv
{
namespace dnn
{

//TODO: maybe the "top_count" param is useless because it can be determined by the number of output connections?
class SplitLayer : public Layer
{
public:
    SplitLayer(LayerParams &params);

    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);

    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);

private:
    int outputsNum;
};


REGISTER_LAYER_CLASS(Split, SplitLayer)


SplitLayer::SplitLayer(LayerParams &params)
{
    if (params.has("top_count"))
    {
        outputsNum = params.get<int>("top_count");
        CV_Assert(outputsNum >= 0);
    }
    else
    {
        outputsNum = -1;
    }
}

void SplitLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    CV_Assert(inputs.size() == 1);

    if (outputsNum >= 0)
        outputs.resize(outputsNum);

    for (size_t i = 0; i < outputs.size(); i++)
        outputs[i].create(inputs[0]->shape(), inputs[0]->type());
}

void SplitLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    for (size_t i = 0; i < outputs.size(); i++)
        inputs[0]->getMatRef().copyTo(outputs[i].getMatRef());
}

}
}
@@ -0,0 +1,247 @@
//Copyright (C) 2011 Carl Rogers
//Released under MIT License
//license available in LICENSE file, or at http://www.opensource.org/licenses/mit-license.php

#include"cnpy.h"
#include<complex>
#include<cstdlib>
#include<algorithm>
#include<cstring>
#include<iomanip>

char cnpy::BigEndianTest() {
    unsigned char x[] = {1,0};
    short y = *(short*) x;
    return y == 1 ? '<' : '>';
}

char cnpy::map_type(const std::type_info& t)
{
    if(t == typeid(float) ) return 'f';
    if(t == typeid(double) ) return 'f';
    if(t == typeid(long double) ) return 'f';

    if(t == typeid(int) ) return 'i';
    if(t == typeid(char) ) return 'i';
    if(t == typeid(short) ) return 'i';
    if(t == typeid(long) ) return 'i';
    if(t == typeid(long long) ) return 'i';

    if(t == typeid(unsigned char) ) return 'u';
    if(t == typeid(unsigned short) ) return 'u';
    if(t == typeid(unsigned long) ) return 'u';
    if(t == typeid(unsigned long long) ) return 'u';
    if(t == typeid(unsigned int) ) return 'u';

    if(t == typeid(bool) ) return 'b';

    if(t == typeid(std::complex<float>) ) return 'c';
    if(t == typeid(std::complex<double>) ) return 'c';
    if(t == typeid(std::complex<long double>) ) return 'c';

    return '?';
}

template<> std::vector<char>& cnpy::operator+=(std::vector<char>& lhs, const std::string rhs) {
    lhs.insert(lhs.end(),rhs.begin(),rhs.end());
    return lhs;
}

template<> std::vector<char>& cnpy::operator+=(std::vector<char>& lhs, const char* rhs) {
    //write in little endian
    size_t len = strlen(rhs);
    lhs.reserve(len);
    for(size_t byte = 0; byte < len; byte++) {
        lhs.push_back(rhs[byte]);
    }
    return lhs;
}

void cnpy::parse_npy_header(FILE* fp, unsigned int& word_size, unsigned int*& shape, unsigned int& ndims, bool& fortran_order) {
    char buffer[256];
    size_t res = fread(buffer,sizeof(char),11,fp);
    if(res != 11)
        throw std::runtime_error("parse_npy_header: failed fread");
    std::string header = fgets(buffer,256,fp);
    assert(header[header.size()-1] == '\n');

    size_t loc1, loc2;

    //fortran order
    loc1 = header.find("fortran_order")+16;
    fortran_order = (header.substr(loc1,5) == "True" ? true : false);

    //shape
    loc1 = header.find("(");
    loc2 = header.find(")");
    std::string str_shape = header.substr(loc1+1,loc2-loc1-1);
    if(str_shape[str_shape.size()-1] == ',') ndims = 1;
    else ndims = (unsigned)std::count(str_shape.begin(),str_shape.end(),',')+1;
    shape = new unsigned int[ndims];
    for(unsigned int i = 0;i < ndims;i++) {
        loc1 = str_shape.find(",");
        shape[i] = atoi(str_shape.substr(0,loc1).c_str());
        str_shape = str_shape.substr(loc1+1);
    }

    //endian, word size, data type
    //byte order code | stands for not applicable.
    //not sure when this applies except for byte array
    loc1 = header.find("descr")+9;
    bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
    assert(littleEndian);

    //char type = header[loc1+1];
    //assert(type == map_type(T));

    std::string str_ws = header.substr(loc1+2);
    loc2 = str_ws.find("'");
    word_size = atoi(str_ws.substr(0,loc2).c_str());
}

void cnpy::parse_zip_footer(FILE* fp, unsigned short& nrecs, unsigned int& global_header_size, unsigned int& global_header_offset)
{
    std::vector<char> footer(22);
    fseek(fp,-22,SEEK_END);
    size_t res = fread(&footer[0],sizeof(char),22,fp);
    if(res != 22)
        throw std::runtime_error("parse_zip_footer: failed fread");

    unsigned short disk_no, disk_start, nrecs_on_disk, comment_len;
    disk_no = *(unsigned short*) &footer[4];
    disk_start = *(unsigned short*) &footer[6];
    nrecs_on_disk = *(unsigned short*) &footer[8];
    nrecs = *(unsigned short*) &footer[10];
    global_header_size = *(unsigned int*) &footer[12];
    global_header_offset = *(unsigned int*) &footer[16];
    comment_len = *(unsigned short*) &footer[20];

    assert(disk_no == 0);
    assert(disk_start == 0);
    assert(nrecs_on_disk == nrecs);
    assert(comment_len == 0);
}

cnpy::NpyArray load_the_npy_file(FILE* fp) {
    unsigned int* shape;
    unsigned int ndims, word_size;
    bool fortran_order;
    cnpy::parse_npy_header(fp,word_size,shape,ndims,fortran_order);
    unsigned long long size = 1; //long long so no overflow when multiplying by word_size
    for(unsigned int i = 0;i < ndims;i++) size *= shape[i];

    cnpy::NpyArray arr;
    arr.word_size = word_size;
    arr.shape = std::vector<unsigned int>(shape,shape+ndims);
    delete[] shape;
    arr.data = new char[size*word_size];
    arr.fortran_order = fortran_order;
    size_t nread = fread(arr.data,word_size,size,fp);
    if(nread != size)
        throw std::runtime_error("load_the_npy_file: failed fread");
    return arr;
}

cnpy::npz_t cnpy::npz_load(std::string fname) {
    FILE* fp = fopen(fname.c_str(),"rb");

    if(!fp) printf("npz_load: Error! Unable to open file %s!\n",fname.c_str());
    assert(fp);

    cnpy::npz_t arrays;

    while(1) {
        std::vector<char> local_header(30);
        size_t headerres = fread(&local_header[0],sizeof(char),30,fp);
        if(headerres != 30)
            throw std::runtime_error("npz_load: failed fread");

        //if we've reached the global header, stop reading
        if(local_header[2] != 0x03 || local_header[3] != 0x04) break;

        //read in the variable name
        unsigned short name_len = *(unsigned short*) &local_header[26];
        std::string varname(name_len,' ');
        size_t vname_res = fread(&varname[0],sizeof(char),name_len,fp);
        if(vname_res != name_len)
            throw std::runtime_error("npz_load: failed fread");

        //erase the lagging .npy
        varname.erase(varname.end()-4,varname.end());

        //read in the extra field
        unsigned short extra_field_len = *(unsigned short*) &local_header[28];
        if(extra_field_len > 0) {
            std::vector<char> buff(extra_field_len);
            size_t efield_res = fread(&buff[0],sizeof(char),extra_field_len,fp);
            if(efield_res != extra_field_len)
                throw std::runtime_error("npz_load: failed fread");
        }

        arrays[varname] = load_the_npy_file(fp);
    }

    fclose(fp);
    return arrays;
}

cnpy::NpyArray cnpy::npz_load(std::string fname, std::string varname) {
    FILE* fp = fopen(fname.c_str(),"rb");

    if(!fp) {
        printf("npz_load: Error! Unable to open file %s!\n",fname.c_str());
        abort();
    }

    while(1) {
        std::vector<char> local_header(30);
        size_t header_res = fread(&local_header[0],sizeof(char),30,fp);
        if(header_res != 30)
            throw std::runtime_error("npz_load: failed fread");

        //if we've reached the global header, stop reading
        if(local_header[2] != 0x03 || local_header[3] != 0x04) break;

        //read in the variable name
        unsigned short name_len = *(unsigned short*) &local_header[26];
        std::string vname(name_len,' ');
        size_t vname_res = fread(&vname[0],sizeof(char),name_len,fp);
        if(vname_res != name_len)
            throw std::runtime_error("npz_load: failed fread");
        vname.erase(vname.end()-4,vname.end()); //erase the lagging .npy

        //read in the extra field
        unsigned short extra_field_len = *(unsigned short*) &local_header[28];
        fseek(fp,extra_field_len,SEEK_CUR); //skip past the extra field

        if(vname == varname) {
            NpyArray array = load_the_npy_file(fp);
            fclose(fp);
            return array;
        }
        else {
            //skip past the data
            unsigned int size = *(unsigned int*) &local_header[22];
            fseek(fp,size,SEEK_CUR);
        }
    }

    fclose(fp);
    printf("npz_load: Error! Variable name %s not found in %s!\n",varname.c_str(),fname.c_str());
    abort();
}

cnpy::NpyArray cnpy::npy_load(std::string fname) {

    FILE* fp = fopen(fname.c_str(), "rb");

    if(!fp) {
        printf("npy_load: Error! Unable to open file %s!\n",fname.c_str());
        abort();
    }

    NpyArray arr = load_the_npy_file(fp);

    fclose(fp);
    return arr;
}
@@ -0,0 +1,247 @@
//Copyright (C) 2011 Carl Rogers
//Released under MIT License
//license available in LICENSE file, or at http://www.opensource.org/licenses/mit-license.php

#ifndef LIBCNPY_H_
#define LIBCNPY_H_

#include<string>
#include<stdexcept>
#include<sstream>
#include<vector>
#include<cstdio>
#include<typeinfo>
#include<iostream>
#include<cassert>
#include<map>
#if defined(HAVE_ZLIB) && HAVE_ZLIB
#include<zlib.h>
#endif

namespace cnpy {

    struct NpyArray {
        char* data;
        std::vector<unsigned int> shape;
        unsigned int word_size;
        bool fortran_order;
        void destruct() {delete[] data;}
    };

    struct npz_t : public std::map<std::string, NpyArray>
    {
        void destruct()
        {
            npz_t::iterator it = this->begin();
            for(; it != this->end(); ++it) (*it).second.destruct();
        }
    };

    char BigEndianTest();
    char map_type(const std::type_info& t);
    template<typename T> std::vector<char> create_npy_header(const T* data, const unsigned int* shape, const unsigned int ndims);
    void parse_npy_header(FILE* fp,unsigned int& word_size, unsigned int*& shape, unsigned int& ndims, bool& fortran_order);
    void parse_zip_footer(FILE* fp, unsigned short& nrecs, unsigned int& global_header_size, unsigned int& global_header_offset);
    npz_t npz_load(std::string fname);
    NpyArray npz_load(std::string fname, std::string varname);
    NpyArray npy_load(std::string fname);

    template<typename T> std::vector<char>& operator+=(std::vector<char>& lhs, const T rhs) {
        //write in little endian
        for(char byte = 0; byte < sizeof(T); byte++) {
            char val = *((char*)&rhs+byte);
            lhs.push_back(val);
        }
        return lhs;
    }

    template<> std::vector<char>& operator+=(std::vector<char>& lhs, const std::string rhs);
    template<> std::vector<char>& operator+=(std::vector<char>& lhs, const char* rhs);


    template<typename T> std::string tostring(T i, int = 0, char = ' ') {
        std::stringstream s;
        s << i;
        return s.str();
    }

    template<typename T> void npy_save(std::string fname, const T* data, const unsigned int* shape, const unsigned int ndims, std::string mode = "w") {
        FILE* fp = NULL;

        if(mode == "a") fp = fopen(fname.c_str(),"r+b");

        if(fp) {
            //file exists. we need to append to it. read the header, modify the array size
            unsigned int word_size, tmp_dims;
            unsigned int* tmp_shape = 0;
            bool fortran_order;
            parse_npy_header(fp,word_size,tmp_shape,tmp_dims,fortran_order);
            assert(!fortran_order);

            if(word_size != sizeof(T)) {
                std::cout<<"libnpy error: "<<fname<<" has word size "<<word_size<<" but npy_save appending data sized "<<sizeof(T)<<"\n";
                assert( word_size == sizeof(T) );
            }
            if(tmp_dims != ndims) {
                std::cout<<"libnpy error: npy_save attempting to append misdimensioned data to "<<fname<<"\n";
                assert(tmp_dims == ndims);
            }

            for(unsigned i = 1; i < ndims; i++) {
                if(shape[i] != tmp_shape[i]) {
                    std::cout<<"libnpy error: npy_save attempting to append misshaped data to "<<fname<<"\n";
                    assert(shape[i] == tmp_shape[i]);
                }
            }
            tmp_shape[0] += shape[0];

            fseek(fp,0,SEEK_SET);
            std::vector<char> header = create_npy_header(data,tmp_shape,ndims);
            fwrite(&header[0],sizeof(char),header.size(),fp);
            fseek(fp,0,SEEK_END);

            delete[] tmp_shape;
        }
        else {
            fp = fopen(fname.c_str(),"wb");
            std::vector<char> header = create_npy_header(data,shape,ndims);
            fwrite(&header[0],sizeof(char),header.size(),fp);
        }

        unsigned int nels = 1;
        for(unsigned i = 0;i < ndims;i++) nels *= shape[i];

        fwrite(data,sizeof(T),nels,fp);
        fclose(fp);
    }

    template<typename T> void npz_save(std::string zipname, std::string fname, const T* data, const unsigned int* shape, const unsigned int ndims, std::string mode = "w")
    {
        //first, append a .npy to the fname
        fname += ".npy";

        //now, on with the show
        FILE* fp = NULL;
        unsigned short nrecs = 0;
        unsigned int global_header_offset = 0;
        std::vector<char> global_header;

        if(mode == "a") fp = fopen(zipname.c_str(),"r+b");

        if(fp) {
            //zip file exists. we need to add a new npy file to it.
            //first read the footer. this gives us the offset and size of the global header
            //then read and store the global header.
            //below, we will write the new data at the start of the global header then append the global header and footer below it
            unsigned int global_header_size;
            parse_zip_footer(fp,nrecs,global_header_size,global_header_offset);
            fseek(fp,global_header_offset,SEEK_SET);
            global_header.resize(global_header_size);
            size_t res = fread(&global_header[0],sizeof(char),global_header_size,fp);
            if(res != global_header_size){
                throw std::runtime_error("npz_save: header read error while adding to existing zip");
            }
            fseek(fp,global_header_offset,SEEK_SET);
        }
        else {
            fp = fopen(zipname.c_str(),"wb");
        }

        std::vector<char> npy_header = create_npy_header(data,shape,ndims);

        unsigned long nels = 1;
        for (unsigned m = 0; m < ndims; m++) nels *= shape[m];
        int nbytes = nels*sizeof(T) + npy_header.size();

        //get the CRC of the data to be added
#if defined(HAVE_ZLIB) && HAVE_ZLIB
        unsigned int crc = crc32(0L,(unsigned char*)&npy_header[0],npy_header.size());
        crc = crc32(crc,(unsigned char*)data,nels*sizeof(T));
#else
        unsigned int crc = 0;
#endif

        //build the local header
        std::vector<char> local_header;
        local_header += "PK"; //first part of sig
        local_header += (unsigned short) 0x0403; //second part of sig
        local_header += (unsigned short) 20; //min version to extract
        local_header += (unsigned short) 0; //general purpose bit flag
        local_header += (unsigned short) 0; //compression method
        local_header += (unsigned short) 0; //file last mod time
        local_header += (unsigned short) 0; //file last mod date
        local_header += (unsigned int) crc; //crc
        local_header += (unsigned int) nbytes; //compressed size
        local_header += (unsigned int) nbytes; //uncompressed size
        local_header += (unsigned short) fname.size(); //fname length
        local_header += (unsigned short) 0; //extra field length
        local_header += fname;

        //build global header
        global_header += "PK"; //first part of sig
        global_header += (unsigned short) 0x0201; //second part of sig
        global_header += (unsigned short) 20; //version made by
        global_header.insert(global_header.end(),local_header.begin()+4,local_header.begin()+30);
        global_header += (unsigned short) 0; //file comment length
        global_header += (unsigned short) 0; //disk number where file starts
        global_header += (unsigned short) 0; //internal file attributes
        global_header += (unsigned int) 0; //external file attributes
        global_header += (unsigned int) global_header_offset; //relative offset of local file header, since it begins where the global header used to begin
        global_header += fname;

        //build footer
        std::vector<char> footer;
        footer += "PK"; //first part of sig
        footer += (unsigned short) 0x0605; //second part of sig
        footer += (unsigned short) 0; //number of this disk
        footer += (unsigned short) 0; //disk where footer starts
        footer += (unsigned short) (nrecs+1); //number of records on this disk
        footer += (unsigned short) (nrecs+1); //total number of records
        footer += (unsigned int) global_header.size(); //nbytes of global headers
        footer += (unsigned int) (global_header_offset + nbytes + local_header.size()); //offset of start of global headers, since global header now starts after newly written array
        footer += (unsigned short) 0; //zip file comment length

        //write everything
        fwrite(&local_header[0],sizeof(char),local_header.size(),fp);
        fwrite(&npy_header[0],sizeof(char),npy_header.size(),fp);
        fwrite(data,sizeof(T),nels,fp);
        fwrite(&global_header[0],sizeof(char),global_header.size(),fp);
        fwrite(&footer[0],sizeof(char),footer.size(),fp);
        fclose(fp);
    }

    template<typename T> std::vector<char> create_npy_header(const T*, const unsigned int* shape, const unsigned int ndims) {

        std::vector<char> dict;
        dict += "{'descr': '";
        dict += BigEndianTest();
        dict += map_type(typeid(T));
        dict += tostring(sizeof(T));
        dict += "', 'fortran_order': False, 'shape': (";
        dict += tostring(shape[0]);
        for(unsigned i = 1;i < ndims;i++) {
            dict += ", ";
            dict += tostring(shape[i]);
        }
        if(ndims == 1) dict += ",";
        dict += "), }";
        //pad with spaces so that preamble+dict is modulo 16 bytes. preamble is 10 bytes. dict needs to end with \n
        int remainder = 16 - (10 + dict.size()) % 16;
        dict.insert(dict.end(),remainder,' ');
        dict.back() = '\n';

        std::vector<char> header;
        header += (unsigned char) 0x93;
        header += "NUMPY";
        header += (char) 0x01; //major version of numpy format
        header += (char) 0x00; //minor version of numpy format
        header += (unsigned short) dict.size();
        header.insert(header.end(),dict.begin(),dict.end());

        return header;
    }


}

#endif
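
A minimal round trip with the cnpy API declared above (the file name is illustrative):

#include "cnpy.h"

int main()
{
    //save a 2x3 float array in NumPy's .npy format, then load it back
    float data[6] = {0, 1, 2, 3, 4, 5};
    unsigned int shape[] = {2, 3};
    cnpy::npy_save("example.npy", data, shape, 2); //mode "w" by default

    cnpy::NpyArray arr = cnpy::npy_load("example.npy");
    //arr.shape == {2, 3}, arr.word_size == sizeof(float)
    const float *loaded = reinterpret_cast<const float*>(arr.data);
    bool ok = (loaded[5] == 5.0f);
    arr.destruct(); //cnpy uses manual cleanup, not RAII
    return ok ? 0 : 1;
}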
@@ -0,0 +1,24 @@
#ifndef __OPENCV_DNN_TEST_NPY_BLOB_HPP__
#define __OPENCV_DNN_TEST_NPY_BLOB_HPP__
#include "test_precomp.hpp"
#include "cnpy.h"

inline cv::dnn::Blob blobFromNPY(const cv::String &path)
{
    cnpy::NpyArray npyBlob = cnpy::npy_load(path.c_str());
    cv::dnn::BlobShape shape((int)npyBlob.shape.size(), (int*)&npyBlob.shape[0]);

    cv::dnn::Blob blob;
    blob.fill(shape, CV_32F, npyBlob.data);

    npyBlob.destruct();
    return blob;
}

inline void saveBlobToNPY(cv::dnn::Blob &blob, const cv::String &path)
{
    cv::Vec4i shape = blob.shape4();
    cnpy::npy_save(path.c_str(), blob.ptr<float>(), (unsigned*)&shape[0], 4);
}

#endif
@@ -0,0 +1,40 @@
#include "test_precomp.hpp"
#include "npy_blob.hpp"

namespace cvtest
{

using namespace std;
using namespace testing;
using namespace cv;
using namespace cv::dnn;

template<typename TString>
static std::string getTestFile(TString filename)
{
    return (getOpenCVExtraDir() + "/dnn/") + filename;
}

TEST(Reproducibility_AlexNet, Accuracy)
{
    Net net;
    {
        Ptr<Importer> importer = createCaffeImporter(getTestFile("bvlc_alexnet.prototxt"), getTestFile("bvlc_alexnet.caffemodel"));
        ASSERT_TRUE(importer != NULL);
        importer->populateNet(net);
    }

    std::vector<Mat> inpMats;
    inpMats.push_back( imread(getTestFile("alexnet_0.png")) );
    inpMats.push_back( imread(getTestFile("alexnet_1.png")) );
    ASSERT_TRUE(!inpMats[0].empty() && !inpMats[1].empty());

    net.setBlob(".data", Blob(inpMats));
    net.forward();

    Blob out = net.getBlob("prob");
    Blob ref = blobFromNPY(getTestFile("alexnet.npy"));
    normAssert(ref, out, "prob");
}

}
@@ -0,0 +1,24 @@
#ifndef __OPENCV_TEST_COMMON_HPP__
#define __OPENCV_TEST_COMMON_HPP__

inline const std::string &getOpenCVExtraDir()
{
    return cvtest::TS::ptr()->get_data_path();
}

inline void normAssert(cv::InputArray ref, cv::InputArray get, const char *comment = "")
{
    double normL1 = cvtest::norm(ref, get, cv::NORM_L1) / ref.getMat().total();
    EXPECT_NEAR(normL1, 0, 0.0001) << comment;

    double normInf = cvtest::norm(ref, get, cv::NORM_INF);
    EXPECT_NEAR(normInf, 0, 0.001) << comment;
}

inline void normAssert(cv::dnn::Blob &ref, cv::dnn::Blob &test, const char *comment = "")
{
    EXPECT_EQ(ref.shape(), test.shape());
    normAssert(ref.getMatRef(), test.getMatRef(), comment);
}

#endif
@@ -0,0 +1,41 @@
#include "test_precomp.hpp"
#include "npy_blob.hpp"

namespace cvtest
{

using namespace std;
using namespace testing;
using namespace cv;
using namespace cv::dnn;

template<typename TString>
static std::string getTestFile(TString filename)
{
    return (getOpenCVExtraDir() + "/dnn/") + filename;
}

TEST(Reproducibility_GoogLeNet, Accuracy)
{
    Net net;
    {
        Ptr<Importer> importer = createCaffeImporter(getTestFile("bvlc_googlenet.prototxt"), getTestFile("bvlc_googlenet.caffemodel"));
        ASSERT_TRUE(importer != NULL);
        importer->populateNet(net);
    }

    std::vector<Mat> inpMats;
    inpMats.push_back( imread(getTestFile("googlenet_0.jpg")) );
    inpMats.push_back( imread(getTestFile("googlenet_1.jpg")) );
    ASSERT_TRUE(!inpMats[0].empty() && !inpMats[1].empty());

    Blob inp(inpMats);
    net.setBlob(".data", inp);
    net.forward();

    Blob out = net.getBlob("prob");
    Blob ref = blobFromNPY(getTestFile("googlenet_prob.npy"));
    normAssert(out, ref);
}

}
@@ -0,0 +1,103 @@
#include "test_precomp.hpp"
#include <iostream>
#include "npy_blob.hpp"

namespace cvtest
{

using namespace std;
using namespace testing;
using namespace cv;
using namespace cv::dnn;

static std::string getOpenCVExtraDir()
{
    return cvtest::TS::ptr()->get_data_path();
}

template<typename TStr>
static std::string getTestFile(TStr filename)
{
    return (getOpenCVExtraDir() + "/dnn/layers/") + filename;
}

template<typename T, int n>
bool isEqual(const cv::Vec<T, n> &l, const cv::Vec<T, n> &r)
{
    for (int i = 0; i < n; i++)
    {
        if (l[i] != r[i])
            return false;
    }
    return true;
}

static void testLayer(String proto, String caffemodel = String())
{
    Blob inp = blobFromNPY(getTestFile("blob.npy"));
    Blob ref = blobFromNPY(getTestFile(proto + ".caffe.npy"));

    Net net;
    {
        Ptr<Importer> importer = createCaffeImporter(getTestFile(proto), caffemodel);
        ASSERT_TRUE(importer != NULL);
        importer->populateNet(net);
    }

    net.setBlob(".input", inp);
    net.forward();
    Blob out = net.getBlob("output");

    EXPECT_EQ(ref.shape(), out.shape());

    Mat &mRef = ref.getMatRef();
    Mat &mOut = out.getMatRef();

    double normL1 = cvtest::norm(mRef, mOut, NORM_L1) / ref.total();
    EXPECT_LE(normL1, 0.0001);

    double normInf = cvtest::norm(mRef, mOut, NORM_INF);
    EXPECT_LE(normInf, 0.0001);
}

TEST(Layer_Softmax_Test, Accuracy)
{
    testLayer("softmax.prototxt");
}

TEST(Layer_LRN_spatial_Test, Accuracy)
{
    testLayer("lrn_spatial.prototxt");
}

TEST(Layer_LRN_channels_Test, Accuracy)
{
    testLayer("lrn_channels.prototxt");
}

TEST(Layer_Reshape_Split_Slice_Test, Accuracy)
{
    Net net;
    {
        Ptr<Importer> importer = createCaffeImporter(getTestFile("reshape_and_slice_routines.prototxt"));
        ASSERT_TRUE(importer != NULL);
        importer->populateNet(net);
    }

    BlobShape shape = BlobShape(Vec2i(6, 12));

    Mat1f inputMat(shape[0], shape[1]);
    RNG rng(0);
    rng.fill(inputMat, RNG::UNIFORM, -1, 1);

    Blob input(inputMat);
    input.reshape(shape);
    net.setBlob(".input", input);
    net.forward();
    Blob output = net.getBlob("output");

    input.fill(shape, CV_32F, inputMat.data);
    normAssert(input, output);
}

}
@@ -0,0 +1,21 @@
name: "test_LRN_channels"
input: "input"

input_dim: 2
input_dim: 6
input_dim: 75
input_dim: 113

layer {
  type: "LRN"
  lrn_param {
    norm_region: ACROSS_CHANNELS;
    local_size: 5
    alpha: 1.1
    beta: 0.75
  }

  name: "output"
  bottom: "input"
  top: "output"
}
@@ -0,0 +1,22 @@
name: "test_LRN_spatial"
input: "input"

input_dim: 2
input_dim: 6
input_dim: 75
input_dim: 113

layer {
  type: "LRN"

  lrn_param {
    norm_region: WITHIN_CHANNEL;
    local_size: 5
    alpha: 0.9
    beta: 0.75
  }

  name: "output"
  bottom: "input"
  top: "output"
}
@@ -0,0 +1,77 @@
name: "test_reshape_splice_split"
input: "input"

layer{
  type: "Split"
  name: "dummy_split"
  bottom: "input"
  top: "dummy_split_0"
  top: "dummy_split_1"
}
layer{
  type: "Slice"
  name: "dummy_slice_0"
  bottom: "dummy_split_0"
  slice_param{
    slice_point: 1
    slice_point: 2
  }
  top: "dummy_slice_0_0"
  top: "dummy_slice_0_1"
  top: "dummy_slice_0_2"
}
layer{
  type: "Slice"
  name: "dummy_slice_1"
  bottom: "dummy_split_1"
  slice_param{
    slice_point: 1
    slice_point: 2
  }
  top: "dummy_slice_1_0"
  top: "dummy_slice_1_1"
  top: "dummy_slice_1_2"
}
layer{
  type: "Sigmoid"
  name: "alter_sliced_split"
  bottom: "dummy_slice_1_2"
  top: "dummy_slice_1_2"
}
layer{
  type: "Concat"
  name: "dummy_concat"
  bottom: "dummy_slice_0_0"
  bottom: "dummy_slice_1_1"
  bottom: "dummy_slice_0_2"
  top: "dummy_concat"
}
layer{
  type: "Reshape"
  name: "dummy_reshape"
  bottom: "dummy_concat"
  reshape_param{
    shape{
      dim: 0
      dim: 1
      dim: 1
      dim: -1
      dim: 1
    }
    axis: 1
    num_axes: 1
  }
  top: "dummy_reshape"
}
layer{
  type: "Flatten"
  name: "dummy_reshape_undo"
  bottom: "dummy_reshape"
  top: "dummy_reshape_undo"
}
layer{
  type: "Split"
  name: "output"
  bottom: "dummy_reshape_undo"
  top: "output"
}
@@ -0,0 +1,15 @@
name: "test_Softmax"
input: "input"

input_dim: 2
input_dim: 5
input_dim: 75
input_dim: 113

layer {
  type: "Softmax"

  name: "output"
  bottom: "input"
  top: "output"
}
File diff suppressed because one or more lines are too long