Added element-wise layers. Fixed downloader and ConcatLayer.

pull/265/head
Vitaliy Lyudvichenko 9 years ago
parent de4d8005a9
commit ec74678920
  1. modules/dnn/scripts/download_model.py (18 lines changed)
  2. modules/dnn/src/layers/blank_layer.cpp (10 lines changed)
  3. modules/dnn/src/layers/concat_layer.cpp (15 lines changed)
  4. modules/dnn/src/layers/elementwise_layers.cpp (95 lines changed)
  5. modules/dnn/test/cnpy.h (2 lines changed)

modules/dnn/scripts/download_model.py

@@ -13,18 +13,25 @@ def reporthook(count, block_size, total_size):
     From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
     """
     global start_time
+    global prev_duration
     if count == 0:
         start_time = time.time()
+        prev_duration = -1
         return
-    duration = time.time() - start_time
+    duration = max(1, time.time() - start_time)
+    if int(duration) == int(prev_duration):
+        return
     progress_size = int(count * block_size)
     speed = int(progress_size / (1024 * duration))
     percent = int(count * block_size * 100 / total_size)
-    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
-                     (percent, progress_size / (1024 * 1024), speed, duration))
+    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
+                     (percent, progress_size / (1024 * 1024), speed, duration))
     sys.stdout.flush()
+    prev_duration = duration

-# Closure-d function for checking SHA1.
+# Function for checking SHA1.
 def model_checks_out(filename, sha1):
     with open(filename, 'r') as f:
         return hashlib.sha1(f.read()).hexdigest() == sha1

@@ -37,7 +44,8 @@ def model_download(filename, url, sha1):
     # Download and verify model.
     urllib.urlretrieve(url, filename, reporthook)
-    if not model_checks_out():
+    print model_checks_out(filename, sha1)
+    if not model_checks_out(filename, sha1):
         print("ERROR: model {} did not download correctly!".format(url))
         sys.exit(1)

modules/dnn/src/layers/blank_layer.cpp

@@ -18,7 +18,7 @@ namespace dnn
     {
         outputs.resize(inputs.size());
         for (size_t i = 0; i < inputs.size(); i++)
-            outputs[i] = *inputs[i];
+            outputs[i].shareFrom(*inputs[i]);
     }

     void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
@@ -28,12 +28,6 @@ namespace dnn
     }
 };

-static Ptr<Layer> blankLayerRegisterer(LayerParams &params)
-{
-    return Ptr<Layer>(new BlankLayer(params));
-}
-
-REGISTER_LAYER_FUNC(Dropout, blankLayerRegisterer)
+REGISTER_LAYER_CLASS(Dropout, BlankLayer)
 }
 }
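
The second hunk above swaps a hand-written registration function for the class-based macro. For reference only (this is not code from the diff), below is a minimal standalone sketch of the kind of name-to-factory registry such a macro can expand to; Params, makeLayer, REGISTER_LAYER_CLASS_SKETCH and the other names are hypothetical stand-ins, not the cv::dnn API.

#include <cstdio>
#include <map>
#include <memory>
#include <string>

// Hypothetical stand-ins for LayerParams / Layer from the dnn module.
struct Params {};
struct Layer { virtual ~Layer() {} virtual const char *name() const = 0; };
struct BlankLayer : Layer
{
    explicit BlankLayer(const Params &) {}
    const char *name() const { return "BlankLayer"; }
};

// Name -> constructor-function map.
typedef std::shared_ptr<Layer> (*LayerFactory)(const Params &);
static std::map<std::string, LayerFactory> &registry()
{
    static std::map<std::string, LayerFactory> r;
    return r;
}

// A class-based registration macro can boil down to a tiny factory function
// plus one insertion into the registry at static-initialization time.
template<typename T>
std::shared_ptr<Layer> makeLayer(const Params &p) { return std::shared_ptr<Layer>(new T(p)); }

#define REGISTER_LAYER_CLASS_SKETCH(type, cls) \
    static const bool registered_##type = (registry()[#type] = &makeLayer<cls>, true);

REGISTER_LAYER_CLASS_SKETCH(Dropout, BlankLayer)

int main()
{
    Params p;
    std::shared_ptr<Layer> l = registry()["Dropout"](p); // look up by type name
    std::printf("%s\n", l->name());                      // prints: BlankLayer
    return 0;
}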

modules/dnn/src/layers/concat_layer.cpp

@@ -1,7 +1,8 @@
 #include "../precomp.hpp"
 #include "layers_common.hpp"
 #include <iostream>
-#include <stdlib.h>
+#include <cstdlib>
+using std::memcpy;

 namespace cv
 {
@@ -32,21 +33,23 @@ namespace dnn
         CV_Assert(inputs.size() > 0);

         int axisSum = 0;
+        BlobShape refShape = inputs[0]->shape();
         for (size_t i = 0; i < inputs.size(); i++)
         {
-            Vec4i refShape = inputs[0]->shape4();
-            Vec4i curShape = inputs[i]->shape4();
+            BlobShape curShape = inputs[i]->shape();

-            for (int axisId = 0; axisId < 4; axisId++)
+            CV_Assert(curShape.dims() > axis && curShape.dims() == refShape.dims());
+            for (int axisId = 0; axisId < refShape.dims(); axisId++)
             {
                 if (axisId != axis && refShape[axisId] != curShape[axisId])
-                    CV_Error(cv::Error::StsBadArg, "Inconsitent shape for ConcatLayer");
+                    CV_Error(Error::StsBadArg, "Inconsitent shape for ConcatLayer");
             }

             axisSum += curShape[axis];
         }

-        Vec4i shape = inputs[0]->shape4();
+        BlobShape shape = refShape;
         shape[axis] = axisSum;
         outputs.resize(1);
         outputs[0].create(shape);
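
Not part of the diff, but as a sanity check on the logic above: all inputs must agree on every axis except the concatenation axis, and the output extent along that axis is the sum of the inputs' extents. A minimal standalone sketch of that rule, with std::vector<int> standing in for BlobShape and concatShape as a hypothetical helper:

#include <cassert>
#include <cstdio>
#include <vector>

// Computes the output shape of a concatenation along `axis`, mirroring the
// checks in ConcatLayer::allocate (hypothetical helper, not the dnn API).
static std::vector<int> concatShape(const std::vector<std::vector<int> > &shapes, int axis)
{
    assert(!shapes.empty());
    std::vector<int> ref = shapes[0];
    int axisSum = 0;
    for (size_t i = 0; i < shapes.size(); i++)
    {
        const std::vector<int> &cur = shapes[i];
        assert((int)cur.size() > axis && cur.size() == ref.size());
        for (int axisId = 0; axisId < (int)ref.size(); axisId++)
            assert(axisId == axis || ref[axisId] == cur[axisId]); // every other axis must match
        axisSum += cur[axis];
    }
    std::vector<int> out = ref;
    out[axis] = axisSum;
    return out;
}

int main()
{
    std::vector<std::vector<int> > shapes;
    shapes.push_back(std::vector<int>{1, 3, 10, 10});
    shapes.push_back(std::vector<int>{1, 5, 10, 10});
    std::vector<int> out = concatShape(shapes, 1);                  // concatenate along channels
    std::printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);   // prints: 1 8 10 10
    return 0;
}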

modules/dnn/src/layers/elementwise_layers.cpp

@@ -1,6 +1,10 @@
 #include "../precomp.hpp"
 #include "layers_common.hpp"
-#include <math.h>
+#include <cmath>
+using std::abs;
+using std::exp;
+using std::tanh;
+using std::pow;

 namespace cv
 {
@@ -19,21 +23,33 @@ namespace dnn
     {
         outputs.resize(inputs.size());
         for (size_t i = 0; i < inputs.size(); i++)
-            outputs[i] = *inputs[i]; //no data copy
+            outputs[i].shareFrom(*inputs[i]); //no data copy
     }

     void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
     {
         CV_Assert(inputs.size() == outputs.size());

         for (size_t i = 0; i < inputs.size(); i++)
         {
-            CV_Assert(inputs[i]->ptrf() == outputs[i].ptrf());
-            float *data = outputs[i].ptrf();
+            CV_Assert(inputs[i]->ptrRaw() == outputs[i].ptrRaw() && inputs[i]->type() == outputs[i].type());

             size_t size = outputs[i].total();
-            for (size_t j = 0; j < size; j++)
-                data[j] = func(data[j]);
+
+            if (outputs[i].isFloat())
+            {
+                float *data = outputs[i].ptrf();
+                for (size_t j = 0; j < size; j++)
+                    data[j] = func(data[j]);
+            }
+            else if (outputs[i].isDouble())
+            {
+                double *data = outputs[i].ptr<double>();
+                for (size_t j = 0; j < size; j++)
+                    data[j] = func(data[j]);
+            }
+            else
+            {
+                CV_Error(Error::StsNotImplemented, "Only CV_32F and CV_64F blobs are supported");
+            }
         }
     }
 };
@@ -51,9 +67,10 @@ namespace dnn
         negative_slope = 0.f;
     }

-    inline float operator()(float x)
+    template<typename TFloat>
+    inline TFloat operator()(TFloat x)
     {
-        return (x >= 0) ? x : negative_slope * x;
+        return (x >= (TFloat)0) ? x : negative_slope * x;
     }
 };
@@ -61,14 +78,70 @@ namespace dnn
 {
     TanHFunctor(LayerParams&) {}

-    inline float operator()(float x)
+    template<typename TFloat>
+    inline TFloat operator()(TFloat x)
     {
         return tanh(x);
     }
 };

+struct SigmoidFunctor
+{
+    SigmoidFunctor(LayerParams&) {}
+
+    template<typename TFloat>
+    inline TFloat operator()(TFloat x)
+    {
+        return (TFloat)1 / ((TFloat)1 + exp(-x));
+    }
+};
+
+struct AbsValFunctor
+{
+    AbsValFunctor(LayerParams&) {}
+
+    template<typename TFloat>
+    inline TFloat operator()(TFloat x)
+    {
+        return abs(x);
+    }
+};
+
+struct PowerFunctor
+{
+    float power, scale, shift;
+
+    PowerFunctor(LayerParams &params)
+    {
+        power = params.get<float>("power", 1.0f);
+        scale = params.get<float>("scale", 1.0f);
+        shift = params.get<float>("shift", 0.0f);
+    }
+
+    template<typename TFloat>
+    inline TFloat operator()(TFloat x)
+    {
+        return pow((TFloat)shift + (TFloat)scale * x, (TFloat)power);
+    }
+};
+
+struct BNLLFunctor
+{
+    BNLLFunctor(LayerParams&) {}
+
+    template<typename TFloat>
+    inline TFloat operator()(TFloat x)
+    {
+        return log((TFloat)1 + exp(x));
+    }
+};
+
 REGISTER_LAYER_CLASS(ReLU, ElementWiseLayer<ReLUFunctor>)
 REGISTER_LAYER_CLASS(TanH, ElementWiseLayer<TanHFunctor>)
+REGISTER_LAYER_CLASS(BNLL, ElementWiseLayer<BNLLFunctor>)
+REGISTER_LAYER_CLASS(Power, ElementWiseLayer<PowerFunctor>)
+REGISTER_LAYER_CLASS(AbsVal, ElementWiseLayer<AbsValFunctor>)
+REGISTER_LAYER_CLASS(Sigmoid, ElementWiseLayer<SigmoidFunctor>)
 }
 }
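
The functors are templated on TFloat so that one definition serves both CV_32F and CV_64F blobs, and forward() applies the functor in place over the raw buffer. A minimal standalone sketch of that pattern, outside the dnn API (SigmoidOp and applyInPlace are hypothetical names, not code from the commit):

#include <cmath>
#include <cstdio>
#include <vector>

// Functor templated on the element type, mirroring the TFloat pattern above.
struct SigmoidOp
{
    template<typename TFloat>
    TFloat operator()(TFloat x) const { return (TFloat)1 / ((TFloat)1 + std::exp(-x)); }
};

// Applies `func` in place over a buffer, as ElementWiseLayer::forward does
// separately for float (CV_32F) and double (CV_64F) data.
template<typename TFloat, typename Func>
static void applyInPlace(std::vector<TFloat> &data, Func func)
{
    for (size_t j = 0; j < data.size(); j++)
        data[j] = func(data[j]);
}

int main()
{
    std::vector<float>  f(3, 0.0f);
    std::vector<double> d(3, 0.0);
    applyInPlace(f, SigmoidOp()); // same functor body instantiated for float...
    applyInPlace(d, SigmoidOp()); // ...and for double
    std::printf("%f %f\n", f[0], d[0]); // both print 0.5
    return 0;
}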

modules/dnn/test/cnpy.h

@@ -231,7 +231,7 @@ namespace cnpy {
     dict.back() = '\n';

     std::vector<char> header;
-    header += (char) 0x93;
+    header += (unsigned char) 0x93;
     header += "NUMPY";
     header += (char) 0x01; //major version of numpy format
     header += (char) 0x00; //minor version of numpy format
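
For reference (not from the diff): the bytes assembled here are the .npy preamble, 0x93 followed by "NUMPY" and the two format-version bytes. 0x93 is above 127, so pushing it through a plain char can draw truncation warnings from some compilers; presumably the switch to unsigned char is meant to avoid that. A standalone sketch of the same preamble, using push_back/insert in place of cnpy's operator+= overloads:

#include <cstdio>
#include <string>
#include <vector>

int main()
{
    std::vector<char> header;
    header.push_back(static_cast<char>(static_cast<unsigned char>(0x93))); // magic byte
    const std::string magic = "NUMPY";
    header.insert(header.end(), magic.begin(), magic.end());
    header.push_back(0x01); // major version of the .npy format
    header.push_back(0x00); // minor version of the .npy format

    for (unsigned char c : header)
        std::printf("%02X ", c);
    std::printf("\n"); // prints: 93 4E 55 4D 50 59 01 00
    return 0;
}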
