Rewrote concat_layer.cpp in OpenCV style. Fixed the slice layer; added tests for the slice, split and reshape layers.

pull/265/head
Vitaliy Lyudvichenko 10 years ago
parent 71cfae419c
commit 527fa656a5
Changed files:
  1. modules/dnn/include/opencv2/dnn/blob.inl.hpp (2 lines changed)
  2. modules/dnn/include/opencv2/dnn/dnn.hpp (2 lines changed)
  3. modules/dnn/src/dnn.cpp (12 lines changed)
  4. modules/dnn/src/layers/concat_layer.cpp (52 lines changed)
  5. modules/dnn/src/layers/slice_layer.cpp (18 lines changed)
  6. modules/dnn/test/test_layers.cpp (25 lines changed)
  7. modules/dnn/testdata/dnn/layers/reshape_and_slice_routines.prototxt (77 lines changed)

modules/dnn/include/opencv2/dnn/blob.inl.hpp
@@ -274,7 +274,7 @@ inline Blob &Blob::shareFrom(const Blob &blob)
 inline Blob &Blob::reshape(const BlobShape &shape)
 {
-    m.reshape(1, shape.dims(), shape.ptr());
+    m = m.reshape(1, shape.dims(), shape.ptr());
     return *this;
 }
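
Note on the fix above: cv::Mat::reshape() does not modify the matrix in place; it returns a new header over the same data, so the old call silently discarded its result and the Blob kept its previous shape. A minimal standalone illustration (plain OpenCV core, independent of the dnn module):

    #include <opencv2/core.hpp>

    int main()
    {
        cv::Mat m = cv::Mat::zeros(4, 6, CV_32F);
        m.reshape(1, 2);     // no effect: the reshaped header is discarded, m stays 4x6
        m = m.reshape(1, 2); // correct: m is now a 2x12 header over the same data
        CV_Assert(m.rows == 2 && m.cols == 12);
        return 0;
    }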

modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -89,7 +89,7 @@ namespace dnn
     virtual ~Importer();
 };
-CV_EXPORTS Ptr<Importer> createCaffeImporter(const String &prototxt, const String &caffeModel);
+CV_EXPORTS Ptr<Importer> createCaffeImporter(const String &prototxt, const String &caffeModel = String());
 //Layer factory allows to create instances of registered layers.
 class CV_EXPORTS LayerRegister
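
With the default argument in place, a Caffe network can now be imported from a .prototxt alone, without a trained .caffemodel; the new test below relies on this. A usage sketch (the file name is hypothetical):

    Ptr<Importer> importer = createCaffeImporter("net.prototxt"); // caffeModel defaults to String()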

modules/dnn/src/dnn.cpp
@@ -309,7 +309,7 @@ struct Net::Impl
     }
     //allocate layer
-    ld.outputBlobs.resize(ld.requiredOutputs.size());
+    ld.outputBlobs.resize(std::max((size_t)1, ld.requiredOutputs.size())); //a layer produces at least one output blob
     ld.getLayerInstance()->allocate(ld.inputBlobs, ld.outputBlobs);
     ld.flag = 1;
@@ -431,7 +431,7 @@ void Net::setBlob(String outputName, const Blob &blob)
 {
     LayerPin pin = impl->getPinByAlias(outputName);
     if (!pin.valid())
-        CV_Error(Error::StsObjectNotFound, "Request blob \"" + outputName + "\" not found");
+        CV_Error(Error::StsObjectNotFound, "Requested blob \"" + outputName + "\" not found");
     LayerData &ld = impl->layers[pin.lid];
     ld.outputBlobs.resize( std::max(pin.oid+1, (int)ld.requiredOutputs.size()) );
@@ -442,10 +442,14 @@ Blob Net::getBlob(String outputName)
 {
     LayerPin pin = impl->getPinByAlias(outputName);
     if (!pin.valid())
-        CV_Error(Error::StsObjectNotFound, "Request blob \"" + outputName + "\" not found");
+        CV_Error(Error::StsObjectNotFound, "Requested blob \"" + outputName + "\" not found");
     LayerData &ld = impl->layers[pin.lid];
-    CV_Assert(pin.oid < (int)ld.outputBlobs.size());
+    if ((size_t)pin.oid >= ld.outputBlobs.size())
+    {
+        CV_Error(Error::StsOutOfRange, "Layer \"" + ld.name + "\" produces only " + toString(ld.outputBlobs.size()) +
+                 " outputs, the #" + toString(pin.oid) + " was requested");
+    }
     return ld.outputBlobs[pin.oid];
 }

modules/dnn/src/layers/concat_layer.cpp
@@ -1,8 +1,5 @@
 #include "../precomp.hpp"
 #include "layers_common.hpp"
-#include <iostream>
-#include <cstdlib>
-using std::memcpy;
 namespace cv
 {
@@ -25,60 +22,51 @@ namespace dnn
 ConcatLayer::ConcatLayer(LayerParams &params)
 {
     axis = params.get<int>("axis", 1);
-    CV_Assert(axis == 0 || axis == 1);
+    CV_Assert(axis >= 0);
 }
 void ConcatLayer::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
 {
     CV_Assert(inputs.size() > 0);
-    int axisSum = 0;
+    int refType = inputs[0]->type();
     BlobShape refShape = inputs[0]->shape();
+    CV_Assert(axis < refShape.dims());
+    int axisSum = 0;
     for (size_t i = 0; i < inputs.size(); i++)
     {
         BlobShape curShape = inputs[i]->shape();
-        CV_Assert(curShape.dims() > axis && curShape.dims() == refShape.dims());
+        CV_Assert(curShape.dims() == refShape.dims() && inputs[i]->type() == refType);
         for (int axisId = 0; axisId < refShape.dims(); axisId++)
         {
             if (axisId != axis && refShape[axisId] != curShape[axisId])
-                CV_Error(Error::StsBadArg, "Inconsistent shape for ConcatLayer");
+                CV_Error(Error::StsBadSize, "Inconsistent shape for ConcatLayer");
         }
         axisSum += curShape[axis];
     }
-    BlobShape shape = refShape;
-    shape[axis] = axisSum;
+    refShape[axis] = axisSum;
     outputs.resize(1);
-    outputs[0].create(shape);
+    outputs[0].create(refShape);
 }
 void ConcatLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
 {
-    float *dstPtr = outputs[0].ptrf();
-    if (axis == 0)
-    {
-        for (size_t i = 0; i < inputs.size(); i++)
-        {
-            const float *srcPtr = inputs[i]->ptrf();
-            memcpy(dstPtr, srcPtr, inputs[i]->total() * sizeof(float));
-            dstPtr += inputs[i]->total();
-        }
-    }
-    else
+    const Mat& outMat = outputs[0].getMatRef();
+    std::vector<Range> ranges(outputs[0].dims(), Range::all());
+    int sizeStart = 0;
+    for (size_t i = 0; i < inputs.size(); i++)
     {
-        for (int n = 0; n < outputs[0].num(); n++)
-        {
-            for (size_t i = 0; i < inputs.size(); i++)
-            {
-                Blob &inp = *inputs[i];
-                memcpy(dstPtr, inp.ptrf(n), inp.total(1) * sizeof(float));
-                dstPtr += inp.total(1);
-            }
-        }
+        int sizeEnd = sizeStart + inputs[i]->size(axis);
+        ranges[axis] = Range(sizeStart, sizeEnd);
+        Mat outSubMat = outMat(&ranges[0]);
+        inputs[i]->getMatRef().copyTo(outSubMat);
+        sizeStart = sizeEnd;
     }
 }
 }
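
The rewritten forward() leans on cv::Mat's n-dimensional Range indexing: a sub-matrix view created with operator()(const Range*) shares memory with its parent, so copyTo() into the view writes straight into the output blob, and the same code handles any concatenation axis and element type. A minimal sketch of the pattern with hypothetical 2-D shapes (standard cv::Mat API):

    #include <opencv2/core.hpp>
    #include <vector>

    int main()
    {
        cv::Mat a = cv::Mat::ones(3, 2, CV_32F);      // first input: 3x2
        cv::Mat b = cv::Mat::zeros(3, 4, CV_32F);     // second input: 3x4
        cv::Mat out(3, 6, CV_32F);                    // concat along axis 1: 3x6

        std::vector<cv::Range> ranges(2, cv::Range::all());
        ranges[1] = cv::Range(0, 2);
        cv::Mat view = out(&ranges[0]);               // view over out's columns 0..1
        a.copyTo(view);                               // writes through into out

        ranges[1] = cv::Range(2, 6);
        view = out(&ranges[0]);                       // view over out's columns 2..5
        b.copyTo(view);
        return 0;
    }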

modules/dnn/src/layers/slice_layer.cpp
@@ -1,6 +1,5 @@
 #include "../precomp.hpp"
 #include "layers_common.hpp"
-#include <stdlib.h>
 namespace cv
 {
@@ -83,15 +82,20 @@ void SliceLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 void SliceLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 {
     Blob &inpBlob = *inputs[0];
-    const uchar *inpPtr = inpBlob.ptrRaw();
-    const size_t elemSize = CV_ELEM_SIZE(inpBlob.type());
+    const int axis = inpBlob.canonicalAxis(inAxis);
+    const Mat& inpMat = inpBlob.getMatRef();
+    std::vector<Range> ranges(inpBlob.dims(), Range::all());
+    int sizeStart = 0;
     for (size_t i = 0; i < outputs.size(); i++)
     {
-        uchar *outPtr = outputs[i].ptrRaw();
-        size_t size = outputs[i].total() * elemSize;
-        memcpy(outPtr, inpPtr, size);
-        inpPtr += size;
+        int sizeEnd = sizeStart + outputs[i].size(axis);
+        ranges[axis] = Range(sizeStart, sizeEnd);
+        Mat inpSubMat = inpMat(&ranges[0]);
+        inpSubMat.copyTo(outputs[i].getMatRef());
+        sizeStart = sizeEnd;
     }
 }
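
SliceLayer::forward() is the mirror image of the concat case: the Range view is taken on the input side and copied out, so each output blob receives its own contiguous copy and the manual pointer arithmetic of the old memcpy version disappears. A short sketch of the read-side pattern, again with hypothetical 2-D shapes:

    #include <opencv2/core.hpp>
    #include <vector>

    int main()
    {
        cv::Mat src(3, 6, CV_32F);
        cv::randu(src, cv::Scalar::all(-1), cv::Scalar::all(1));

        std::vector<cv::Range> ranges(2, cv::Range::all());
        ranges[1] = cv::Range(0, 2);                  // first slice: columns 0..1
        cv::Mat first;
        src(&ranges[0]).copyTo(first);                // first owns its own copy of the data

        ranges[1] = cv::Range(2, 6);                  // second slice: columns 2..5
        cv::Mat rest;
        src(&ranges[0]).copyTo(rest);
        return 0;
    }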

modules/dnn/test/test_layers.cpp
@@ -75,4 +75,29 @@ TEST(Layer_LRN_channels_Test, Accuracy)
     testLayer("lrn_channels.prototxt");
 }
+
+TEST(Layer_Reshape_Split_Slice_Test, Accuracy)
+{
+    Net net;
+    {
+        Ptr<Importer> importer = createCaffeImporter(getTestFile("reshape_and_slice_routines.prototxt"));
+        ASSERT_TRUE(importer != NULL);
+        importer->populateNet(net);
+    }
+
+    BlobShape shape = BlobShape(Vec2i(6, 12));
+    Mat1f inputMat(shape[0], shape[1]);
+    RNG rng(0);
+    rng.fill(inputMat, RNG::UNIFORM, -1, 1);
+
+    Blob input(inputMat);
+    input.reshape(shape);
+    net.setBlob(".input", input);
+    net.forward();
+
+    Blob output = net.getBlob("output");
+    input.fill(shape, CV_32F, inputMat.data);
+    normAssert(input, output);
+}
 }

modules/dnn/testdata/dnn/layers/reshape_and_slice_routines.prototxt (new file)
@@ -0,0 +1,77 @@
+name: "test_reshape_splice_split"
+input: "input"
+
+layer{
+  type: "Split"
+  name: "dummy_split"
+  bottom: "input"
+  top: "dummy_split_0"
+  top: "dummy_split_1"
+}
+layer{
+  type: "Slice"
+  name: "dummy_slice_0"
+  bottom: "dummy_split_0"
+  slice_param{
+    slice_point: 1
+    slice_point: 2
+  }
+  top: "dummy_slice_0_0"
+  top: "dummy_slice_0_1"
+  top: "dummy_slice_0_2"
+}
+layer{
+  type: "Slice"
+  name: "dummy_slice_1"
+  bottom: "dummy_split_1"
+  slice_param{
+    slice_point: 1
+    slice_point: 2
+  }
+  top: "dummy_slice_1_0"
+  top: "dummy_slice_1_1"
+  top: "dummy_slice_1_2"
+}
+layer{
+  type: "Sigmoid"
+  name: "alter_sliced_split"
+  bottom: "dummy_slice_1_2"
+  top: "dummy_slice_1_2"
+}
+layer{
+  type: "Concat"
+  name: "dummy_concat"
+  bottom: "dummy_slice_0_0"
+  bottom: "dummy_slice_1_1"
+  bottom: "dummy_slice_0_2"
+  top: "dummy_concat"
+}
+layer{
+  type: "Reshape"
+  name: "dummy_reshape"
+  bottom: "dummy_concat"
+  reshape_param{
+    shape{
+      dim: 0
+      dim: 1
+      dim: 1
+      dim: -1
+      dim: 1
+    }
+    axis: 1
+    num_axes: 1
+  }
+  top: "dummy_reshape"
+}
+layer{
+  type: "Flatten"
+  name: "dummy_reshape_undo"
+  bottom: "dummy_reshape"
+  top: "dummy_reshape_undo"
+}
+layer{
+  type: "Split"
+  name: "output"
+  bottom: "dummy_reshape_undo"
+  top: "output"
+}
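
For reference, this graph is constructed so that the network output reproduces its input, which is exactly what the test above asserts: both Split copies are sliced at the same points, the in-place Sigmoid alters only dummy_slice_1_2 (which no downstream layer consumes, so it merely checks that the slices are independent copies), the Concat of dummy_slice_0_0, dummy_slice_1_1 and dummy_slice_0_2 stitches the original pieces back together in order, and the Reshape is undone by the following Flatten.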