Added simple tests for the Torch importer, fixed some importing issues.

pull/265/head
Vitaliy Lyudvichenko 10 years ago
parent 8df8936810
commit 1c220cf03b
Changed files (lines changed):
  1. modules/dnn/CMakeLists.txt (18)
  2. modules/dnn/src/dnn.cpp (5)
  3. modules/dnn/src/layers/fully_connected_layer.cpp (6)
  4. modules/dnn/src/layers/pooling_layer.cpp (1)
  5. modules/dnn/src/layers/reshape_layer.cpp (2)
  6. modules/dnn/src/layers/slice_layer.cpp (5)
  7. modules/dnn/src/torch/torch_importer.cpp (47)
  8. modules/dnn/test/test_common.hpp (2)
  9. modules/dnn/test/test_torch_importer.cpp (82)
  10. modules/dnn/testdata/dnn/torch/torch_gen_test_data.lua (67)
  11. modules/dnn/testdata/dnn/torch/torch_nn_echo.lua (2)

@@ -50,6 +50,24 @@ if(${the_module}_BUILD_TORCH_IMPORTER)
add_definitions(-DENABLE_TORCH_IMPORTER=1)
endif()
OCV_OPTION(${the_module}_BUILD_TORCH_TESTS "Build Torch tests (installed Torch7 with nn module is required)" ON IF BUILD_TESTS AND ${the_module}_BUILD_TORCH_IMPORTER)
if(${the_module}_BUILD_TORCH_TESTS)
if(NOT DEFINED ENV{OPENCV_TEST_DATA_PATH})
message(FATAL_ERROR "OPENCV_TEST_DATA_PATH environment variable was not specified")
endif()
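# Probe the Torch installation: run a trivial script that only requires 'nn'.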
execute_process(COMMAND th ${CMAKE_CURRENT_SOURCE_DIR}/testdata/dnn/torch/torch_nn_echo.lua RESULT_VARIABLE TORCH_STATUS)
if(TORCH_STATUS)
message(FATAL_ERROR "Torch executable \"th\" not found (status: ${TORCH_STATUS}) or nn module not found")
endif()
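# Regenerate the Torch test fixtures in the test data directory after each build of the test target.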
add_custom_command( TARGET opencv_test_${name} POST_BUILD
COMMAND th ${CMAKE_CURRENT_SOURCE_DIR}/testdata/dnn/torch/torch_gen_test_data.lua
WORKING_DIRECTORY $ENV{OPENCV_TEST_DATA_PATH}/dnn/torch )
add_definitions(-DENABLE_TORCH_TESTS=1)
endif()
else() # build as standalone module (for development purposes)
project(dnn_standalone)

@@ -295,6 +295,10 @@ struct Net::Impl
if (ld.flag)
return;
//determine parent layers
for (size_t i = 0; i < ld.inputBlobsId.size(); i++)
ld.inputLayersId.insert(ld.inputBlobsId[i].lid);
//allocate parents
for (set<int>::iterator i = ld.inputLayersId.begin(); i != ld.inputLayersId.end(); i++)
allocateLayer(*i);
@@ -305,6 +309,7 @@ struct Net::Impl
{
LayerPin from = ld.inputBlobsId[i];
CV_Assert(from.valid());
CV_Assert(layers.count(from.lid) && layers[from.lid].outputBlobs.size() > from.oid);
ld.inputBlobs[i] = &layers[from.lid].outputBlobs[from.oid];
}

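For context, a minimal sketch of the invariant the added CV_Assert enforces; the struct layouts are simplified stand-ins for the module's real types, and pinIsWired is a hypothetical helper, not part of the module:

#include <map>
#include <vector>

struct Blob {};
struct LayerData { std::vector<Blob> outputBlobs; };
struct LayerPin  { int lid, oid; };  // producing layer id, output blob index

// A pin may be dereferenced only if its producing layer exists and already
// exposes at least oid + 1 output blobs.
static bool pinIsWired(const std::map<int, LayerData> &layers, const LayerPin &from)
{
    std::map<int, LayerData>::const_iterator it = layers.find(from.lid);
    return it != layers.end() && from.oid < (int)it->second.outputBlobs.size();
}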
@@ -33,13 +33,15 @@ namespace dnn
axis_ = params.get<int>("axis", 1);
CV_Assert(params.learnedBlobs.size() >= 1);
-CV_Assert(!bias || (params.learnedBlobs.size() >= 2 && (int)params.learnedBlobs[1].total() == numOutputs));
+CV_Assert(!bias || params.learnedBlobs.size() >= 2);
learnedParams.resize(bias ? 2 : 1);
learnedParams[0] = params.learnedBlobs[0];
CV_Assert(learnedParams[0].dims() >= 2 && learnedParams[0].total() >= (size_t)numOutputs);
if (bias)
{
learnedParams[1] = params.learnedBlobs[1];
CV_Assert(learnedParams[1].total() == (size_t)numOutputs);
}
}
@@ -51,7 +53,7 @@ namespace dnn
innerSize = (int)inputs[0]->total(axis);
CV_Assert((size_t)innerSize * (size_t)numOutputs == learnedParams[0].total());
-CV_Assert(learnedParams[0].rows() == numOutputs && learnedParams[0].cols() == innerSize);
+CV_Assert(learnedParams[0].size(-2) == numOutputs && learnedParams[0].size(-1) == innerSize);
outputs.resize(inputs.size());
for (size_t i = 0; i < inputs.size(); i++)

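A short sketch of the shape convention the reworked asserts encode; the concrete numbers are illustrative, and expectedWeightTotal is a hypothetical helper, not the module's API:

#include <cassert>
#include <cstddef>

// For an input of shape (N, C, H, W) with axis = 1, everything from `axis`
// onward is flattened, so innerSize = C * H * W, and the weight blob must
// satisfy size(-2) == numOutputs and size(-1) == innerSize.
static std::size_t expectedWeightTotal(int numOutputs, int innerSize)
{
    return (std::size_t)numOutputs * (std::size_t)innerSize;  // one weight row per output
}

int main()
{
    assert(expectedWeightTotal(10, 3 * 4 * 5) == 600);
    return 0;
}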
@@ -8,6 +8,7 @@ namespace cv
{
namespace dnn
{
//TODO: add ceil_mode param
class PoolingLayer : public Layer
{
enum

@@ -134,4 +134,4 @@ REGISTER_LAYER_FUNC(Flatten, createFlattenLayer)
}
}
}

@@ -28,6 +28,9 @@ SliceLayer::SliceLayer(LayerParams &params)
{
inAxis = params.get<int>("axis", 1);
if (!params.has("slice_point"))
return;
const DictValue &_slicePoints = params.get("slice_point");
slicePoints.resize(_slicePoints.size());
for (int i = 0; i < _slicePoints.size(); i++)
@@ -100,4 +103,4 @@ void SliceLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
}
}
}
}

@@ -253,7 +253,7 @@ struct TorchImporter : public ::cv::dnn::Importer
}
String key = readString();
std::cout << "key: " << key << "\n";
std::cout << i << "th key: " << key << "\n";
fpos = THFile_position(file);
int vtype = readInt();
@@ -296,7 +296,6 @@ struct TorchImporter : public ::cv::dnn::Importer
{
THFile_seek(file, fpos);
readObject();
continue;
}
}
@@ -425,6 +424,16 @@ struct TorchImporter : public ::cv::dnn::Importer
curModule = newModule;
readTorchTable(scalarParams, tensorParams);
curModule = parentModule;
if (nnName == "Parallel")
{
layerParams.set("inputDimension", scalarParams.get<int>("inputDimension"));
layerParams.set("outputDimension", scalarParams.get<int>("outputDimension"));
}
if (nnName == "Concat")
{
layerParams.set("dimension", scalarParams.get<int>("dimension"));
}
}
else if (nnName == "SpatialConvolution")
{
@@ -471,8 +480,7 @@ struct TorchImporter : public ::cv::dnn::Importer
layerParams.learnedBlobs.push_back(tensorParams["bias"]);
layerParams.set("bias_term", bias);
//TODO: axis detect
layerParams.set("num_output", weightBlob.size(1));
layerParams.set("num_output", weightBlob.size(0));
curModule->modules.push_back(newModule);
}
else if (nnName == "Reshape")
@@ -485,25 +493,31 @@ struct TorchImporter : public ::cv::dnn::Importer
DictValue dimParam = scalarParams.get("size");
layerParams.set("dim", dimParam);
if (scalarParams.has("batchMode") && scalarParams.get<bool>("batchMode"))
layerParams.set("axis", 1);
curModule->modules.push_back(newModule);
}
else if (nnName == "ReLU")
{
curModule->modules.push_back(new Module(nnName, "ReLU"));
readObject();
}
else if (nnName == "Tanh")
{
curModule->modules.push_back(new Module(nnName, "TanH"));
readObject();
}
else if (nnName == "Sigmoid")
{
curModule->modules.push_back(new Module(nnName, "Sigmoid"));
readObject();
}
else
{
delete newModule;
readTorchTable(scalarParams, tensorParams);
CV_Error(Error::StsNotImplemented, "Unknown nn class \"" + className + "\"");
readObject();
}
}
else
@@ -542,8 +556,7 @@ struct TorchImporter : public ::cv::dnn::Importer
inline String generateLayerName(const String &label = String())
{
-this->moduleCounter++;
-return "l" + toString(this->moduleCounter) + "_" + label;
+return "l" + toString(++this->moduleCounter) + "_" + label;
}
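// Because of the pre-increment, the first generated name is "l1_<label>";
// the tests below rely on this scheme via names such as "l1_Convolution"
// and "l2_torchMerge".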
int fill(Module *module, int prevLayerId = 0, int prevOutNum = 0)
@@ -555,7 +568,6 @@ struct TorchImporter : public ::cv::dnn::Importer
{
int newLayerId = this->net.addLayer(generateLayerName(module->apiType), module->apiType, module->params);
net.connect(prevLayerId, prevOutNum, newLayerId, 0);
std::cout << "added " << module->thName << " i.e. " << module->apiType << "\n";
return newLayerId;
}
else
@@ -573,10 +585,23 @@ struct TorchImporter : public ::cv::dnn::Importer
{
int splitId, mergeId, newId;
-String splitType = (module->thName == "Parallel") ? "Slice" : "Split";
-splitId = net.addLayer(generateLayerName("torchSplit"), splitType, module->params);
+String splitType;
+LayerParams splitParams, mergeParams;
+if (module->thName == "Parallel")
+{
+splitType = "Slice";
+splitParams.set("axis", module->params.get<int>("inputDimension") - 1);
+mergeParams.set("axis", module->params.get<int>("outputDimension") - 1);
+}
+else
+{
+splitType = "Split";
+mergeParams.set("axis", module->params.get<int>("dimension") - 1);
+}
+splitId = net.addLayer(generateLayerName("torchSplit"), splitType, splitParams);
+mergeId = net.addLayer(generateLayerName("torchMerge"), "Concat", mergeParams);
net.connect(prevLayerId, prevOutNum, splitId, 0);
-mergeId = net.addLayer(generateLayerName("torchMerge"), "Concat", module->params);
for (size_t i = 0; i < module->modules.size(); i++)
{

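The axis arithmetic above hinges on Torch using 1-based dimensions while the importer's layers use 0-based axes; a minimal sketch of the translation (the helper name is hypothetical):

// nn.Concat(2) in Torch joins along the second 1-based dimension, which is
// axis 1 in the 0-based convention used by the Slice/Concat layers here.
static int torchDimToAxis(int torchDim)
{
    return torchDim - 1;
}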
@@ -17,7 +17,7 @@ inline void normAssert(cv::InputArray ref, cv::InputArray get, const char *comment = "")
inline void normAssert(cv::dnn::Blob &ref, cv::dnn::Blob &test, const char *comment = "")
{
-EXPECT_EQ(ref.shape(), test.shape());
+ASSERT_EQ(ref.shape(), test.shape());
normAssert(ref.getMatRef(), test.getMatRef(), comment);
}
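// Note: ASSERT_EQ (unlike EXPECT_EQ) returns from normAssert immediately on a
// shape mismatch, so getMatRef is never invoked on blobs of different shapes;
// EXPECT_EQ would merely record the failure and fall through.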

@@ -1,4 +1,5 @@
-#if 1 || defined(ENABLE_TORCH_IMPORTER) && ENABLE_TORCH_IMPORTER
+#if 1 || defined(ENABLE_TORCH_TESTS) && ENABLE_TORCH_TESTS
#include "test_precomp.hpp"
namespace cvtest
@@ -9,16 +10,10 @@ using namespace testing;
using namespace cv;
using namespace cv::dnn;
static std::string getOpenCVExtraDir()
{
return cvtest::TS::ptr()->get_data_path();
}
template<typename TStr>
-static std::string getTestFile(TStr filename)
+static std::string _tf(TStr filename)
{
-//return (getOpenCVExtraDir() + "/dnn/") + filename;
-return String("/home/vitaliy/th/") + filename;
+return (getOpenCVExtraDir() + "/dnn/torch/") + filename;
}
TEST(Torch_Importer, simple_read)
@@ -26,38 +21,75 @@ TEST(Torch_Importer, simple_read)
Net net;
Ptr<Importer> importer;
-ASSERT_NO_THROW( importer = createTorchImporter(getTestFile("conv1.txt"), false) );
+ASSERT_NO_THROW( importer = createTorchImporter(_tf("net_simple_net.txt"), false) );
ASSERT_TRUE( importer != NULL );
-ASSERT_NO_THROW( importer->populateNet(net) );
+importer->populateNet(net);
}
-static Blob convertBlob(const Blob &inp, int type)
+static void runTorchNet(String prefix, String outLayerName, bool isBinary)
{
-Mat tmpMat;
-inp.getMatRef().convertTo(tmpMat, type);
+String suffix = (isBinary) ? ".dat" : ".txt";
-Blob res;
-res.create(inp.shape(), type);
-res.fill(inp.shape(), type, (void*)tmpMat.data);
-return res;
-}
-TEST(Torch_Importer, run_convolution)
-{
Net net;
-Ptr<Importer> importer = createTorchImporter(getTestFile("run_conv_net.txt"), false);
+Ptr<Importer> importer;
+ASSERT_NO_THROW( importer = createTorchImporter(_tf(prefix + "_net" + suffix), isBinary) );
+ASSERT_TRUE(importer != NULL);
+//ASSERT_NO_THROW( importer->populateNet(net) );
importer->populateNet(net);
-Blob inp = convertBlob( readTorchMat(getTestFile("run_conv_input.txt"), false), CV_32F );
-Blob outRef = convertBlob( readTorchMat(getTestFile("run_conv_output.txt"), false), CV_32F );
+Blob inp, outRef;
+ASSERT_NO_THROW( inp = readTorchMat(_tf(prefix + "_input" + suffix), isBinary) );
+ASSERT_NO_THROW( outRef = readTorchMat(_tf(prefix + "_output" + suffix), isBinary) );
net.setBlob(".0", inp);
net.forward();
Blob out = net.getBlob("l1_Convolution");
Blob out = net.getBlob(outLayerName);
std::cout << "inp " << inp.shape() << "\n";
std::cout << "out " << out.shape() << "\n";
std::cout << "ref " << outRef.shape() << "\n";
normAssert(outRef, out);
}
TEST(Torch_Importer, run_convolution)
{
runTorchNet("net_conv", "l1_Convolution", false);
}
TEST(Torch_Importer, run_pool_max)
{
runTorchNet("net_pool_max", "l1_Pooling", false);
}
TEST(Torch_Importer, run_pool_ave)
{
//TODO: fix
//runTorchNet("net_pool_ave", "l1_Pooling", false);
}
TEST(Torch_Importer, run_reshape)
{
runTorchNet("net_reshape", "l1_Reshape", false);
runTorchNet("net_reshape_batch", "l1_Reshape", false);
}
TEST(Torch_Importer, run_linear)
{
runTorchNet("net_linear_2d", "l1_InnerProduct", false);
}
TEST(Torch_Importer, run_parallel)
{
//TODO: fix and add Reshape
//runTorchNet("net_parallel", "l2_torchMerge", false);
}
TEST(Torch_Importer, run_concat)
{
runTorchNet("net_concat", "l2_torchMerge", false);
}
}
#endif
#endif

@@ -0,0 +1,67 @@
require 'nn'
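-- Recursively fill every learnable weight and bias with random values so the
-- generated fixtures are non-trivial.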
function fill_net(net)
if net.modules then
for i = 1, #net.modules do
fill_net(net.modules[i])
end
end
if net.weight then
net.weight = torch.rand(net.weight:size())
end
if net.bias then
net.bias = torch.rand(net.bias:size())
end
end
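-- Randomize the net, run one forward pass, and save net, input and output as
-- ASCII fixtures named after `label`.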
function save(net, input, label)
fill_net(net)
output = net:forward(input)
--torch.save(label .. '_net.dat', net)
torch.save(label .. '_net.txt', net, 'ascii')
--torch.save(label .. '_input.dat', input)
torch.save(label .. '_input.txt', input, 'ascii')
--torch.save(label .. '_output.dat', output)
torch.save(label .. '_output.txt', output, 'ascii')
end
local net_simple = nn.Sequential()
--net_simple:add(nn.ReLU())
net_simple:add(nn.SpatialConvolution(3,64, 11,7, 3,4, 3,2))
net_simple:add(nn.SpatialMaxPooling(4,5, 3,2, 1,2))
net_simple:add(nn.Sigmoid())
save(net_simple, torch.Tensor(2, 3, 25, 35), 'net_simple')
local net_pool_max = nn.Sequential()
net_pool_max:add(nn.SpatialMaxPooling(4,5, 3,2, 1,2):ceil()) --TODO: add ceil and floor modes
save(net_pool_max, torch.rand(2, 3, 50, 30), 'net_pool_max')
local net_pool_ave = nn.Sequential()
net_pool_ave:add(nn.SpatialAveragePooling(4,5, 2,1, 1,2))
save(net_pool_ave, torch.rand(2, 3, 50, 30), 'net_pool_ave')
local net_conv = nn.Sequential()
net_conv:add(nn.SpatialConvolution(3,64, 11,7, 3,4, 3,2))
save(net_conv, torch.rand(1, 3, 50, 60), 'net_conv')
local net_reshape = nn.Sequential()
net_reshape:add(nn.Reshape(5, 4, 3, 2))
save(net_reshape, torch.rand(2, 3, 4, 5), 'net_reshape')
local net_reshape_batch = nn.Sequential()
net_reshape_batch:add(nn.Reshape(5, 4, 3, true))
save(net_reshape_batch, torch.rand(2, 3, 4, 5), 'net_reshape_batch')
save(nn.Linear(7, 3), torch.rand(13, 7), 'net_linear_2d')
local net_parallel = nn.Parallel(4, 2)
net_parallel:add(nn.Sigmoid())
net_parallel:add(nn.Tanh())
save(net_parallel, torch.rand(2, 6, 4, 2), 'net_parallel')
local net_concat = nn.Concat(2)
net_concat:add(nn.ReLU())
net_concat:add(nn.Tanh())
net_concat:add(nn.Sigmoid())
save(net_concat, torch.rand(2, 6, 4, 3) - 0.5, 'net_concat')

@@ -0,0 +1,2 @@
require 'nn'
print("nn module exists!")