From b0d008ce039e2bdbe91840b375246312248719df Mon Sep 17 00:00:00 2001
From: Aleksandr Rybnikov
Date: Thu, 22 Jun 2017 16:47:49 +0300
Subject: [PATCH] Enabled tests for intermediate blobs in googlenet

---
 modules/dnn/src/dnn.cpp             | 152 +++++++++++++++------------
 modules/dnn/test/test_googlenet.cpp |   3 +-
 2 files changed, 82 insertions(+), 73 deletions(-)

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index af7b20c19..4e5480817 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -324,7 +324,6 @@ struct LayerData
         //add logging info
         params.name = name;
         params.type = type;
-        skip = false;
     }
 
     int id;
@@ -347,7 +346,6 @@ struct LayerData
     std::map<int, bool> skipFlags;
 
     int flag;
-    bool skip;
 
     Ptr<Layer> getLayerInstance()
     {
@@ -666,18 +664,39 @@ struct Net::Impl
         }
     }
 
-    void setUpNet(const std::vector<LayerPin>& blobsToKeep_ = std::vector<LayerPin>())
+    void clear()
     {
-        if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
+        MapIdToLayerData::iterator it;
+        for (it = layers.begin(); it != layers.end(); it++)
         {
-            MapIdToLayerData::iterator it;
-            for (it = layers.begin(); it != layers.end(); it++)
+            if (it->second.id != 0) {
+                it->second.outputBlobs.clear();
+                it->second.internals.clear();
+            }
+            it->second.skipFlags.clear();
+            it->second.consumers.clear();
+            Ptr<ConvolutionLayer> convLayer = it->second.layerInstance.dynamicCast<ConvolutionLayer>();
+
+            if( !convLayer.empty() )
             {
-                if (it->second.id != 0) {
-                    it->second.outputBlobs.clear();
-                    it->second.internals.clear();
-                }
+                convLayer->setActivation(Ptr<ActivationLayer>());
+                convLayer->setBatchNorm(Ptr<BatchNormLayer>());
+            }
+
+            Ptr<PoolingLayer> poolingLayer = it->second.layerInstance.dynamicCast<PoolingLayer>();
+            if( !poolingLayer.empty() )
+            {
+                poolingLayer->computeMaxIdx = true;
             }
+        }
+    }
+
+
+    void setUpNet(const std::vector<LayerPin>& blobsToKeep_ = std::vector<LayerPin>())
+    {
+        if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
+        {
+            clear();
 
             allocateLayers(blobsToKeep_);
             computeNetOutputLayers();
@@ -1005,69 +1024,41 @@ struct Net::Impl
         ld.flag = 1;
     }
 
-    void allocateLayers(const std::vector<LayerPin>& blobsToKeep_)
+    void fuseLayers(const std::vector<LayerPin>& blobsToKeep_)
     {
-        MapIdToLayerData::iterator it;
-        for (it = layers.begin(); it != layers.end(); it++)
-            it->second.flag = 0;
-
-        CV_Assert(!layers[0].outputBlobs.empty());
-        ShapesVec inputShapes;
-        for(int i = 0; i < layers[0].outputBlobs.size(); i++)
-        {
-            CV_Assert(layers[0].outputBlobs[i].total());
-            inputShapes.push_back(shape(layers[0].outputBlobs[i]));
-        }
-        LayersShapesMap layersShapes;
-        getLayersShapes(inputShapes, layersShapes);
-
-        blobManager.reset();
-        for (it = layers.begin(); it != layers.end(); ++it)
-        {
-            const LayerData& ld = it->second;
-            blobManager.addReferences(ld.inputBlobsId);
-        }
-
-        for (int i = 0; i < blobsToKeep_.size(); i++)
-        {
-            blobManager.addReference(blobsToKeep_[i]);
-        }
-
-        for (it = layers.begin(); it != layers.end(); it++)
-        {
-            int lid = it->first;
-            allocateLayer(lid, layersShapes);
-        }
-
         // scan through all the layers. If there is convolution layer followed by the activation layer,
         // we try to embed this activation into the convolution and disable separate execution of the activation
         std::vector<String> outnames;
+        std::set<LayerPin> pinsToKeep(blobsToKeep_.begin(),
+                                      blobsToKeep_.end());
+        MapIdToLayerData::iterator it;
         for (it = layers.begin(); it != layers.end(); it++)
         {
             int lid = it->first;
             LayerData& ld = layers[lid];
-            if( ld.skip )
+            if( ld.skipFlags[DNN_BACKEND_DEFAULT] )
             {
-                //printf("skipping %s\n", ld.layerInstance->name.c_str());
                 continue;
             }
-            //printf("analyzing %s\n", ld.layerInstance->name.c_str());
             if( ld.consumers.size() == 0 )
                 outnames.push_back(ld.layerInstance->name);
             Ptr<ConvolutionLayer> convLayer = ld.layerInstance.dynamicCast<ConvolutionLayer>();
-            if( !convLayer.empty() && ld.consumers.size() == 1 )
+            LayerPin lp(lid, 0);
+            if( !convLayer.empty() && ld.consumers.size() == 1 &&
+                pinsToKeep.count(lp) == 0 )
             {
                 LayerData* nextData = &layers[ld.consumers[0].lid];
                 Ptr<BatchNormLayer> nextBNormLayer =
                     nextData->layerInstance.dynamicCast<BatchNormLayer>();
-                if( !nextBNormLayer.empty() )
+                LayerPin lpNext(ld.consumers[0].lid, 0);
+                if( !nextBNormLayer.empty() && pinsToKeep.count(lpNext) == 0 )
                 {
                     LayerData* bnormData = nextData;
                     nextData = 0;
                     if( convLayer->setBatchNorm(nextBNormLayer) )
                     {
-                        //printf("fused convolution (%s) and batch norm (%s)\n", convLayer->name.c_str(), nextBNormLayer->name.c_str());
-                        bnormData->skip = true;
+                        bnormData->skipFlags[DNN_BACKEND_DEFAULT] = true;
+                        ld.outputBlobs = layers[lpNext.lid].outputBlobs;
                         if( bnormData->consumers.size() == 1 )
                             nextData = &layers[bnormData->consumers[0].lid];
                     }
@@ -1079,8 +1070,8 @@ struct Net::Impl
 
                 if( !nextActivLayer.empty() && convLayer->setActivation(nextActivLayer) )
                 {
-                    //printf("fused convolution (%s) and activation (%s)\n", convLayer->name.c_str(), nextActivLayer->name.c_str());
-                    nextData->skip = true;
+                    nextData->skipFlags[DNN_BACKEND_DEFAULT] = true;
+                    ld.outputBlobs = layers[lpNext.lid].outputBlobs;
                 }
             }
             Ptr<PoolingLayer> poolingLayer = ld.layerInstance.dynamicCast<PoolingLayer>();
@@ -1096,10 +1087,43 @@ struct Net::Impl
                     poolingLayer->computeMaxIdx = false;
             }
         }
-        /*printf("outputs: ");
-        for( size_t j = 0; j < outnames.size(); j++ )
-            printf("%s ", outnames[j].c_str());
-        printf("\n");*/
+    }
+
+    void allocateLayers(const std::vector<LayerPin>& blobsToKeep_)
+    {
+        MapIdToLayerData::iterator it;
+        for (it = layers.begin(); it != layers.end(); it++)
+            it->second.flag = 0;
+
+        CV_Assert(!layers[0].outputBlobs.empty());
+        ShapesVec inputShapes;
+        for(int i = 0; i < layers[0].outputBlobs.size(); i++)
+        {
+            CV_Assert(layers[0].outputBlobs[i].total());
+            inputShapes.push_back(shape(layers[0].outputBlobs[i]));
+        }
+        LayersShapesMap layersShapes;
+        getLayersShapes(inputShapes, layersShapes);
+
+        blobManager.reset();
+        for (it = layers.begin(); it != layers.end(); ++it)
+        {
+            const LayerData& ld = it->second;
+            blobManager.addReferences(ld.inputBlobsId);
+        }
+
+        for (int i = 0; i < blobsToKeep_.size(); i++)
+        {
+            blobManager.addReference(blobsToKeep_[i]);
+        }
+
+        for (it = layers.begin(); it != layers.end(); it++)
+        {
+            int lid = it->first;
+            allocateLayer(lid, layersShapes);
+        }
+
+        fuseLayers(blobsToKeep_);
     }
 
     void forwardLayer(LayerData &ld)
@@ -1109,7 +1133,7 @@ struct Net::Impl
         if (preferableBackend == DNN_BACKEND_DEFAULT ||
             !layer->supportBackend(preferableBackend))
         {
-            if( !ld.skip )
+            if( !ld.skipFlags[DNN_BACKEND_DEFAULT] )
                 layer->forward(ld.inputBlobs, ld.outputBlobs, ld.internals);
         }
         else if (!ld.skipFlags[preferableBackend])
@@ -1300,20 +1324,6 @@ void Net::connect(String _outPin, String _inPin)
     impl->connect(outPin.lid, outPin.oid,
                   inpPin.lid, inpPin.oid);
 }
-//void Net::forward(LayerId toLayer)
-//{
-//    if (!impl->netWasAllocated)
-//    {
-//        impl->setUpNet();
-
-//    }
-
-//    if (toLayer.isString() && toLayer.get<String>().empty())
-//        impl->forwardAll();
-//    else
-//        impl->forwardLayer(impl->getLayerData(toLayer));
-//}
-
 Mat Net::forward(const String& outputName)
 {
     String layerName = outputName;
diff --git a/modules/dnn/test/test_googlenet.cpp b/modules/dnn/test/test_googlenet.cpp
index d909355cf..e97281b3d 100644
--- a/modules/dnn/test/test_googlenet.cpp
+++ b/modules/dnn/test/test_googlenet.cpp
@@ -95,8 +95,7 @@ static void launchGoogleNetTest()
         std::replace( filename.begin(), filename.end(), '/', '#');
         Mat ref = blobFromNPY(_tf("googlenet_" + filename + ".npy"));
 
-        // TODO: disabled the check for now, because it conflicts with the layer fusion
-        // normAssert(outs[i], ref, "", 1E-4, 1E-2);
+        normAssert(outs[i], ref, "", 1E-4, 1E-2);
     }
 }
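
For context, a minimal usage sketch of the call path this patch affects (not part of the patch itself): requesting an intermediate blob by name puts its pin into blobsToKeep, and fuseLayers() now refuses to fuse across kept pins, which is why the normAssert on intermediate GoogLeNet blobs can be re-enabled. The file paths, the input name "data", and the layer name "inception_3a/output" below are assumptions based on the stock BVLC GoogLeNet prototxt; the cv::dnn calls are the public entry points the test goes through.

// Hedged sketch, assuming the cv::dnn API of this period; model files and
// layer names are illustrative, not taken from the patch.
#include <opencv2/dnn.hpp>

int main()
{
    using namespace cv;
    using namespace cv::dnn;

    // Hypothetical paths to the stock BVLC GoogLeNet definition and weights.
    Net net = readNetFromCaffe("bvlc_googlenet.prototxt", "bvlc_googlenet.caffemodel");

    // A zero-filled 1x3x224x224 blob is enough to drive the forward pass.
    int sz[] = {1, 3, 224, 224};
    Mat input(4, sz, CV_32F, Scalar(0));
    net.setInput(input, "data");

    // Asking for an intermediate layer's output adds its pin to blobsToKeep;
    // fuseLayers() skips fusion across kept pins, so this Mat holds the
    // genuine intermediate blob that the re-enabled normAssert compares.
    Mat inter = net.forward("inception_3a/output");

    // The final classification scores are requested the same way.
    Mat prob = net.forward("prob");
    return 0;
}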