Merge pull request #9569 from dkurt:test_dnn_ssd_halide

Vadim Pisarevsky, 8 years ago
commit 93c3f20deb

Changed files:
  1. modules/dnn/include/opencv2/dnn/dnn.hpp (5 changed lines)
  2. modules/dnn/src/dnn.cpp (244 changed lines)
  3. modules/dnn/src/op_halide.cpp (56 changed lines)
  4. modules/dnn/src/op_halide.hpp (4 changed lines)
  5. modules/dnn/test/test_halide_layers.cpp (42 changed lines)
  6. modules/dnn/test/test_halide_nets.cpp (29 changed lines)

modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -146,6 +146,11 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
      */
     virtual void copyToHost() = 0;

+    /**
+     * @brief Indicate that an actual data is on CPU.
+     */
+    virtual void setHostDirty() = 0;
+
     int backendId; //!< Backend identifier.
     int targetId;  //!< Target identifier.
 };
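The new virtual method completes the host/device synchronization contract of BackendWrapper: copyToHost() pulls fresh data back from the device, while setHostDirty() records that the CPU copy was just modified, so device-side consumers must re-upload before reading. A minimal sketch of a wrapper satisfying that contract; the class name and the flag bookkeeping are illustrative, not part of this patch (real implementations, like HalideBackendWrapper below, delegate this to the backend's own buffer):

```cpp
#include <opencv2/dnn.hpp>

// Hypothetical wrapper, for illustration of the contract only.
class SketchWrapper : public cv::dnn::BackendWrapper
{
public:
    SketchWrapper(int targetId, const cv::Mat& m)
        : BackendWrapper(cv::dnn::DNN_BACKEND_HALIDE, targetId),
          host(m), deviceDirty(false) {}

    virtual void copyToHost()
    {
        if (deviceDirty)
        {
            // Download the device buffer into `host` here.
            deviceDirty = false;
        }
    }

    virtual void setHostDirty()
    {
        // CPU code wrote into `host`; any device copy is now stale.
        deviceDirty = false;
    }

private:
    cv::Mat host;
    bool deviceDirty;
};
```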

modules/dnn/src/dnn.cpp
@@ -199,125 +199,6 @@ struct LayerPin
     }
 };

-// Objects of this class manages wrappers. For every CPU memory pointer and shape
-// one and only wrapper. Now it support wrapping for single backend and target.
-class BackendWrapManager
-{
-public:
-    Ptr<BackendWrapper> wrap(const Mat& m, int backendId, int targetId)
-    {
-        CV_TRACE_FUNCTION();
-
-        CV_Assert(backendId != DNN_BACKEND_DEFAULT);
-
-        std::map<void*, Ptr<BackendWrapper> >::iterator hostsIt;
-        // Check that the same CPU memory was previously wrapped.
-        hostsIt = hostWrappers.find(m.data);
-        if (hostsIt == hostWrappers.end())
-        {
-            // If not wrapped before.
-            return (hostWrappers[m.data] = wrapHost(m, backendId, targetId));
-        }
-        else
-        {
-            // Find if wrapper of this host and shape was created before.
-            std::map<std::pair<void*, MatSize>, Ptr<BackendWrapper> >::iterator it;
-            std::pair<void*, MatSize> key(m.data, m.size);
-            it = extraWrappers.find(key);
-            if (it == extraWrappers.end())
-            {
-                MatShape shape(m.dims);
-                for (int i = 0; i < m.dims; ++i)
-                    shape[i] = m.size.p[i];
-                return (extraWrappers[key] = wrapUser(hostsIt->second, shape));
-            }
-            else
-                return it->second;
-        }
-    }
-
-    std::vector<Ptr<BackendWrapper> > wrap(const std::vector<Mat*>& mats,
-                                           int backendId, int targetId)
-    {
-        const int num = mats.size();
-        std::vector<Ptr<BackendWrapper> > dst(num);
-        for (int i = 0; i < num; ++i)
-        {
-            dst[i] = wrap(*mats[i], backendId, targetId);
-        }
-        return dst;
-    }
-
-    std::vector<Ptr<BackendWrapper> > wrap(const std::vector<Mat>& mats,
-                                           int backendId, int targetId)
-    {
-        const int num = mats.size();
-        std::vector<Ptr<BackendWrapper> > dst(num);
-        for (int i = 0; i < num; ++i)
-        {
-            dst[i] = wrap(mats[i], backendId, targetId);
-        }
-        return dst;
-    }
-
-    void reset()
-    {
-        CV_TRACE_FUNCTION();
-
-        hostWrappers.clear();
-        extraWrappers.clear();
-    }
-
-private:
-    // Backend-specific wrapping function.
-    Ptr<BackendWrapper> wrapHost(const Mat& m, int backendId, int targetId)
-    {
-        if (backendId == DNN_BACKEND_DEFAULT)
-        {
-            return Ptr<BackendWrapper>();
-        }
-        else if (backendId == DNN_BACKEND_HALIDE)
-        {
-            CV_Assert(haveHalide());
-#ifdef HAVE_HALIDE
-            return Ptr<BackendWrapper>(new HalideBackendWrapper(targetId, m));
-#endif // HAVE_HALIDE
-        }
-        else
-        {
-            CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
-        }
-        return Ptr<BackendWrapper>();
-    }
-
-    // Backend-specific wrapping function.
-    Ptr<BackendWrapper> wrapUser(const Ptr<BackendWrapper>& host, const MatShape& shape)
-    {
-        int backendId = host->backendId;
-        if (backendId == DNN_BACKEND_DEFAULT)
-        {
-            return Ptr<BackendWrapper>();
-        }
-        else if (backendId == DNN_BACKEND_HALIDE)
-        {
-            CV_Assert(haveHalide());
-#ifdef HAVE_HALIDE
-            return Ptr<BackendWrapper>(new HalideBackendWrapper(host, shape));
-#endif // HAVE_HALIDE
-        }
-        else
-        {
-            CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
-        }
-        return Ptr<BackendWrapper>();
-    }
-
-    // Wrappers that initialized for memory hosts (first wrapping of CPU data).
-    std::map<void*, Ptr<BackendWrapper> > hostWrappers;
-    // The rest of wrappers. They initialized for non-host cv::Mat.
-    std::map<std::pair<void*, MatSize>, Ptr<BackendWrapper> > extraWrappers;
-};
-
 struct LayerData
 {
     LayerData() : id(-1), flag(0) {}
@@ -340,6 +221,8 @@ struct LayerData
     std::set<int> inputLayersId;
     std::set<int> requiredOutputs;
     std::vector<LayerPin> consumers;
+    std::vector<Ptr<BackendWrapper> > outputBlobsWrappers;
+    std::vector<Ptr<BackendWrapper> > inputBlobsWrappers;

     Ptr<Layer> layerInstance;
     std::vector<Mat> outputBlobs;
@@ -618,6 +501,24 @@ private:
     std::map<LayerPin, Mat> memHosts;
 };

+static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, const cv::Mat& m)
+{
+    if (backendId == DNN_BACKEND_DEFAULT)
+    {
+        return Ptr<BackendWrapper>();
+    }
+    else if (backendId == DNN_BACKEND_HALIDE)
+    {
+        CV_Assert(haveHalide());
+#ifdef HAVE_HALIDE
+        return Ptr<BackendWrapper>(new HalideBackendWrapper(targetId, m));
+#endif // HAVE_HALIDE
+    }
+    else
+        CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
+    return Ptr<BackendWrapper>();
+}
+
 struct Net::Impl
 {
     typedef std::map<int, LayerShapes> LayersShapesMap;
@@ -650,8 +551,8 @@ struct Net::Impl
     int preferableBackend;
     int preferableTarget;
     String halideConfigFile;
-    // Backend-specific wrapping manager.
-    BackendWrapManager backendWrapper;
+    // Map host data to backend specific wrapper.
+    std::map<void*, Ptr<BackendWrapper> > backendWrappers;

     int lastLayerId;
@@ -659,6 +560,62 @@ struct Net::Impl
     bool fusion;
     std::vector<int64> layersTimings;

+    Ptr<BackendWrapper> wrap(const Mat& host)
+    {
+        if (preferableBackend == DNN_BACKEND_DEFAULT)
+            return Ptr<BackendWrapper>();
+
+        MatShape shape(host.dims);
+        for (int i = 0; i < host.dims; ++i)
+            shape[i] = host.size[i];
+
+        void* data = host.data;
+        if (backendWrappers.find(data) != backendWrappers.end())
+        {
+            Ptr<BackendWrapper> baseBuffer = backendWrappers[data];
+            if (preferableBackend == DNN_BACKEND_HALIDE)
+            {
+                CV_Assert(haveHalide());
+#ifdef HAVE_HALIDE
+                return Ptr<BackendWrapper>(new HalideBackendWrapper(baseBuffer, shape));
+#endif // HAVE_HALIDE
+            }
+            else
+                CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
+        }
+
+        Ptr<BackendWrapper> wrapper = wrapMat(preferableBackend, preferableTarget, host);
+        backendWrappers[data] = wrapper;
+        return wrapper;
+    }
+
+    class HalideCompiler : public ParallelLoopBody
+    {
+    public:
+        HalideCompiler(const MapIdToLayerData& layers_, int preferableTarget_)
+            : layers(&layers_), preferableTarget(preferableTarget_) {}
+
+        void operator()(const Range& r) const
+        {
+            MapIdToLayerData::const_iterator it = layers->begin();
+            for (int i = 0; i < r.start && it != layers->end(); ++i, ++it) {}
+            for (int i = r.start; i < r.end && it != layers->end(); ++i, ++it)
+            {
+                const LayerData &ld = it->second;
+                Ptr<Layer> layer = ld.layerInstance;
+                bool skip = ld.skipFlags.find(DNN_BACKEND_HALIDE)->second;
+                if (layer->supportBackend(DNN_BACKEND_HALIDE) && !skip)
+                {
+                    Ptr<BackendNode> node = ld.backendNodes.find(DNN_BACKEND_HALIDE)->second;
+                    dnn::compileHalide(ld.outputBlobs, node, preferableTarget);
+                }
+            }
+        }
+
+    private:
+        const MapIdToLayerData* layers;
+        int preferableTarget;
+    };
+
     void compileHalide()
     {
         CV_TRACE_FUNCTION();
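Two additions here: Net::Impl::wrap() replaces the old BackendWrapManager by caching one wrapper per host data pointer in backendWrappers and deriving shape-specific views from the cached base, and HalideCompiler moves per-layer Halide compilation under cv::parallel_for_. A self-contained sketch of the same ParallelLoopBody pattern, assuming only core OpenCV; the Doubler class and the work it does are ours, for illustration:

```cpp
#include <opencv2/core.hpp>
#include <vector>

// Each worker is handed a disjoint sub-range of [0, N) and must touch only
// state that is safe to share between threads -- here, distinct elements.
class Doubler : public cv::ParallelLoopBody
{
public:
    explicit Doubler(std::vector<int>& data_) : data(&data_) {}

    void operator()(const cv::Range& r) const
    {
        for (int i = r.start; i < r.end; ++i)
            (*data)[i] *= 2;
    }

private:
    std::vector<int>* data;
};

int main()
{
    std::vector<int> data(1000, 21);
    cv::parallel_for_(cv::Range(0, (int)data.size()), Doubler(data));
    return data.front() == 42 ? 0 : 1;  // exit code 0 on success
}
```

Note that HalideCompiler re-advances a map iterator to r.start on every invocation: that linear skip is the price of parallelizing over a std::map rather than a random-access container.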
@@ -682,10 +639,9 @@ struct Net::Impl
                                               ld.inputBlobs, ld.outputBlobs,
                                               preferableTarget);
                 }
-                dnn::compileHalide(ld.outputBlobs, ld.backendNodes[DNN_BACKEND_HALIDE],
-                                   preferableTarget);
             }
         }
+        parallel_for_(Range(0, layers.size()), HalideCompiler(layers, preferableTarget));
     }

     void clear()
@@ -917,7 +873,6 @@ struct Net::Impl
     {
         CV_TRACE_FUNCTION();

-        backendWrapper.reset();
         if (preferableBackend == DNN_BACKEND_DEFAULT)
         {
             CV_Assert(preferableTarget == DNN_TARGET_CPU);
@@ -967,12 +922,10 @@ struct Net::Impl
         }

         // No layers fusion.
         ldTop.skipFlags[preferableBackend] = false;
-        std::vector<Ptr<BackendWrapper> > inputs =
-            backendWrapper.wrap(ldTop.inputBlobs, preferableBackend,
-                                preferableTarget);
         if (preferableBackend == DNN_BACKEND_HALIDE)
         {
-            ldTop.backendNodes[DNN_BACKEND_HALIDE] = layerTop->initHalide(inputs);
+            ldTop.backendNodes[DNN_BACKEND_HALIDE] =
+                layerTop->initHalide(ldTop.inputBlobsWrappers);
             baseIt = it;
         }
         else
@@ -1021,12 +974,14 @@ struct Net::Impl
         //bind inputs
         ld.inputBlobs.resize(ninputs);
+        ld.inputBlobsWrappers.resize(ninputs);
         for (size_t i = 0; i < ninputs; i++)
         {
             LayerPin from = ld.inputBlobsId[i];
             CV_Assert(from.valid());
             CV_DbgAssert(layers.count(from.lid) && (int)layers[from.lid].outputBlobs.size() > from.oid);
             ld.inputBlobs[i] = &layers[from.lid].outputBlobs[from.oid];
+            ld.inputBlobsWrappers[i] = layers[from.lid].outputBlobsWrappers[from.oid];
         }

         LayersShapesMap::const_iterator layerShapesIt = layersShapes.find(lid);
@@ -1036,6 +991,11 @@ struct Net::Impl
         std::vector<LayerPin> pinsForInternalBlobs;
         bool maximizeReuse = preferableBackend == DNN_BACKEND_HALIDE;
         blobManager.allocateBlobsForLayer(ld, layerShapesIt->second, pinsForInternalBlobs, maximizeReuse);
+        ld.outputBlobsWrappers.resize(ld.outputBlobs.size());
+        for (int i = 0; i < ld.outputBlobs.size(); ++i)
+        {
+            ld.outputBlobsWrappers[i] = wrap(ld.outputBlobs[i]);
+        }

         Ptr<Layer> layerPtr = ld.getLayerInstance();
         {
@@ -1256,6 +1216,8 @@ struct Net::Impl
         getLayersShapes(inputShapes, layersShapes);

         blobManager.reset();
+        backendWrappers.clear();
+        blobManager.addReference(LayerPin(0, 0));
         for (it = layers.begin(); it != layers.end(); ++it)
         {
             const LayerData& ld = it->second;
@@ -1291,18 +1253,28 @@ struct Net::Impl
                 !layer->supportBackend(preferableBackend))
             {
                 if( !ld.skipFlags[DNN_BACKEND_DEFAULT] )
+                {
+                    for (int i = 0, n = ld.inputBlobsWrappers.size(); i < n; ++i)
+                    {
+                        if (!ld.inputBlobsWrappers[i].empty())
+                            ld.inputBlobsWrappers[i]->copyToHost();
+                    }
                     layer->forward(ld.inputBlobs, ld.outputBlobs, ld.internals);
+                    for (int i = 0, n = ld.outputBlobsWrappers.size(); i < n; ++i)
+                    {
+                        if (!ld.outputBlobsWrappers[i].empty())
+                            ld.outputBlobsWrappers[i]->setHostDirty();
+                    }
+                }
                 else
                     tm.reset();
             }
             else if (!ld.skipFlags[preferableBackend])
             {
-                std::vector<Ptr<BackendWrapper> > outputs =
-                    backendWrapper.wrap(ld.outputBlobs, preferableBackend, preferableTarget);
                 Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
                 if (preferableBackend == DNN_BACKEND_HALIDE)
                 {
-                    forwardHalide(outputs, node);
+                    forwardHalide(ld.outputBlobsWrappers, node);
                 }
                 else
                 {
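This is where the new wrappers pay off: a layer that falls back to the default CPU path is now bracketed by copyToHost() on its input wrappers and setHostDirty() on its output wrappers, so data migrates between device and host only when backends actually alternate. A condensed sketch of that handshake with a stand-in wrapper type; all names here are illustrative:

```cpp
#include <vector>

// Stand-in for a backend wrapper: tracks where the freshest copy lives.
struct Wrapper
{
    bool deviceDirty;
    Wrapper() : deviceDirty(false) {}
    void copyToHost()   { if (deviceDirty) { /* download to host */ deviceDirty = false; } }
    void setHostDirty() { deviceDirty = false; /* host copy is now authoritative */ }
};

// Shape of the CPU fallback path in the forward loop above.
void runCpuFallback(std::vector<Wrapper*>& inputs, std::vector<Wrapper*>& outputs)
{
    for (size_t i = 0; i < inputs.size(); ++i)
        if (inputs[i]) inputs[i]->copyToHost();      // sync device -> host first

    // layer->forward(...) would run on host memory here.

    for (size_t i = 0; i < outputs.size(); ++i)
        if (outputs[i]) outputs[i]->setHostDirty();  // device copies are stale now
}
```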
@@ -1423,11 +1395,10 @@ struct Net::Impl
             CV_Error(Error::StsOutOfRange, "Layer \"" + ld.name + "\" produce only " + toString(ld.outputBlobs.size()) +
                                            " outputs, the #" + toString(pin.oid) + " was requsted");
         }
-        if (preferableBackend != DNN_BACKEND_DEFAULT)
+        if (preferableBackend != DNN_TARGET_CPU)
         {
             // Transfer data to CPU if it's require.
-            backendWrapper.wrap(ld.outputBlobs[pin.oid], preferableBackend,
-                                preferableTarget)->copyToHost();
+            ld.outputBlobsWrappers[pin.oid]->copyToHost();
         }
         else
         {
@@ -1635,6 +1606,7 @@ void Net::setInput(const Mat &blob_, const String& name)
     LayerData &ld = impl->layers[pin.lid];
     ld.outputBlobs.resize( std::max(pin.oid+1, (int)ld.requiredOutputs.size()) );
+    ld.outputBlobsWrappers.resize(ld.outputBlobs.size());
     MatShape prevShape = shape(ld.outputBlobs[pin.oid]);
     bool oldShape = prevShape == shape(blob_);
     if (oldShape)
@@ -1642,6 +1614,10 @@ void Net::setInput(const Mat &blob_, const String& name)
     else
         ld.outputBlobs[pin.oid] = blob_.clone();

+    if (!ld.outputBlobsWrappers[pin.oid].empty())
+    {
+        ld.outputBlobsWrappers[pin.oid]->setHostDirty();
+    }
     impl->netWasAllocated = impl->netWasAllocated && oldShape;
 }

modules/dnn/src/op_halide.cpp
@@ -18,11 +18,30 @@ namespace dnn
 {
 #ifdef HAVE_HALIDE
+static MatShape getBufferShape(const MatShape& shape)
+{
+    if (shape.size() == 2 || shape.size() == 4)
+    {
+        int w, h, c, n;
+        getCanonicalSize(shape, &w, &h, &c, &n);
+        return {w, h, c, n};
+    }
+    else
+    {
+        MatShape bufferShape(shape);
+        std::reverse(bufferShape.begin(), bufferShape.end());
+        return bufferShape;
+    }
+}
+
+static MatShape getBufferShape(const MatSize& size)
+{
+    return getBufferShape(MatShape(size.p, size.p + size[-1]));
+}
+
 Halide::Buffer<float> wrapToHalideBuffer(const Mat& mat)
 {
-    int n, c, w, h;
-    getCanonicalSize(mat.size, &w, &h, &c, &n);
-    return wrapToHalideBuffer(mat, {w, h, c, n});
+    return wrapToHalideBuffer(mat, getBufferShape(mat.size));
 }

 Halide::Buffer<float> wrapToHalideBuffer(const Mat& mat,
@@ -97,11 +116,9 @@ HalideBackendWrapper::HalideBackendWrapper(const Ptr<BackendWrapper>& base,
     : BackendWrapper(DNN_BACKEND_HALIDE, base->targetId)
 {
     managesDevMemory = false;
-    int w, h, c, n;
-    getCanonicalSize(shape, &w, &h, &c, &n);
     Halide::Buffer<float> baseBuffer = halideBuffer(base);
     buffer = Halide::Buffer<float>((float*)baseBuffer.raw_buffer()->host,
-                                   {w, h, c, n});
+                                   getBufferShape(shape));
     if (baseBuffer.has_device_allocation())
     {
         buffer.raw_buffer()->device = baseBuffer.raw_buffer()->device;
@@ -127,32 +144,23 @@ HalideBackendWrapper::~HalideBackendWrapper()

 void HalideBackendWrapper::copyToHost()
 {
-    CV_Assert(targetId == DNN_TARGET_CPU || buffer.device_dirty());
     if (buffer.device_dirty())
     {
         buffer.device_sync();
         buffer.copy_to_host();
     }
 }

+void HalideBackendWrapper::setHostDirty()
+{
+    buffer.set_device_dirty(false);
+    buffer.set_host_dirty();
+}
+
 #endif // HAVE_HALIDE

-void getCanonicalSize(const MatSize& size, int* width, int* height,
-                      int* channels, int* batch)
+void getCanonicalSize(const MatSize& size, int* w, int* h, int* c, int* n)
 {
-    const int dims = size.p[-1];
-    CV_Assert(dims == 2 || dims == 4);
-    *batch = size[0];
-    *channels = size[1];
-    if (dims == 4)
-    {
-        *width = size[3];
-        *height = size[2];
-    }
-    else
-    {
-        *width = 1;
-        *height = 1;
-    }
+    getCanonicalSize(MatShape(size.p, size.p + size[-1]), w, h, c, n);
 }

 void getCanonicalSize(const MatShape& shape, int* width, int* height,
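getBufferShape above unifies two layouts: 2-D and 4-D blobs keep the canonical {W, H, C, N} extents, and every other rank is handled by reversing the shape, because Halide lists extents innermost-first while MatShape is outermost-first. A tiny illustration of that reversal rule; the function name is ours, not the patch's:

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

typedef std::vector<int> MatShape;

// cv::Mat shapes are outermost-first, e.g. {N, C, H, W} for a 4-D blob;
// Halide extents are innermost-first, so a plain reversal maps one onto
// the other: {N, C, H, W} -> {W, H, C, N}.
static MatShape toHalideExtents(const MatShape& shape)
{
    MatShape extents(shape);
    std::reverse(extents.begin(), extents.end());
    return extents;
}

int main()
{
    const MatShape nchw = {2, 3, 4, 5};           // N=2, C=3, H=4, W=5
    const MatShape whcn = toHalideExtents(nchw);  // expect {5, 4, 3, 2}
    assert(whcn[0] == 5 && whcn[3] == 2);
    return 0;
}
```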
@@ -174,7 +182,7 @@ void getCanonicalSize(const MatShape& shape, int* width, int* height,
     }
 }

-void compileHalide(std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId)
+void compileHalide(const std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId)
 {
 #ifdef HAVE_HALIDE
     CV_Assert(!node.empty());

modules/dnn/src/op_halide.hpp
@@ -61,6 +61,8 @@ namespace dnn
         virtual void copyToHost();

+        virtual void setHostDirty();
+
         Halide::Buffer<float> buffer;

     private:

@@ -80,7 +82,7 @@ namespace dnn
                       const Ptr<BackendNode>& node);

     // Compile Halide pipeline to specific target. Use outputs to set bounds of functions.
-    void compileHalide(std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId);
+    void compileHalide(const std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId);

     bool haveHalide();
 }  // namespace dnn

modules/dnn/test/test_halide_layers.cpp
@@ -646,6 +646,48 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Eltwise, Combine(
 /*num convs*/ Values(1, 2, 3),
 /*weighted(for sum only)*/ Bool()
 ));

+////////////////////////////////////////////////////////////////////////////
+// Mixed backends
+////////////////////////////////////////////////////////////////////////////
+TEST(MixedBackends_Halide_Default_Halide, Accuracy)
+{
+    // Just a layer that supports Halide backend.
+    LayerParams lrn;
+    lrn.type = "LRN";
+    lrn.name = "testLRN";
+    // Some of layers that doesn't supports Halide backend yet.
+    LayerParams mvn;
+    mvn.type = "MVN";
+    mvn.name = "testMVN";
+    // Halide layer again.
+    LayerParams lrn2;
+    lrn2.type = "LRN";
+    lrn2.name = "testLRN2";
+
+    Net net;
+    int lrnId = net.addLayer(lrn.name, lrn.type, lrn);
+    net.connect(0, 0, lrnId, 0);
+    net.addLayerToPrev(mvn.name, mvn.type, mvn);
+    net.addLayerToPrev(lrn2.name, lrn2.type, lrn2);
+
+    Mat input({4, 3, 5, 6}, CV_32F);
+    randu(input, -1.0f, 1.0f);
+    net.setInput(input);
+    Mat outputDefault = net.forward().clone();
+
+    net.setPreferableBackend(DNN_BACKEND_HALIDE);
+    net.setInput(input);
+    Mat outputHalide = net.forward().clone();
+    normAssert(outputDefault, outputHalide);
+
+    net.setPreferableTarget(DNN_TARGET_OPENCL);
+    net.setInput(input);
+    outputHalide = net.forward().clone();
+    normAssert(outputDefault, outputHalide);
+}
+
 #endif // HAVE_HALIDE

 }  // namespace cvtest

modules/dnn/test/test_halide_nets.cpp
@@ -62,6 +62,7 @@ static void test(const std::string& weights, const std::string& proto,
     netHalide.setInput(blobFromImage(input.clone(), 1.0, Size(), Scalar(), false));

     normAssert(outputDefault, outputHalide, "Second run", l1, lInf);
+    std::cout << "." << std::endl;

     // Swap backends.
     netHalide.setPreferableBackend(DNN_BACKEND_DEFAULT);
@@ -79,6 +80,20 @@ static void test(const std::string& weights, const std::string& proto,
 ////////////////////////////////////////////////////////////////////////////////
 // CPU target
 ////////////////////////////////////////////////////////////////////////////////
+TEST(Reproducibility_MobileNetSSD_Halide, Accuracy)
+{
+    test(findDataFile("dnn/MobileNetSSD_deploy.caffemodel", false),
+         findDataFile("dnn/MobileNetSSD_deploy.prototxt", false),
+         "", 300, 300, "detection_out", "caffe", DNN_TARGET_CPU);
+};
+
+TEST(Reproducibility_SSD_Halide, Accuracy)
+{
+    test(findDataFile("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel", false),
+         findDataFile("dnn/ssd_vgg16.prototxt", false),
+         "", 300, 300, "detection_out", "caffe", DNN_TARGET_CPU);
+};
+
 TEST(Reproducibility_GoogLeNet_Halide, Accuracy)
 {
     test(findDataFile("dnn/bvlc_googlenet.caffemodel", false),
@@ -126,6 +141,20 @@ TEST(Reproducibility_ENet_Halide, Accuracy)
 ////////////////////////////////////////////////////////////////////////////////
 // OpenCL target
 ////////////////////////////////////////////////////////////////////////////////
+TEST(Reproducibility_MobileNetSSD_Halide_opencl, Accuracy)
+{
+    test(findDataFile("dnn/MobileNetSSD_deploy.caffemodel", false),
+         findDataFile("dnn/MobileNetSSD_deploy.prototxt", false),
+         "", 300, 300, "detection_out", "caffe", DNN_TARGET_OPENCL);
+};
+
+TEST(Reproducibility_SSD_Halide_opencl, Accuracy)
+{
+    test(findDataFile("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel", false),
+         findDataFile("dnn/ssd_vgg16.prototxt", false),
+         "", 300, 300, "detection_out", "caffe", DNN_TARGET_OPENCL);
+};
+
 TEST(Reproducibility_GoogLeNet_Halide_opencl, Accuracy)
 {
     test(findDataFile("dnn/bvlc_googlenet.caffemodel", false),
