@@ -64,6 +64,7 @@ public:
     ONNXImporter(Net& net, const char *onnxFile)
         : dstNet(net)
     {
+        hasDynamicShapes = false;
         CV_Assert(onnxFile);
         CV_LOG_DEBUG(NULL, "DNN/ONNX: processing ONNX model from file: " << onnxFile);

@@ -84,6 +85,7 @@ public:
     ONNXImporter(Net& net, const char* buffer, size_t sizeBuffer)
         : dstNet(net)
     {
+        hasDynamicShapes = false;
         CV_LOG_DEBUG(NULL, "DNN/ONNX: processing in-memory ONNX model (" << sizeBuffer << " bytes)");

         struct _Buf : public std::streambuf
@@ -115,6 +117,7 @@ protected:
     std::map<std::string, Mat> constBlobs;

     std::map<std::string, MatShape> outShapes; // List of internal blobs shapes.
+    bool hasDynamicShapes; // Whether the model has inputs with dynamic shapes
     typedef std::map<std::string, MatShape>::iterator IterShape_t;

     std::map<std::string, LayerInfo> layer_id;
@@ -413,8 +416,10 @@ void ONNXImporter::populateNet()
         for (int j = 0; j < inpShape.size(); ++j)
         {
             inpShape[j] = tensorShape.dim(j).dim_value();
+            if (!tensorShape.dim(j).dim_param().empty())
+                hasDynamicShapes = true;
         }
-        if (!inpShape.empty())
+        if (!inpShape.empty() && !hasDynamicShapes)
         {
             inpShape[0] = std::max(inpShape[0], 1); // It's OK to have undetermined batch size
         }
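
Note on the dim_param check above: ONNX marks a dynamic dimension by giving it a symbolic name (dim_param, e.g. "batch") instead of a concrete dim_value, so a non-empty dim_param means the numeric value cannot be trusted at import time. A minimal stand-alone sketch of that rule; the Dim struct and hasDynamicDims below are stand-ins for the generated opencv_onnx protobuf types, not OpenCV code:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for a TensorShapeProto dimension: either a fixed dim_value or a
    // symbolic dim_param such as "batch" (in which case dim_value stays 0).
    struct Dim { int64_t dim_value; std::string dim_param; };

    static bool hasDynamicDims(const std::vector<Dim>& dims)
    {
        for (const Dim& d : dims)
            if (!d.dim_param.empty())   // same test as the patch uses
                return true;
        return false;
    }

    int main()
    {
        std::vector<Dim> nchw = { {0, "batch"}, {3, ""}, {224, ""}, {224, ""} };
        std::cout << std::boolalpha << hasDynamicDims(nchw) << '\n';   // prints: true
    }
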
@@ -461,6 +466,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)

         layerParams.name = name;
         layerParams.type = layer_type;
+        layerParams.set("has_dynamic_shapes", hasDynamicShapes);

         if (layer_type == "MaxPool")
         {
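
With the has_dynamic_shapes entry added above, every imported node carries the flag in its LayerParams, and a layer implementation can read it back through the usual Dict accessors with a default of false. A minimal round-trip sketch, assuming an OpenCV build with the dnn module:

    #include <iostream>
    #include <opencv2/dnn.hpp>

    int main()
    {
        cv::dnn::LayerParams lp;
        lp.set("has_dynamic_shapes", true);                    // what the importer now does per node
        bool dyn = lp.get<bool>("has_dynamic_shapes", false);  // how a layer can query it, defaulting to false
        std::cout << std::boolalpha << dyn << '\n';            // prints: true
        return 0;
    }
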
@@ -1276,6 +1282,20 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             {
                 layerParams.type = "Reshape";
                 layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
+                if (hasDynamicShapes)
+                {
+                    std::vector<int> dynamicAxes;
+                    std::vector<int> inputIndices;
+                    for (int index = 0; index < inpShape.size(); ++index)
+                    {
+                        if (!maskedAxes[index])
+                            inputIndices.push_back(index);
+                    }
+                    for (int index = 0; index < outShape.size(); ++index)
+                        dynamicAxes.push_back(index);
+                    layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size()));
+                    layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size()));
+                }
             }
             else
                 layerParams.type = "Identity";
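
When shapes are dynamic, the Squeeze-to-Reshape rewrite above cannot rely on the absolute sizes baked into "dim", so it also records which output axes may change at run time (dynamic_axes: every output axis) and which input axes they come from (input_indices: the axes that survive the squeeze). A stand-alone walk-through of those two loops for one concrete, example-only shape:

    #include <iostream>
    #include <vector>

    // Example: input shape {1, 3, 1, 5}, squeezing axes 0 and 2 -> output shape {3, 5}.
    int main()
    {
        std::vector<int>  inpShape   = {1, 3, 1, 5};
        std::vector<bool> maskedAxes = {true, false, true, false};  // axes removed by Squeeze
        std::vector<int>  outShape   = {3, 5};

        std::vector<int> dynamicAxes, inputIndices;
        for (int index = 0; index < (int)inpShape.size(); ++index)
            if (!maskedAxes[index])
                inputIndices.push_back(index);     // -> {1, 3}: surviving input axes
        for (int index = 0; index < (int)outShape.size(); ++index)
            dynamicAxes.push_back(index);          // -> {0, 1}: every output axis may vary

        for (int v : inputIndices) std::cout << v << ' ';   // prints: 1 3
        std::cout << '\n';
        for (int v : dynamicAxes) std::cout << v << ' ';    // prints: 0 1
        std::cout << '\n';
        return 0;
    }
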
@@ -1338,6 +1358,19 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             outShape.insert(outShape.begin() + axis, 1);
             layerParams.type = "Reshape";
             layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
+            if (hasDynamicShapes)
+            {
+                std::vector<int> dynamicAxes;
+                std::vector<int> inputIndices;
+                for (int index = 0; index < outShape.size(); ++index) {
+                    if (index != axis)
+                        dynamicAxes.push_back(index);
+                }
+                for (int index = 0; index < inpShape.size(); ++index)
+                    inputIndices.push_back(index);
+                layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size()));
+                layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size()));
+            }
         }
         else if (layer_type == "Expand")
         {
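
The Unsqueeze branch above is the mirror image: the inserted axis is the only one whose size is known (always 1), so every other output axis is listed as dynamic and all input axes are forwarded unchanged. A brief stand-alone walk-through with example-only values:

    #include <iostream>
    #include <vector>

    // Example: inserting a new axis at position 1 into {3, 5} gives {3, 1, 5}.
    int main()
    {
        const int axis = 1;
        std::vector<int> inpShape = {3, 5};
        std::vector<int> outShape = {3, 1, 5};

        std::vector<int> dynamicAxes, inputIndices;
        for (int index = 0; index < (int)outShape.size(); ++index)
            if (index != axis)
                dynamicAxes.push_back(index);      // -> {0, 2}: all but the inserted axis
        for (int index = 0; index < (int)inpShape.size(); ++index)
            inputIndices.push_back(index);         // -> {0, 1}: every input axis

        for (int v : dynamicAxes) std::cout << v << ' ';    // prints: 0 2
        std::cout << '\n';
        for (int v : inputIndices) std::cout << v << ' ';   // prints: 0 1
        std::cout << '\n';
        return 0;
    }
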
@@ -1625,6 +1658,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                 cv::dnn::DictValue paramEnd = cv::dnn::DictValue::arrayInt(end.data(), end.size());
                 sliceLp.set("begin", paramBegin);
                 sliceLp.set("end", paramEnd);
+                sliceLp.set("has_dynamic_shapes", hasDynamicShapes);

                 if (inpShape.size() > 1)
                 {
@@ -1637,6 +1671,17 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                     layerParams.type = "Reshape";
                     layerParams.set("axis", 0);
                     layerParams.set("dim", DictValue::arrayInt(&inpShape[0], inpShape.size()));
+                    if (hasDynamicShapes)
+                    {
+                        std::vector<int> dynamicAxes;
+                        std::vector<int> inputIndices;
+                        for (int index = 0; index < inpShape.size(); ++index)
+                            dynamicAxes.push_back(index);
+                        for (int index = 0; index < inpShape.size(); ++index)
+                            inputIndices.push_back(index);
+                        layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size()));
+                        layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size()));
+                    }
                     node_proto.set_input(0, sliceLp.name);
                 }
                 else
@@ -1676,7 +1721,11 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             for (int i = 1; i < node_proto.input_size(); i++)
                 CV_Assert(layer_id.find(node_proto.input(i)) == layer_id.end());

-            String interp_mode = layerParams.get<String>("coordinate_transformation_mode");
+            String interp_mode;
+            if (layerParams.has("coordinate_transformation_mode"))
+                interp_mode = layerParams.get<String>("coordinate_transformation_mode");
+            else
+                interp_mode = layerParams.get<String>("mode");
             CV_Assert_N(interp_mode != "tf_crop_and_resize", interp_mode != "tf_half_pixel_for_nn");

             layerParams.set("align_corners", interp_mode == "align_corners");
@@ -1688,16 +1737,23 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                 shapes.convertTo(shapes, CV_32S);
             int height = shapes.at<int>(2);
             int width = shapes.at<int>(3);
-            if (node_proto.input_size() == 3)
+            if (hasDynamicShapes)
             {
-                IterShape_t shapeIt = outShapes.find(node_proto.input(0));
-                CV_Assert(shapeIt != outShapes.end());
-                MatShape scales = shapeIt->second;
-                height *= scales[2];
-                width *= scales[3];
+                layerParams.set("zoom_factor_x", width);
+                layerParams.set("zoom_factor_y", height);
             }
-            layerParams.set("width", width);
-            layerParams.set("height", height);
+            else
+            {
+                if (node_proto.input_size() == 3) {
+                    IterShape_t shapeIt = outShapes.find(node_proto.input(0));
+                    CV_Assert(shapeIt != outShapes.end());
+                    MatShape scales = shapeIt->second;
+                    height *= scales[2];
+                    width *= scales[3];
+                }
+                layerParams.set("width", width);
+                layerParams.set("height", height);
+            }

             if (layerParams.get<String>("mode") == "linear") {
                 layerParams.set("mode", interp_mode == "pytorch_half_pixel" ?
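
Taken together, these changes let the importer accept a model whose inputs have symbolic dimensions: sizes are no longer clamped or hard-coded at import time (see the populateNet and Resize hunks above), and the concrete shapes come from whatever blob is supplied before forward(). A hedged usage sketch; "dynamic_batch.onnx" and the 4x3x224x224 input are placeholders, while readNetFromONNX, setInput and forward are the standard cv::dnn API:

    #include <iostream>
    #include <opencv2/dnn.hpp>

    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        // Hypothetical model whose batch dimension is symbolic in the ONNX graph.
        Net net = readNetFromONNX("dynamic_batch.onnx");

        // Feed a batch of 4; the actual shape is taken from this blob at run time.
        std::vector<int> shape = {4, 3, 224, 224};
        Mat blob(shape, CV_32F, Scalar(0));
        net.setInput(blob);

        Mat out = net.forward();
        std::cout << "output dims: " << out.dims << '\n';
        return 0;
    }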