Merge pull request #25582 from fengyuentau:dnn/dump_pbtxt

The current net exporters `dump` and `dumpToFile` export the network structure (and its params) to a .dot file that works with `graphviz`. This is hard to use and unfriendly to new users. What's worse, the produced picture does not look pretty.
dnn: better net exporter that works with netron #25582

This PR introduces a new exporter `dumpToPbtxt` and uses this new exporter by default when the environment variable `OPENCV_DNN_NETWORK_DUMP` is set. It mimics the string output of an ONNX model, modified with dnn-specific changes; see below for an example.

![image](https://github.com/opencv/opencv/assets/17219438/0644bed1-da71-4019-8466-88390698e4df)

## Usage

Call `cv::dnn::Net::dumpToPbtxt`:

```cpp
TEST(DumpNet, dumpToPbtxt) {
    std::string path = "/path/to/model.onnx";
    auto net = readNet(path);

    Mat input(std::vector<int>{1, 3, 640, 480}, CV_32F);
    net.setInput(input);

    net.dumpToPbtxt("yunet.pbtxt");
}
```

Set `export OPENCV_DNN_NETWORK_DUMP=1`

```cpp
TEST(DumpNet, env) {
    std::string path = "/path/to/model.onnx";
    auto net = readNet(path);

    Mat input(std::vector<int>{1, 3, 640, 480}, CV_32F);
    net.setInput(input);

    net.forward();
}
```

---

Note:
- `pbtxt` is registered as one of the ONNX model file suffixes in Netron, so you will see `module: ai.onnx` and similar fields in the model.
- We can get the string output of an ONNX model with the following script

```python
import onnx
net = onnx.load("/path/to/model.onnx")
net_str = str(net)
file = open("/path/to/model.pbtxt", "w")
file.write(net_str)
file.close()
```

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
pull/25603/head
Yuantao Feng 6 months ago committed by GitHub
parent 0044047782
commit bc0618b688
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 86
      apps/model-diagnostics/model_diagnostics.cpp
  2. 8
      modules/dnn/include/opencv2/dnn/dnn.hpp
  3. 10
      modules/dnn/src/net.cpp
  4. 267
      modules/dnn/src/net_impl.cpp
  5. 1
      modules/dnn/src/net_impl.hpp

@ -32,12 +32,36 @@ static std::string checkFileExists(const std::string& fileName)
"Please, specify a full path to the file."); "Please, specify a full path to the file.");
} }
// Parses a comma-separated shape string, e.g. "1,3,640,480" -> {1, 3, 640, 480}.
// Whitespace around numbers is tolerated ("1, 3, 640, 480") and blank tokens
// (double commas, trailing comma) are skipped instead of throwing from std::stoi.
static std::vector<int> parseShape(const std::string &shape_str) {
    std::stringstream ss(shape_str);
    std::string token;
    std::vector<int> shape;
    while (std::getline(ss, token, ',')) {
        // Trim surrounding whitespace; skip tokens that are empty or all blanks.
        const size_t first = token.find_first_not_of(" \t");
        if (first == std::string::npos)
            continue;
        const size_t last = token.find_last_not_of(" \t");
        shape.push_back(std::stoi(token.substr(first, last - first + 1)));
    }
    return shape;
}
std::string diagnosticKeys = std::string diagnosticKeys =
"{ model m | | Path to the model file. }" "{ model m | | Path to the model file. }"
"{ config c | | Path to the model configuration file. }" "{ config c | | Path to the model configuration file. }"
"{ framework f | | [Optional] Name of the model framework. }"; "{ framework f | | [Optional] Name of the model framework. }"
"{ input0_name | | [Optional] Name of input0. Use with input0_shape}"
"{ input0_shape | | [Optional] Shape of input0. Use with input0_name}"
"{ input1_name | | [Optional] Name of input1. Use with input1_shape}"
"{ input1_shape | | [Optional] Shape of input1. Use with input1_name}"
"{ input2_name | | [Optional] Name of input2. Use with input2_shape}"
"{ input2_shape | | [Optional] Shape of input2. Use with input2_name}"
"{ input3_name | | [Optional] Name of input3. Use with input3_shape}"
"{ input3_shape | | [Optional] Shape of input3. Use with input3_name}"
"{ input4_name | | [Optional] Name of input4. Use with input4_shape}"
"{ input4_shape | | [Optional] Shape of input4. Use with input4_name}";
int main( int argc, const char** argv ) int main( int argc, const char** argv )
{ {
@ -55,6 +79,17 @@ int main( int argc, const char** argv )
std::string config = checkFileExists(argParser.get<std::string>("config")); std::string config = checkFileExists(argParser.get<std::string>("config"));
std::string frameworkId = argParser.get<std::string>("framework"); std::string frameworkId = argParser.get<std::string>("framework");
std::string input0_name = argParser.get<std::string>("input0_name");
std::string input0_shape = argParser.get<std::string>("input0_shape");
std::string input1_name = argParser.get<std::string>("input1_name");
std::string input1_shape = argParser.get<std::string>("input1_shape");
std::string input2_name = argParser.get<std::string>("input2_name");
std::string input2_shape = argParser.get<std::string>("input2_shape");
std::string input3_name = argParser.get<std::string>("input3_name");
std::string input3_shape = argParser.get<std::string>("input3_shape");
std::string input4_name = argParser.get<std::string>("input4_name");
std::string input4_shape = argParser.get<std::string>("input4_shape");
CV_Assert(!model.empty()); CV_Assert(!model.empty());
enableModelDiagnostics(true); enableModelDiagnostics(true);
@ -63,5 +98,50 @@ int main( int argc, const char** argv )
Net ocvNet = readNet(model, config, frameworkId); Net ocvNet = readNet(model, config, frameworkId);
std::vector<std::string> input_names;
std::vector<std::vector<int>> input_shapes;
if (!input0_name.empty() || !input0_shape.empty()) {
CV_CheckFalse(input0_name.empty(), "input0_name cannot be empty");
CV_CheckFalse(input0_shape.empty(), "input0_shape cannot be empty");
input_names.push_back(input0_name);
input_shapes.push_back(parseShape(input0_shape));
}
if (!input1_name.empty() || !input1_shape.empty()) {
CV_CheckFalse(input1_name.empty(), "input1_name cannot be empty");
CV_CheckFalse(input1_shape.empty(), "input1_shape cannot be empty");
input_names.push_back(input1_name);
input_shapes.push_back(parseShape(input1_shape));
}
if (!input2_name.empty() || !input2_shape.empty()) {
CV_CheckFalse(input2_name.empty(), "input2_name cannot be empty");
CV_CheckFalse(input2_shape.empty(), "input2_shape cannot be empty");
input_names.push_back(input2_name);
input_shapes.push_back(parseShape(input2_shape));
}
if (!input3_name.empty() || !input3_shape.empty()) {
CV_CheckFalse(input3_name.empty(), "input3_name cannot be empty");
CV_CheckFalse(input3_shape.empty(), "input3_shape cannot be empty");
input_names.push_back(input3_name);
input_shapes.push_back(parseShape(input3_shape));
}
if (!input4_name.empty() || !input4_shape.empty()) {
CV_CheckFalse(input4_name.empty(), "input4_name cannot be empty");
CV_CheckFalse(input4_shape.empty(), "input4_shape cannot be empty");
input_names.push_back(input4_name);
input_shapes.push_back(parseShape(input4_shape));
}
if (!input_names.empty() && !input_shapes.empty() && input_names.size() == input_shapes.size()) {
ocvNet.setInputsNames(input_names);
for (size_t i = 0; i < input_names.size(); i++) {
Mat input(input_shapes[i], CV_32F);
ocvNet.setInput(input, input_names[i]);
}
size_t dot_index = model.rfind('.');
std::string graph_filename = model.substr(0, dot_index) + ".pbtxt";
ocvNet.dumpToPbtxt(graph_filename);
}
return 0; return 0;
} }

@ -518,6 +518,14 @@ CV__DNN_INLINE_NS_BEGIN
* @see dump() * @see dump()
*/ */
CV_WRAP void dumpToFile(CV_WRAP_FILE_PATH const String& path); CV_WRAP void dumpToFile(CV_WRAP_FILE_PATH const String& path);
/** @brief Dump net structure, hyperparameters, backend, target and fusion to pbtxt file
* @param path path to output file with .pbtxt extension
*
* Use Netron (https://netron.app) to open the target file to visualize the model.
* Call method after setInput(). To see correct backend, target and fusion run after forward().
*/
CV_WRAP void dumpToPbtxt(CV_WRAP_FILE_PATH const String& path);
/** @brief Adds new layer to the net. /** @brief Adds new layer to the net.
* @param name unique name of the adding layer. * @param name unique name of the adding layer.
* @param type typename of the adding layer (type must be registered in LayerRegister). * @param type typename of the adding layer (type must be registered in LayerRegister).

@ -216,6 +216,16 @@ void Net::dumpToFile(const String& path)
file.close(); file.close();
} }
// Writes the network structure in ONNX-like protobuf text format to `path`.
// Forces allocation first (see Impl::dumpToPbtxt) so shapes and backend/target
// information are populated in the dump.
void Net::dumpToPbtxt(const String& path)
{
    CV_TRACE_FUNCTION();
    CV_Assert(impl);
    CV_Assert(!empty());
    // Serialize first, then write the whole buffer out in one shot.
    const std::string pbtxt = impl->dumpToPbtxt(true);
    std::ofstream out(path.c_str());
    out << pbtxt;
    out.close();
}
Ptr<Layer> Net::getLayer(int layerId) const Ptr<Layer> Net::getLayer(int layerId) const
{ {
CV_Assert(impl); CV_Assert(impl);

@ -1830,15 +1830,278 @@ string Net::Impl::dump(bool forceAllocation) const
return out.str(); return out.str();
} }
// Serializes the element type and shape of a Mat as an ONNX ValueInfoProto-style
// "type { tensor_type { elem_type ... shape { ... } } }" text block, indented by
// `num_indent_spaces` spaces. Raises CV_Error for Mat types that have no
// TensorProto::DataType counterpart.
// NOTE(review): this switches on the full m.type(), not depth(), so multi-channel
// Mats (e.g. CV_32FC3) fall into the unsupported branch — presumably dnn blobs are
// always single-channel here; confirm against callers.
static void dumpTensorToString(std::ostringstream &out, const Mat &m, const int num_indent_spaces = 4) {
    string indent_spaces(num_indent_spaces, ' ');
    int type = 1; // TensorProto::FLOAT — matches the CV_32F case below
    /* Check TensorProto::DataType from https://github.com/onnx/onnx/blob/main/onnx/onnx.proto */
    switch (m.type()) {
        case CV_32F: break;            // FLOAT (1), already the default
        case CV_8U: type = 2; break;   // UINT8
        case CV_8S: type = 3; break;   // INT8
        case CV_16U: type = 4; break;  // UINT16
        case CV_16S: type = 5; break;  // INT16
        case CV_32S: type = 6; break;  // INT32
#if CV_VERSION_MAJOR > 4
        case CV_64S: type = 7; break;  // INT64 (OpenCV 5+ only)
        // STRING: 8
        case CV_BOOL: type = 9; break; // BOOL (OpenCV 5+ only)
#endif
        case CV_16F: type = 10; break; // FLOAT16
        case CV_64F: type = 11; break; // DOUBLE
#if CV_VERSION_MAJOR > 4
        case CV_32U: type = 12; break; // UINT32 (OpenCV 5+ only)
        case CV_64U: type = 13; break; // UINT64 (OpenCV 5+ only)
        // COMPLEX64: 14
        // COMPLEX128: 15
        case CV_16BF: type = 16; break; // BFLOAT16 (OpenCV 5+ only)
#endif
        default: CV_Error(Error::StsUnsupportedFormat, "Type of mat is not supported");
    }
    const auto &mshape = shape(m);
    out << indent_spaces << "type {\n"
        << indent_spaces << " tensor_type {\n"
        << indent_spaces << " elem_type: " << type << "\n";
    out << indent_spaces << " shape {\n";
    // One "dim { dim_value: N }" entry per dimension, in order.
    for (size_t i = 0; i < mshape.size(); i++) {
        out << indent_spaces << format(" dim { dim_value: %d }\n", mshape[i]);
    }
    out << indent_spaces << " }\n" // shape{}
        << indent_spaces << " }\n" // tensor_type{}
        << indent_spaces << "}\n"; // type{}
}
// Serializes one layer parameter (key/value) as an ONNX AttributeProto-style text
// block indented by `num_indent_spaces` spaces. Scalars map to STRING/INT/FLOAT,
// arrays to STRINGS/INTS/FLOATS; unrecognized kinds are tagged UNKNOWN-SCALAR /
// UNKNOWN-ARRAY so the dump never aborts on an exotic parameter type.
static void dumpParamToString(std::ostringstream &out, const std::string &key, const DictValue &value, const int num_indent_spaces = 2) {
    std::string indent_spaces(num_indent_spaces, ' ');
    out << indent_spaces << "attribute {\n"
        << indent_spaces << format(" name: \"%s\"\n", key.c_str());
    if (value.size() == 1) {
        // Scalar attribute.
        if (value.isString()) {
            out << indent_spaces << format(" type: STRING\n")
                << indent_spaces << format(" s: \"%s\"\n", value.getStringValue(0).c_str());
        } else if (value.isInt()) {
            out << indent_spaces << format(" type: INT\n")
                << indent_spaces << format(" i: %d\n", value.getIntValue(0));
        } else if (value.isReal()) {
            out << indent_spaces << format(" type: FLOAT\n")
                << indent_spaces << format(" f: %f\n", value.getRealValue(0));
        } else {
            out << indent_spaces << format(" type: UNKNOWN-SCALAR\n");
        }
    } else {
        // Array attribute: emit the plural type tag, then one line per element.
        if (value.isString()) {
            out << indent_spaces << format(" type: STRINGS\n");
        } else if (value.isInt()) {
            out << indent_spaces << format(" type: INTS\n");
        } else if (value.isReal()) {
            out << indent_spaces << format(" type: FLOATS\n");
        } else {
            out << indent_spaces << format(" type: UNKNOWN-ARRAY\n");
        }
        for (int i = 0; i < value.size(); i++) {
            if (value.isString()) {
                out << indent_spaces << format(" strings: \"%s\"\n", value.getStringValue(i).c_str());
            } else if (value.isInt()) {
                out << indent_spaces << format(" ints: %d\n", value.getIntValue(i));
            } else if (value.isReal()) {
                // Fix: pass the element index; getRealValue() without `i` does not
                // select the i-th element of a multi-valued DictValue.
                out << indent_spaces << format(" floats: %f\n", value.getRealValue(i));
            }
        }
    }
    out << indent_spaces << "}\n"; // attribute{}
}
// Emits one graph node in ONNX NodeProto-like text form: input/output names,
// node name, op_type, the layer hyper-parameters as attributes, and two extra
// pseudo-attributes carrying the dnn backend and target the node runs on.
static void dumpLayerToString(std::ostringstream &out,
                              const std::vector<std::string> &inputs,
                              const std::vector<std::string> &outputs,
                              const std::string &name,
                              const std::string &op_type,
                              const LayerParams &params,
                              const std::string &backend_name,
                              const std::string &target_name,
                              const int num_indent_spaces = 2) {
    const std::string indent_spaces(num_indent_spaces, ' ');
    for (const auto &input_name : inputs)
        out << indent_spaces << format("input: \"%s\"\n", input_name.c_str());
    for (const auto &output_name : outputs)
        out << indent_spaces << format("output: \"%s\"\n", output_name.c_str());
    if (!name.empty())
        out << indent_spaces << format("name: \"%s\"\n", name.c_str());
    if (!op_type.empty())
        out << indent_spaces << format("op_type: \"%s\"\n", op_type.c_str());
    // Hyper-parameters are dumped only when the parameter set carries a name,
    // mirroring the original gating.
    if (!params.name.empty()) {
        for (auto it = params.begin(); it != params.end(); ++it)
            dumpParamToString(out, it->first, it->second, num_indent_spaces);
    }
    // Backend/target are encoded as ordinary string attributes so Netron shows them.
    if (!backend_name.empty())
        dumpParamToString(out, "Backend", DictValue(backend_name), num_indent_spaces);
    if (!target_name.empty())
        dumpParamToString(out, "Target", DictValue(target_name), num_indent_spaces);
}
// Dumps the network in ONNX-like protobuf text format (.pbtxt) so it can be
// inspected with viewers such as Netron: graph inputs/outputs with shapes,
// one node per layer with hyper-parameters and per-layer backend/target as
// attributes, intermediate tensor shapes as value_info, and the network-wide
// preferable backend/target as metadata_props.
string Net::Impl::dumpToPbtxt(bool forceAllocation) const {
    // Allocation fills in output blob shapes and backend nodes; without it the
    // dump would lack shape and backend/target information (same trick as dump()).
    if (forceAllocation && !netWasAllocated) {
        const_cast<Net::Impl*>(this)->setUpNet();
    }
    std::ostringstream out;
    const std::map<int, LayerData> &map = layers;
    std::map<String, Mat*> value_info; // intermediate tensor name -> blob, for shape dumping
    Backend prefBackend = (Backend)preferableBackend;
    Target prefTarget = (Target)preferableTarget;
    // Human-readable name for a backend id; unrecognized ids map to "Unknown".
    auto GetBackendName = [] (int backendId) {
        std::string backend = "Unknown";
        switch (backendId) {
            case DNN_BACKEND_DEFAULT: backend = "DEFAULT"; break;
#if CV_VERSION_MAJOR <= 4
            case DNN_BACKEND_HALIDE: backend = "HALIDE"; break;
#endif
            case DNN_BACKEND_INFERENCE_ENGINE: // fallthru
            case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: // fallthru
            case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "OpenVINO"; break;
            case DNN_BACKEND_OPENCV: backend = "OCV"; break;
            case DNN_BACKEND_VKCOM: backend = "VULKAN"; break;
            case DNN_BACKEND_CUDA: backend = "CUDA"; break;
            case DNN_BACKEND_WEBNN: backend = "WEBNN"; break;
            case DNN_BACKEND_TIMVX: backend = "TIMVX"; break;
            case DNN_BACKEND_CANN: backend = "CANN"; break;
        }
        return backend;
    };
    // Human-readable name for a target id; unrecognized ids map to "Unknown".
    auto GetTargetName = [] (int targetId) {
        std::string target = "Unknown";
        switch (targetId) {
            case DNN_TARGET_CPU: target = "CPU"; break;
            case DNN_TARGET_OPENCL: target = "OCL"; break;
            case DNN_TARGET_OPENCL_FP16: target = "OCL_FP16"; break;
            case DNN_TARGET_MYRIAD: target = "MYRIAD"; break;
            case DNN_TARGET_VULKAN: target = "VULKAN"; break;
            case DNN_TARGET_FPGA: target = "FPGA"; break;
            case DNN_TARGET_CUDA: target = "CUDA"; break;
            case DNN_TARGET_CUDA_FP16: target = "CUDA_FP16"; break;
            case DNN_TARGET_HDDL: target = "HDDL"; break;
            case DNN_TARGET_NPU: target = "NPU"; break;
            case DNN_TARGET_CPU_FP16: target = "CPU_FP16"; break;
        }
        return target;
    };
    const int num_indent_spaces = 2;
    std::string indent_spaces(num_indent_spaces, ' ');
    out << "producer_name: \"opencv dnn\"\n"
        << "producer_version: \"" << getVersionString() << "\"\n"
        << "graph {\n";
    // Add nodes, inputs and outputs
    for (std::map<int, LayerData>::const_iterator iter = map.begin(); iter != map.end(); iter++) {
        auto &ld = iter->second;
        if (ld.id == 0) {
            // The special net-input layer (id 0): each output blob is a graph input.
            // `i` stays int since it is printed with %d; cast the size for comparison.
            for (int i = 0; i < (int)ld.outputBlobs.size(); i++) {
                const auto &name = netInputLayer->outNames.empty() ? cv::format("%s_%d", ld.name.c_str(), i) : netInputLayer->outNames[i];
                out << indent_spaces << "input {\n"
                    << indent_spaces << format(" name: \"%s\"\n", name.c_str());
                // Add shape
                if (!ld.outputBlobs.empty()) {
                    dumpTensorToString(out, ld.outputBlobs[i], num_indent_spaces + 2);
                }
                out << indent_spaces << "}\n"; // input{}
            }
        } else if (ld.consumers.size() == 0) {
            // A layer without consumers is a graph output.
            out << indent_spaces << "output {\n"
                << indent_spaces << format(" name: \"%s\"\n", ld.name.c_str());
            // Add shape
            if (!ld.outputBlobs.empty()) {
                dumpTensorToString(out, ld.outputBlobs.front(), num_indent_spaces + 2);
            }
            out << indent_spaces << "}\n"; // output{}
        } else {
            out << indent_spaces << "node {\n";
            const auto &name = ld.name;
            const auto &op_type = "cv::dnn::" + ld.type;
            std::vector<std::string> inputs, outputs;
            // Collect names of inputs ("input_name" instead of "name" to avoid
            // shadowing the node name above).
            for (size_t i = 0; i < ld.inputBlobsId.size(); i++) {
                int lid = ld.inputBlobsId[i].lid;
                int oid = ld.inputBlobsId[i].oid;
                std::string input_name;
                if (lid == 0) {
                    input_name = netInputLayer->outNames.empty() ? cv::format("%s_%d", ld.name.c_str(), oid) : netInputLayer->outNames[oid];
                } else {
                    input_name = format("%s_output%d", map.find(lid)->second.name.c_str(), oid);
                    // Remember intermediate tensors so their shapes can be dumped
                    // as value_info after the node list.
                    if (!ld.inputBlobs.empty()) {
                        value_info.insert({input_name, ld.inputBlobs[i]});
                    }
                }
                inputs.push_back(input_name);
            }
            // Collect names of outputs.
            for (size_t i = 0; i < ld.consumers.size(); i++) {
                int lid = ld.consumers[i].lid;
                const auto &layer_output_layer = map.find(lid)->second;
                std::string output_name;
                if (layer_output_layer.consumers.size() == 0) {
                    // Consumer is a graph output: reuse its name so edges connect.
                    output_name = layer_output_layer.name;
                } else {
                    output_name = format("%s_output%zu", ld.name.c_str(), i);
                }
                outputs.push_back(output_name);
            }
            const auto &params = ld.params;
            // Collect backend and target: fall back to OpenCV/CPU when the layer
            // has no node for the preferable backend or no layer instance yet.
            const Backend backend = ld.backendNodes.find(prefBackend) == ld.backendNodes.end() ? DNN_BACKEND_OPENCV : prefBackend;
            const std::string backend_name = GetBackendName(backend);
            const Target target = ld.layerInstance.empty() ? DNN_TARGET_CPU : (Target)(ld.layerInstance->preferableTarget);
            const std::string target_name = GetTargetName(target);
            dumpLayerToString(out, inputs, outputs, name, op_type, params, backend_name, target_name, num_indent_spaces + 2);
            out << indent_spaces << "}\n"; // node{}
        }
    }
    // Add value_info (shapes of intermediate tensors collected above).
    for (std::map<String, Mat*>::const_iterator iter = value_info.begin(); iter != value_info.end(); iter++) {
        out << indent_spaces << "value_info {\n"
            << indent_spaces << format(" name: \"%s\"\n", iter->first.c_str());
        dumpTensorToString(out, *(iter->second), num_indent_spaces + 2);
        out << indent_spaces << "}\n"; // value_info{}
    }
    out << "}\n"; // graph{}
    // Add preferable backend and target as metadata.
    // Fix: terminate the key/value lines with '\n' so the textproto stays
    // well-formed and readable; previously key, value and the closing brace
    // were fused onto a single line.
    out << "metadata_props {\n";
    out << indent_spaces << format("key: \"%s\"\n", "Preferable Backend")
        << indent_spaces << format("value: \"%s\"\n", GetBackendName(prefBackend).c_str());
    out << "}\n"; // metadata_props{}
    out << "metadata_props {\n";
    out << indent_spaces << format("key: \"%s\"\n", "Preferable Target")
        << indent_spaces << format("value: \"%s\"\n", GetTargetName(prefTarget).c_str());
    out << "}\n"; // metadata_props{}
    return out.str();
}
void Net::Impl::dumpNetworkToFile() const void Net::Impl::dumpNetworkToFile() const
{ {
#ifndef OPENCV_DNN_DISABLE_NETWORK_AUTO_DUMP #ifndef OPENCV_DNN_DISABLE_NETWORK_AUTO_DUMP
string dumpFileNameBase = getDumpFileNameBase(); string dumpFileNameBase = getDumpFileNameBase();
string dumpFileName = dumpFileNameBase + ".dot"; string dumpFileName = dumpFileNameBase + ".pbtxt";
try try
{ {
string dumpStr = dump(); string dumpStr = dumpToPbtxt();
std::ofstream out(dumpFileName.c_str(), std::ios::out | std::ios::binary); std::ofstream out(dumpFileName.c_str(), std::ios::out | std::ios::binary);
out << dumpStr; out << dumpStr;
} }

@ -278,6 +278,7 @@ struct Net::Impl : public detail::NetImplBase
AsyncArray getBlobAsync(String outputName); AsyncArray getBlobAsync(String outputName);
string dump(bool forceAllocation = false) const; string dump(bool forceAllocation = false) const;
string dumpToPbtxt(bool forceAllocation = false) const;
void dumpNetworkToFile() const; void dumpNetworkToFile() const;

Loading…
Cancel
Save