avfilter/dnn: add DLT prefix for enum DNNLayerType to avoid potential conflicts

and also rename CONV to DLT_CONV2D, a more descriptive name for a 2D convolution layer

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
parent dd01947397
commit b78dc27bba
 libavfilter/dnn/dnn_backend_native.c | 25
 libavfilter/dnn/dnn_backend_native.h | 12
 libavfilter/dnn/dnn_backend_tf.c     | 10
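The point of the DLT_ prefix (presumably short for "DNN Layer Type") is that unscoped enumerator names such as INPUT, CONV or MAXIMUM land in the ordinary identifier namespace of every translation unit that includes dnn_backend_native.h, so they can collide with macros or enumerators from unrelated headers. A minimal sketch of the situation after the rename; ExampleOtherType and its members are hypothetical, for illustration only:

/* After this patch the DNN layer types carry a DLT_ prefix, so another
 * header is free to define its own INPUT/CONV identifiers in the same
 * translation unit without redeclaration errors. */
typedef enum {
    DLT_INPUT = 0,
    DLT_CONV2D = 1,
    DLT_DEPTH_TO_SPACE = 2,
    DLT_MIRROR_PAD = 3,
    DLT_MAXIMUM = 4
} DNNLayerType;

/* Hypothetical third-party enum, for illustration only: */
typedef enum { INPUT = 0, CONV = 1 } ExampleOtherType;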

--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -188,8 +188,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
     for (layer = 0; layer < network->layers_num; ++layer){
         layer_type = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
+        network->layers[layer].type = layer_type;
         switch (layer_type){
-        case CONV:
+        case DLT_CONV2D:
             conv_params = av_malloc(sizeof(ConvolutionalParams));
             if (!conv_params){
                 avio_closep(&model_file_context);
@@ -231,10 +232,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
             network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
             network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
             dnn_size += 8;
-            network->layers[layer].type = CONV;
             network->layers[layer].params = conv_params;
             break;
-        case DEPTH_TO_SPACE:
+        case DLT_DEPTH_TO_SPACE:
             depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
             if (!depth_to_space_params){
                 avio_closep(&model_file_context);
@@ -246,10 +246,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
             network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
             network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
             dnn_size += 8;
-            network->layers[layer].type = DEPTH_TO_SPACE;
             network->layers[layer].params = depth_to_space_params;
             break;
-        case MIRROR_PAD:
+        case DLT_MIRROR_PAD:
             pad_params = av_malloc(sizeof(LayerPadParams));
             if (!pad_params){
                 avio_closep(&model_file_context);
@@ -266,10 +265,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
             network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
             network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
             dnn_size += 8;
-            network->layers[layer].type = MIRROR_PAD;
             network->layers[layer].params = pad_params;
             break;
-        case MAXIMUM:
+        case DLT_MAXIMUM:
             maximum_params = av_malloc(sizeof(*maximum_params));
             if (!maximum_params){
                 avio_closep(&model_file_context);
@@ -278,7 +276,6 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
             }
             maximum_params->val.u32 = avio_rl32(model_file_context);
             dnn_size += 4;
-            network->layers[layer].type = MAXIMUM;
             network->layers[layer].params = maximum_params;
             network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
             network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
@@ -347,27 +344,27 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
     for (layer = 0; layer < network->layers_num; ++layer){
         switch (network->layers[layer].type){
-        case CONV:
+        case DLT_CONV2D:
             conv_params = (ConvolutionalParams *)network->layers[layer].params;
             convolve(network->operands, network->layers[layer].input_operand_indexes,
                      network->layers[layer].output_operand_index, conv_params);
             break;
-        case DEPTH_TO_SPACE:
+        case DLT_DEPTH_TO_SPACE:
             depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
             depth_to_space(network->operands, network->layers[layer].input_operand_indexes,
                            network->layers[layer].output_operand_index, depth_to_space_params->block_size);
             break;
-        case MIRROR_PAD:
+        case DLT_MIRROR_PAD:
             pad_params = (LayerPadParams *)network->layers[layer].params;
             dnn_execute_layer_pad(network->operands, network->layers[layer].input_operand_indexes,
                                   network->layers[layer].output_operand_index, pad_params);
             break;
-        case MAXIMUM:
+        case DLT_MAXIMUM:
             maximum_params = (DnnLayerMaximumParams *)network->layers[layer].params;
             dnn_execute_layer_maximum(network->operands, network->layers[layer].input_operand_indexes,
                                       network->layers[layer].output_operand_index, maximum_params);
             break;
-        case INPUT:
+        case DLT_INPUT:
             return DNN_ERROR;
         }
     }
@@ -408,7 +405,7 @@ void ff_dnn_free_model_native(DNNModel **model)
     {
         network = (ConvolutionalNetwork *)(*model)->model;
         for (layer = 0; layer < network->layers_num; ++layer){
-            if (network->layers[layer].type == CONV){
+            if (network->layers[layer].type == DLT_CONV2D){
                 conv_params = (ConvolutionalParams *)network->layers[layer].params;
                 av_freep(&conv_params->kernel);
                 av_freep(&conv_params->biases);

--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -30,7 +30,17 @@
 #include "../dnn_interface.h"
 #include "libavformat/avio.h"
 
-typedef enum {INPUT = 0, CONV = 1, DEPTH_TO_SPACE = 2, MIRROR_PAD = 3, MAXIMUM = 4} DNNLayerType;
+/**
+ * the enum value of DNNLayerType should not be changed,
+ * the same values are used in convert_from_tensorflow.py
+ */
+typedef enum {
+    DLT_INPUT = 0,
+    DLT_CONV2D = 1,
+    DLT_DEPTH_TO_SPACE = 2,
+    DLT_MIRROR_PAD = 3,
+    DLT_MAXIMUM = 4
+} DNNLayerType;
 
 typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_INPUT} DNNOperandType;
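Because ff_dnn_load_model_native() now stores the value read from the model file straight into network->layers[layer].type (first hunk above), the numeric enum values are effectively part of the native model file format: convert_from_tensorflow.py emits the same integers, and the loader reads them back with avio_rl32(). A minimal sketch of a range check a loader could apply before trusting that value; the helper is hypothetical, not part of this patch, and assumes the DNNLayerType enum above is in scope:

#include <stdint.h>

/* Hypothetical helper, for illustration only: check that a layer-type
 * value read from a model file maps to a known DNNLayerType before it
 * is stored into network->layers[layer].type. */
static int layer_type_is_known(int32_t v)
{
    return v >= DLT_INPUT && v <= DLT_MAXIMUM;
}

A loader could call this on the result of avio_rl32() and abort loading on an unknown value instead of carrying an out-of-range type into the later switch statements.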

--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -499,22 +499,22 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
     for (layer = 0; layer < conv_network->layers_num; ++layer){
         switch (conv_network->layers[layer].type){
-        case INPUT:
+        case DLT_INPUT:
             layer_add_res = DNN_SUCCESS;
             break;
-        case CONV:
+        case DLT_CONV2D:
             layer_add_res = add_conv_layer(tf_model, transpose_op, &op,
                                            (ConvolutionalParams *)conv_network->layers[layer].params, layer);
             break;
-        case DEPTH_TO_SPACE:
+        case DLT_DEPTH_TO_SPACE:
             layer_add_res = add_depth_to_space_layer(tf_model, &op,
                                                      (DepthToSpaceParams *)conv_network->layers[layer].params, layer);
             break;
-        case MIRROR_PAD:
+        case DLT_MIRROR_PAD:
             layer_add_res = add_pad_layer(tf_model, &op,
                                           (LayerPadParams *)conv_network->layers[layer].params, layer);
             break;
-        case MAXIMUM:
+        case DLT_MAXIMUM:
             layer_add_res = add_maximum_layer(tf_model, &op,
                                               (DnnLayerMaximumParams *)conv_network->layers[layer].params, layer);
             break;
