@@ -104,8 +104,12 @@ def l2norm(x, name):
 inp = tf.placeholder(dtype, [1, 300, 300, 3], 'data')
 data_bn = batch_norm(inp, 'data_bn')
 data_scale = scale(data_bn, 'data_scale')
-data_scale = tf.pad(data_scale, [[0, 0], [3, 3], [3, 3], [0, 0]])
+
+# Instead of tf.pad we use tf.space_to_batch_nd layers, which override the convolution's padding strategy with explicit numbers
+# data_scale = tf.pad(data_scale, [[0, 0], [3, 3], [3, 3], [0, 0]])
+data_scale = tf.space_to_batch_nd(data_scale, [1, 1], [[3, 3], [3, 3]], name='Pad')
+
 conv1_h = conv(data_scale, stride=2, pad='VALID', name='conv1_h')
 conv1_bn_h = batch_norm(conv1_h, 'conv1_bn_h')
 conv1_scale_h = scale(conv1_bn_h, 'conv1_scale_h')
 conv1_relu = tf.nn.relu(conv1_scale_h)
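With a unit block size, `tf.space_to_batch_nd` performs no batch rearrangement at all, so it reduces to plain zero padding of the two spatial dimensions; the replacement above therefore leaves the network numerically unchanged while exposing the padding amounts as explicit values that can be attached to the following `'VALID'` convolution. A minimal sketch of that equivalence (the tensor shape and names below are illustrative, not taken from the script):

```python
import numpy as np
import tensorflow as tf

# Illustrative check that space_to_batch_nd with block_shape [1, 1] equals tf.pad.
x = tf.placeholder(tf.float32, [1, 5, 5, 3], 'x')
padded = tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]])
# block_shape [1, 1] makes the space-to-batch rearrangement a no-op,
# leaving only the explicit zero padding of height and width.
stb = tf.space_to_batch_nd(x, [1, 1], [[3, 3], [3, 3]])

with tf.Session() as sess:
    data = np.random.standard_normal([1, 5, 5, 3]).astype(np.float32)
    a, b = sess.run([padded, stb], feed_dict={x: data})
    print(np.max(np.abs(a - b)))  # expected: 0.0
```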
@@ -133,8 +137,11 @@ layer_128_1_sum = layer_128_1_conv2 + layer_128_1_conv_expand_h
 layer_256_1_bn1 = batch_norm(layer_128_1_sum, 'layer_256_1_bn1')
 layer_256_1_scale1 = scale(layer_256_1_bn1, 'layer_256_1_scale1')
 layer_256_1_relu1 = tf.nn.relu(layer_256_1_scale1)
-layer_256_1_conv1 = tf.pad(layer_256_1_relu1, [[0, 0], [1, 1], [1, 1], [0, 0]])
+
+# layer_256_1_conv1 = tf.pad(layer_256_1_relu1, [[0, 0], [1, 1], [1, 1], [0, 0]])
+layer_256_1_conv1 = tf.space_to_batch_nd(layer_256_1_relu1, [1, 1], [[1, 1], [1, 1]], name='Pad_1')
+
 layer_256_1_conv1 = conv(layer_256_1_conv1, stride=2, pad='VALID', name='layer_256_1_conv1')
 layer_256_1_bn2 = batch_norm(layer_256_1_conv1, 'layer_256_1_bn2')
 layer_256_1_scale2 = scale(layer_256_1_bn2, 'layer_256_1_scale2')
 layer_256_1_relu2 = tf.nn.relu(layer_256_1_scale2)
@@ -160,8 +167,11 @@ fc7 = tf.nn.relu(last_scale_h, name='last_relu')
 conv6_1_h = conv(fc7, 'conv6_1_h', activ=tf.nn.relu)
 conv6_2_h = conv(conv6_1_h, stride=2, name='conv6_2_h', activ=tf.nn.relu)
 conv7_1_h = conv(conv6_2_h, 'conv7_1_h', activ=tf.nn.relu)
-conv7_2_h = tf.pad(conv7_1_h, [[0, 0], [1, 1], [1, 1], [0, 0]])
+
+# conv7_2_h = tf.pad(conv7_1_h, [[0, 0], [1, 1], [1, 1], [0, 0]])
+conv7_2_h = tf.space_to_batch_nd(conv7_1_h, [1, 1], [[1, 1], [1, 1]], name='Pad_2')
+
 conv7_2_h = conv(conv7_2_h, stride=2, pad='VALID', name='conv7_2_h', activ=tf.nn.relu)
 conv8_1_h = conv(conv7_2_h, pad='SAME', name='conv8_1_h', activ=tf.nn.relu)
 conv8_2_h = conv(conv8_1_h, pad='SAME', name='conv8_2_h', activ=tf.nn.relu)
 conv9_1_h = conv(conv8_2_h, 'conv9_1_h', activ=tf.nn.relu)
@@ -201,6 +211,7 @@ with tf.Session() as sess:
     inputData = np.random.standard_normal([1, 3, 300, 300]).astype(np.float32)

     cvNet.setInput(inputData)
+    cvNet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
     outDNN = cvNet.forward(out_nodes)

     outTF = sess.run([mbox_loc, mbox_conf_flatten], feed_dict={inp: inputData.transpose(0, 2, 3, 1)})
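The two forward passes above produce the detection tensors from OpenCV's DNN module and from TensorFlow on the same random input. A typical follow-up (not part of the diff; shapes are assumed to match pairwise) is an element-wise comparison:

```python
# Illustrative sanity check: the converted graph should reproduce TensorFlow's outputs.
for dnnOut, tfOut in zip(outDNN, outTF):
    diff = np.max(np.abs(dnnOut - tfOut.reshape(dnnOut.shape)))
    print('max abs difference:', diff)
```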
@@ -254,7 +265,7 @@ for i in reversed(range(len(graph_def.node))):
         del graph_def.node[i]
     for attr in ['T', 'data_format', 'Tshape', 'N', 'Tidx', 'Tdim',
                  'use_cudnn_on_gpu', 'Index', 'Tperm', 'is_training',
-                 'Tpaddings']:
+                 'Tpaddings', 'Tblock_shape', 'Tcrops']:
         if attr in graph_def.node[i].attr:
             del graph_def.node[i].attr[attr]

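`Tblock_shape` and `Tcrops` are type attributes carried by the new `SpaceToBatchND` nodes; like the other attributes in the list, they matter only to TensorFlow's own runtime, so stripping them keeps the resulting text graph minimal. A hedged sketch of how such a cleaned `graph_def` is usually written out afterwards (the output file name is an assumption, not taken from the script):

```python
# Assumed final step: serialize the pruned graph definition as a text protobuf
# that OpenCV can load together with the frozen weights.
tf.train.write_graph(graph_def, "", 'face_detector.pbtxt', as_text=True)
```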