[Enhancement]: make deployment tools more friendly to use (#5280)

* first commit

* convert with wrapper

* update doc

* remove std and mean from args

* add deprecation warnings

* add comments and deprecated warnings

Co-authored-by: q.yao <yaoqian@sensetime.com>
Author: RunningLeon
commit 5ec72728a4 (parent 49b20ac420)
1. docs/tutorials/onnx2tensorrt.md (16 changed lines)
2. docs/tutorials/pytorch2onnx.md (8 changed lines)
3. mmdet/core/export/model_wrappers.py (30 changed lines)
4. mmdet/core/export/pytorch2onnx.py (9 changed lines)
5. tools/deployment/onnx2tensorrt.py (232 changed lines)
6. tools/deployment/pytorch2onnx.py (189 changed lines)
7. tools/deployment/test.py (8 changed lines)

@@ -25,14 +25,13 @@
```bash
python tools/deployment/onnx2tensorrt.py \
${CONFIG} \
${MODEL} \
--trt-file ${TRT_FILE} \
--input-img ${INPUT_IMAGE_PATH} \
--shape ${IMAGE_SHAPE} \
--shape ${INPUT_IMAGE_SHAPE} \
--min-shape ${MIN_IMAGE_SHAPE} \
--max-shape ${MAX_IMAGE_SHAPE} \
--mean ${IMAGE_MEAN} \
--std ${IMAGE_STD} \
--dataset ${DATASET_NAME} \
--workspace-size {WORKSPACE_SIZE} \
--show \
--verify \
@@ -40,30 +39,27 @@ python tools/deployment/onnx2tensorrt.py \
Description of all arguments:
- `config` : The path of a model config file.
- `model` : The path of an ONNX model file.
- `--trt-file`: The path of the output TensorRT engine file. If not specified, it will be set to `tmp.trt`.
- `--input-img` : The path of an input image for tracing and conversion. By default, it will be set to `demo/demo.jpg`.
- `--shape`: The height and width of model input. If not specified, it will be set to `400 600`.
- `--min-shape`: The minimum height and width of model input. If not specified, it will be set to the same as `--shape`.
- `--max-shape`: The maximum height and width of model input. If not specified, it will be set to the same as `--shape`.
- `--mean` : Three mean values for the input image. If not specified, it will be set to `123.675 116.28 103.53`.
- `--std` : Three std values for the input image. If not specified, it will be set to `58.395 57.12 57.375`.
- `--dataset` : The dataset name for the input model. If not specified, it will be set to `coco`.
- `--workspace-size` : The required GPU workspace size in GiB to build the TensorRT engine. If not specified, it will be set to `1` GiB.
- `--show`: Determines whether to show the outputs of the model. If not specified, it will be set to `False`.
- `--verify`: Determines whether to verify the correctness of models between ONNXRuntime and TensorRT. If not specified, it will be set to `False`.
- `--to-rgb`: Determines whether to convert the input image to RGB mode. If not specified, it will be set to `True`.
- `--verbose`: Determines whether to print logging messages. It's useful for debugging. If not specified, it will be set to `False`.
Example:
```bash
python tools/deployment/onnx2tensorrt.py \
configs/retinanet/retinanet_r50_fpn_1x_coco.py \
checkpoints/retinanet_r50_fpn_1x_coco.onnx \
--trt-file checkpoints/retinanet_r50_fpn_1x_coco.trt \
--input-img demo/demo.jpg \
--shape 400 600 \
--mean 123.675 116.28 103.53 \
--std 58.395 57.12 57.375 \
--show \
--verify \
```
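The `--min-shape`/`--shape`/`--max-shape` triple becomes a TensorRT optimization profile. Below is a minimal sketch of what the tool does internally, mirroring the `opt_shape_dict` built in `tools/deployment/onnx2tensorrt.py`; the paths and shapes are illustrative assumptions:

```python
# Sketch only: paths and shapes are illustrative assumptions.
import onnx
from mmcv.tensorrt import onnx2trt, save_trt_engine

onnx_model = onnx.load('checkpoints/retinanet_r50_fpn_1x_coco.onnx')
# [min, opt, max] shapes for the single 'input' binding
opt_shape_dict = {
    'input': [(1, 3, 320, 320),   # --min-shape 320 320
              (1, 3, 400, 600),   # --shape 400 600
              (1, 3, 608, 608)]   # --max-shape 608 608
}
trt_engine = onnx2trt(
    onnx_model,
    opt_shape_dict,
    fp16_mode=False,
    max_workspace_size=1 << 30)  # --workspace-size 1 (GiB)
save_trt_engine(trt_engine, 'checkpoints/retinanet_r50_fpn_1x_coco.trt')
```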

@@ -39,9 +39,6 @@ python tools/deployment/pytorch2onnx.py \
--output-file ${OUTPUT_FILE} \
--input-img ${INPUT_IMAGE_PATH} \
--shape ${IMAGE_SHAPE} \
--mean ${IMAGE_MEAN} \
--std ${IMAGE_STD} \
--dataset ${DATASET_NAME} \
--test-img ${TEST_IMAGE_PATH} \
--opset-version ${OPSET_VERSION} \
--cfg-options ${CFG_OPTIONS}
@@ -58,9 +55,6 @@ python tools/deployment/pytorch2onnx.py \
- `--output-file`: The path of the output ONNX model. If not specified, it will be set to `tmp.onnx`.
- `--input-img`: The path of an input image for tracing and conversion. By default, it will be set to `tests/data/color.jpg`.
- `--shape`: The height and width of input tensor to the model. If not specified, it will be set to `800 1216`.
- `--mean` : Three mean values for the input image. If not specified, it will be set to `123.675 116.28 103.53`.
- `--std` : Three std values for the input image. If not specified, it will be set to `58.395 57.12 57.375`.
- `--dataset` : The dataset name for the input model. If not specified, it will be set to `coco`.
- `--test-img` : The path of an image to verify the exported ONNX model. By default, it will be set to `None`, meaning it will use `--input-img` for verification.
- `--opset-version` : The opset version of ONNX. If not specified, it will be set to `11`.
- `--dynamic-export`: Determines whether to export ONNX model with dynamic input and output shapes. If not specified, it will be set to `False`.
@@ -79,8 +73,6 @@ python tools/deployment/pytorch2onnx.py \
--input-img demo/demo.jpg \
--test-img tests/data/color.jpg \
--shape 608 608 \
--mean 0 0 0 \
--std 255 255 255 \
--show \
--verify \
--dynamic-export \
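Since `--mean`, `--std` and `--dataset` are now derived from the config, the values the exporter will use come from the `Normalize` transform inside `test_pipeline`. A small sketch for inspecting them; the config path is an illustrative assumption:

```python
# Sketch: print the normalization the exporter derives from the config.
from mmcv import Config

cfg = Config.fromfile('configs/yolo/yolov3_d53_mstrain-608_273e_coco.py')
for step in cfg.test_pipeline:
    if 'transforms' in step:
        norm = [t for t in step['transforms'] if t['type'] == 'Normalize'][0]
        print(norm['mean'], norm['std'], norm['to_rgb'])
```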

@@ -151,22 +151,32 @@ class ONNXRuntimeDetector(DeployBaseDetector):
class TensorRTDetector(DeployBaseDetector):
"""Wrapper for detector's inference with TensorRT."""
def __init__(self, engine_file, class_names, device_id, output_names):
def __init__(self, engine_file, class_names, device_id, output_names=None):
super(TensorRTDetector, self).__init__(class_names, device_id)
warnings.warn('`output_names` is deprecated and will be removed in '
'future releases.')
from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin
try:
from mmcv.tensorrt import TRTWraper
load_tensorrt_plugin()
except (ImportError, ModuleNotFoundError):
raise RuntimeError(
'Please install TensorRT: https://mmcv.readthedocs.io/en/latest/tensorrt_plugin.html#how-to-build-tensorrt-plugins-in-mmcv' # noqa
)
self.output_names = output_names
self.model = TRTWraper(engine_file, ['input'], output_names)
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with TensorRT from source.')
output_names = ['dets', 'labels']
model = TRTWraper(engine_file, ['input'], output_names)
with_masks = False
# if TensorRT has totally 4 inputs/outputs, then
# the detector should have `mask` output.
if len(model.engine) == 4:
model.output_names = output_names + ['masks']
with_masks = True
self.model = model
self.with_masks = with_masks
def forward_test(self, imgs, img_metas, **kwargs):
input_data = imgs[0]
input_data = imgs[0].contiguous()
with torch.cuda.device(self.device_id), torch.no_grad():
outputs = self.model({'input': input_data})
outputs = [outputs[name] for name in self.output_names]
outputs = [outputs[name] for name in self.model.output_names]
outputs = [out.detach().cpu().numpy() for out in outputs]
return outputs
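With `output_names` optional and mask support inferred from the engine's binding count, the wrapper now needs only an engine file, class names and a device id. A hedged usage sketch follows; the engine path and class names are placeholders, not values from this commit:

```python
# Usage sketch of the simplified wrapper; requires TensorRT and a
# previously converted engine file.
from mmdet.core.export.model_wrappers import TensorRTDetector

trt_detector = TensorRTDetector(
    'checkpoints/retinanet_r50_fpn_1x_coco.trt',  # placeholder path
    class_names=('person', 'bicycle', 'car'),  # usually dataset.CLASSES
    device_id=0)
# with_masks was inferred in __init__ from len(model.engine), so callers
# no longer pass output_names for mask models.
print(trt_detector.with_masks)
```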

@@ -90,7 +90,14 @@ def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
# build the model
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
load_checkpoint(model, checkpoint_path, map_location='cpu')
checkpoint = load_checkpoint(model, checkpoint_path, map_location='cpu')
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
from mmdet.datasets import DATASETS
dataset = DATASETS.get(cfg.data.test['type'])
assert (dataset is not None)
model.CLASSES = dataset.CLASSES
model.cpu().eval()
return model
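The fallback added here resolves class names from the dataset registry whenever the checkpoint carries no `CLASSES` meta. A small sketch of that lookup, assuming the test set type is `CocoDataset`:

```python
# Sketch of the CLASSES fallback, assuming cfg.data.test['type'] is
# 'CocoDataset'.
from mmdet.datasets import DATASETS

dataset = DATASETS.get('CocoDataset')
assert dataset is not None
classes = dataset.CLASSES  # the 80 COCO category names
```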

@@ -1,18 +1,18 @@
import argparse
import os
import os.path as osp
import warnings
import numpy as np
import onnx
import onnxruntime as ort
import torch
from mmcv.ops import get_onnxruntime_op_path
from mmcv.tensorrt import (TRTWraper, is_tensorrt_plugin_loaded, onnx2trt,
save_trt_engine)
from mmcv import Config
from mmcv.tensorrt import is_tensorrt_plugin_loaded, onnx2trt, save_trt_engine
from mmdet.core import get_classes
from mmdet.core.export import preprocess_example_input
from mmdet.core.visualization.image import imshow_det_bboxes
from mmdet.core.export.model_wrappers import (ONNXRuntimeDetector,
TensorRTDetector)
from mmdet.datasets import DATASETS
def get_GiB(x: int):
@@ -25,21 +25,22 @@ def onnx2tensorrt(onnx_file,
input_config,
verify=False,
show=False,
dataset='coco',
workspace_size=1,
verbose=False):
import tensorrt as trt
onnx_model = onnx.load(onnx_file)
input_shape = input_config['input_shape']
max_shape = input_config['max_shape']
min_shape = input_config['min_shape']
opt_shape = input_config['opt_shape']
fp16_mode = False
# create trt engine and wrapper
opt_shape_dict = {'input': [input_shape, input_shape, max_shape]}
opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]}
max_workspace_size = get_GiB(workspace_size)
trt_engine = onnx2trt(
onnx_model,
opt_shape_dict,
log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
fp16_mode=False,
fp16_mode=fp16_mode,
max_workspace_size=max_workspace_size)
save_dir, _ = osp.split(trt_file)
if save_dir:
@@ -48,79 +49,76 @@ def onnx2tensorrt(onnx_file,
print(f'Successfully created TensorRT engine: {trt_file}')
if verify:
# prepare input
one_img, one_meta = preprocess_example_input(input_config)
input_img_cpu = one_img.detach().cpu().numpy()
input_img_cuda = one_img.cuda()
img = one_meta['show_img']
# Get results from ONNXRuntime
ort_custom_op_path = get_onnxruntime_op_path()
session_options = ort.SessionOptions()
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_file, session_options)
output_names = [_.name for _ in sess.get_outputs()]
ort_outputs = sess.run(None, {
'input': input_img_cpu,
})
with_mask = len(output_names) == 3
ort_outputs = [_.squeeze(0) for _ in ort_outputs]
ort_dets, ort_labels = ort_outputs[:2]
ort_masks = ort_outputs[2] if with_mask else None
ort_shapes = [_.shape for _ in ort_outputs]
print(f'ONNX Runtime output names: {output_names}, \
output shapes: {ort_shapes}')
# Get results from TensorRT
trt_model = TRTWraper(trt_file, ['input'], output_names)
img_list, img_meta_list = [one_img], [[one_meta]]
img_list = [_.cuda().contiguous() for _ in img_list]
# wrap ONNX and TensorRT model
onnx_model = ONNXRuntimeDetector(onnx_file, CLASSES, device_id=0)
trt_model = TensorRTDetector(trt_file, CLASSES, device_id=0)
# inference with wrapped model
with torch.no_grad():
trt_outputs = trt_model({'input': input_img_cuda})
trt_outputs = [
trt_outputs[_].detach().cpu().numpy().squeeze(0)
for _ in output_names
]
trt_dets, trt_labels = trt_outputs[:2]
trt_shapes = [_.shape for _ in trt_outputs]
print(f'TensorRT output names: {output_names}, \
output shapes: {trt_shapes}')
trt_masks = trt_outputs[2] if with_mask else None
if trt_masks is not None and trt_masks.dtype != np.bool:
trt_masks = trt_masks >= 0.5
ort_masks = ort_masks >= 0.5
# Show detection outputs
onnx_results = onnx_model(
img_list, img_metas=img_meta_list, return_loss=False)[0]
trt_results = trt_model(
img_list, img_metas=img_meta_list, return_loss=False)[0]
if show:
CLASSES = get_classes(dataset)
score_thr = 0.35
imshow_det_bboxes(
img.copy(),
trt_dets,
trt_labels,
segms=trt_masks,
class_names=CLASSES,
score_thr=score_thr,
win_name='TensorRT')
imshow_det_bboxes(
img.copy(),
ort_dets,
ort_labels,
segms=ort_masks,
class_names=CLASSES,
score_thr=score_thr,
win_name='ONNXRuntime')
# Compare results
np.testing.assert_allclose(ort_dets, trt_dets, rtol=1e-03, atol=1e-05)
np.testing.assert_allclose(ort_labels, trt_labels)
out_file_ort, out_file_trt = None, None
else:
out_file_ort, out_file_trt = 'show-ort.png', 'show-trt.png'
show_img = one_meta['show_img']
score_thr = 0.3
onnx_model.show_result(
show_img,
onnx_results,
score_thr=score_thr,
show=True,
win_name='ONNXRuntime',
out_file=out_file_ort)
trt_model.show_result(
show_img,
trt_results,
score_thr=score_thr,
show=True,
win_name='TensorRT',
out_file=out_file_trt)
with_mask = trt_model.with_masks
# compare a part of result
if with_mask:
np.testing.assert_allclose(
ort_masks, trt_masks, rtol=1e-03, atol=1e-05)
print('The numerical values are the same ' +
'between ONNXRuntime and TensorRT')
compare_pairs = list(zip(onnx_results, trt_results))
else:
compare_pairs = [(onnx_results, trt_results)]
err_msg = 'The numerical values are different between ONNXRuntime' + \
' and TensorRT, but it does not necessarily mean the' + \
' TensorRT engine is problematic.'
# check the numerical value
for onnx_res, trt_res in compare_pairs:
for o_res, t_res in zip(onnx_res, trt_res):
np.testing.assert_allclose(
o_res, t_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
print('The numerical values are the same between ONNXRuntime and TensorRT')
def parse_normalize_cfg(test_pipeline):
transforms = None
for pipeline in test_pipeline:
if 'transforms' in pipeline:
transforms = pipeline['transforms']
break
assert transforms is not None, 'Failed to find `transforms`'
norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize']
assert len(norm_config_li) == 1, '`norm_config` should only have one'
norm_config = norm_config_li[0]
return norm_config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMDetection models from ONNX to TensorRT')
parser.add_argument('config', help='test config file path')
parser.add_argument('model', help='Filename of input ONNX model')
parser.add_argument(
'--trt-file',
@@ -132,7 +130,11 @@ def parse_args():
parser.add_argument(
'--show', action='store_true', help='Whether to show output results')
parser.add_argument(
'--dataset', type=str, default='coco', help='Dataset name')
'--dataset',
type=str,
default='coco',
help='Dataset name. This argument is deprecated and will be \
removed in future releases.')
parser.add_argument(
'--verify',
action='store_true',
@@ -145,36 +147,46 @@ def parse_args():
parser.add_argument(
'--to-rgb',
action='store_false',
help='Feed model with RGB or BGR image. Default is RGB.')
help='Feed model with RGB or BGR image. Default is RGB. This \
argument is deprecated and will be removed in future releases.')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[400, 600],
help='Input size of the model')
parser.add_argument(
'--max-shape',
type=int,
nargs='+',
default=None,
help='Maximum input size of the model in TensorRT')
parser.add_argument(
'--mean',
type=float,
nargs='+',
default=[123.675, 116.28, 103.53],
help='Mean value used for preprocess input data')
help='Mean value used for preprocess input data. This argument \
is deprecated and will be removed in future releases.')
parser.add_argument(
'--std',
type=float,
nargs='+',
default=[58.395, 57.12, 57.375],
help='Variance value used for preprocess input data')
help='Variance value used for preprocess input data. \
This argument is deprecated and will be removed in future releases.')
parser.add_argument(
'--min-shape',
type=int,
nargs='+',
default=None,
help='Minimum input size of the model in TensorRT')
parser.add_argument(
'--max-shape',
type=int,
nargs='+',
default=None,
help='Maximum input size of the model in TensorRT')
parser.add_argument(
'--workspace-size',
type=int,
default=1,
help='Max workspace size in GiB')
args = parser.parse_args()
return args
@@ -183,38 +195,53 @@ if __name__ == '__main__':
assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
args = parse_args()
warnings.warn(
'Arguments like `--to-rgb`, `--mean`, `--std`, `--dataset` would be \
parsed directly from config file and are deprecated and will be \
removed in future releases.')
if not args.input_img:
args.input_img = osp.join(osp.dirname(__file__), '../demo/demo.jpg')
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (1, 3) + tuple(args.shape)
cfg = Config.fromfile(args.config)
def parse_shape(shape):
if len(shape) == 1:
shape = (1, 3, shape[0], shape[0])
elif len(shape) == 2:
shape = (1, 3) + tuple(shape)
else:
raise ValueError('invalid input shape')
return shape
if args.shape:
input_shape = parse_shape(args.shape)
else:
raise ValueError('invalid input shape')
img_scale = cfg.test_pipeline[1]['img_scale']
input_shape = (1, 3, img_scale[1], img_scale[0])
if not args.max_shape:
max_shape = input_shape
else:
if len(args.max_shape) == 1:
max_shape = (1, 3, args.max_shape[0], args.max_shape[0])
elif len(args.max_shape) == 2:
max_shape = (1, 3) + tuple(args.max_shape)
else:
raise ValueError('invalid input max_shape')
max_shape = parse_shape(args.max_shape)
assert len(args.mean) == 3
assert len(args.std) == 3
if not args.min_shape:
min_shape = input_shape
else:
min_shape = parse_shape(args.min_shape)
dataset = DATASETS.get(cfg.data.test['type'])
assert (dataset is not None)
CLASSES = dataset.CLASSES
normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
normalize_cfg = {'mean': args.mean, 'std': args.std, 'to_rgb': args.to_rgb}
input_config = {
'min_shape': min_shape,
'opt_shape': input_shape,
'max_shape': max_shape,
'input_shape': input_shape,
'input_path': args.input_img,
'normalize_cfg': normalize_cfg,
'max_shape': max_shape
'normalize_cfg': normalize_cfg
}
# Create TensorRT engine
onnx2tensorrt(
args.model,
@@ -222,6 +249,5 @@ if __name__ == '__main__':
input_config,
verify=args.verify,
show=args.show,
dataset=args.dataset,
workspace_size=args.workspace_size,
verbose=args.verbose)
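The new `parse_normalize_cfg` helper replaces the deprecated `--mean`/`--std` flags by reading the `Normalize` transform out of the config's `test_pipeline`. A small sketch of what it consumes and returns, using a trimmed illustrative pipeline:

```python
# Trimmed, illustrative test_pipeline; real configs carry more transforms.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(
                type='Normalize',
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True),
        ])
]
normalize_cfg = parse_normalize_cfg(test_pipeline)
# -> dict(type='Normalize', mean=[...], std=[...], to_rgb=True)
```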

@@ -1,31 +1,27 @@
import argparse
import os.path as osp
import warnings
from functools import partial
import numpy as np
import onnx
import onnxruntime as rt
import torch
from mmcv import DictAction
from mmcv import Config, DictAction
from mmdet.core.export import (build_model_from_cfg,
generate_inputs_and_wrap_model,
preprocess_example_input)
from mmdet.core.export import build_model_from_cfg, preprocess_example_input
from mmdet.core.export.model_wrappers import ONNXRuntimeDetector
def pytorch2onnx(config_path,
checkpoint_path,
def pytorch2onnx(model,
input_img,
input_shape,
normalize_cfg,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False,
normalize_cfg=None,
dataset='coco',
test_img=None,
do_simplify=False,
cfg_options=None,
dynamic_export=None):
input_config = {
@@ -33,13 +29,17 @@ def pytorch2onnx(config_path,
'input_path': input_img,
'normalize_cfg': normalize_cfg
}
# prepare original model and meta for verifying the onnx model
orig_model = build_model_from_cfg(
config_path, checkpoint_path, cfg_options=cfg_options)
# prepare input
one_img, one_meta = preprocess_example_input(input_config)
model, tensor_data = generate_inputs_and_wrap_model(
config_path, checkpoint_path, input_config, cfg_options=cfg_options)
img_list, img_meta_list = [one_img], [[one_meta]]
# replace original forward function
origin_forward = model.forward
model.forward = partial(
model.forward,
img_metas=img_meta_list,
return_loss=False,
rescale=False)
output_names = ['dets', 'labels']
if model.with_mask:
output_names.append('masks')
@@ -66,7 +66,7 @@ def pytorch2onnx(config_path,
torch.onnx.export(
model,
tensor_data,
img_list,
output_file,
input_names=[input_name],
output_names=output_names,
@@ -77,7 +77,7 @@ def pytorch2onnx(config_path,
opset_version=opset_version,
dynamic_axes=dynamic_axes)
model.forward = orig_model.forward
model.forward = origin_forward
# get the custom op path
ort_custom_op_path = ''
@@ -89,79 +89,74 @@ def pytorch2onnx(config_path,
you may have to build mmcv with ONNXRuntime from source.')
if do_simplify:
from mmdet import digit_version
import onnxsim
from mmdet import digit_version
min_required_version = '0.3.0'
assert digit_version(onnxsim.__version__) >= digit_version(
min_required_version
), f'Requires to install onnx-simplify>={min_required_version}'
input_dic = {'input': one_img.detach().cpu().numpy()}
input_dic = {'input': img_list[0].detach().cpu().numpy()}
onnxsim.simplify(
output_file, input_data=input_dic, custom_lib=ort_custom_op_path)
print(f'Successfully exported ONNX model: {output_file}')
if verify:
from mmdet.core import get_classes, bbox2result
from mmdet.apis import show_result_pyplot
model.CLASSES = get_classes(dataset)
num_classes = len(model.CLASSES)
# check by onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
# wrap onnx model
onnx_model = ONNXRuntimeDetector(output_file, model.CLASSES, 0)
if dynamic_export:
# scale up to test dynamic shape
h, w = [int((_ * 1.5) // 32 * 32) for _ in input_shape[2:]]
h, w = min(1344, h), min(1344, w)
input_config['input_shape'] = (1, 3, h, w)
if test_img is not None:
input_config['input_path'] = test_img
if test_img is None:
input_config['input_path'] = input_img
# prepare input once again
one_img, one_meta = preprocess_example_input(input_config)
tensor_data = [one_img]
img_list, img_meta_list = [one_img], [[one_meta]]
# get pytorch output
pytorch_results = model(tensor_data, [[one_meta]], return_loss=False)
pytorch_results = pytorch_results[0]
# get onnx output
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [
node.name for node in onnx_model.graph.initializer
]
net_feed_input = list(set(input_all) - set(input_initializer))
assert (len(net_feed_input) == 1)
session_options = rt.SessionOptions()
# register custom op for ONNX Runtime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
feed_input_img = one_img.detach().numpy()
pytorch_results = model(
img_list, img_metas=img_meta_list, return_loss=False,
rescale=True)[0]
img_list = [_.cuda().contiguous() for _ in img_list]
if dynamic_export:
# test batch with two input images
feed_input_img = np.vstack([feed_input_img, feed_input_img])
sess = rt.InferenceSession(output_file, session_options)
onnx_outputs = sess.run(None, {net_feed_input[0]: feed_input_img})
output_names = [_.name for _ in sess.get_outputs()]
output_shapes = [_.shape for _ in onnx_outputs]
print(f'ONNX Runtime output names: {output_names}, \
output shapes: {output_shapes}')
# get last image's outputs
onnx_outputs = [_[-1] for _ in onnx_outputs]
ort_dets, ort_labels = onnx_outputs[:2]
onnx_results = bbox2result(ort_dets, ort_labels, num_classes)
if model.with_mask:
segm_results = onnx_outputs[2]
if segm_results.dtype != np.bool:
segm_results = (segm_results * 255).astype(np.uint8)
cls_segms = [[] for _ in range(num_classes)]
for i in range(ort_dets.shape[0]):
cls_segms[ort_labels[i]].append(segm_results[i])
onnx_results = (onnx_results, cls_segms)
img_list = img_list + [_.flip(-1).contiguous() for _ in img_list]
img_meta_list = img_meta_list * 2
# get onnx output
onnx_results = onnx_model(
img_list, img_metas=img_meta_list, return_loss=False)[0]
# visualize predictions
score_thr = 0.3
if show:
show_result_pyplot(
model, one_meta['show_img'], pytorch_results, title='Pytorch')
show_result_pyplot(
model, one_meta['show_img'], onnx_results, title='ONNXRuntime')
out_file_ort, out_file_pt = None, None
else:
out_file_ort, out_file_pt = 'show-ort.png', 'show-pt.png'
show_img = one_meta['show_img']
model.show_result(
show_img,
pytorch_results,
score_thr=score_thr,
show=True,
win_name='PyTorch',
out_file=out_file_pt)
onnx_model.show_result(
show_img,
onnx_results,
score_thr=score_thr,
show=True,
win_name='ONNXRuntime',
out_file=out_file_ort)
# compare a part of result
if model.with_mask:
@@ -179,6 +174,19 @@ def pytorch2onnx(config_path,
print('The numerical values are the same between Pytorch and ONNX')
def parse_normalize_cfg(test_pipeline):
transforms = None
for pipeline in test_pipeline:
if 'transforms' in pipeline:
transforms = pipeline['transforms']
break
assert transforms is not None, 'Failed to find `transforms`'
norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize']
assert len(norm_config_li) == 1, '`norm_config` should only have one'
norm_config = norm_config_li[0]
return norm_config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMDetection models to ONNX')
@@ -194,7 +202,11 @@ def parse_args():
parser.add_argument(
'--test-img', type=str, default=None, help='Images for test')
parser.add_argument(
'--dataset', type=str, default='coco', help='Dataset name')
'--dataset',
type=str,
default='coco',
help='Dataset name. This argument is deprecated and will be removed \
in future releases.')
parser.add_argument(
'--verify',
action='store_true',
@@ -214,13 +226,15 @@ def parse_args():
type=float,
nargs='+',
default=[123.675, 116.28, 103.53],
help='mean value used for preprocess input data')
help='mean value used for preprocess input data. This argument \
is deprecated and will be removed in future releases.')
parser.add_argument(
'--std',
type=float,
nargs='+',
default=[58.395, 57.12, 57.375],
help='variance value used for preprocess input data')
help='variance value used for preprocess input data. '
'This argument is deprecated and will be removed in future releases.')
parser.add_argument(
'--cfg-options',
nargs='+',
@@ -241,38 +255,51 @@ def parse_args():
if __name__ == '__main__':
args = parse_args()
warnings.warn('Arguments like `--mean`, `--std`, `--dataset` would be \
parsed directly from config file and are deprecated and \
will be removed in future releases.')
assert args.opset_version == 11, 'MMDet only supports opset 11 now'
if not args.input_img:
args.input_img = osp.join(
osp.dirname(__file__), '../../tests/data/color.jpg')
try:
from mmcv.onnx.symbolic import register_extra_symbolics
except ModuleNotFoundError:
raise NotImplementedError('please update mmcv to version>=v1.0.4')
register_extra_symbolics(args.opset_version)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
if len(args.shape) == 1:
if args.shape is None:
img_scale = cfg.test_pipeline[1]['img_scale']
input_shape = (1, 3, img_scale[1], img_scale[0])
elif len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (1, 3) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
assert len(args.mean) == 3
assert len(args.std) == 3
# build the model and load checkpoint
model = build_model_from_cfg(args.config, args.checkpoint,
args.cfg_options)
normalize_cfg = {'mean': args.mean, 'std': args.std}
if not args.input_img:
args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg')
normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
# convert model to onnx file
pytorch2onnx(
args.config,
args.checkpoint,
model,
args.input_img,
input_shape,
normalize_cfg,
opset_version=args.opset_version,
show=args.show,
output_file=args.output_file,
verify=args.verify,
normalize_cfg=normalize_cfg,
dataset=args.dataset,
test_img=args.test_img,
do_simplify=args.simplify,
cfg_options=args.cfg_options,
dynamic_export=args.dynamic_export)
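Putting the reworked entry point together: the detector is built once with `build_model_from_cfg`, normalization is parsed from the config, and the call signature now starts with the model itself. A hedged end-to-end sketch (paths are illustrative assumptions):

```python
# End-to-end sketch of the new flow; config/checkpoint paths are
# illustrative, and parse_normalize_cfg is the helper defined above.
from mmcv import Config
from mmdet.core.export import build_model_from_cfg

config_path = 'configs/retinanet/retinanet_r50_fpn_1x_coco.py'
cfg = Config.fromfile(config_path)
model = build_model_from_cfg(
    config_path, 'checkpoints/retinanet_r50_fpn_1x_coco.pth')
pytorch2onnx(
    model,
    'demo/demo.jpg',
    (1, 3, 800, 1216),
    parse_normalize_cfg(cfg.test_pipeline),
    output_file='retinanet_r50_fpn_1x_coco.onnx',
    verify=True)
```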

@@ -113,14 +113,8 @@ def main():
args.model, class_names=dataset.CLASSES, device_id=0)
elif args.backend == 'tensorrt':
from mmdet.core.export.model_wrappers import TensorRTDetector
output_names = ['dets', 'labels']
if len(cfg.evaluation['metric']) == 2:
output_names.append('masks')
model = TensorRTDetector(
args.model,
class_names=dataset.CLASSES,
device_id=0,
output_names=output_names)
args.model, class_names=dataset.CLASSES, device_id=0)
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
