# OpenMMLab Detection Toolbox and Benchmark
# https://mmdetection.readthedocs.io/

import copy
import os.path as osp

import mmcv
import numpy as np
import pytest
from mmcv.utils import build_from_cfg

from mmdet.datasets.builder import PIPELINES
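
# Each test below builds a transform from its config dict via
# `build_from_cfg(transform, PIPELINES)` and applies it to a `results` dict,
# the same dict-in/dict-out contract used by the mmdet data pipeline. The
# image is duplicated under 'img' and 'img2' (both listed in 'img_fields') so
# every registered image field can be checked for identical treatment.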


def test_resize():
    # test assertion if img_scale is a list
    with pytest.raises(AssertionError):
        transform = dict(
            type='Resize', img_scale=[1333, 800], keep_ratio=True)
        build_from_cfg(transform, PIPELINES)

    # test assertion if len(img_scale) > 1 while ratio_range is not None
    with pytest.raises(AssertionError):
        transform = dict(
            type='Resize',
            img_scale=[(1333, 800), (1333, 600)],
            ratio_range=(0.9, 1.1),
            keep_ratio=True)
        build_from_cfg(transform, PIPELINES)

    # test assertion for invalid multiscale_mode
    with pytest.raises(AssertionError):
        transform = dict(
            type='Resize',
            img_scale=[(1333, 800), (1333, 600)],
            keep_ratio=True,
            multiscale_mode='2333')
        build_from_cfg(transform, PIPELINES)

    transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True)
    resize_module = build_from_cfg(transform, PIPELINES)

    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    # Set initial values for default meta_keys
    results['pad_shape'] = img.shape
    results['scale_factor'] = 1.0
    results['img_fields'] = ['img', 'img2']

    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()

    results.pop('scale')
    transform = dict(
        type='Resize',
        img_scale=(1280, 800),
        multiscale_mode='value',
        keep_ratio=False)
    resize_module = build_from_cfg(transform, PIPELINES)
    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (800, 1280, 3)
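

# A minimal sketch, not part of the original tests: building a multi-scale
# Resize config should succeed when `multiscale_mode` is valid. 'value' is
# exercised above; 'range' (sampling a scale between the two endpoints, as in
# standard mmdet configs) is assumed here to also be accepted.
def test_resize_multiscale_sketch():
    for mode in ('value', 'range'):  # 'range' assumed valid, see note above
        transform = dict(
            type='Resize',
            img_scale=[(1333, 640), (1333, 800)],
            multiscale_mode=mode,
            keep_ratio=True)
        # should build without raising, unlike multiscale_mode='2333' above
        build_from_cfg(transform, PIPELINES)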


def test_flip():
    # test assertion for invalid flip_ratio
    with pytest.raises(AssertionError):
        transform = dict(type='RandomFlip', flip_ratio=1.5)
        build_from_cfg(transform, PIPELINES)

    # test assertion for invalid direction
    with pytest.raises(AssertionError):
        transform = dict(
            type='RandomFlip', flip_ratio=1, direction='horizonta')
        build_from_cfg(transform, PIPELINES)

    transform = dict(type='RandomFlip', flip_ratio=1)
    flip_module = build_from_cfg(transform, PIPELINES)

    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    # Set initial values for default meta_keys
    results['pad_shape'] = img.shape
    results['scale_factor'] = 1.0
    results['img_fields'] = ['img', 'img2']

    results = flip_module(results)
    assert np.equal(results['img'], results['img2']).all()

    # with flip_ratio=1, flipping a second time restores the original image
    flip_module = build_from_cfg(transform, PIPELINES)
    results = flip_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert np.equal(original_img, results['img']).all()


def test_pad():
    # test assertion if both size_divisor and size are None
    with pytest.raises(AssertionError):
        transform = dict(type='Pad')
        build_from_cfg(transform, PIPELINES)

    transform = dict(type='Pad', size_divisor=32)
    transform = build_from_cfg(transform, PIPELINES)

    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    # Set initial values for default meta_keys
    results['pad_shape'] = img.shape
    results['scale_factor'] = 1.0
    results['img_fields'] = ['img', 'img2']

    results = transform(results)
    assert np.equal(results['img'], results['img2']).all()
    # original img already divisible by 32
    assert np.equal(results['img'], original_img).all()
    img_shape = results['img'].shape
    assert img_shape[0] % 32 == 0
    assert img_shape[1] % 32 == 0

    resize_transform = dict(
        type='Resize', img_scale=(1333, 800), keep_ratio=True)
    resize_module = build_from_cfg(resize_transform, PIPELINES)
    results = resize_module(results)
    results = transform(results)
    img_shape = results['img'].shape
    assert np.equal(results['img'], results['img2']).all()
    assert img_shape[0] % 32 == 0
    assert img_shape[1] % 32 == 0


def test_normalize():
    img_norm_cfg = dict(
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        to_rgb=True)
    transform = dict(type='Normalize', **img_norm_cfg)
    transform = build_from_cfg(transform, PIPELINES)

    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    # Set initial values for default meta_keys
    results['pad_shape'] = img.shape
    results['scale_factor'] = 1.0
    results['img_fields'] = ['img', 'img2']

    results = transform(results)
    assert np.equal(results['img'], results['img2']).all()

    # mmcv.imread loads BGR; to_rgb=True converts to RGB before normalizing
    mean = np.array(img_norm_cfg['mean'])
    std = np.array(img_norm_cfg['std'])
    converted_img = (original_img[..., ::-1] - mean) / std
    assert np.allclose(results['img'], converted_img)
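

# A hedged usage sketch, not part of the original tests: in a typical mmdet
# config the transforms tested above are chained into a single pipeline and
# applied to the same `results` dict. `Compose` is assumed to be importable
# from mmdet.datasets.pipelines, as in mmdet 2.x.
def test_transforms_compose_sketch():
    from mmdet.datasets.pipelines import Compose

    pipeline = Compose([
        dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        dict(type='RandomFlip', flip_ratio=0.5),
        dict(
            type='Normalize',
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            to_rgb=True),
        dict(type='Pad', size_divisor=32),
    ])

    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    results = dict(
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        pad_shape=img.shape,
        scale_factor=1.0,
        img_fields=['img'])
    results = pipeline(results)
    # after Pad(size_divisor=32) both spatial dims must be divisible by 32
    assert results['img'].shape[0] % 32 == 0
    assert results['img'].shape[1] % 32 == 0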