Fix CI out-of-memory failures & add PyTorch 1.9 / Python 3.9 unit tests (#5862)

* fix ci

* fix ci

* fix ci

* fix ci

* fix ci

* fix ci

* fix ci

* fix ci

* fix ci

* fix ci

* try to fix CI

* try to fix CI

* try to fix CI

* try to fix CI

* fix ci

* Use docker to skip CUDA installation in CI

* fix mmcv install

* delete pt1.9

* fix ci

* delete py3.9

* fix ci

* fix albu

* fix albu

* skip mmpycocotools

* pycocotools

* pycocotools

* env

* env

* env

* env

* env

* env

* env

* skip some large models

* fix tracking

* reduce resnet channels

* reduce resnet channels

* reduce resnet channels

* reduce csp darknet channels

* reduce tracking channels

* fix CE loss

* enable cpu test

* add cuda10.2 and torch1.9 and python3.9

* fix python3.9 env

* fix python3.9 env

* fix python3.9 env

* fix python3.9 env

* fix apt install

* fix py3.9 ppa

* fix py3.9 ppa

* fix py3.9

* fix py3.9

* fix py3.9

* fix python dev

* fix python dev

* add cmake

* update onnxruntime

* update codecov

* update CI

* fix mmcv cuda version
Committed by RangiLyu via GitHub (commit 848f53c9c8, parent 48d382013d).
20 changed files (changed line counts in parentheses):
  1. .github/workflows/build.yml (145)
  2. mmdet/models/backbones/hourglass.py (12)
  3. mmdet/models/losses/focal_loss.py (4)
  4. requirements/tests.txt (2)
  5. tests/data/configs_mmtrack/faster_rcnn_r50_dc5.py (18)
  6. tests/data/configs_mmtrack/faster_rcnn_r50_fpn.py (18)
  7. tests/data/configs_mmtrack/selsa_faster_rcnn_r101_dc5_1x.py (5)
  8. tests/data/configs_mmtrack/tracktor_faster-rcnn_r50_fpn_4e.py (11)
  9. tests/test_models/test_backbones/test_csp_darknet.py (95)
  10. tests/test_models/test_backbones/test_hourglass.py (18)
  11. tests/test_models/test_backbones/test_mobilenet_v2.py (10)
  12. tests/test_models/test_backbones/test_regnet.py (19)
  13. tests/test_models/test_backbones/test_renext.py (11)
  14. tests/test_models/test_backbones/test_res2net.py (11)
  15. tests/test_models/test_backbones/test_resnest.py (27)
  16. tests/test_models/test_backbones/test_resnet.py (231)
  17. tests/test_models/test_backbones/test_trident_resnet.py (5)
  18. tests/test_models/test_forward.py (120)
  19. tests/test_models/test_loss.py (7)
  20. tests/test_runtime/test_config.py (66)

@@ -23,11 +23,11 @@ jobs:
interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 80 mmdet
build_cpu:
runs-on: ubuntu-latest
runs-on: ubuntu-18.04
strategy:
matrix:
python-version: [3.7]
torch: [1.3.1, 1.5.1, 1.6.0]
torch: [1.3.1, 1.5.1, 1.6.0, 1.7.0, 1.8.0, 1.9.0]
include:
- torch: 1.3.1
torchvision: 0.4.2
@@ -44,6 +44,9 @@ jobs:
- torch: 1.8.0
torchvision: 0.9.0
mmcv: "latest+torch1.8.0+cpu"
- torch: 1.9.0
torchvision: 0.10.0
mmcv: "latest+torch1.9.0+cpu"
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
@@ -62,6 +65,7 @@ jobs:
- name: Install unittest dependencies
run: |
pip install -r requirements/tests.txt -r requirements/optional.txt
pip install "albumentations>=0.3.2" --no-binary imgaug,albumentations
pip install git+https://github.com/cocodataset/panopticapi.git
- name: Build and install
run: rm -rf .eggs && pip install -e .
@@ -71,13 +75,11 @@ jobs:
coverage xml
coverage report -m
build_cuda:
build_cuda101:
runs-on: ubuntu-18.04
container:
image: pytorch/pytorch:1.6.0-cuda10.1-cudnn7-devel
env:
CUDA: 10.1.105-1
CUDA_SHORT: 10.1
UBUNTU_VERSION: ubuntu1804
strategy:
matrix:
python-version: [3.7]
@@ -88,48 +90,28 @@ jobs:
1.6.0+cu101,
1.7.0+cu101,
1.8.0+cu101,
1.9.0+cu101,
]
include:
- torch: 1.3.1
torch_version: torch1.3.1
torchvision: 0.4.2
mmcv: "latest+torch1.3.0+cu101"
mmcv_link: "torch1.3.0"
- torch: 1.5.1+cu101
torch_version: torch1.5.1
torchvision: 0.6.1+cu101
mmcv: "latest+torch1.5.0+cu101"
mmcv_link: "torch1.5.0"
- torch: 1.6.0+cu101
torch_version: torch1.6.0
torchvision: 0.7.0+cu101
mmcv: "latest+torch1.6.0+cu101"
- torch: 1.6.0+cu101
torch_version: torch1.6.0
torchvision: 0.7.0+cu101
mmcv: "latest+torch1.6.0+cu101"
python-version: 3.6
- torch: 1.6.0+cu101
torch_version: torch1.6.0
torchvision: 0.7.0+cu101
mmcv: "latest+torch1.6.0+cu101"
python-version: 3.8
- torch: 1.6.0+cu101
torch_version: torch1.6.0
torchvision: 0.7.0+cu101
mmcv: "latest+torch1.6.0+cu101"
python-version: 3.9
mmcv_link: "torch1.6.0"
- torch: 1.7.0+cu101
torch_version: torch1.7.0
torchvision: 0.8.1+cu101
mmcv: "latest+torch1.7.0+cu101"
mmcv_link: "torch1.7.0"
- torch: 1.8.0+cu101
torch_version: torch1.8.0
torchvision: 0.9.0+cu101
mmcv: "latest+torch1.8.0+cu101"
- torch: 1.9.0+cu101
torch_version: torch1.9.0
torchvision: 0.10.0+cu101
mmcv: "latest+torch1.9.0+cu101"
mmcv_link: "torch1.8.0"
steps:
- uses: actions/checkout@v2
@@ -137,35 +119,27 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install CUDA
- name: Install system dependencies
run: |
export INSTALLER=cuda-repo-${UBUNTU_VERSION}_${CUDA}_amd64.deb
wget http://developer.download.nvidia.com/compute/cuda/repos/${UBUNTU_VERSION}/x86_64/${INSTALLER}
sudo dpkg -i ${INSTALLER}
wget https://developer.download.nvidia.com/compute/cuda/repos/${UBUNTU_VERSION}/x86_64/7fa2af80.pub
sudo apt-key add 7fa2af80.pub
echo 'succeed before add key'
sudo apt update -qq
echo 'succeed after update'
sudo apt install -y cuda-${CUDA_SHORT/./-} cuda-cufft-dev-${CUDA_SHORT/./-}
echo 'succeed after install cuda and cufft'
sudo apt clean
export CUDA_HOME=/usr/local/cuda-${CUDA_SHORT}
export LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${CUDA_HOME}/include:${LD_LIBRARY_PATH}
export PATH=${CUDA_HOME}/bin:${PATH}
apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install Pillow
run: pip install Pillow==6.2.2
run: python -m pip install Pillow==6.2.2
if: ${{matrix.torchvision < 0.5}}
- name: Install PyTorch
run: pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install dependencies for compiling onnx when python=3.9
run: pip install protobuf && sudo apt-get install libprotobuf-dev protobuf-compiler
run: python -m pip install protobuf && apt-get update && apt-get install -y libprotobuf-dev protobuf-compiler
if: ${{matrix.python-version == '3.9'}}
- name: Install mmdet dependencies
run: |
pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/${{matrix.torch_version}}/index.html
pip install -r requirements.txt
pip install git+https://github.com/cocodataset/panopticapi.git
python -V
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/${{matrix.mmcv_link}}/index.html
python -m pip install pycocotools
python -m pip install -r requirements/tests.txt -r requirements/optional.txt
python -m pip install "albumentations>=0.3.2" --no-binary imgaug,albumentations
python -m pip install git+https://github.com/cocodataset/panopticapi.git
python -c 'import mmcv; print(mmcv.__version__)'
- name: Build and install
run: |
@@ -185,3 +159,68 @@ jobs:
env_vars: OS,PYTHON
name: codecov-umbrella
fail_ci_if_error: false
build_cuda102:
runs-on: ubuntu-18.04
container:
image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel
strategy:
matrix:
python-version: [3.6, 3.7, 3.8, 3.9-dev]
torch: [1.9.0+cu102]
include:
- torch: 1.9.0+cu102
torch_version: torch1.9.0
torchvision: 0.10.0+cu102
mmcv_link: "torch1.9.0"
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install python-dev
run: apt-get update && apt-get install -y python${{matrix.python-version}}-dev
if: ${{matrix.python-version != '3.9-dev'}}
- name: Install system dependencies
run: |
apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install Pillow
run: python -m pip install Pillow==6.2.2
if: ${{matrix.torchvision < 0.5}}
- name: Install PyTorch
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install dependencies for compiling onnx when python=3.9
run: python -m pip install protobuf && apt-get update && apt-get -y install libprotobuf-dev protobuf-compiler cmake
if: ${{matrix.python-version == '3.9-dev'}}
- name: Install mmdet dependencies
run: |
python -V
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu102/${{matrix.mmcv_link}}/index.html
python -m pip install pycocotools
python -m pip install -r requirements/tests.txt -r requirements/optional.txt
python -m pip install "albumentations>=0.3.2" --no-binary imgaug,albumentations
python -m pip install git+https://github.com/cocodataset/panopticapi.git
python -c 'import mmcv; print(mmcv.__version__)'
- name: Build and install
run: |
rm -rf .eggs
python setup.py check -m -s
TORCH_CUDA_ARCH_LIST=7.0 pip install .
- name: Run unittests and generate coverage report
run: |
coverage run --branch --source mmdet -m pytest tests/
coverage xml
coverage report -m
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v2
with:
files: ./coverage.xml
flags: unittests
env_vars: OS,PYTHON
name: codecov-umbrella
fail_ci_if_error: false
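
The install steps above build the mmcv-full find-links URL from the matrix's `mmcv_link` value rather than a hard-coded wheel tag. A minimal Python sketch of that URL scheme (the helper name is hypothetical, not part of the workflow):

    # Hypothetical helper mirroring the find-links URL the workflow assembles.
    def mmcv_index_url(cuda: str, mmcv_link: str) -> str:
        return ('https://download.openmmlab.com/mmcv/dist/'
                f'{cuda}/{mmcv_link}/index.html')

    print(mmcv_index_url('cu102', 'torch1.9.0'))
    # https://download.openmmlab.com/mmcv/dist/cu102/torch1.9.0/index.html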

@@ -149,8 +149,16 @@ class HourglassNet(BaseModule):
cur_channel = stage_channels[0]
self.stem = nn.Sequential(
ConvModule(3, 128, 7, padding=3, stride=2, norm_cfg=norm_cfg),
ResLayer(BasicBlock, 128, 256, 1, stride=2, norm_cfg=norm_cfg))
ConvModule(
3, cur_channel // 2, 7, padding=3, stride=2,
norm_cfg=norm_cfg),
ResLayer(
BasicBlock,
cur_channel // 2,
cur_channel,
1,
stride=2,
norm_cfg=norm_cfg))
self.hourglass_modules = nn.ModuleList([
HourglassModule(downsample_times, stage_channels, stage_blocks)
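
With this change the stem is derived from `stage_channels[0]` instead of the hard-coded 128/256 pair, so a smaller stage configuration shrinks the stem too. A quick sketch using the reduced channels from the updated hourglass test:

    # Stem widths now follow stage_channels[0]; values from the new test config.
    stage_channels = (64, 64, 96, 96, 96, 128)
    cur_channel = stage_channels[0]
    print(cur_channel // 2, cur_channel)  # 32 64, replacing the fixed 128/256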

@@ -83,8 +83,8 @@ def sigmoid_focal_loss(pred,
"""
# Function.apply does not accept keyword arguments, so the decorator
# "weighted_loss" is not applicable
loss = _sigmoid_focal_loss(pred.contiguous(), target, gamma, alpha, None,
'none')
loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma,
alpha, None, 'none')
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
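
Since `Function.apply` feeds tensors directly to the compiled focal-loss kernel, a non-contiguous `target` can misbehave, hence the extra `.contiguous()` call. A small plain-PyTorch illustration of the distinction (independent of mmcv's `_sigmoid_focal_loss` op):

    import torch

    x = torch.arange(8).reshape(2, 4)
    view = x[:, ::2]                           # strided view, not contiguous
    print(view.is_contiguous())                # False
    print(view.contiguous().is_contiguous())   # True: copied into dense memory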

@@ -7,7 +7,7 @@ isort==4.3.21
kwarray
mmtrack
onnx==1.7.0
onnxruntime==1.5.1
onnxruntime>=1.8.0
pytest
ubelt
xdoctest>=0.10.0

@@ -1,10 +1,10 @@
model = dict(
detector=dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
depth=18,
base_channels=2,
num_stages=4,
out_indices=(3, ),
strides=(1, 2, 2, 1),
@@ -15,13 +15,13 @@ model = dict(
style='pytorch'),
neck=dict(
type='ChannelMapper',
in_channels=[2048],
out_channels=512,
in_channels=[16],
out_channels=16,
kernel_size=3),
rpn_head=dict(
type='RPNHead',
in_channels=512,
feat_channels=512,
in_channels=16,
feat_channels=16,
anchor_generator=dict(
type='AnchorGenerator',
scales=[4, 8, 16, 32],
@@ -41,12 +41,12 @@ model = dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=512,
out_channels=16,
featmap_strides=[16]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=512,
fc_out_channels=1024,
in_channels=16,
fc_out_channels=32,
roi_feat_size=7,
num_classes=30,
bbox_coder=dict(

@@ -1,10 +1,10 @@
model = dict(
detector=dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
depth=18,
base_channels=2,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
@@ -12,14 +12,12 @@ model = dict(
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
type='FPN', in_channels=[2, 4, 8, 16], out_channels=16,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
in_channels=16,
feat_channels=16,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
@@ -39,12 +37,12 @@ model = dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
out_channels=16,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
in_channels=16,
fc_out_channels=32,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
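
Both mmtrack test configs swap ResNet-50 for a tiny ResNet-18 with `base_channels=2`. ResNet-18 is built from BasicBlocks (expansion 1), so stage i outputs `base_channels * 2**i` channels, which is where the neck inputs above come from. A short sketch of that arithmetic:

    # ResNet-18 stage widths for a given base_channels (BasicBlock expansion = 1).
    base_channels = 2
    stages = [base_channels * 2 ** i for i in range(4)]
    print(stages)      # [2, 4, 8, 16] -> FPN in_channels
    print(stages[3])   # 16            -> DC5 ChannelMapper in_channels=[16]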

@@ -6,8 +6,7 @@ model = dict(
type='SELSA',
pretrains=None,
detector=dict(
pretrained='torchvision://resnet101',
backbone=dict(depth=101),
backbone=dict(depth=18, base_channels=2),
roi_head=dict(
type='SelsaRoIHead',
bbox_head=dict(
@@ -15,7 +14,7 @@ model = dict(
num_shared_fcs=2,
aggregator=dict(
type='SelsaAggregator',
in_channels=1024,
in_channels=32,
num_attention_blocks=16)))))
# dataset settings

@@ -19,7 +19,8 @@ model = dict(
type='BaseReID',
backbone=dict(
type='ResNet',
depth=50,
depth=18,
base_channels=2,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
@@ -27,10 +28,10 @@ model = dict(
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=2048,
fc_channels=1024,
out_channels=128,
num_classes=378,
in_channels=16,
fc_channels=32,
out_channels=16,
num_classes=8,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
loss_pairwise=dict(
type='TripletLoss', margin=0.3, loss_weight=1.0),

@@ -19,7 +19,6 @@ def test_csp_darknet_backbone():
# Test CSPDarknet with first stage frozen
frozen_stages = 1
model = CSPDarknet(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.stem.modules():
@@ -35,105 +34,83 @@ def test_csp_darknet_backbone():
# Test CSPDarknet with norm_eval=True
model = CSPDarknet(norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test CSPDarknet-P5 forward with widen_factor=1.0
model = CSPDarknet(arch='P5', widen_factor=1.0, out_indices=range(0, 5))
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
# Test CSPDarknet-P5 forward with widen_factor=0.5
model = CSPDarknet(arch='P5', widen_factor=0.5, out_indices=range(0, 5))
model.init_weights()
model = CSPDarknet(arch='P5', widen_factor=0.25, out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 64, 56, 56))
assert feat[2].shape == torch.Size((1, 128, 28, 28))
assert feat[3].shape == torch.Size((1, 256, 14, 14))
assert feat[4].shape == torch.Size((1, 512, 7, 7))
assert feat[0].shape == torch.Size((1, 16, 32, 32))
assert feat[1].shape == torch.Size((1, 32, 16, 16))
assert feat[2].shape == torch.Size((1, 64, 8, 8))
assert feat[3].shape == torch.Size((1, 128, 4, 4))
assert feat[4].shape == torch.Size((1, 256, 2, 2))
# Test CSPDarknet-P6 forward with widen_factor=1.5
# Test CSPDarknet-P6 forward with widen_factor=0.5
model = CSPDarknet(
arch='P6',
widen_factor=1.5,
widen_factor=0.25,
out_indices=range(0, 6),
spp_kernal_sizes=(3, 5, 7))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 320, 320)
imgs = torch.randn(1, 3, 128, 128)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 96, 160, 160))
assert feat[1].shape == torch.Size((1, 192, 80, 80))
assert feat[2].shape == torch.Size((1, 384, 40, 40))
assert feat[3].shape == torch.Size((1, 768, 20, 20))
assert feat[4].shape == torch.Size((1, 1152, 10, 10))
assert feat[5].shape == torch.Size((1, 1536, 5, 5))
assert feat[0].shape == torch.Size((1, 16, 64, 64))
assert feat[1].shape == torch.Size((1, 32, 32, 32))
assert feat[2].shape == torch.Size((1, 64, 16, 16))
assert feat[3].shape == torch.Size((1, 128, 8, 8))
assert feat[4].shape == torch.Size((1, 192, 4, 4))
assert feat[5].shape == torch.Size((1, 256, 2, 2))
# Test CSPDarknet forward with dict(type='ReLU')
model = CSPDarknet(
widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
model.init_weights()
widen_factor=0.125, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with BatchNorm forward
model = CSPDarknet(widen_factor=1.0, out_indices=range(0, 5))
model = CSPDarknet(widen_factor=0.125, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with custom arch forward
arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
[224, 512, 1, True, False]]
model = CSPDarknet(
arch_ovewrite=arch_ovewrite,
widen_factor=1.0,
widen_factor=0.25,
out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 56, 56, 56))
assert feat[2].shape == torch.Size((1, 224, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[0].shape == torch.Size((1, 8, 16, 16))
assert feat[1].shape == torch.Size((1, 14, 8, 8))
assert feat[2].shape == torch.Size((1, 56, 4, 4))
assert feat[3].shape == torch.Size((1, 128, 2, 2))
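
CSPDarknet stage widths scale linearly with `widen_factor`, so the assertions above are simply the stock P5 widths multiplied by 0.25. A sketch of the scaling:

    # CSPDarknet-P5 default stage widths scaled by widen_factor.
    p5_widths = [64, 128, 256, 512, 1024]
    for widen_factor in (1.0, 0.25):
        print(widen_factor, [int(c * widen_factor) for c in p5_widths])
    # 1.0  [64, 128, 256, 512, 1024]
    # 0.25 [16, 32, 64, 128, 256]  -> the reduced test above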

@@ -24,22 +24,26 @@ def test_hourglass_backbone():
stage_blocks=[2, 2, 2, 2, 2])
# Test HourglassNet-52
model = HourglassNet(num_stacks=1)
model.init_weights()
model = HourglassNet(
num_stacks=1,
stage_channels=(64, 64, 96, 96, 96, 128),
feat_channel=64)
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([1, 256, 64, 64])
assert feat[0].shape == torch.Size([1, 64, 64, 64])
# Test HourglassNet-104
model = HourglassNet(num_stacks=2)
model.init_weights()
model = HourglassNet(
num_stacks=2,
stage_channels=(64, 64, 96, 96, 96, 128),
feat_channel=64)
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 2
assert feat[0].shape == torch.Size([1, 256, 64, 64])
assert feat[1].shape == torch.Size([1, 256, 64, 64])
assert feat[0].shape == torch.Size([1, 64, 64, 64])
assert feat[1].shape == torch.Size([1, 64, 64, 64])

@@ -20,7 +20,6 @@ def test_mobilenetv2_backbone():
# Test MobileNetV2 with first stage frozen
frozen_stages = 1
model = MobileNetV2(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.conv1.modules():
@@ -36,14 +35,12 @@ def test_mobilenetv2_backbone():
# Test MobileNetV2 with norm_eval=True
model = MobileNetV2(norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test MobileNetV2 forward with widen_factor=1.0
model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8))
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
@@ -62,7 +59,6 @@ def test_mobilenetv2_backbone():
# Test MobileNetV2 forward with widen_factor=0.5
model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
@@ -78,7 +74,6 @@ def test_mobilenetv2_backbone():
# Test MobileNetV2 forward with widen_factor=2.0
model = MobileNetV2(widen_factor=2.0, out_indices=range(0, 8))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
@@ -95,7 +90,6 @@ def test_mobilenetv2_backbone():
# Test MobileNetV2 forward with dict(type='ReLU')
model = MobileNetV2(
widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
@@ -114,7 +108,6 @@ def test_mobilenetv2_backbone():
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
@@ -136,7 +129,6 @@ def test_mobilenetv2_backbone():
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
@@ -152,7 +144,6 @@ def test_mobilenetv2_backbone():
# Test MobileNetV2 with layers 1, 3, 5 out forward
model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
@@ -168,7 +159,6 @@ def test_mobilenetv2_backbone():
for m in model.modules():
if is_block(m):
assert m.with_cp
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)

@@ -40,20 +40,19 @@ def test_regnet_backbone(arch_name, arch, out_channels):
# Test RegNet with arch_name
model = RegNet(arch_name)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 56, 56])
assert feat[1].shape == torch.Size([1, out_channels[1], 28, 28])
assert feat[2].shape == torch.Size([1, out_channels[2], 14, 14])
assert feat[3].shape == torch.Size([1, out_channels[3], 7, 7])
assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
# Test RegNet with arch
model = RegNet(arch)
assert feat[0].shape == torch.Size([1, out_channels[0], 56, 56])
assert feat[1].shape == torch.Size([1, out_channels[1], 28, 28])
assert feat[2].shape == torch.Size([1, out_channels[2], 14, 14])
assert feat[3].shape == torch.Size([1, out_channels[3], 7, 7])
assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
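
The other memory lever in these tests is input size: the backbones emit features at strides (4, 8, 16, 32), so shrinking test images from 224x224 to 32x32 shrinks every feature map by the same factor:

    # Feature map sizes for output strides (4, 8, 16, 32).
    for size in (224, 32):
        print(size, [size // s for s in (4, 8, 16, 32)])
    # 224 [56, 28, 14, 7]
    # 32  [8, 4, 2, 1]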

@@ -66,16 +66,15 @@ def test_resnext_backbone():
for m in model.modules():
if is_block(m):
assert m.conv2.groups == 32
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
assert feat[0].shape == torch.Size([1, 256, 8, 8])
assert feat[1].shape == torch.Size([1, 512, 4, 4])
assert feat[2].shape == torch.Size([1, 1024, 2, 2])
assert feat[3].shape == torch.Size([1, 2048, 1, 1])
regnet_test_data = [

@@ -51,13 +51,12 @@ def test_res2net_backbone():
for m in model.modules():
if is_block(m):
assert m.scales == 4
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
assert feat[0].shape == torch.Size([1, 256, 8, 8])
assert feat[1].shape == torch.Size([1, 512, 4, 4])
assert feat[2].shape == torch.Size([1, 1024, 2, 2])
assert feat[3].shape == torch.Size([1, 2048, 1, 1])

@@ -13,15 +13,15 @@ def test_resnest_bottleneck():
# Test ResNeSt Bottleneck structure
block = BottleneckS(
64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch')
2, 4, radix=2, reduction_factor=4, stride=2, style='pytorch')
assert block.avd_layer.stride == 2
assert block.conv2.channels == 256
assert block.conv2.channels == 4
# Test ResNeSt Bottleneck forward
block = BottleneckS(64, 16, radix=2, reduction_factor=4)
x = torch.randn(2, 64, 56, 56)
block = BottleneckS(16, 4, radix=2, reduction_factor=4)
x = torch.randn(2, 16, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([2, 64, 56, 56])
assert x_out.shape == torch.Size([2, 16, 56, 56])
def test_resnest_backbone():
@@ -31,14 +31,17 @@ def test_resnest_backbone():
# Test ResNeSt with radix 2, reduction_factor 4
model = ResNeSt(
depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3))
model.init_weights()
depth=50,
base_channels=4,
radix=2,
reduction_factor=4,
out_indices=(0, 1, 2, 3))
model.train()
imgs = torch.randn(2, 3, 224, 224)
imgs = torch.randn(2, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([2, 256, 56, 56])
assert feat[1].shape == torch.Size([2, 512, 28, 28])
assert feat[2].shape == torch.Size([2, 1024, 14, 14])
assert feat[3].shape == torch.Size([2, 2048, 7, 7])
assert feat[0].shape == torch.Size([2, 16, 8, 8])
assert feat[1].shape == torch.Size([2, 32, 4, 4])
assert feat[2].shape == torch.Size([2, 64, 2, 2])
assert feat[3].shape == torch.Size([2, 128, 1, 1])

@@ -311,45 +311,37 @@ def test_resnest_stem():
assert model.conv1.out_channels == 64
assert model.norm1.num_features == 64
# Test default stem_channels, with base_channels=32
model = ResNet(50, base_channels=32)
assert model.stem_channels == 32
assert model.conv1.out_channels == 32
assert model.norm1.num_features == 32
assert model.layer1[0].conv1.in_channels == 32
# Test stem_channels=64
model = ResNet(50, stem_channels=64)
assert model.stem_channels == 64
assert model.conv1.out_channels == 64
assert model.norm1.num_features == 64
assert model.layer1[0].conv1.in_channels == 64
# Test stem_channels=64, with base_channels=32
model = ResNet(50, stem_channels=64, base_channels=32)
assert model.stem_channels == 64
assert model.conv1.out_channels == 64
assert model.norm1.num_features == 64
assert model.layer1[0].conv1.in_channels == 64
# Test stem_channels=128
model = ResNet(depth=50, stem_channels=128)
model.init_weights()
model.train()
assert model.conv1.out_channels == 128
assert model.layer1[0].conv1.in_channels == 128
# Test default stem_channels, with base_channels=3
model = ResNet(50, base_channels=3)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test stem_channels=3
model = ResNet(50, stem_channels=3)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test stem_channels=3, with base_channels=2
model = ResNet(50, stem_channels=3, base_channels=2)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test V1d stem_channels
model = ResNetV1d(depth=50, stem_channels=128)
model.init_weights()
model = ResNetV1d(depth=50, stem_channels=6)
model.train()
assert model.stem[0].out_channels == 64
assert model.stem[1].num_features == 64
assert model.stem[3].out_channels == 64
assert model.stem[4].num_features == 64
assert model.stem[6].out_channels == 128
assert model.stem[7].num_features == 128
assert model.layer1[0].conv1.in_channels == 128
assert model.stem[0].out_channels == 3
assert model.stem[1].num_features == 3
assert model.stem[3].out_channels == 3
assert model.stem[4].num_features == 3
assert model.stem[6].out_channels == 6
assert model.stem[7].num_features == 6
assert model.layer1[0].conv1.in_channels == 6
def test_resnet_backbone():
@@ -388,29 +380,25 @@ def test_resnet_backbone():
with pytest.raises(TypeError):
# pretrained must be a string path
model = ResNet(50, pretrained=0)
model.init_weights()
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
ResNet(50, style='tensorflow')
# Test ResNet50 norm_eval=True
model = ResNet(50, norm_eval=True)
model.init_weights()
model = ResNet(50, norm_eval=True, base_channels=1)
model.train()
assert check_norm_state(model.modules(), False)
# Test ResNet50 with torchvision pretrained weight
model = ResNet(
depth=50, norm_eval=True, pretrained='torchvision://resnet50')
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test ResNet50 with first stage frozen
frozen_stages = 1
model = ResNet(50, frozen_stages=frozen_stages)
model.init_weights()
model = ResNet(50, frozen_stages=frozen_stages, base_channels=1)
model.train()
assert model.norm1.training is False
for layer in [model.conv1, model.norm1]:
@@ -425,9 +413,8 @@ def test_resnet_backbone():
assert param.requires_grad is False
# Test ResNet50V1d with first stage frozen
model = ResNetV1d(depth=50, frozen_stages=frozen_stages)
model = ResNetV1d(depth=50, frozen_stages=frozen_stages, base_channels=2)
assert len(model.stem) == 9
model.init_weights()
model.train()
assert check_norm_state(model.stem, False)
for param in model.stem.parameters():
@@ -442,16 +429,15 @@ def test_resnet_backbone():
# Test ResNet18 forward
model = ResNet(18)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 64, 56, 56])
assert feat[1].shape == torch.Size([1, 128, 28, 28])
assert feat[2].shape == torch.Size([1, 256, 14, 14])
assert feat[3].shape == torch.Size([1, 512, 7, 7])
assert feat[0].shape == torch.Size([1, 64, 8, 8])
assert feat[1].shape == torch.Size([1, 128, 4, 4])
assert feat[2].shape == torch.Size([1, 256, 2, 2])
assert feat[3].shape == torch.Size([1, 512, 1, 1])
# Test ResNet18 with checkpoint forward
model = ResNet(18, with_cp=True)
@@ -460,65 +446,63 @@ def test_resnet_backbone():
assert m.with_cp
# Test ResNet50 with BatchNorm forward
model = ResNet(50)
model = ResNet(50, base_channels=1)
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNet50 with layers 1, 2, 3 out forward
model = ResNet(50, out_indices=(0, 1, 2))
model.init_weights()
model = ResNet(50, out_indices=(0, 1, 2), base_channels=1)
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
# Test ResNet50 with checkpoint forward
model = ResNet(50, with_cp=True)
model = ResNet(50, with_cp=True, base_channels=1)
for m in model.modules():
if is_block(m):
assert m.with_cp
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNet50 with GroupNorm forward
model = ResNet(
50, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
50,
base_channels=4,
norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
assert feat[0].shape == torch.Size([1, 16, 8, 8])
assert feat[1].shape == torch.Size([1, 32, 4, 4])
assert feat[2].shape == torch.Size([1, 64, 2, 2])
assert feat[3].shape == torch.Size([1, 128, 1, 1])
# Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D
# after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4
@@ -538,39 +522,38 @@ def test_resnet_backbone():
stages=(False, True, True, False),
position='after_conv3')
]
model = ResNet(50, plugins=plugins)
model = ResNet(50, plugins=plugins, base_channels=8)
for m in model.layer1.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'gen_attention_block')
assert m.nonlocal_block.in_channels == 64
assert m.nonlocal_block.in_channels == 8
for m in model.layer2.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 128
assert m.gen_attention_block.in_channels == 128
assert m.context_block.in_channels == 512
assert m.nonlocal_block.in_channels == 16
assert m.gen_attention_block.in_channels == 16
assert m.context_block.in_channels == 64
for m in model.layer3.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 256
assert m.gen_attention_block.in_channels == 256
assert m.context_block.in_channels == 1024
assert m.nonlocal_block.in_channels == 32
assert m.gen_attention_block.in_channels == 32
assert m.context_block.in_channels == 128
for m in model.layer4.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 512
assert m.gen_attention_block.in_channels == 512
assert m.nonlocal_block.in_channels == 64
assert m.gen_attention_block.in_channels == 64
assert not hasattr(m, 'context_block')
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
assert feat[0].shape == torch.Size([1, 32, 8, 8])
assert feat[1].shape == torch.Size([1, 64, 4, 4])
assert feat[2].shape == torch.Size([1, 128, 2, 2])
assert feat[3].shape == torch.Size([1, 256, 1, 1])
# Test ResNet50 with 1 ContextBlock after conv2, 1 ContextBlock after
# conv3 in layers 2, 3, 4
@@ -585,7 +568,7 @@ def test_resnet_backbone():
position='after_conv3')
]
model = ResNet(50, plugins=plugins)
model = ResNet(50, plugins=plugins, base_channels=8)
for m in model.layer1.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
@@ -594,33 +577,32 @@ def test_resnet_backbone():
for m in model.layer2.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert m.context_block1.in_channels == 512
assert m.context_block2.in_channels == 512
assert m.context_block1.in_channels == 64
assert m.context_block2.in_channels == 64
for m in model.layer3.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert m.context_block1.in_channels == 1024
assert m.context_block2.in_channels == 1024
assert m.context_block1.in_channels == 128
assert m.context_block2.in_channels == 128
for m in model.layer4.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'context_block1')
assert not hasattr(m, 'context_block2')
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
assert feat[0].shape == torch.Size([1, 32, 8, 8])
assert feat[1].shape == torch.Size([1, 64, 4, 4])
assert feat[2].shape == torch.Size([1, 128, 2, 2])
assert feat[3].shape == torch.Size([1, 256, 1, 1])
# Test ResNet50 zero initialization of residual
model = ResNet(50, zero_init_residual=True)
model = ResNet(50, zero_init_residual=True, base_channels=1)
model.init_weights()
for m in model.modules():
if isinstance(m, Bottleneck):
@@ -629,39 +611,22 @@ def test_resnet_backbone():
assert assert_params_all_zeros(m.norm2)
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNetV1d forward
model = ResNetV1d(depth=50)
model.init_weights()
model = ResNetV1d(depth=50, base_channels=2)
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
assert feat[0].shape == torch.Size([1, 8, 8, 8])
assert feat[1].shape == torch.Size([1, 16, 4, 4])
assert feat[2].shape == torch.Size([1, 32, 2, 2])
assert feat[3].shape == torch.Size([1, 64, 1, 1])
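
The ResNet-50 reductions follow the same rule with Bottleneck blocks (expansion 4): stage i outputs `base_channels * 4 * 2**i` channels. A sketch checking the asserted shapes:

    # ResNet-50 stage widths (Bottleneck expansion = 4) per base_channels.
    def resnet50_stage_channels(base_channels):
        return [base_channels * 4 * 2 ** i for i in range(4)]

    print(resnet50_stage_channels(64))  # [256, 512, 1024, 2048], original asserts
    print(resnet50_stage_channels(1))   # [4, 8, 16, 32], reduced asserts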

@@ -172,10 +172,9 @@ def test_trident_resnet_backbone():
TridentResNet(50, num_stages=4, **tridentresnet_config)
model = TridentResNet(50, num_stages=3, **tridentresnet_config)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([3, 1024, 14, 14])
assert feat[0].shape == torch.Size([3, 1024, 2, 2])

@@ -43,14 +43,25 @@ def _get_detector_cfg(fname):
return model
def _replace_r50_with_r18(model):
"""Replace ResNet50 with ResNet18 in config."""
model = copy.deepcopy(model)
if model.backbone.type == 'ResNet':
model.backbone.depth = 18
model.backbone.base_channels = 2
model.neck.in_channels = [2, 4, 8, 16]
return model
def test_sparse_rcnn_forward():
config_path = 'sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py'
model = _get_detector_cfg(config_path)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
detector.init_weights()
input_shape = (1, 3, 550, 550)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[5])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
@@ -110,12 +121,13 @@ def test_sparse_rcnn_forward():
def test_rpn_forward():
model = _get_detector_cfg('rpn/rpn_r50_fpn_1x_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 224, 224)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
@@ -148,7 +160,7 @@ def test_rpn_forward():
# 'free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
# 'atss/atss_r50_fpn_1x_coco.py', # not ready for topk
'reppoints/reppoints_moment_r50_fpn_1x_coco.py',
'yolo/yolov3_d53_mstrain-608_273e_coco.py',
'yolo/yolov3_mobilenetv2_320_300e_coco.py',
'yolox/yolox_tiny_8x8_300e_coco.py'
])
def test_single_stage_forward_gpu(cfg_file):
@@ -157,12 +169,13 @@ def test_single_stage_forward_gpu(cfg_file):
pytest.skip('test requires GPU and torch+cuda')
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (2, 3, 224, 224)
input_shape = (2, 3, 128, 128)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
@@ -181,26 +194,6 @@ def test_single_stage_forward_gpu(cfg_file):
return_loss=True)
assert isinstance(losses, dict)
# Test forward train with an empty truth batch
if cfg_file == 'yolox/yolox_tiny_8x8_300e_coco.py':
detector.bbox_head.use_l1 = True
gt_bboxes = [
torch.empty((0, 4), dtype=torch.float).cuda()
for _ in range(input_shape[0])
]
gt_labels = [
torch.empty((0, ), dtype=torch.long).cuda()
for _ in range(input_shape[0])
]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
@@ -214,12 +207,13 @@ def test_single_stage_forward_gpu(cfg_file):
def test_faster_rcnn_ohem_forward():
model = _get_detector_cfg(
'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 256, 256)
input_shape = (1, 3, 100, 100)
# Test forward train with a non-empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
@@ -257,14 +251,14 @@ def test_faster_rcnn_ohem_forward():
@pytest.mark.parametrize(
'cfg_file',
[
'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
# 'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
'grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
'ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py',
'htc/htc_r50_fpn_1x_coco.py',
'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
'scnet/scnet_r50_fpn_20e_coco.py',
'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
# 'grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
# 'ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py',
# 'htc/htc_r50_fpn_1x_coco.py',
# 'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
# 'scnet/scnet_r50_fpn_20e_coco.py',
# 'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
])
def test_two_stage_forward(cfg_file):
models_with_semantic = [
@@ -278,6 +272,7 @@ def test_two_stage_forward(cfg_file):
with_semantic = False
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
# Save cost
@@ -293,7 +288,7 @@ def test_two_stage_forward(cfg_file):
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 256, 256)
input_shape = (1, 3, 128, 128)
# Test forward train with a non-empty truth batch
mm_inputs = _demo_mm_inputs(
@@ -386,6 +381,7 @@ def test_two_stage_forward(cfg_file):
'cfg_file', ['ghm/retinanet_ghm_r50_fpn_1x_coco.py', 'ssd/ssd300_coco.py'])
def test_single_stage_forward_cpu(cfg_file):
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
@@ -498,6 +494,7 @@ def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
def test_yolact_forward():
model = _get_detector_cfg('yolact/yolact_r50_1x8_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
@@ -537,61 +534,8 @@ def test_yolact_forward():
def test_detr_forward():
model = _get_detector_cfg('detr/detr_r50_8x2_150e_coco.py')
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train with non-empty truth batch
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
def test_kd_single_stage_forward():
model = _get_detector_cfg('ld/ld_r18_gflv1_r101_fpn_coco_1x.py')
model.backbone.depth = 18
model.bbox_head.in_channels = 512
model.backbone.init_cfg = None
from mmdet.models import build_detector

@@ -1,6 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.utils import digit_version
from mmdet.models.losses import (BalancedL1Loss, CrossEntropyLoss,
DistributionFocalLoss, FocalLoss,
@@ -86,6 +87,12 @@ def test_regression_losses(loss_class, input_shape):
@pytest.mark.parametrize('loss_class', [FocalLoss, CrossEntropyLoss])
@pytest.mark.parametrize('input_shape', [(10, 5), (0, 5)])
def test_classification_losses(loss_class, input_shape):
if input_shape[0] == 0 and digit_version(
torch.__version__) < digit_version('1.5.0'):
pytest.skip(
f'CELoss in PyTorch {torch.__version__} does not support empty '
f'tensor.')
pred = torch.rand(input_shape)
target = torch.randint(0, 5, (input_shape[0], ))
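
The new skip relies on `mmcv.utils.digit_version`, which parses a version string into a tuple that compares numerically rather than lexically:

    # digit_version makes version strings comparable.
    from mmcv.utils import digit_version

    print(digit_version('1.4.0') < digit_version('1.5.0'))   # True
    print(digit_version('1.10.0') < digit_version('1.5.0'))  # False (not lexical)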

@@ -1,10 +1,8 @@
# Copyright (c) OpenMMLab. All rights reserved.
from os.path import dirname, exists, join, relpath
from os.path import dirname, exists, join
from unittest.mock import Mock
import pytest
import torch
from mmcv.runner import build_optimizer
from mmdet.core import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import DATASETS
@@ -62,68 +60,6 @@ def _check_numclasscheckhook(detector, config_mod):
compatible_check.before_val_epoch(dummy_runner)
def test_config_build_detector():
"""Test that all detection models defined in the configs can be
initialized."""
from mmcv import Config
from mmdet.models import build_detector
config_dpath = _get_config_directory()
print(f'Found config_dpath = {config_dpath}')
import glob
config_fpaths = list(glob.glob(join(config_dpath, '**', '*.py')))
config_fpaths = [
p for p in config_fpaths
if p.find('_base_') == -1 and p.find('common') == -1
]
config_names = [relpath(p, config_dpath) for p in config_fpaths]
print(f'Using {len(config_names)} config files')
for config_fname in config_names:
config_fpath = join(config_dpath, config_fname)
config_mod = Config.fromfile(config_fpath)
print(f'Building detector, config_fpath = {config_fpath}')
# Remove pretrained keys to allow for testing in an offline environment
if 'pretrained' in config_mod.model:
config_mod.model['pretrained'] = None
detector = build_detector(config_mod.model)
assert detector is not None
# Check whether NumClassCheckHook is used.
custom_hooks = config_mod.get('custom_hooks', [])
assert custom_hooks is None or isinstance(custom_hooks, list)
check_class_num = False
if custom_hooks is not None:
hooks = [hook['type'] for hook in custom_hooks]
if 'NumClassCheckHook' in hooks:
check_class_num = True
if check_class_num:
_check_numclasscheckhook(detector, config_mod)
optimizer = build_optimizer(detector, config_mod.optimizer)
assert isinstance(optimizer, torch.optim.Optimizer)
if 'roi_head' in config_mod.model.keys():
# for two stage detector
# detectors must have bbox head
assert detector.roi_head.with_bbox and detector.with_bbox
assert detector.roi_head.with_mask == detector.with_mask
head_config = config_mod.model['roi_head']
_check_roi_head(head_config, detector.roi_head)
# else:
# # for single stage detector
# # detectors must have bbox head
# # assert detector.with_bbox
# head_config = config_mod.model['bbox_head']
# _check_bbox_head(head_config, detector.bbox_head)
def _check_roi_head(config, head):
# check consistency between head_config and roi_head
assert config['type'] == head.__class__.__name__
