Add CondenseNet V2

Bobholamovic 2 years ago
parent d51b683942
commit a895234700
Changed files:
  1. paddlers/rs_models/clas/condensenetv2.py (8 lines changed)
  2. paddlers/tasks/classifier.py (24 lines changed)
  3. test_tipc/configs/clas/condensenetv2/condensenetv2_ucmerced.yaml (10 lines changed)
  4. test_tipc/configs/clas/condensenetv2/train_infer_python.txt (53 lines changed)
  5. test_tipc/configs/clas/hrnet/hrnet.yaml (10 lines changed)
  6. test_tipc/configs/clas/hrnet/hrnet_ucmerced.yaml (2 lines changed)
  7. tests/rs_models/test_clas_models.py (35 lines changed)
  8. tutorials/train/classification/condensenetv2.py (90 lines changed)

paddlers/rs_models/clas/condensenetv2.py
@@ -20,7 +20,7 @@ Apache License [see LICENSE for details]
import paddle
import paddle.nn as nn

-__all__ = ["CondenseNetV2_a", "CondenseNetV2_b", "CondenseNetV2_c"]
+__all__ = ["CondenseNetV2_A", "CondenseNetV2_B", "CondenseNetV2_C"]

class SELayer(nn.Layer):
@@ -394,7 +394,7 @@ class CondenseNetV2(nn.Layer):
                nn.initializer.Constant(value=0.0)(m.bias)

-def CondenseNetV2_a(**kwargs):
+def CondenseNetV2_A(**kwargs):
    model = CondenseNetV2(
        stages=[1, 1, 4, 6, 8],
        growth=[8, 8, 16, 32, 64],
@@ -410,7 +410,7 @@ def CondenseNetV2_a(**kwargs):
    return model

-def CondenseNetV2_b(**kwargs):
+def CondenseNetV2_B(**kwargs):
    model = CondenseNetV2(
        stages=[2, 4, 6, 8, 6],
        growth=[6, 12, 24, 48, 96],
@@ -426,7 +426,7 @@ def CondenseNetV2_b(**kwargs):
    return model

-def CondenseNetV2_c(**kwargs):
+def CondenseNetV2_C(**kwargs):
    model = CondenseNetV2(
        stages=[4, 6, 8, 10, 8],
        growth=[8, 16, 32, 64, 128],
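
For quick reference, the renamed factories can be constructed directly; a minimal sketch (keyword arguments such as num_classes are forwarded to CondenseNetV2, as in the new unit tests further down, so treat the exact argument set as an assumption):

    from paddlers.rs_models.clas import CondenseNetV2_A, CondenseNetV2_B, CondenseNetV2_C

    # The three presets differ only in the stage/growth settings shown above,
    # roughly from the smallest (A) to the largest (C) variant.
    net_a = CondenseNetV2_A(num_classes=21)
    net_b = CondenseNetV2_B(num_classes=21)
    net_c = CondenseNetV2_C(num_classes=21)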

paddlers/tasks/classifier.py
@@ -34,9 +34,7 @@ from paddlers.utils.checkpoint import cls_pretrain_weights_dict
from paddlers.transforms import Resize, decode_image
from .base import BaseModel

-__all__ = [
-    "ResNet50_vd", "MobileNetV3_small_x1_0", "HRNet_W18_C", "CondenseNetV2_b"
-]
+__all__ = ["ResNet50_vd", "MobileNetV3", "HRNet", "CondenseNetV2"]

class BaseClassifier(BaseModel):
@@ -600,13 +598,13 @@ class ResNet50_vd(BaseClassifier):
            **params)

-class MobileNetV3_small_x1_0(BaseClassifier):
+class MobileNetV3(BaseClassifier):
    def __init__(self,
                 num_classes=2,
                 use_mixed_loss=False,
                 losses=None,
                 **params):
-        super(MobileNetV3_small_x1_0, self).__init__(
+        super(MobileNetV3, self).__init__(
            model_name='MobileNetV3_small_x1_0',
            num_classes=num_classes,
            use_mixed_loss=use_mixed_loss,
@@ -614,13 +612,13 @@ class MobileNetV3_small_x1_0(BaseClassifier):
            **params)

-class HRNet_W18_C(BaseClassifier):
+class HRNet(BaseClassifier):
    def __init__(self,
                 num_classes=2,
                 use_mixed_loss=False,
                 losses=None,
                 **params):
-        super(HRNet_W18_C, self).__init__(
+        super(HRNet, self).__init__(
            model_name='HRNet_W18_C',
            num_classes=num_classes,
            use_mixed_loss=use_mixed_loss,
@@ -628,15 +626,21 @@ class HRNet_W18_C(BaseClassifier):
            **params)

-class CondenseNetV2_b(BaseClassifier):
+class CondenseNetV2(BaseClassifier):
    def __init__(self,
                 num_classes=2,
                 use_mixed_loss=False,
                 losses=None,
+                 in_channels=3,
+                 arch='A',
                 **params):
-        super(CondenseNetV2_b, self).__init__(
-            model_name='CondenseNetV2_b',
+        if arch not in ('A', 'B', 'C'):
+            raise ValueError("{} is not a supported architecture.".format(arch))
+        model_name = 'CondenseNetV2_' + arch
+        super(CondenseNetV2, self).__init__(
+            model_name=model_name,
            num_classes=num_classes,
            use_mixed_loss=use_mixed_loss,
            losses=losses,
+            in_channels=in_channels,
            **params)
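
The unified CondenseNetV2 trainer now selects its backbone through `arch`; a minimal usage sketch (argument names follow the diff above, everything else is omitted):

    import paddlers as pdrs

    # Build the task with the CondenseNet V2-B backbone and 21 output classes.
    model = pdrs.tasks.clas.CondenseNetV2(arch='B', in_channels=3, num_classes=21)

    # Anything other than 'A', 'B', or 'C' is rejected with a ValueError:
    # pdrs.tasks.clas.CondenseNetV2(arch='D')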

test_tipc/configs/clas/condensenetv2/condensenetv2_ucmerced.yaml (new file)
@@ -0,0 +1,10 @@
# Configurations of CondenseNet V2 with UCMerced dataset

_base_: ../_base_/ucmerced.yaml

save_dir: ./test_tipc/output/clas/condensenetv2/

model: !Node
  type: CondenseNetV2
  args:
    num_classes: 21
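
The `!Node` entry is how the TIPC config describes the trainer to build; assuming the usual mapping of `type` to a class in pdrs.tasks.clas and `args` to its keyword arguments, it corresponds roughly to:

    import paddlers as pdrs

    # Rough Python equivalent of the `model: !Node` entry above (mapping assumed).
    model = pdrs.tasks.clas.CondenseNetV2(num_classes=21)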

test_tipc/configs/clas/condensenetv2/train_infer_python.txt (new file)
@@ -0,0 +1,53 @@
===========================train_params===========================
model_name:clas:condensenetv2
python:python
gpu_list:0|0,1
use_gpu:null|null
--precision:null
--num_epochs:lite_train_lite_infer=3|lite_train_whole_infer=3|whole_train_whole_infer=10
--save_dir:adaptive
--train_batch_size:lite_train_lite_infer=16|lite_train_whole_infer=16|whole_train_whole_infer=16
--model_path:null
--config:lite_train_lite_infer=./test_tipc/configs/clas/condensenetv2/condensenetv2_ucmerced.yaml|lite_train_whole_infer=./test_tipc/configs/clas/condensenetv2/condensenetv2_ucmerced.yaml|whole_train_whole_infer=./test_tipc/configs/clas/condensenetv2/condensenetv2_ucmerced.yaml
train_model_name:best_model
null:null
##
trainer:norm
norm_train:test_tipc/run_task.py train clas
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================export_params===========================
--save_dir:adaptive
--model_dir:adaptive
--fixed_input_shape:[-1,3,256,256]
norm_export:deploy/export/export_model.py
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
===========================infer_params===========================
infer_model:null
infer_export:null
infer_quant:False
inference:test_tipc/infer.py
--device:cpu|gpu
--enable_mkldnn:True
--cpu_threads:6
--batch_size:1
--use_trt:False
--precision:fp32
--model_dir:null
--config:null
--save_log_path:null
--benchmark:True
--model_name:condensenetv2
null:null
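
The `--fixed_input_shape:[-1,3,256,256]` entry pins the exported model to 3-channel 256x256 inputs with a dynamic batch dimension. The actual export is performed by deploy/export/export_model.py; the following is only a hedged sketch of what that shape specification amounts to in a paddle.jit export:

    import paddle
    from paddlers.rs_models.clas import CondenseNetV2_A

    # Illustration only (not the real export script): trace the network with a
    # dynamic batch dimension and a fixed 3x256x256 input.
    net = CondenseNetV2_A(num_classes=21)
    net.eval()
    spec = [paddle.static.InputSpec(shape=[-1, 3, 256, 256], dtype='float32')]
    static_net = paddle.jit.to_static(net, input_spec=spec)
    paddle.jit.save(static_net, './static_model/model')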

test_tipc/configs/clas/hrnet/hrnet.yaml (deleted)
@@ -1,10 +0,0 @@
# Basic configurations of HRNet

_base_: ../_base_/ucmerced.yaml

save_dir: ./test_tipc/output/clas/hrnet/

model: !Node
  type: HRNet_W18_C
  args:
    num_classes: 21

test_tipc/configs/clas/hrnet/hrnet_ucmerced.yaml
@@ -5,6 +5,6 @@ _base_: ../_base_/ucmerced.yaml
save_dir: ./test_tipc/output/clas/hrnet/

model: !Node
-  type: HRNet_W18_C
+  type: HRNet
  args:
    num_classes: 21

tests/rs_models/test_clas_models.py
@@ -18,7 +18,7 @@ from rs_models.test_model import TestModel
__all__ = []

-class TestCDModel(TestModel):
+class TestClasModel(TestModel):
    DEFAULT_HW = (224, 224)

    def check_output(self, output, target):
@@ -36,3 +36,36 @@ class TestCDModel(TestModel):
    def set_targets(self):
        self.targets = [[self.DEFAULT_BATCH_SIZE, spec.get('num_classes', 2)]
                        for spec in self.specs]
+
+
+class TestCondenseNetV2AModel(TestClasModel):
+    MODEL_CLASS = paddlers.rs_models.clas.CondenseNetV2_A
+
+    def set_specs(self):
+        self.specs = [
+            dict(in_channels=3, num_classes=2),
+            dict(in_channels=10, num_classes=2),
+            dict(in_channels=3, num_classes=100)
+        ]  # yapf: disable
+
+
+class TestCondenseNetV2BModel(TestClasModel):
+    MODEL_CLASS = paddlers.rs_models.clas.CondenseNetV2_B
+
+    def set_specs(self):
+        self.specs = [
+            dict(in_channels=3, num_classes=2),
+            dict(in_channels=10, num_classes=2),
+            dict(in_channels=3, num_classes=100)
+        ]  # yapf: disable
+
+
+class TestCondenseNetV2CModel(TestClasModel):
+    MODEL_CLASS = paddlers.rs_models.clas.CondenseNetV2_C
+
+    def set_specs(self):
+        self.specs = [
+            dict(in_channels=3, num_classes=2),
+            dict(in_channels=10, num_classes=2),
+            dict(in_channels=3, num_classes=100)
+        ]  # yapf: disable
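
Each spec dict is passed to MODEL_CLASS as keyword arguments, and the resulting output shape is checked against [DEFAULT_BATCH_SIZE, num_classes]. A standalone sketch of the same check (the batch size of 2 and the single-tensor output are assumptions):

    import paddle
    from paddlers.rs_models.clas import CondenseNetV2_A

    # Mirror of the second spec: 10 input channels, 2 classes.
    net = CondenseNetV2_A(in_channels=10, num_classes=2)
    net.eval()
    x = paddle.randn([2, 10, 224, 224])   # DEFAULT_HW is (224, 224); batch size assumed
    out = net(x)
    print(out.shape)                      # expected: [2, 2]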

tutorials/train/classification/condensenetv2.py (new file)
@@ -0,0 +1,90 @@
#!/usr/bin/env python

# Example script for training the CondenseNet V2 scene classification model
# Before running this script, make sure the PaddleRS library is correctly installed

import paddlers as pdrs
from paddlers import transforms as T

# Dataset directory
DATA_DIR = './data/ucmerced/'
# Path to the `file_list` of the training set
TRAIN_FILE_LIST_PATH = './data/ucmerced/train.txt'
# Path to the `file_list` of the validation set
EVAL_FILE_LIST_PATH = './data/ucmerced/val.txt'
# Path to the dataset label file
LABEL_LIST_PATH = './data/ucmerced/labels.txt'
# Experiment directory, where output model weights and results are saved
EXP_DIR = './output/condensenetv2/'

# Download and extract the UC Merced dataset
pdrs.utils.download_and_decompress(
    'https://paddlers.bj.bcebos.com/datasets/ucmerced.zip', path='./data/')

# Define the transforms (data augmentation, preprocessing, etc.) used for training and validation
# Compose combines multiple transforms, which are applied sequentially in the given order
# API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
train_transforms = T.Compose([
    # Decode the image
    T.DecodeImg(),
    # Resize the image to 256x256
    T.Resize(target_size=256),
    # Apply a random horizontal flip with a probability of 50%
    T.RandomHorizontalFlip(prob=0.5),
    # Apply a random vertical flip with a probability of 50%
    T.RandomVerticalFlip(prob=0.5),
    # Normalize the data to [-1, 1]
    T.Normalize(
        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    T.ArrangeClassifier('train')
])

eval_transforms = T.Compose([
    T.DecodeImg(),
    T.Resize(target_size=256),
    # Normalization during validation must match that used during training
    T.Normalize(
        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    T.ArrangeClassifier('eval')
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.ClasDataset(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    num_workers=0,
    shuffle=True)

eval_dataset = pdrs.datasets.ClasDataset(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    num_workers=0,
    shuffle=False)

# Build the CondenseNet V2 model
# For the list of currently supported models, see: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/intro/model_zoo.md
# For model construction parameters, see: https://github.com/PaddlePaddle/PaddleRS/blob/develop/paddlers/tasks/classifier.py
model = pdrs.tasks.clas.CondenseNetV2(num_classes=len(train_dataset.labels))

# Train the model
model.train(
    num_epochs=2,
    train_dataset=train_dataset,
    train_batch_size=16,
    eval_dataset=eval_dataset,
    save_interval_epochs=1,
    # Log every N iterations
    log_interval_steps=50,
    save_dir=EXP_DIR,
    # Initial learning rate
    learning_rate=0.01,
    # Whether to use early stopping, which stops training early when accuracy no longer improves
    early_stop=False,
    # Whether to enable VisualDL logging
    use_vdl=True,
    # Resume training from a specified checkpoint
    resume_checkpoint=None)
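
After training, the best weights are written under EXP_DIR. A hedged follow-up sketch, assuming the evaluate() interface that PaddleRS trainers expose:

    # Assumed follow-up step: report metrics on the validation set.
    metrics = model.evaluate(eval_dataset)
    print(metrics)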