parent 9f5c87e9dd
commit 69c160404a
16 changed files with 504 additions and 42 deletions
@@ -1,8 +0,0 @@
# Basic configurations of BIT

_base_: ../_base_/airchange.yaml

save_dir: ./test_tipc/output/cd/bit/

model: !Node
    type: BIT
@@ -1,8 +0,0 @@
# Basic configurations of ChangeFormer

_base_: ../_base_/airchange.yaml

save_dir: ./test_tipc/output/cd/changeformer/

model: !Node
    type: ChangeFormer
@@ -1,13 +0,0 @@
# Basic configurations of FCCDN

_base_: ../_base_/airchange.yaml

save_dir: ./test_tipc/output/cd/fccdn/

model: !Node
    type: FCCDN

learning_rate: 0.07
lr_decay_power: 0.6
log_interval_steps: 100
save_interval_epochs: 3
@@ -0,0 +1,11 @@
# Configurations of BiSeNet V2 with RSSeg dataset

_base_: ../_base_/rsseg.yaml

save_dir: ./test_tipc/output/seg/bisenetv2/

model: !Node
    type: BiSeNetV2
    args:
        in_channels: 10
        num_classes: 5
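For orientation, a minimal Python sketch of how a `!Node` entry like the one above maps onto the PaddleRS task API; the class name and argument values mirror the tutorial scripts added later in this commit:

    import paddlers as pdrs

    # Build the segmenter described by the YAML node:
    # BiSeNetV2 with 10 input channels (bands) and 5 output classes.
    model = pdrs.tasks.seg.BiSeNetV2(in_channels=10, num_classes=5)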
@@ -0,0 +1,53 @@
===========================train_params===========================
model_name:seg:bisenetv2
python:python
gpu_list:0|0,1
use_gpu:null|null
--precision:null
--num_epochs:lite_train_lite_infer=3|lite_train_whole_infer=3|whole_train_whole_infer=20
--save_dir:adaptive
--train_batch_size:lite_train_lite_infer=4|lite_train_whole_infer=4|whole_train_whole_infer=4
--model_path:null
--config:lite_train_lite_infer=./test_tipc/configs/seg/bisenetv2/bisenetv2_rsseg.yaml|lite_train_whole_infer=./test_tipc/configs/seg/bisenetv2/bisenetv2_rsseg.yaml|whole_train_whole_infer=./test_tipc/configs/seg/bisenetv2/bisenetv2_rsseg.yaml
train_model_name:best_model
null:null
##
trainer:norm
norm_train:test_tipc/run_task.py train seg
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================export_params===========================
--save_dir:adaptive
--model_dir:adaptive
--fixed_input_shape:[-1,10,512,512]
norm_export:deploy/export/export_model.py
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
===========================infer_params===========================
infer_model:null
infer_export:null
infer_quant:False
inference:test_tipc/infer.py
--device:cpu|gpu
--enable_mkldnn:True
--cpu_threads:6
--batch_size:1
--use_trt:False
--precision:fp32
--model_dir:null
--config:null
--save_log_path:null
--benchmark:True
--model_name:bisenetv2
null:null
@@ -0,0 +1,11 @@
# Configurations of Fast-SCNN with RSSeg dataset

_base_: ../_base_/rsseg.yaml

save_dir: ./test_tipc/output/seg/fast_scnn/

model: !Node
    type: FastSCNN
    args:
        in_channels: 10
        num_classes: 5
@@ -0,0 +1,53 @@
===========================train_params===========================
model_name:seg:fast_scnn
python:python
gpu_list:0|0,1
use_gpu:null|null
--precision:null
--num_epochs:lite_train_lite_infer=3|lite_train_whole_infer=3|whole_train_whole_infer=20
--save_dir:adaptive
--train_batch_size:lite_train_lite_infer=4|lite_train_whole_infer=4|whole_train_whole_infer=4
--model_path:null
--config:lite_train_lite_infer=./test_tipc/configs/seg/fast_scnn/fast_scnn_rsseg.yaml|lite_train_whole_infer=./test_tipc/configs/seg/fast_scnn/fast_scnn_rsseg.yaml|whole_train_whole_infer=./test_tipc/configs/seg/fast_scnn/fast_scnn_rsseg.yaml
train_model_name:best_model
null:null
##
trainer:norm
norm_train:test_tipc/run_task.py train seg
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================export_params===========================
--save_dir:adaptive
--model_dir:adaptive
--fixed_input_shape:[-1,10,512,512]
norm_export:deploy/export/export_model.py
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
===========================infer_params===========================
infer_model:null
infer_export:null
infer_quant:False
inference:test_tipc/infer.py
--device:cpu|gpu
--enable_mkldnn:True
--cpu_threads:6
--batch_size:1
--use_trt:False
--precision:fp32
--model_dir:null
--config:null
--save_log_path:null
--benchmark:True
--model_name:fast_scnn
null:null
@@ -0,0 +1,11 @@
# Configurations of HRNet with RSSeg dataset

_base_: ../_base_/rsseg.yaml

save_dir: ./test_tipc/output/seg/hrnet/

model: !Node
    type: HRNet
    args:
        in_channels: 10
        num_classes: 5
@@ -0,0 +1,53 @@
===========================train_params===========================
model_name:seg:hrnet
python:python
gpu_list:0|0,1
use_gpu:null|null
--precision:null
--num_epochs:lite_train_lite_infer=3|lite_train_whole_infer=3|whole_train_whole_infer=20
--save_dir:adaptive
--train_batch_size:lite_train_lite_infer=4|lite_train_whole_infer=4|whole_train_whole_infer=4
--model_path:null
--config:lite_train_lite_infer=./test_tipc/configs/seg/hrnet/hrnet_rsseg.yaml|lite_train_whole_infer=./test_tipc/configs/seg/hrnet/hrnet_rsseg.yaml|whole_train_whole_infer=./test_tipc/configs/seg/hrnet/hrnet_rsseg.yaml
train_model_name:best_model
null:null
##
trainer:norm
norm_train:test_tipc/run_task.py train seg
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================export_params===========================
--save_dir:adaptive
--model_dir:adaptive
--fixed_input_shape:[-1,10,512,512]
norm_export:deploy/export/export_model.py
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
===========================infer_params===========================
infer_model:null
infer_export:null
infer_quant:False
inference:test_tipc/infer.py
--device:cpu|gpu
--enable_mkldnn:True
--cpu_threads:6
--batch_size:1
--use_trt:False
--precision:fp32
--model_dir:null
--config:null
--save_log_path:null
--benchmark:True
--model_name:hrnet
null:null
@@ -0,0 +1,93 @@
#!/usr/bin/env python

# Example script for training the BiSeNet V2 image segmentation model
# Before running this script, please make sure the PaddleRS library is installed correctly

import paddlers as pdrs
from paddlers import transforms as T

# Dataset directory
DATA_DIR = './data/rsseg/'
# Path to the `file_list` file of the training set
TRAIN_FILE_LIST_PATH = './data/rsseg/train.txt'
# Path to the `file_list` file of the validation set
EVAL_FILE_LIST_PATH = './data/rsseg/val.txt'
# Path to the file containing dataset class information
LABEL_LIST_PATH = './data/rsseg/labels.txt'
# Experiment directory, where output model weights and results are saved
EXP_DIR = './output/bisenetv2/'

# Number of image bands
NUM_BANDS = 10

# Download and decompress the multispectral land parcel classification dataset
pdrs.utils.download_and_decompress(
    'https://paddlers.bj.bcebos.com/datasets/rsseg.zip', path='./data/')

# Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
# Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
# API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
train_transforms = T.Compose([
    # Read the image
    T.DecodeImg(),
    # Resize the image to 512x512
    T.Resize(target_size=512),
    # Apply random horizontal flipping with a probability of 50%
    T.RandomHorizontalFlip(prob=0.5),
    # Normalize the data to [-1,1]
    T.Normalize(
        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
    T.ArrangeSegmenter('train')
])

eval_transforms = T.Compose([
    T.DecodeImg(),
    T.Resize(target_size=512),
    # Validation must use the same normalization as training
    T.Normalize(
        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
    T.ReloadMask(),
    T.ArrangeSegmenter('eval')
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    num_workers=0,
    shuffle=True)

eval_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    num_workers=0,
    shuffle=False)

# Build the BiSeNet V2 model
# For the list of currently supported models, see: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/intro/model_zoo.md
# For model input parameters, see: https://github.com/PaddlePaddle/PaddleRS/blob/develop/paddlers/tasks/segmenter.py
model = pdrs.tasks.seg.BiSeNetV2(
    in_channels=NUM_BANDS, num_classes=len(train_dataset.labels))

# Run model training
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=4,
    eval_dataset=eval_dataset,
    save_interval_epochs=5,
    # Interval (in iterations) at which training logs are recorded
    log_interval_steps=4,
    save_dir=EXP_DIR,
    # Initial learning rate
    learning_rate=0.001,
    # Whether to use the early stopping strategy, which terminates training early when accuracy stops improving
    early_stop=False,
    # Whether to enable VisualDL logging
    use_vdl=True,
    # Specify a checkpoint from which to resume training
    resume_checkpoint=None)
@@ -0,0 +1,93 @@
#!/usr/bin/env python

# Example script for training the Fast-SCNN image segmentation model
# Before running this script, please make sure the PaddleRS library is installed correctly

import paddlers as pdrs
from paddlers import transforms as T

# Dataset directory
DATA_DIR = './data/rsseg/'
# Path to the `file_list` file of the training set
TRAIN_FILE_LIST_PATH = './data/rsseg/train.txt'
# Path to the `file_list` file of the validation set
EVAL_FILE_LIST_PATH = './data/rsseg/val.txt'
# Path to the file containing dataset class information
LABEL_LIST_PATH = './data/rsseg/labels.txt'
# Experiment directory, where output model weights and results are saved
EXP_DIR = './output/fast_scnn/'

# Number of image bands
NUM_BANDS = 10

# Download and decompress the multispectral land parcel classification dataset
pdrs.utils.download_and_decompress(
    'https://paddlers.bj.bcebos.com/datasets/rsseg.zip', path='./data/')

# Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
# Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
# API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
train_transforms = T.Compose([
    # Read the image
    T.DecodeImg(),
    # Resize the image to 512x512
    T.Resize(target_size=512),
    # Apply random horizontal flipping with a probability of 50%
    T.RandomHorizontalFlip(prob=0.5),
    # Normalize the data to [-1,1]
    T.Normalize(
        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
    T.ArrangeSegmenter('train')
])

eval_transforms = T.Compose([
    T.DecodeImg(),
    T.Resize(target_size=512),
    # Validation must use the same normalization as training
    T.Normalize(
        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
    T.ReloadMask(),
    T.ArrangeSegmenter('eval')
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    num_workers=0,
    shuffle=True)

eval_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    num_workers=0,
    shuffle=False)

# Build the Fast-SCNN model
# For the list of currently supported models, see: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/intro/model_zoo.md
# For model input parameters, see: https://github.com/PaddlePaddle/PaddleRS/blob/develop/paddlers/tasks/segmenter.py
model = pdrs.tasks.seg.FastSCNN(
    in_channels=NUM_BANDS, num_classes=len(train_dataset.labels))

# Run model training
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=4,
    eval_dataset=eval_dataset,
    save_interval_epochs=5,
    # Interval (in iterations) at which training logs are recorded
    log_interval_steps=4,
    save_dir=EXP_DIR,
    # Initial learning rate
    learning_rate=0.001,
    # Whether to use the early stopping strategy, which terminates training early when accuracy stops improving
    early_stop=False,
    # Whether to enable VisualDL logging
    use_vdl=True,
    # Specify a checkpoint from which to resume training
    resume_checkpoint=None)
@@ -0,0 +1,93 @@
#!/usr/bin/env python

# Example script for training the HRNet image segmentation model
# Before running this script, please make sure the PaddleRS library is installed correctly

import paddlers as pdrs
from paddlers import transforms as T

# Dataset directory
DATA_DIR = './data/rsseg/'
# Path to the `file_list` file of the training set
TRAIN_FILE_LIST_PATH = './data/rsseg/train.txt'
# Path to the `file_list` file of the validation set
EVAL_FILE_LIST_PATH = './data/rsseg/val.txt'
# Path to the file containing dataset class information
LABEL_LIST_PATH = './data/rsseg/labels.txt'
# Experiment directory, where output model weights and results are saved
EXP_DIR = './output/hrnet/'

# Number of image bands
NUM_BANDS = 10

# Download and decompress the multispectral land parcel classification dataset
pdrs.utils.download_and_decompress(
    'https://paddlers.bj.bcebos.com/datasets/rsseg.zip', path='./data/')

# Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
# Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
# API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
train_transforms = T.Compose([
    # Read the image
    T.DecodeImg(),
    # Resize the image to 512x512
    T.Resize(target_size=512),
    # Apply random horizontal flipping with a probability of 50%
    T.RandomHorizontalFlip(prob=0.5),
    # Normalize the data to [-1,1]
    T.Normalize(
        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
    T.ArrangeSegmenter('train')
])

eval_transforms = T.Compose([
    T.DecodeImg(),
    T.Resize(target_size=512),
    # Validation must use the same normalization as training
    T.Normalize(
        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
    T.ReloadMask(),
    T.ArrangeSegmenter('eval')
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    num_workers=0,
    shuffle=True)

eval_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    num_workers=0,
    shuffle=False)

# Build the HRNet model
# For the list of currently supported models, see: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/intro/model_zoo.md
# For model input parameters, see: https://github.com/PaddlePaddle/PaddleRS/blob/develop/paddlers/tasks/segmenter.py
model = pdrs.tasks.seg.HRNet(
    in_channels=NUM_BANDS, num_classes=len(train_dataset.labels))

# Run model training
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=4,
    eval_dataset=eval_dataset,
    save_interval_epochs=5,
    # Interval (in iterations) at which training logs are recorded
    log_interval_steps=4,
    save_dir=EXP_DIR,
    # Initial learning rate
    learning_rate=0.001,
    # Whether to use the early stopping strategy, which terminates training early when accuracy stops improving
    early_stop=False,
    # Whether to enable VisualDL logging
    use_vdl=True,
    # Specify a checkpoint from which to resume training
    resume_checkpoint=None)