commit
a6f5407ea9
18 changed files with 608 additions and 98 deletions
@@ -0,0 +1,76 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import codecs

import cv2
import numpy as np
import geojson
from geojson import Polygon, Feature, FeatureCollection

from utils import Raster, Timer


def _gt_convert(x, y, geotf):
    # Apply the six-element GDAL affine geotransform to map the pixel
    # coordinates (x, y) to georeferenced coordinates.
    x_geo = geotf[0] + x * geotf[1] + y * geotf[2]
    y_geo = geotf[3] + x * geotf[4] + y * geotf[5]
    return x_geo, y_geo


@Timer
def convert_data(mask_path, save_path, epsilon=0):
    raster = Raster(mask_path)
    img = raster.getArray()
    geo_writer = codecs.open(save_path, "w", encoding="utf-8")
    clas = np.unique(img)
    cv2_v = (cv2.__version__.split(".")[0] == "3")
    feats = []
    if not isinstance(epsilon, (int, float)):
        epsilon = 0
    # Vectorize each class separately, skipping the background class (the
    # smallest value in `clas`). Iterating over the actual class values,
    # rather than `range(1, len(clas))`, also handles non-contiguous labels.
    for cls_val in clas[1:]:
        tmp = np.zeros_like(img).astype("uint8")
        tmp[img == cls_val] = 1
        # TODO: Detect internal and external contours
        results = cv2.findContours(tmp, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_TC89_KCOS)
        # OpenCV 3.x returns (image, contours, hierarchy);
        # 4.x returns (contours, hierarchy)
        contours = results[1] if cv2_v else results[0]
        # hierarchys = results[2] if cv2_v else results[1]
        if len(contours) == 0:
            continue
        for contour in contours:
            # Simplify the contour with the Douglas-Peucker algorithm;
            # `epsilon` is the approximation tolerance in pixels
            contour = cv2.approxPolyDP(contour, epsilon, True)
            polys = []
            for point in contour:
                x, y = point[0]
                xg, yg = _gt_convert(x, y, raster.geot)
                polys.append((xg, yg))
            # Close the ring by repeating the first vertex
            polys.append(polys[0])
            feat = Feature(
                geometry=Polygon([polys]), properties={"class": int(cls_val)})
            feats.append(feat)
    gjs = FeatureCollection(feats)
    geo_writer.write(geojson.dumps(gjs))
    geo_writer.close()


parser = argparse.ArgumentParser(description="input parameters")
parser.add_argument("--mask_path", type=str, required=True,
                    help="Path of the mask TIFF.")
parser.add_argument("--save_path", type=str, required=True,
                    help="Path to save the result; the file suffix is `*.json`.")
parser.add_argument("--epsilon", type=float, default=0,
                    help="Tolerance for cv2.approxPolyDP contour simplification (default: 0, no simplification).")

if __name__ == "__main__":
    args = parser.parse_args()
    convert_data(args.mask_path, args.save_path, args.epsilon)
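For reference, `_gt_convert` above is exactly the standard six-element GDAL affine geotransform. A standalone sketch with made-up coordinates (the tuple follows the GDAL convention: origin_x, pixel_width, row_rotation, origin_y, column_rotation, pixel_height):
```
# Made-up geotransform for illustration only.
geot = (440720.0, 30.0, 0.0, 3751320.0, 0.0, -30.0)

def gt_convert(x, y, geotf):
    x_geo = geotf[0] + x * geotf[1] + y * geotf[2]
    y_geo = geotf[3] + x * geotf[4] + y * geotf[5]
    return x_geo, y_geo

print(gt_convert(0, 0, geot))   # (440720.0, 3751320.0): top-left corner
print(gt_convert(10, 5, geot))  # (441020.0, 3751170.0): 10 px right, 5 px down
```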
@@ -0,0 +1,3 @@
*.zip
*.tar.gz
sarship/
@@ -0,0 +1,98 @@
#!/usr/bin/env python

# Example script for training the Faster R-CNN object detection model.
# Before running this script, make sure the PaddleRS library is correctly installed.

import os

import paddlers as pdrs
from paddlers import transforms as T

# Directory for downloaded files
DOWNLOAD_DIR = './data/sarship/'
# Directory where the dataset is stored
DATA_DIR = './data/sarship/sar_ship_1/'
# Path of the training set `file_list` file
TRAIN_FILE_LIST_PATH = './data/sarship/sar_ship_1/train.txt'
# Path of the validation set `file_list` file
EVAL_FILE_LIST_PATH = './data/sarship/sar_ship_1/valid.txt'
# Path of the dataset class-information file
LABEL_LIST_PATH = './data/sarship/sar_ship_1/labels.txt'
# Experiment directory, where output model weights and results are saved
EXP_DIR = './output/faster_rcnn/'

# Download and decompress the SAR ship detection dataset
sarship_dataset = 'https://paddleseg.bj.bcebos.com/dataset/sar_ship_1.tar.gz'
if not os.path.exists(DATA_DIR):
    pdrs.utils.download_and_decompress(sarship_dataset, path=DOWNLOAD_DIR)

# Define the data transforms used for training and validation
# (data augmentation, preprocessing, etc.)
# Compose combines multiple transforms; they are executed sequentially, in order
# API reference: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/docs/apis/transforms.md
train_transforms = T.Compose([
    # Apply random color distortion to the input image
    T.RandomDistort(),
    # Randomly pad the image borders
    T.RandomExpand(),
    # Random crop; the crop size varies within a certain range
    T.RandomCrop(),
    # Random horizontal flip
    T.RandomHorizontalFlip(),
    # Randomly resize each batch, with a randomly chosen interpolation method
    T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
        interp='RANDOM'),
    # Normalize the image
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

eval_transforms = T.Compose([
    # Resize the input image to a fixed size using bicubic interpolation
    T.Resize(
        target_size=608, interp='CUBIC'),
    # Normalization at validation time must match that used at training time
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    shuffle=True)

eval_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    shuffle=False)

# Build the Faster R-CNN model
# For currently supported models, see: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/docs/apis/model_zoo.md
# For model input parameters, see: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/paddlers/tasks/object_detector.py
model = pdrs.tasks.FasterRCNN(num_classes=len(train_dataset.labels))

# Run model training
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=4,
    eval_dataset=eval_dataset,
    # Save a checkpoint every this many epochs
    save_interval_epochs=5,
    # Write a log entry every this many iterations
    log_interval_steps=4,
    save_dir=EXP_DIR,
    # Specify the pretrained weights
    pretrain_weights='COCO',
    # Initial learning rate
    learning_rate=0.005,
    # Number of learning rate warm-up steps and the starting learning rate
    warmup_steps=0,
    warmup_start_lr=0.0,
    # Whether to enable VisualDL logging
    use_vdl=True)
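Once training finishes, the checkpoints saved under EXP_DIR can be loaded back for prediction. A minimal inference sketch, assuming PaddleRS keeps the PaddleX-style `load_model` helper and writes a `best_model` checkpoint directory under `save_dir` (both details are assumptions, not shown in this commit):
```
import paddlers as pdrs

# Hypothetical checkpoint path, assuming the PaddleX-style layout.
model = pdrs.tasks.load_model('./output/faster_rcnn/best_model')
# 'demo.tif' is a hypothetical test image path.
result = model.predict('demo.tif')
print(result)  # detected boxes with categories and scores
```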
@@ -1,64 +0,0 @@
import os
import paddlers as pdrs
from paddlers import transforms as T

# download dataset
data_dir = 'sar_ship_1'
if not os.path.exists(data_dir):
    dataset_url = 'https://paddleseg.bj.bcebos.com/dataset/sar_ship_1.tar.gz'
    pdrs.utils.download_and_decompress(dataset_url, path='./')

# define transforms
train_transforms = T.Compose([
    T.RandomDistort(),
    T.RandomExpand(),
    T.RandomCrop(),
    T.RandomHorizontalFlip(),
    T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
        interp='RANDOM'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

eval_transforms = T.Compose([
    T.Resize(target_size=608, interp='CUBIC'),
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# define dataset
train_file_list = os.path.join(data_dir, 'train.txt')
val_file_list = os.path.join(data_dir, 'valid.txt')
label_file_list = os.path.join(data_dir, 'labels.txt')
train_dataset = pdrs.datasets.VOCDetection(
    data_dir=data_dir,
    file_list=train_file_list,
    label_list=label_file_list,
    transforms=train_transforms,
    shuffle=True)

eval_dataset = pdrs.datasets.VOCDetection(
    data_dir=data_dir,
    file_list=train_file_list,
    label_list=label_file_list,
    transforms=eval_transforms,
    shuffle=False)

# define models
num_classes = len(train_dataset.labels)
model = pdrs.tasks.FasterRCNN(num_classes=num_classes)

# train
model.train(
    num_epochs=60,
    train_dataset=train_dataset,
    train_batch_size=2,
    eval_dataset=eval_dataset,
    pretrain_weights='COCO',
    learning_rate=0.005 / 12,
    warmup_steps=10,
    warmup_start_lr=0.0,
    save_interval_epochs=5,
    lr_decay_epochs=[20, 40],
    save_dir='output/faster_rcnn_sar_ship',
    use_vdl=True)
@@ -0,0 +1,99 @@
#!/usr/bin/env python

# Example script for training the PP-YOLO object detection model.
# Before running this script, make sure the PaddleRS library is correctly installed.

import os

import paddlers as pdrs
from paddlers import transforms as T

# Directory for downloaded files
DOWNLOAD_DIR = './data/sarship/'
# Directory where the dataset is stored
DATA_DIR = './data/sarship/sar_ship_1/'
# Path of the training set `file_list` file
TRAIN_FILE_LIST_PATH = './data/sarship/sar_ship_1/train.txt'
# Path of the validation set `file_list` file
EVAL_FILE_LIST_PATH = './data/sarship/sar_ship_1/valid.txt'
# Path of the dataset class-information file
LABEL_LIST_PATH = './data/sarship/sar_ship_1/labels.txt'
# Experiment directory, where output model weights and results are saved
EXP_DIR = './output/ppyolo/'

# Download and decompress the SAR ship detection dataset
# Skip the download if the directory already exists
sarship_dataset = 'https://paddleseg.bj.bcebos.com/dataset/sar_ship_1.tar.gz'
if not os.path.exists(DATA_DIR):
    pdrs.utils.download_and_decompress(sarship_dataset, path=DOWNLOAD_DIR)

# Define the data transforms used for training and validation
# (data augmentation, preprocessing, etc.)
# Compose combines multiple transforms; they are executed sequentially, in order
# API reference: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/docs/apis/transforms.md
train_transforms = T.Compose([
    # Apply random color distortion to the input image
    T.RandomDistort(),
    # Randomly pad the image borders
    T.RandomExpand(),
    # Random crop; the crop size varies within a certain range
    T.RandomCrop(),
    # Random horizontal flip
    T.RandomHorizontalFlip(),
    # Randomly resize each batch, with a randomly chosen interpolation method
    T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
        interp='RANDOM'),
    # Normalize the image
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

eval_transforms = T.Compose([
    # Resize the input image to a fixed size using bicubic interpolation
    T.Resize(
        target_size=608, interp='CUBIC'),
    # Normalization at validation time must match that used at training time
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    shuffle=True)

eval_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    shuffle=False)

# Build the PP-YOLO model
# For currently supported models, see: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/docs/apis/model_zoo.md
# For model input parameters, see: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/paddlers/tasks/object_detector.py
model = pdrs.tasks.PPYOLO(num_classes=len(train_dataset.labels))

# Run model training
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=4,
    eval_dataset=eval_dataset,
    # Save a checkpoint every this many epochs
    save_interval_epochs=10,
    # Write a log entry every this many iterations
    log_interval_steps=4,
    save_dir=EXP_DIR,
    # Specify the pretrained weights
    pretrain_weights='COCO',
    # Initial learning rate
    learning_rate=0.0005,
    # Number of learning rate warm-up steps and the starting learning rate
    warmup_steps=0,
    warmup_start_lr=0.0,
    # Whether to enable VisualDL logging
    use_vdl=True)
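The per-model scripts in this commit are identical apart from the task constructor, EXP_DIR, and a few hyperparameters. A hedged sketch of factoring out the model choice; the constructor names are the ones used in these scripts, but the registry itself is illustrative and not part of this commit:
```
import paddlers as pdrs

# Illustrative registry of the detectors used in the scripts above.
DETECTORS = {
    'faster_rcnn': pdrs.tasks.FasterRCNN,
    'ppyolo': pdrs.tasks.PPYOLO,
    'ppyolo_tiny': pdrs.tasks.PPYOLOTiny,
    'ppyolov2': pdrs.tasks.PPYOLOv2,
}

# `train_dataset` is assumed to be the VOCDetection dataset built as above.
model = DETECTORS['ppyolo'](num_classes=len(train_dataset.labels))
```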
@@ -0,0 +1,99 @@
#!/usr/bin/env python

# Example script for training the PP-YOLO Tiny object detection model.
# Before running this script, make sure the PaddleRS library is correctly installed.

import os

import paddlers as pdrs
from paddlers import transforms as T

# Directory for downloaded files
DOWNLOAD_DIR = './data/sarship/'
# Directory where the dataset is stored
DATA_DIR = './data/sarship/sar_ship_1/'
# Path of the training set `file_list` file
TRAIN_FILE_LIST_PATH = './data/sarship/sar_ship_1/train.txt'
# Path of the validation set `file_list` file
EVAL_FILE_LIST_PATH = './data/sarship/sar_ship_1/valid.txt'
# Path of the dataset class-information file
LABEL_LIST_PATH = './data/sarship/sar_ship_1/labels.txt'
# Experiment directory, where output model weights and results are saved
EXP_DIR = './output/ppyolotiny/'

# Download and decompress the SAR ship detection dataset
# Skip the download if the directory already exists
sarship_dataset = 'https://paddleseg.bj.bcebos.com/dataset/sar_ship_1.tar.gz'
if not os.path.exists(DATA_DIR):
    pdrs.utils.download_and_decompress(sarship_dataset, path=DOWNLOAD_DIR)

# Define the data transforms used for training and validation
# (data augmentation, preprocessing, etc.)
# Compose combines multiple transforms; they are executed sequentially, in order
# API reference: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/docs/apis/transforms.md
train_transforms = T.Compose([
    # Apply random color distortion to the input image
    T.RandomDistort(),
    # Randomly pad the image borders
    T.RandomExpand(),
    # Random crop; the crop size varies within a certain range
    T.RandomCrop(),
    # Random horizontal flip
    T.RandomHorizontalFlip(),
    # Randomly resize each batch, with a randomly chosen interpolation method
    T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
        interp='RANDOM'),
    # Normalize the image
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

eval_transforms = T.Compose([
    # Resize the input image to a fixed size using bicubic interpolation
    T.Resize(
        target_size=608, interp='CUBIC'),
    # Normalization at validation time must match that used at training time
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    shuffle=True)

eval_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    shuffle=False)

# Build the PP-YOLO Tiny model
# For currently supported models, see: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/docs/apis/model_zoo.md
# For model input parameters, see: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/paddlers/tasks/object_detector.py
model = pdrs.tasks.PPYOLOTiny(num_classes=len(train_dataset.labels))

# Run model training
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=4,
    eval_dataset=eval_dataset,
    # Save a checkpoint every this many epochs
    save_interval_epochs=5,
    # Write a log entry every this many iterations
    log_interval_steps=4,
    save_dir=EXP_DIR,
    # Specify the pretrained weights
    pretrain_weights='COCO',
    # Initial learning rate
    learning_rate=0.0001,
    # Number of learning rate warm-up steps and the starting learning rate
    warmup_steps=0,
    warmup_start_lr=0.0,
    # Whether to enable VisualDL logging
    use_vdl=True)
@@ -0,0 +1,99 @@
#!/usr/bin/env python

# Example script for training the PP-YOLOv2 object detection model.
# Before running this script, make sure the PaddleRS library is correctly installed.

import os

import paddlers as pdrs
from paddlers import transforms as T

# Directory for downloaded files
DOWNLOAD_DIR = './data/sarship/'
# Directory where the dataset is stored
DATA_DIR = './data/sarship/sar_ship_1/'
# Path of the training set `file_list` file
TRAIN_FILE_LIST_PATH = './data/sarship/sar_ship_1/train.txt'
# Path of the validation set `file_list` file
EVAL_FILE_LIST_PATH = './data/sarship/sar_ship_1/valid.txt'
# Path of the dataset class-information file
LABEL_LIST_PATH = './data/sarship/sar_ship_1/labels.txt'
# Experiment directory, where output model weights and results are saved
EXP_DIR = './output/ppyolov2/'

# Download and decompress the SAR ship detection dataset
# Skip the download if the directory already exists
sarship_dataset = 'https://paddleseg.bj.bcebos.com/dataset/sar_ship_1.tar.gz'
if not os.path.exists(DATA_DIR):
    pdrs.utils.download_and_decompress(sarship_dataset, path=DOWNLOAD_DIR)

# Define the data transforms used for training and validation
# (data augmentation, preprocessing, etc.)
# Compose combines multiple transforms; they are executed sequentially, in order
# API reference: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/docs/apis/transforms.md
train_transforms = T.Compose([
    # Apply random color distortion to the input image
    T.RandomDistort(),
    # Randomly pad the image borders
    T.RandomExpand(),
    # Random crop; the crop size varies within a certain range
    T.RandomCrop(),
    # Random horizontal flip
    T.RandomHorizontalFlip(),
    # Randomly resize each batch, with a randomly chosen interpolation method
    T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
        interp='RANDOM'),
    # Normalize the image
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

eval_transforms = T.Compose([
    # Resize the input image to a fixed size using bicubic interpolation
    T.Resize(
        target_size=608, interp='CUBIC'),
    # Normalization at validation time must match that used at training time
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    shuffle=True)

eval_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    shuffle=False)

# Build the PP-YOLOv2 model
# For currently supported models, see: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/docs/apis/model_zoo.md
# For model input parameters, see: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/paddlers/tasks/object_detector.py
model = pdrs.tasks.PPYOLOv2(num_classes=len(train_dataset.labels))

# Run model training
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=4,
    eval_dataset=eval_dataset,
    # Save a checkpoint every this many epochs
    save_interval_epochs=5,
    # Write a log entry every this many iterations
    log_interval_steps=4,
    save_dir=EXP_DIR,
    # Specify the pretrained weights
    pretrain_weights='COCO',
    # Initial learning rate
    learning_rate=0.0001,
    # Number of learning rate warm-up steps and the starting learning rate
    warmup_steps=0,
    warmup_start_lr=0.0,
    # Whether to enable VisualDL logging
    use_vdl=True)
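The scripts choose different initial learning rates (0.005 for Faster R-CNN, 0.0005 for PP-YOLO, 0.0001 for PP-YOLO Tiny and PP-YOLOv2) at a fixed `train_batch_size=4`. A hedged rule of thumb for adapting them, stated nowhere in this commit and offered only as a common heuristic: scale the initial learning rate linearly with the global batch size.
```
# Linear-scaling heuristic (an assumption, not from this commit).
base_lr = 0.0001      # value used in the PP-YOLOv2 script above
base_batch_size = 4   # train_batch_size used above
new_batch_size = 8    # e.g. doubling the batch
new_lr = base_lr * new_batch_size / base_batch_size
print(new_lr)         # 0.0002
```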
@@ -1,28 +0,0 @@
The detection training demo:
* dataset: AIR-SARShip-1.0
* target: ship
* model: faster_rcnn

Run the demo:

1. Install PaddleRS
```
git clone https://github.com/PaddleCV-SIG/PaddleRS.git
cd PaddleRS
pip install -r requirements.txt
python setup.py install
```

2. Run the demo
```
cd tutorials/train/detection/

# run training on a single GPU
export CUDA_VISIBLE_DEVICES=0
python faster_rcnn_sar_ship.py

# run training on multiple GPUs
export CUDA_VISIBLE_DEVICES=0,1
python -m paddle.distributed.launch faster_rcnn_sar_ship.py
```
@@ -0,0 +1,98 @@
#!/usr/bin/env python

# Example script for training the YOLOv3 object detection model.
# Before running this script, make sure the PaddleRS library is correctly installed.

import os

import paddlers as pdrs
from paddlers import transforms as T

# Directory for downloaded files
DOWNLOAD_DIR = './data/sarship/'
# Directory where the dataset is stored
DATA_DIR = './data/sarship/sar_ship_1/'
# Path of the training set `file_list` file
TRAIN_FILE_LIST_PATH = './data/sarship/sar_ship_1/train.txt'
# Path of the validation set `file_list` file
EVAL_FILE_LIST_PATH = './data/sarship/sar_ship_1/valid.txt'
# Path of the dataset class-information file
LABEL_LIST_PATH = './data/sarship/sar_ship_1/labels.txt'
# Experiment directory, where output model weights and results are saved
EXP_DIR = './output/yolov3/'

# Download and decompress the SAR ship detection dataset
# Skip the download if the directory already exists
sarship_dataset = 'https://paddleseg.bj.bcebos.com/dataset/sar_ship_1.tar.gz'
if not os.path.exists(DATA_DIR):
    pdrs.utils.download_and_decompress(sarship_dataset, path=DOWNLOAD_DIR)

# Define the data transforms used for training and validation
# (data augmentation, preprocessing, etc.)
# Compose combines multiple transforms; they are executed sequentially, in order
# API reference: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/docs/apis/transforms.md
train_transforms = T.Compose([
    # Apply random color distortion to the input image
    T.RandomDistort(),
    # Randomly pad the image borders
    T.RandomExpand(),
    # Random crop; the crop size varies within a certain range
    T.RandomCrop(),
    # Random horizontal flip
    T.RandomHorizontalFlip(),
    # Randomly resize each batch, with a randomly chosen interpolation method
    T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
        interp='RANDOM'),
    # Normalize the image
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

eval_transforms = T.Compose([
    # Resize the input image to a fixed size using bicubic interpolation
    T.Resize(
        target_size=608, interp='CUBIC'),
    # Normalization at validation time must match that used at training time
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    shuffle=True)

eval_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    shuffle=False)

# Build the YOLOv3 model, with DarkNet53 as the backbone
# For currently supported models, see: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/docs/apis/model_zoo.md
# For model input parameters, see: https://github.com/PaddleCV-SIG/PaddleRS/blob/develop/paddlers/tasks/object_detector.py
model = pdrs.tasks.YOLOv3(
    num_classes=len(train_dataset.labels), backbone='DarkNet53')

# Run model training
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=4,
    eval_dataset=eval_dataset,
    # Save a checkpoint every this many epochs
    save_interval_epochs=5,
    # Write a log entry every this many iterations
    log_interval_steps=4,
    save_dir=EXP_DIR,
    # Initial learning rate
    learning_rate=0.0001,
    # Number of learning rate warm-up steps and the starting learning rate
    warmup_steps=0,
    warmup_start_lr=0.0,
    # Whether to enable VisualDL logging
    use_vdl=True)
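Unlike the other scripts in this commit, the YOLOv3 constructor exposes an explicit `backbone` argument. A hedged sketch of swapping in a lighter backbone; whether `'MobileNetV3'` is an accepted name is an assumption here and should be checked against the model zoo document linked above:
```
import paddlers as pdrs

# Hypothetical variant; the backbone name is an assumption, verify it
# against the model zoo before use. `train_dataset` is assumed to be the
# VOCDetection dataset built as above.
model = pdrs.tasks.YOLOv3(
    num_classes=len(train_dataset.labels), backbone='MobileNetV3')
```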