# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from paddlers_slim.models.ppseg.utils.download import download_file_and_uncompress
from paddlers_slim.models.ppseg.utils import seg_env
from paddlers_slim.models.ppseg.cvlibs import manager
from paddlers_slim.models.ppseg.transforms import Compose
from paddlers_slim.models.ppseg.datasets import Dataset

URL = 'https://bj.bcebos.com/paddleseg/dataset/chase_db1/chase_db1.zip'


@manager.DATASETS.add_component
class CHASEDB1(Dataset):
    """
    CHASE_DB1 is a retinal vessel segmentation dataset that contains 28 color
    retina images with a size of 999×960 pixels, collected from the left and
    right eyes of 14 school children. Each image is annotated by two
    independent human experts; the labels of the first expert are used here.
    (https://blogs.kingston.ac.uk/retinal/chasedb1/)

    Args:
        transforms (list): Transforms for image.
        dataset_root (str, optional): The dataset directory. Default: None.
        edge (bool, optional): Whether to extract edge information in the
            output. Default: False.
        mode (str, optional): Which part of the dataset to use. It is one of
            ('train', 'val', 'test'). Default: 'train'.
    """
    NUM_CLASSES = 2

    def __init__(self, dataset_root=None, transforms=None, edge=False,
                 mode='train'):
        self.dataset_root = dataset_root
        self.transforms = Compose(transforms)
        mode = mode.lower()
        self.mode = mode
        self.edge = edge
        self.file_list = list()
        self.num_classes = self.NUM_CLASSES
        # Labels only contain 0/1, so ignore_index is never actually hit.
        self.ignore_index = 255

        if mode not in ['train', 'val', 'test']:
            raise ValueError(
                "`mode` should be 'train', 'val' or 'test', but got {}.".format(
                    mode))

        if self.transforms is None:
            raise ValueError("`transforms` is necessary, but it is None.")

        if self.dataset_root is None:
            # No directory given: download and extract into the default data home.
            self.dataset_root = download_file_and_uncompress(
                url=URL,
                savepath=seg_env.DATA_HOME,
                extrapath=seg_env.DATA_HOME)
        elif not os.path.exists(self.dataset_root):
            # Directory given but missing: download into its parent and extract
            # under the requested name.
            self.dataset_root = os.path.normpath(self.dataset_root)
            savepath, extraname = self.dataset_root.rsplit(
                sep=os.path.sep, maxsplit=1)
            self.dataset_root = download_file_and_uncompress(
                url=URL,
                savepath=savepath,
                extrapath=savepath,
                extraname=extraname)

        if mode == 'train':
            file_path = os.path.join(self.dataset_root, 'train_list.txt')
        elif mode == 'val':
            file_path = os.path.join(self.dataset_root, 'val_list.txt')
        else:
            # Assumption: the released chase_db1.zip ships only train/val file
            # lists, so 'test' mode falls back to the validation list instead
            # of leaving `file_path` unbound.
            file_path = os.path.join(self.dataset_root, 'val_list.txt')

        with open(file_path, 'r') as f:
            for line in f:
                items = line.strip().split()
                if len(items) != 2:
                    # Image-only lines are allowed outside of train/val, where
                    # no ground-truth label is required.
                    if mode == 'train' or mode == 'val':
                        raise Exception(
                            "File list format incorrect! It should be"
                            " image_name label_name\\n")
                    image_path = os.path.join(self.dataset_root, items[0])
                    grt_path = None
                else:
                    image_path = os.path.join(self.dataset_root, items[0])
                    grt_path = os.path.join(self.dataset_root, items[1])
                self.file_list.append([image_path, grt_path])
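# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the upstream API). It assumes
# that CHASEDB1 is re-exported from the `datasets` package the same way
# `Dataset` is, and that the bundled ppseg transforms expose `Resize` and
# `Normalize` as in upstream PaddleSeg. 'data/chase_db1' is a hypothetical
# local path; passing dataset_root=None instead downloads the archive to
# seg_env.DATA_HOME.
#
#   from paddlers_slim.models.ppseg import transforms as T
#   from paddlers_slim.models.ppseg.datasets import CHASEDB1
#
#   train_dataset = CHASEDB1(
#       dataset_root='data/chase_db1',   # hypothetical local copy
#       transforms=[T.Resize(target_size=(960, 960)), T.Normalize()],
#       edge=False,
#       mode='train')
#   print(len(train_dataset))            # number of image/label pairs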