Merge branch 'develop' into refactor_res

own
Bobholamovic 3 years ago
commit 0533d41334
  1. 16
      paddlers/deploy/predictor.py
  2. 12
      paddlers/tasks/change_detector.py
  3. 20
      paddlers/tasks/classifier.py
  4. 302
      paddlers/tasks/object_detector.py
  5. 20
      paddlers/tasks/segmenter.py
  6. 12
      test_tipc/infer.py

@ -146,7 +146,7 @@ class Predictor(object):
return predictor
def preprocess(self, images, transforms):
preprocessed_samples = self._model._preprocess(
preprocessed_samples = self._model.preprocess(
images, transforms, to_tensor=False)
if self._model.model_type == 'classifier':
preprocessed_samples = {'image': preprocessed_samples[0]}
@ -182,12 +182,12 @@ class Predictor(object):
transforms=None):
if self._model.model_type == 'classifier':
true_topk = min(self._model.num_classes, topk)
if self._model._postprocess is None:
self._model.build_postprocess_from_labels(true_topk)
# XXX: Convert ndarray to tensor as self._model._postprocess requires
if self._model.postprocess is None:
self._model.build_postprocess_from_labels(topk)
# XXX: Convert ndarray to tensor as self._model.postprocess requires
assert len(net_outputs) == 1
net_outputs = paddle.to_tensor(net_outputs[0])
outputs = self._model._postprocess(net_outputs)
outputs = self._model.postprocess(net_outputs)
class_ids = map(itemgetter('class_ids'), outputs)
scores = map(itemgetter('scores'), outputs)
label_names = map(itemgetter('label_names'), outputs)
@ -197,7 +197,7 @@ class Predictor(object):
'label_names_map': n,
} for l, s, n in zip(class_ids, scores, label_names)]
elif self._model.model_type in ('segmenter', 'change_detector'):
label_map, score_map = self._model._postprocess(
label_map, score_map = self._model.postprocess(
net_outputs,
batch_origin_shape=ori_shape,
transforms=transforms.transforms)
@ -210,9 +210,9 @@ class Predictor(object):
k: v
for k, v in zip(['bbox', 'bbox_num', 'mask'], net_outputs)
}
preds = self._model._postprocess(net_outputs)
preds = self._model.postprocess(net_outputs)
elif self._model.model_type == 'restorer':
res_maps = self._model._postprocess(
res_maps = self._model.postprocess(
net_outputs[0],
batch_tar_shape=tar_shape,
transforms=transforms.transforms)

@ -119,10 +119,10 @@ class BaseChangeDetector(BaseModel):
if mode == 'test':
origin_shape = inputs[2]
if self.status == 'Infer':
label_map_list, score_map_list = self._postprocess(
label_map_list, score_map_list = self.postprocess(
net_out, origin_shape, transforms=inputs[3])
else:
logit_list = self._postprocess(
logit_list = self.postprocess(
logit, origin_shape, transforms=inputs[3])
label_map_list = []
score_map_list = []
@ -150,7 +150,7 @@ class BaseChangeDetector(BaseModel):
raise ValueError("Expected label.ndim == 4 but got {}".format(
label.ndim))
origin_shape = [label.shape[-2:]]
pred = self._postprocess(
pred = self.postprocess(
pred, origin_shape, transforms=inputs[3])[0] # NCHW
intersect_area, pred_area, label_area = ppseg.utils.metrics.calculate_area(
pred, label, self.num_classes)
@ -556,7 +556,7 @@ class BaseChangeDetector(BaseModel):
images = [img_file]
else:
images = img_file
batch_im1, batch_im2, batch_origin_shape = self._preprocess(
batch_im1, batch_im2, batch_origin_shape = self.preprocess(
images, transforms, self.model_type)
self.net.eval()
data = (batch_im1, batch_im2, batch_origin_shape, transforms.transforms)
@ -667,7 +667,7 @@ class BaseChangeDetector(BaseModel):
dst_data = None
print("GeoTiff saved in {}.".format(save_file))
def _preprocess(self, images, transforms, to_tensor=True):
def preprocess(self, images, transforms, to_tensor=True):
self._check_transforms(transforms, 'test')
batch_im1, batch_im2 = list(), list()
batch_ori_shape = list()
@ -739,7 +739,7 @@ class BaseChangeDetector(BaseModel):
batch_restore_list.append(restore_list)
return batch_restore_list
def _postprocess(self, batch_pred, batch_origin_shape, transforms):
def postprocess(self, batch_pred, batch_origin_shape, transforms):
batch_restore_list = BaseChangeDetector.get_transforms_shape_info(
batch_origin_shape, transforms)
if isinstance(batch_pred, (tuple, list)) and self.status == 'Infer':

@ -62,7 +62,7 @@ class BaseClassifier(BaseModel):
self.metrics = None
self.losses = losses
self.labels = None
self._postprocess = None
self.postprocess = None
if params.get('with_net', True):
params.pop('with_net', None)
self.net = self.build_net(**params)
@ -127,13 +127,12 @@ class BaseClassifier(BaseModel):
net_out = net(inputs[0])
if mode == 'test':
return self._postprocess(net_out)
return self.postprocess(net_out)
outputs = OrderedDict()
label = paddle.to_tensor(inputs[1], dtype="int64")
if mode == 'eval':
# print(self._postprocess(net_out)[0]) # for test
label = paddle.unsqueeze(label, axis=-1)
metric_dict = self.metrics(net_out, label)
outputs['top1'] = metric_dict["top1"]
@ -182,13 +181,13 @@ class BaseClassifier(BaseModel):
label_dict = dict()
for i, label in enumerate(self.labels):
label_dict[i] = label
self._postprocess = build_postprocess({
self.postprocess = build_postprocess({
"name": "Topk",
"topk": topk,
"class_id_map_file": None
})
# Add class_id_map from model.yml
self._postprocess.class_id_map = label_dict
self.postprocess.class_id_map = label_dict
def train(self,
num_epochs,
@ -253,8 +252,7 @@ class BaseClassifier(BaseModel):
if self.losses is None:
self.losses = self.default_loss()
self.metrics = self.default_metric()
self._postprocess = self.default_postprocess(train_dataset.label_list)
# print(self._postprocess.class_id_map)
self.postprocess = self.default_postprocess(train_dataset.label_list)
if optimizer is None:
num_steps_each_epoch = train_dataset.num_samples // train_batch_size
@ -460,12 +458,12 @@ class BaseClassifier(BaseModel):
images = [img_file]
else:
images = img_file
batch_im, batch_origin_shape = self._preprocess(images, transforms,
self.model_type)
batch_im, batch_origin_shape = self.preprocess(images, transforms,
self.model_type)
self.net.eval()
data = (batch_im, batch_origin_shape, transforms.transforms)
if self._postprocess is None:
if self.postprocess is None:
self.build_postprocess_from_labels()
outputs = self.run(self.net, data, 'test')
@ -486,7 +484,7 @@ class BaseClassifier(BaseModel):
}
return prediction
def _preprocess(self, images, transforms, to_tensor=True):
def preprocess(self, images, transforms, to_tensor=True):
self._check_transforms(transforms, 'test')
batch_im = list()
batch_ori_shape = list()

@ -254,6 +254,34 @@ class BaseDetector(BaseModel):
Defaults to None.
"""
args = self._pre_train(locals())
return self._real_train(**args)
def _pre_train(self, in_args):
return in_args
def _real_train(self,
num_epochs,
train_dataset,
train_batch_size=64,
eval_dataset=None,
optimizer=None,
save_interval_epochs=1,
log_interval_steps=10,
save_dir='output',
pretrain_weights='IMAGENET',
learning_rate=.001,
warmup_steps=0,
warmup_start_lr=0.0,
lr_decay_epochs=(216, 243),
lr_decay_gamma=0.1,
metric=None,
use_ema=False,
early_stop=False,
early_stop_patience=5,
use_vdl=True,
resume_checkpoint=None):
if self.status == 'Infer':
logging.error(
"Exported inference model does not support training.",
@ -583,16 +611,16 @@ class BaseDetector(BaseModel):
else:
images = img_file
batch_samples = self._preprocess(images, transforms)
batch_samples = self.preprocess(images, transforms)
self.net.eval()
outputs = self.run(self.net, batch_samples, 'test')
prediction = self._postprocess(outputs)
prediction = self.postprocess(outputs)
if isinstance(img_file, (str, np.ndarray)):
prediction = prediction[0]
return prediction
def _preprocess(self, images, transforms, to_tensor=True):
def preprocess(self, images, transforms, to_tensor=True):
self._check_transforms(transforms, 'test')
batch_samples = list()
for im in images:
@ -609,7 +637,7 @@ class BaseDetector(BaseModel):
return batch_samples
def _postprocess(self, batch_pred):
def postprocess(self, batch_pred):
infer_result = {}
if 'bbox' in batch_pred:
bboxes = batch_pred['bbox']
@ -880,108 +908,24 @@ class PicoDet(BaseDetector):
self.fixed_input_shape = image_shape
return self._define_input_spec(image_shape)
def train(self,
num_epochs,
train_dataset,
train_batch_size=64,
eval_dataset=None,
optimizer=None,
save_interval_epochs=1,
log_interval_steps=10,
save_dir='output',
pretrain_weights='IMAGENET',
learning_rate=.001,
warmup_steps=0,
warmup_start_lr=0.0,
lr_decay_epochs=(216, 243),
lr_decay_gamma=0.1,
metric=None,
use_ema=False,
early_stop=False,
early_stop_patience=5,
use_vdl=True,
resume_checkpoint=None):
"""
Train the model.
Args:
num_epochs (int): Number of epochs.
train_dataset (paddlers.datasets.COCODetDataset|paddlers.datasets.VOCDetDataset):
Training dataset.
train_batch_size (int, optional): Total batch size among all cards used in
training. Defaults to 64.
eval_dataset (paddlers.datasets.COCODetDataset|paddlers.datasets.VOCDetDataset|None, optional):
Evaluation dataset. If None, the model will not be evaluated during training
process. Defaults to None.
optimizer (paddle.optimizer.Optimizer|None, optional): Optimizer used for
training. If None, a default optimizer will be used. Defaults to None.
save_interval_epochs (int, optional): Epoch interval for saving the model.
Defaults to 1.
log_interval_steps (int, optional): Step interval for printing training
information. Defaults to 10.
save_dir (str, optional): Directory to save the model. Defaults to 'output'.
pretrain_weights (str|None, optional): None or name/path of pretrained
weights. If None, no pretrained weights will be loaded.
Defaults to 'IMAGENET'.
learning_rate (float, optional): Learning rate for training. Defaults to .001.
warmup_steps (int, optional): Number of steps of warm-up training.
Defaults to 0.
warmup_start_lr (float, optional): Start learning rate of warm-up training.
Defaults to 0.0.
lr_decay_epochs (list|tuple, optional): Epoch milestones for learning
rate decay. Defaults to (216, 243).
lr_decay_gamma (float, optional): Gamma coefficient of learning rate decay.
Defaults to .1.
metric (str|None, optional): Evaluation metric. Choices are {'VOC', 'COCO', None}.
If None, determine the metric according to the dataset format.
Defaults to None.
use_ema (bool, optional): Whether to use exponential moving average
strategy. Defaults to False.
early_stop (bool, optional): Whether to adopt early stop strategy.
Defaults to False.
early_stop_patience (int, optional): Early stop patience. Defaults to 5.
use_vdl(bool, optional): Whether to use VisualDL to monitor the training
process. Defaults to True.
resume_checkpoint (str|None, optional): Path of the checkpoint to resume
training from. If None, no training checkpoint will be resumed. At most
one of `resume_checkpoint` and `pretrain_weights` can be set simultaneously.
Defaults to None.
"""
def _pre_train(self, in_args):
optimizer = in_args['optimizer']
if optimizer is None:
num_steps_each_epoch = len(train_dataset) // train_batch_size
num_steps_each_epoch = len(in_args['train_dataset']) // in_args[
'train_batch_size']
optimizer = self.default_optimizer(
parameters=self.net.parameters(),
learning_rate=learning_rate,
warmup_steps=warmup_steps,
warmup_start_lr=warmup_start_lr,
lr_decay_epochs=lr_decay_epochs,
lr_decay_gamma=lr_decay_gamma,
num_steps_each_epoch=num_steps_each_epoch,
learning_rate=in_args['learning_rate'],
warmup_steps=in_args['warmup_steps'],
warmup_start_lr=in_args['warmup_start_lr'],
lr_decay_epochs=in_args['lr_decay_epochs'],
lr_decay_gamma=in_args['lr_decay_gamma'],
num_steps_each_epoch=in_args['num_steps_each_epoch'],
reg_coeff=4e-05,
scheduler='Cosine',
num_epochs=num_epochs)
super(PicoDet, self).train(
num_epochs=num_epochs,
train_dataset=train_dataset,
train_batch_size=train_batch_size,
eval_dataset=eval_dataset,
optimizer=optimizer,
save_interval_epochs=save_interval_epochs,
log_interval_steps=log_interval_steps,
save_dir=save_dir,
pretrain_weights=pretrain_weights,
learning_rate=learning_rate,
warmup_steps=warmup_steps,
warmup_start_lr=warmup_start_lr,
lr_decay_epochs=lr_decay_epochs,
lr_decay_gamma=lr_decay_gamma,
metric=metric,
use_ema=use_ema,
early_stop=early_stop,
early_stop_patience=early_stop_patience,
use_vdl=use_vdl,
resume_checkpoint=resume_checkpoint)
num_epochs=in_args['num_epochs'])
in_args['optimizer'] = optimizer
return in_args
def build_data_loader(self, dataset, batch_size, mode='train'):
if dataset.num_samples < batch_size:
@ -1393,82 +1337,12 @@ class FasterRCNN(BaseDetector):
super(FasterRCNN, self).__init__(
model_name='FasterRCNN', num_classes=num_classes, **params)
def train(self,
num_epochs,
train_dataset,
train_batch_size=64,
eval_dataset=None,
optimizer=None,
save_interval_epochs=1,
log_interval_steps=10,
save_dir='output',
pretrain_weights='IMAGENET',
learning_rate=.001,
warmup_steps=0,
warmup_start_lr=0.0,
lr_decay_epochs=(216, 243),
lr_decay_gamma=0.1,
metric=None,
use_ema=False,
early_stop=False,
early_stop_patience=5,
use_vdl=True,
resume_checkpoint=None):
"""
Train the model.
Args:
num_epochs (int): Number of epochs.
train_dataset (paddlers.datasets.COCODetDataset|paddlers.datasets.VOCDetDataset):
Training dataset.
train_batch_size (int, optional): Total batch size among all cards used in
training. Defaults to 64.
eval_dataset (paddlers.datasets.COCODetDataset|paddlers.datasets.VOCDetDataset|None, optional):
Evaluation dataset. If None, the model will not be evaluated during training
process. Defaults to None.
optimizer (paddle.optimizer.Optimizer|None, optional): Optimizer used for
training. If None, a default optimizer will be used. Defaults to None.
save_interval_epochs (int, optional): Epoch interval for saving the model.
Defaults to 1.
log_interval_steps (int, optional): Step interval for printing training
information. Defaults to 10.
save_dir (str, optional): Directory to save the model. Defaults to 'output'.
pretrain_weights (str|None, optional): None or name/path of pretrained
weights. If None, no pretrained weights will be loaded.
Defaults to 'IMAGENET'.
learning_rate (float, optional): Learning rate for training. Defaults to .001.
warmup_steps (int, optional): Number of steps of warm-up training.
Defaults to 0.
warmup_start_lr (float, optional): Start learning rate of warm-up training.
Defaults to 0.0.
lr_decay_epochs (list|tuple, optional): Epoch milestones for learning
rate decay. Defaults to (216, 243).
lr_decay_gamma (float, optional): Gamma coefficient of learning rate decay.
Defaults to .1.
metric (str|None, optional): Evaluation metric. Choices are {'VOC', 'COCO', None}.
If None, determine the metric according to the dataset format.
Defaults to None.
use_ema (bool, optional): Whether to use exponential moving average
strategy. Defaults to False.
early_stop (bool, optional): Whether to adopt early stop strategy.
Defaults to False.
early_stop_patience (int, optional): Early stop patience. Defaults to 5.
use_vdl(bool, optional): Whether to use VisualDL to monitor the training
process. Defaults to True.
resume_checkpoint (str|None, optional): Path of the checkpoint to resume
training from. If None, no training checkpoint will be resumed. At most
one of `resume_checkpoint` and `pretrain_weights` can be set simultaneously.
Defaults to None.
"""
def _pre_train(self, in_args):
train_dataset = in_args['train_dataset']
if train_dataset.pos_num < len(train_dataset.file_list):
# In-place modification
train_dataset.num_workers = 0
super(FasterRCNN, self).train(
num_epochs, train_dataset, train_batch_size, eval_dataset,
optimizer, save_interval_epochs, log_interval_steps, save_dir,
pretrain_weights, learning_rate, warmup_steps, warmup_start_lr,
lr_decay_epochs, lr_decay_gamma, metric, use_ema, early_stop,
early_stop_patience, use_vdl, resume_checkpoint)
return in_args
def _compose_batch_transform(self, transforms, mode='train'):
if mode == 'train':
@ -2235,82 +2109,12 @@ class MaskRCNN(BaseDetector):
super(MaskRCNN, self).__init__(
model_name='MaskRCNN', num_classes=num_classes, **params)
def train(self,
num_epochs,
train_dataset,
train_batch_size=64,
eval_dataset=None,
optimizer=None,
save_interval_epochs=1,
log_interval_steps=10,
save_dir='output',
pretrain_weights='IMAGENET',
learning_rate=.001,
warmup_steps=0,
warmup_start_lr=0.0,
lr_decay_epochs=(216, 243),
lr_decay_gamma=0.1,
metric=None,
use_ema=False,
early_stop=False,
early_stop_patience=5,
use_vdl=True,
resume_checkpoint=None):
"""
Train the model.
Args:
num_epochs (int): Number of epochs.
train_dataset (paddlers.datasets.COCODetDataset|paddlers.datasets.VOCDetDataset):
Training dataset.
train_batch_size (int, optional): Total batch size among all cards used in
training. Defaults to 64.
eval_dataset (paddlers.datasets.COCODetDataset|paddlers.datasets.VOCDetDataset|None, optional):
Evaluation dataset. If None, the model will not be evaluated during training
process. Defaults to None.
optimizer (paddle.optimizer.Optimizer|None, optional): Optimizer used for
training. If None, a default optimizer will be used. Defaults to None.
save_interval_epochs (int, optional): Epoch interval for saving the model.
Defaults to 1.
log_interval_steps (int, optional): Step interval for printing training
information. Defaults to 10.
save_dir (str, optional): Directory to save the model. Defaults to 'output'.
pretrain_weights (str|None, optional): None or name/path of pretrained
weights. If None, no pretrained weights will be loaded.
Defaults to 'IMAGENET'.
learning_rate (float, optional): Learning rate for training. Defaults to .001.
warmup_steps (int, optional): Number of steps of warm-up training.
Defaults to 0.
warmup_start_lr (float, optional): Start learning rate of warm-up training.
Defaults to 0.0.
lr_decay_epochs (list|tuple, optional): Epoch milestones for learning
rate decay. Defaults to (216, 243).
lr_decay_gamma (float, optional): Gamma coefficient of learning rate decay.
Defaults to .1.
metric (str|None, optional): Evaluation metric. Choices are {'VOC', 'COCO', None}.
If None, determine the metric according to the dataset format.
Defaults to None.
use_ema (bool, optional): Whether to use exponential moving average
strategy. Defaults to False.
early_stop (bool, optional): Whether to adopt early stop strategy.
Defaults to False.
early_stop_patience (int, optional): Early stop patience. Defaults to 5.
use_vdl(bool, optional): Whether to use VisualDL to monitor the training
process. Defaults to True.
resume_checkpoint (str|None, optional): Path of the checkpoint to resume
training from. If None, no training checkpoint will be resumed. At most
one of `resume_checkpoint` and `pretrain_weights` can be set simultaneously.
Defaults to None.
"""
def _pre_train(self, in_args):
train_dataset = in_args['train_dataset']
if train_dataset.pos_num < len(train_dataset.file_list):
# In-place modification
train_dataset.num_workers = 0
super(MaskRCNN, self).train(
num_epochs, train_dataset, train_batch_size, eval_dataset,
optimizer, save_interval_epochs, log_interval_steps, save_dir,
pretrain_weights, learning_rate, warmup_steps, warmup_start_lr,
lr_decay_epochs, lr_decay_gamma, metric, use_ema, early_stop,
early_stop_patience, use_vdl, resume_checkpoint)
return in_args
def _compose_batch_transform(self, transforms, mode='train'):
if mode == 'train':

@ -117,10 +117,10 @@ class BaseSegmenter(BaseModel):
if mode == 'test':
origin_shape = inputs[1]
if self.status == 'Infer':
label_map_list, score_map_list = self._postprocess(
label_map_list, score_map_list = self.postprocess(
net_out, origin_shape, transforms=inputs[2])
else:
logit_list = self._postprocess(
logit_list = self.postprocess(
logit, origin_shape, transforms=inputs[2])
label_map_list = []
score_map_list = []
@ -148,7 +148,7 @@ class BaseSegmenter(BaseModel):
raise ValueError("Expected label.ndim == 4 but got {}".format(
label.ndim))
origin_shape = [label.shape[-2:]]
pred = self._postprocess(
pred = self.postprocess(
pred, origin_shape, transforms=inputs[2])[0] # NCHW
intersect_area, pred_area, label_area = ppseg.utils.metrics.calculate_area(
pred, label, self.num_classes)
@ -526,8 +526,8 @@ class BaseSegmenter(BaseModel):
images = [img_file]
else:
images = img_file
batch_im, batch_origin_shape = self._preprocess(images, transforms,
self.model_type)
batch_im, batch_origin_shape = self.preprocess(images, transforms,
self.model_type)
self.net.eval()
data = (batch_im, batch_origin_shape, transforms.transforms)
outputs = self.run(self.net, data, 'test')
@ -631,7 +631,7 @@ class BaseSegmenter(BaseModel):
dst_data = None
print("GeoTiff saved in {}.".format(save_file))
def _preprocess(self, images, transforms, to_tensor=True):
def preprocess(self, images, transforms, to_tensor=True):
self._check_transforms(transforms, 'test')
batch_im = list()
batch_ori_shape = list()
@ -698,7 +698,7 @@ class BaseSegmenter(BaseModel):
batch_restore_list.append(restore_list)
return batch_restore_list
def _postprocess(self, batch_pred, batch_origin_shape, transforms):
def postprocess(self, batch_pred, batch_origin_shape, transforms):
batch_restore_list = BaseSegmenter.get_transforms_shape_info(
batch_origin_shape, transforms)
if isinstance(batch_pred, (tuple, list)) and self.status == 'Infer':
@ -786,7 +786,7 @@ class BaseSegmenter(BaseModel):
class UNet(BaseSegmenter):
def __init__(self,
input_channel=3,
in_channels=3,
num_classes=2,
use_mixed_loss=False,
losses=None,
@ -799,7 +799,7 @@ class UNet(BaseSegmenter):
})
super(UNet, self).__init__(
model_name='UNet',
input_channel=input_channel,
input_channel=in_channels,
num_classes=num_classes,
use_mixed_loss=use_mixed_loss,
losses=losses,
@ -808,7 +808,7 @@ class UNet(BaseSegmenter):
class DeepLabV3P(BaseSegmenter):
def __init__(self,
input_channel=3,
in_channels=3,
num_classes=2,
backbone='ResNet50_vd',
use_mixed_loss=False,

@ -141,7 +141,7 @@ class TIPCPredictor(object):
return config
def preprocess(self, images, transforms):
preprocessed_samples = self._model._preprocess(
preprocessed_samples = self._model.preprocess(
images, transforms, to_tensor=False)
if self._model.model_type == 'classifier':
preprocessed_samples = {'image': preprocessed_samples[0]}
@ -167,12 +167,12 @@ class TIPCPredictor(object):
def postprocess(self, net_outputs, topk=1, ori_shape=None, transforms=None):
if self._model.model_type == 'classifier':
true_topk = min(self._model.num_classes, topk)
if self._model._postprocess is None:
if self._model.postprocess is None:
self._model.build_postprocess_from_labels(topk)
# XXX: Convert ndarray to tensor as self._model._postprocess requires
# XXX: Convert ndarray to tensor as self._model.postprocess requires
assert len(net_outputs) == 1
net_outputs = paddle.to_tensor(net_outputs[0])
outputs = self._model._postprocess(net_outputs)
outputs = self._model.postprocess(net_outputs)
class_ids = map(itemgetter('class_ids'), outputs)
scores = map(itemgetter('scores'), outputs)
label_names = map(itemgetter('label_names'), outputs)
@ -182,7 +182,7 @@ class TIPCPredictor(object):
'label_names_map': n,
} for l, s, n in zip(class_ids, scores, label_names)]
elif self._model.model_type in ('segmenter', 'change_detector'):
label_map, score_map = self._model._postprocess(
label_map, score_map = self._model.postprocess(
net_outputs,
batch_origin_shape=ori_shape,
transforms=transforms.transforms)
@ -195,7 +195,7 @@ class TIPCPredictor(object):
k: v
for k, v in zip(['bbox', 'bbox_num', 'mask'], net_outputs)
}
preds = self._model._postprocess(net_outputs)
preds = self._model.postprocess(net_outputs)
else:
logging.error(
"Invalid model type {}.".format(self._model.model_type),

Loading…
Cancel
Save