diff --git a/paddlers/datasets/voc.py b/paddlers/datasets/voc.py
index aad0b5c..8c838b8 100644
--- a/paddlers/datasets/voc.py
+++ b/paddlers/datasets/voc.py
@@ -289,7 +289,8 @@ class VOCDetDataset(BaseDataset):
             self.num_max_boxes = max(self.num_max_boxes, len(objs))
 
         if not ct:
-            logging.error("No voc record found in %s' % (file_list)", exit=True)
+            logging.error(
+                "No voc record found in %s ." % (file_list), exit=True)
         self.pos_num = len(self.file_list)
         if self.allow_empty and neg_file_list:
             self.file_list += self._sample_empty(neg_file_list)
diff --git a/paddlers/models/ppcls/arch/backbone/model_zoo/gvt.py b/paddlers/models/ppcls/arch/backbone/model_zoo/gvt.py
index 4800da1..d8a0174 100644
--- a/paddlers/models/ppcls/arch/backbone/model_zoo/gvt.py
+++ b/paddlers/models/ppcls/arch/backbone/model_zoo/gvt.py
@@ -322,8 +322,7 @@ class PyramidVisionTransformer(nn.Layer):
             self.pos_drops.append(nn.Dropout(p=drop_rate))
 
         dpr = [
-            x.numpy()[0]
-            for x in paddle.linspace(0, drop_path_rate, sum(depths))
+            float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths))
         ]  # stochastic depth decay rule
 
         cur = 0
@@ -549,8 +548,7 @@ class ALTGVT(PCPVT):
         self.wss = wss
         # transformer encoder
         dpr = [
-            x.numpy()[0]
-            for x in paddle.linspace(0, drop_path_rate, sum(depths))
+            float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths))
         ]  # stochastic depth decay rule
         cur = 0
         self.blocks = nn.LayerList()
diff --git a/paddlers/models/ppcls/engine/evaluation/classification.py b/paddlers/models/ppcls/engine/evaluation/classification.py
index d7b5c47..ac3064a 100644
--- a/paddlers/models/ppcls/engine/evaluation/classification.py
+++ b/paddlers/models/ppcls/engine/evaluation/classification.py
@@ -72,8 +72,8 @@ def classification_eval(engine, epoch_id=0):
                 for key in loss_dict:
                     if key not in output_info:
                         output_info[key] = AverageMeter(key, '7.5f')
-                    output_info[key].update(loss_dict[key].numpy()[0],
-                                            batch_size)
+                    output_info[key].update(
+                        float(loss_dict[key]), batch_size)
             else:
                 out = engine.model(batch[0])
                 # calc loss
@@ -82,8 +82,7 @@
             for key in loss_dict:
                 if key not in output_info:
                     output_info[key] = AverageMeter(key, '7.5f')
-                output_info[key].update(loss_dict[key].numpy()[0],
-                                        batch_size)
+                output_info[key].update(float(loss_dict[key]), batch_size)
 
             # just for DistributedBatchSampler issue: repeat sampling
             current_samples = batch_size * paddle.distributed.get_world_size()
@@ -132,8 +131,8 @@
 
                 if key not in output_info:
                     output_info[key] = AverageMeter(key, '7.5f')
-                output_info[key].update(metric_dict[key].numpy()[0],
-                                        current_samples)
+                output_info[key].update(
+                    float(metric_dict[key]), current_samples)
 
         time_info["batch_cost"].update(time.time() - tic)
 
diff --git a/paddlers/models/ppcls/engine/train/utils.py b/paddlers/models/ppcls/engine/train/utils.py
index 92eb35d..2590662 100644
--- a/paddlers/models/ppcls/engine/train/utils.py
+++ b/paddlers/models/ppcls/engine/train/utils.py
@@ -25,8 +25,7 @@ def update_metric(trainer, out, batch, batch_size):
     for key in metric_dict:
         if key not in trainer.output_info:
             trainer.output_info[key] = AverageMeter(key, '7.5f')
-        trainer.output_info[key].update(metric_dict[key].numpy()[0],
-                                        batch_size)
+        trainer.output_info[key].update(float(metric_dict[key]), batch_size)
 
 
 def update_loss(trainer, loss_dict, batch_size):
@@ -34,7 +33,7 @@
     for key in loss_dict:
         if key not in trainer.output_info:
             trainer.output_info[key] = AverageMeter(key, '7.5f')
-        trainer.output_info[key].update(loss_dict[key].numpy()[0], batch_size)
+        trainer.output_info[key].update(float(loss_dict[key]), batch_size)
 
 
 def log_info(trainer, batch_size, epoch_id, iter_id):
diff --git a/paddlers/models/ppcls/metric/metrics.py b/paddlers/models/ppcls/metric/metrics.py
index 30b11e2..4924419 100644
--- a/paddlers/models/ppcls/metric/metrics.py
+++ b/paddlers/models/ppcls/metric/metrics.py
@@ -81,7 +81,7 @@ class mAP(nn.Layer):
         #calc map
         precision_mask = paddle.multiply(equal_flag, precision)
         ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag,
                                                              axis=1)
-        metric_dict["mAP"] = paddle.mean(ap).numpy()[0]
+        metric_dict["mAP"] = float(paddle.mean(ap))
 
         return metric_dict
@@ -124,7 +124,7 @@ class mINP(nn.Layer):
         hard_index = paddle.argmax(auxilary, axis=1).astype("float32")
         all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index)
         mINP = paddle.mean(all_INP)
-        metric_dict["mINP"] = mINP.numpy()[0]
+        metric_dict["mINP"] = float(mINP)
 
         return metric_dict
 
diff --git a/paddlers/models/ppseg/core/train.py b/paddlers/models/ppseg/core/train.py
index 0ef168c..8d8e4a2 100644
--- a/paddlers/models/ppseg/core/train.py
+++ b/paddlers/models/ppseg/core/train.py
@@ -231,7 +231,7 @@ def train(model,
                 train_profiler.add_profiler_step(profiler_options)
 
             model.clear_gradients()
-            avg_loss += loss.numpy()[0]
+            avg_loss += float(loss)
             if not avg_loss_list:
                 avg_loss_list = [l.numpy() for l in loss_list]
             else:
diff --git a/paddlers/models/ppseg/models/losses/ohem_cross_entropy_loss.py b/paddlers/models/ppseg/models/losses/ohem_cross_entropy_loss.py
index 5683521..f486e0b 100644
--- a/paddlers/models/ppseg/models/losses/ohem_cross_entropy_loss.py
+++ b/paddlers/models/ppseg/models/losses/ohem_cross_entropy_loss.py
@@ -77,7 +77,7 @@ class OhemCrossEntropyLoss(nn.Layer):
             if self.min_kept > 0:
                 index = prob.argsort()
                 threshold_index = index[min(len(index), self.min_kept) - 1]
-                threshold_index = int(threshold_index.numpy()[0])
+                threshold_index = int(threshold_index)
                 if prob[threshold_index] > self.thresh:
                     threshold = prob[threshold_index]
                 kept_mask = (prob < threshold).astype('int64')
diff --git a/paddlers/models/ppseg/models/losses/ohem_edge_attention_loss.py b/paddlers/models/ppseg/models/losses/ohem_edge_attention_loss.py
index 56db270..f6b9240 100644
--- a/paddlers/models/ppseg/models/losses/ohem_edge_attention_loss.py
+++ b/paddlers/models/ppseg/models/losses/ohem_edge_attention_loss.py
@@ -93,7 +93,7 @@ class OhemEdgeAttentionLoss(nn.Layer):
             if self.min_kept > 0:
                 index = prob.argsort()
                 threshold_index = index[min(len(index), self.min_kept) - 1]
-                threshold_index = int(threshold_index.numpy()[0])
+                threshold_index = int(threshold_index)
                 if prob[threshold_index] > self.thresh:
                     threshold = prob[threshold_index]
                 kept_mask = (prob < threshold).astype('int64')
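
Note on the recurring change above: every scalar read of the form `x.numpy()[0]` is replaced by `float(x)` (or `int(x)` for the OHEM threshold index). In newer Paddle releases (2.5+), reductions such as `paddle.mean` return true 0-D tensors, so `.numpy()` yields a 0-d array and indexing it with `[0]` raises an IndexError, while the Python built-in conversions accept both 0-D and shape-[1] single-element tensors. A minimal sketch of the difference, assuming Paddle 2.x eager mode (the variable names are illustrative, not from the patch):

    import paddle

    x = paddle.mean(paddle.to_tensor([1.0, 2.0, 3.0]))  # 0-D scalar tensor

    # Old pattern: assumes .numpy() returns a 1-D array of length 1.
    # On a 0-D tensor this raises IndexError (array is 0-dimensional).
    # v = x.numpy()[0]

    # New pattern: Tensor.__float__ handles any single-element tensor,
    # so this works whether x has shape [] or shape [1].
    v = float(x)
    print(v)  # 2.0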