Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleRS into develop

Bobholamovic 2 years ago
commit 0f167807ba
Changed files (8):
  1. paddlers/datasets/voc.py (3 changed lines)
  2. paddlers/models/ppcls/arch/backbone/model_zoo/gvt.py (6 changed lines)
  3. paddlers/models/ppcls/engine/evaluation/classification.py (11 changed lines)
  4. paddlers/models/ppcls/engine/train/utils.py (5 changed lines)
  5. paddlers/models/ppcls/metric/metrics.py (4 changed lines)
  6. paddlers/models/ppseg/core/train.py (2 changed lines)
  7. paddlers/models/ppseg/models/losses/ohem_cross_entropy_loss.py (2 changed lines)
  8. paddlers/models/ppseg/models/losses/ohem_edge_attention_loss.py (2 changed lines)
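
Note: the recurring change in this commit swaps tensor.numpy()[0] for float(tensor). A minimal sketch of the motivation, assuming a Paddle version in which reductions such as paddle.mean return 0-d (scalar) tensors, whose NumPy form cannot be indexed with [0]:

    import paddle

    loss = paddle.mean(paddle.to_tensor([0.25, 0.75]))  # 0-d scalar tensor
    print(float(loss))  # 0.5 -- float() works for 0-d and 1-element tensors alike
    # loss.numpy()[0]   # IndexError on a 0-d array: too many indices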

@@ -289,7 +289,8 @@ class VOCDetDataset(BaseDataset):
                     self.num_max_boxes = max(self.num_max_boxes, len(objs))
         if not ct:
-            logging.error("No voc record found in %s' % (file_list)", exit=True)
+            logging.error(
+                "No voc record found in %s ." % (file_list), exit=True)

         self.pos_num = len(self.file_list)
         if self.allow_empty and neg_file_list:
             self.file_list += self._sample_empty(neg_file_list)
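
Note: besides reformatting the call, this hunk fixes a quoting bug. In the old line the closing quote came after the % operator, so the literal text %s' % (file_list) sat inside the message string and file_list was never interpolated. A hypothetical illustration (the file_list value is made up):

    file_list = "train_list.txt"                       # illustrative value
    old = "No voc record found in %s' % (file_list)"   # '%' is inside the string; no substitution happens
    new = "No voc record found in %s ." % (file_list)  # '%s' is substituted as intended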

@@ -322,8 +322,7 @@ class PyramidVisionTransformer(nn.Layer):
             self.pos_drops.append(nn.Dropout(p=drop_rate))
         dpr = [
-            x.numpy()[0]
-            for x in paddle.linspace(0, drop_path_rate, sum(depths))
+            float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths))
         ]  # stochastic depth decay rule
         cur = 0
@@ -549,8 +548,7 @@ class ALTGVT(PCPVT):
         self.wss = wss
         # transformer encoder
         dpr = [
-            x.numpy()[0]
-            for x in paddle.linspace(0, drop_path_rate, sum(depths))
+            float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths))
         ]  # stochastic depth decay rule
         cur = 0
         self.blocks = nn.LayerList()
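
Note: both gvt.py hunks touch the stochastic depth decay rule, which assigns each transformer block a linearly increasing drop-path probability. A short sketch with illustrative values (depths and drop_path_rate here are assumptions, not the model's defaults):

    import paddle

    drop_path_rate = 0.1
    depths = [2, 2, 6, 2]  # blocks per stage (illustrative)
    dpr = [float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths))]
    print(dpr[0], dpr[-1])  # 0.0 ... 0.1, one rate per block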

@@ -72,8 +72,8 @@ def classification_eval(engine, epoch_id=0):
                 for key in loss_dict:
                     if key not in output_info:
                         output_info[key] = AverageMeter(key, '7.5f')
-                    output_info[key].update(loss_dict[key].numpy()[0],
-                                            batch_size)
+                    output_info[key].update(
+                        float(loss_dict[key]), batch_size)
             else:
                 out = engine.model(batch[0])
                 # calc loss
@@ -82,8 +82,7 @@ def classification_eval(engine, epoch_id=0):
                 for key in loss_dict:
                     if key not in output_info:
                         output_info[key] = AverageMeter(key, '7.5f')
-                    output_info[key].update(loss_dict[key].numpy()[0],
-                                            batch_size)
+                    output_info[key].update(float(loss_dict[key]), batch_size)

         # just for DistributedBatchSampler issue: repeat sampling
         current_samples = batch_size * paddle.distributed.get_world_size()
@@ -132,8 +131,8 @@ def classification_eval(engine, epoch_id=0):
                 if key not in output_info:
                     output_info[key] = AverageMeter(key, '7.5f')
-                output_info[key].update(metric_dict[key].numpy()[0],
-                                        current_samples)
+                output_info[key].update(
+                    float(metric_dict[key]), current_samples)
         time_info["batch_cost"].update(time.time() - tic)

@@ -25,8 +25,7 @@ def update_metric(trainer, out, batch, batch_size):
     for key in metric_dict:
         if key not in trainer.output_info:
             trainer.output_info[key] = AverageMeter(key, '7.5f')
-        trainer.output_info[key].update(metric_dict[key].numpy()[0],
-                                        batch_size)
+        trainer.output_info[key].update(float(metric_dict[key]), batch_size)


 def update_loss(trainer, loss_dict, batch_size):
@@ -34,7 +33,7 @@ def update_loss(trainer, loss_dict, batch_size):
     for key in loss_dict:
         if key not in trainer.output_info:
             trainer.output_info[key] = AverageMeter(key, '7.5f')
-        trainer.output_info[key].update(loss_dict[key].numpy()[0], batch_size)
+        trainer.output_info[key].update(float(loss_dict[key]), batch_size)


 def log_info(trainer, batch_size, epoch_id, iter_id):
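
Note: every update(...) call above feeds a plain Python float into an AverageMeter. A self-contained sketch with a minimal stand-in for AverageMeter (the real class lives in the ppcls utilities; this stand-in only mirrors the update/average behavior assumed here):

    class AverageMeter:
        # Minimal stand-in: tracks a running average of float values.
        def __init__(self, name, fmt='7.5f'):
            self.name, self.fmt = name, fmt
            self.sum, self.count = 0.0, 0

        def update(self, val, n=1):
            self.sum += val * n
            self.count += n

        @property
        def avg(self):
            return self.sum / max(self.count, 1)

    meter = AverageMeter('CELoss')
    meter.update(0.52, 16)  # like float(loss_dict[key]), batch_size
    meter.update(0.48, 16)
    print(f"{meter.avg:{meter.fmt}}")  # 0.50000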

@@ -81,7 +81,7 @@ class mAP(nn.Layer):
         #calc map
         precision_mask = paddle.multiply(equal_flag, precision)
         ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag, axis=1)
-        metric_dict["mAP"] = paddle.mean(ap).numpy()[0]
+        metric_dict["mAP"] = float(paddle.mean(ap))
         return metric_dict
@@ -124,7 +124,7 @@ class mINP(nn.Layer):
         hard_index = paddle.argmax(auxilary, axis=1).astype("float32")
         all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index)
         mINP = paddle.mean(all_INP)
-        metric_dict["mINP"] = mINP.numpy()[0]
+        metric_dict["mINP"] = float(mINP)
         return metric_dict

@@ -231,7 +231,7 @@ def train(model,
                 train_profiler.add_profiler_step(profiler_options)
                 model.clear_gradients()
-                avg_loss += loss.numpy()[0]
+                avg_loss += float(loss)
                 if not avg_loss_list:
                     avg_loss_list = [l.numpy() for l in loss_list]
                 else:
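
Note: float(loss) converts the scalar loss tensor to a plain Python float, so avg_loss accumulates ordinary numbers rather than tensors. A hedged sketch of the accumulation pattern (loss values are illustrative):

    import paddle

    avg_loss = 0.0
    for step_loss in (paddle.to_tensor(0.9), paddle.to_tensor(0.7)):
        avg_loss += float(step_loss)  # plain float; no tensor is retained
    avg_loss /= 2
    print(avg_loss)  # ~0.8, up to float32 rounding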

@@ -77,7 +77,7 @@ class OhemCrossEntropyLoss(nn.Layer):
             if self.min_kept > 0:
                 index = prob.argsort()
                 threshold_index = index[min(len(index), self.min_kept) - 1]
-                threshold_index = int(threshold_index.numpy()[0])
+                threshold_index = int(threshold_index)
                 if prob[threshold_index] > self.thresh:
                     threshold = prob[threshold_index]
                 kept_mask = (prob < threshold).astype('int64')

@@ -93,7 +93,7 @@ class OhemEdgeAttentionLoss(nn.Layer):
             if self.min_kept > 0:
                 index = prob.argsort()
                 threshold_index = index[min(len(index), self.min_kept) - 1]
-                threshold_index = int(threshold_index.numpy()[0])
+                threshold_index = int(threshold_index)
                 if prob[threshold_index] > self.thresh:
                     threshold = prob[threshold_index]
                 kept_mask = (prob < threshold).astype('int64')
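
Note: in both OHEM losses the indexed element is itself a tensor, and int(...) now converts it directly. A sketch of the thresholding step with made-up probabilities (the min_kept value is an assumption):

    import paddle

    prob = paddle.to_tensor([0.9, 0.2, 0.6, 0.4])  # illustrative pixel probabilities
    min_kept = 2
    index = prob.argsort()                          # indices in ascending order of prob
    threshold_index = index[min(len(index), min_kept) - 1]
    threshold_index = int(threshold_index)          # works for 0-d and 1-element tensors
    print(float(prob[threshold_index]))             # the min_kept-th smallest probability, 0.4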
