@@ -72,8 +72,8 @@ def classification_eval(engine, epoch_id=0):
                     for key in loss_dict:
                         if key not in output_info:
                             output_info[key] = AverageMeter(key, '7.5f')
-                        output_info[key].update(loss_dict[key].numpy()[0],
-                                                batch_size)
+                        output_info[key].update(
+                            float(loss_dict[key]), batch_size)
         else:
             out = engine.model(batch[0])
             # calc loss
@@ -82,8 +82,7 @@ def classification_eval(engine, epoch_id=0):
                 for key in loss_dict:
                     if key not in output_info:
                         output_info[key] = AverageMeter(key, '7.5f')
-                    output_info[key].update(loss_dict[key].numpy()[0],
-                                            batch_size)
+                    output_info[key].update(float(loss_dict[key]), batch_size)
 
         # just for DistributedBatchSampler issue: repeat sampling
         current_samples = batch_size * paddle.distributed.get_world_size()
@@ -132,8 +131,8 @@ def classification_eval(engine, epoch_id=0):
                 if key not in output_info:
                     output_info[key] = AverageMeter(key, '7.5f')
 
-                output_info[key].update(metric_dict[key].numpy()[0],
-                                        current_samples)
+                output_info[key].update(
+                    float(metric_dict[key]), current_samples)
 
         time_info["batch_cost"].update(time.time() - tic)
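
Why the patch works (context added here, not part of the original diff): in recent Paddle releases, full reductions such as mean() return 0-D tensors (shape []), so `.numpy()` produces a 0-d ndarray that cannot be indexed with `[0]`, while `float(tensor)` converts both 0-D and shape-[1] tensors. A minimal sketch of the two patterns, assuming Paddle >= 2.5 is installed:

    import paddle

    # Full reductions return a 0-D tensor (shape []) in Paddle >= 2.5;
    # older releases returned shape [1], where .numpy()[0] still worked.
    loss = paddle.mean(paddle.to_tensor([0.1, 0.4]))

    # New pattern adopted by the patch: float() handles 0-D and
    # shape-[1] tensors alike.
    print(float(loss))  # ~0.25

    # Old pattern: .numpy() on a 0-D tensor yields a 0-d ndarray,
    # and indexing it with [0] raises IndexError.
    try:
        print(loss.numpy()[0])
    except IndexError as exc:
        print(f"old pattern fails: {exc}")

The same reasoning applies to all three hunks: each loss and metric value fed to AverageMeter.update() is a scalar tensor, so the `.numpy()[0]` indexing breaks once those scalars become 0-D.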