@@ -210,13 +210,13 @@ class DetectionValidator(BaseValidator):
                 'bbox': [round(x, 3) for x in b],
                 'score': round(p[4], 5)})
 
-    def eval_json(self):
+    def eval_json(self, stats):
         if self.args.save_json and self.is_coco and len(self.jdict):
             anno_json = self.data['path'] / "annotations/instances_val2017.json"  # annotations
             pred_json = self.save_dir / "predictions.json"  # predictions
             self.logger.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
             try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
-                check_requirements('pycocotools')
+                check_requirements('pycocotools>=2.0.6')
                 from pycocotools.coco import COCO  # noqa
                 from pycocotools.cocoeval import COCOeval  # noqa
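Between these two hunks the diff skips unchanged context in which the `eval` object used below is constructed. For orientation only, a minimal sketch of the standard pycocotools flow that context presumably follows; the paths are placeholders, and the variable names `anno`, `pred`, and `eval` are inferred from the `eval.*` calls in the next hunk rather than taken from this diff:

```python
# Sketch of typical pycocotools wiring -- NOT part of this diff.
# Paths are placeholders; variable names are inferred from the hunk below.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

anno = COCO('annotations/instances_val2017.json')  # ground-truth annotations
pred = anno.loadRes('predictions.json')            # detections saved to pred_json above
eval = COCOeval(anno, pred, 'bbox')                # bounding-box evaluator (shadows the builtin, as in the surrounding code)
```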
@@ -230,9 +230,10 @@ class DetectionValidator(BaseValidator):
                 eval.evaluate()
                 eval.accumulate()
                 eval.summarize()
-                self.metrics.metric.map, self.metrics.metric.map50 = eval.stats[:2]  # update mAP50-95 and mAP50
+                stats[self.metric_keys[-1]], stats[self.metric_keys[-2]] = eval.stats[:2]  # update mAP50-95 and mAP50
             except Exception as e:
                 self.logger.warning(f'pycocotools unable to run: {e}')
+        return stats
 
 
 @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
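After this change, `eval_json()` mutates and returns the caller-supplied `stats` dict instead of writing into `self.metrics`; per the pycocotools summary order, `eval.stats[0]` is mAP50-95 and `eval.stats[1]` is mAP50, matching the inline comment. A hypothetical call site, assuming a constructed `DetectionValidator` named `validator` (its construction and the initial `stats` values are placeholders, not part of this diff):

```python
# Hypothetical usage -- `validator` and the initial `stats` are assumptions.
stats = {k: 0.0 for k in validator.metric_keys}  # metrics accumulated during validation
stats = validator.eval_json(stats)               # last two keys overwritten with COCO mAP
map50 = stats[validator.metric_keys[-2]]         # mAP@0.50       <- eval.stats[1]
map50_95 = stats[validator.metric_keys[-1]]      # mAP@0.50:0.95  <- eval.stats[0]
```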