Update testing scripts and README.md

chiebot
YuAng 4 years ago
parent d41f04f107
commit d2b85e2046
  1. README.md (11)
  2. assets/megadepth_test_1500_scene_info/megadepth_test_1500.txt (10)
  3. configs/data/megadepth_test_1500.py (4)
  4. configs/data/scannet_test_1500.py (4)
  5. data/scannet/test (1)
  6. data/scannet/test/.gitignore (3)
  7. docs/TRAINING.md (4)
  8. notebooks/demo_single_pair.ipynb (100)
  9. notebooks/visualize_dump_results.ipynb (92)
  10. scripts/reproduce_test/indoor_ds.sh (2)
  11. scripts/reproduce_test/indoor_ot.sh (2)
  12. scripts/reproduce_test/outdoor_ds.sh (2)
  13. scripts/reproduce_test/outdoor_ot.sh (2)
  14. src/config/default.py (1)
  15. src/lightning/data.py (4)
  16. src/utils/plotting.py (15)

@@ -127,6 +127,13 @@ cd demo
 </details>
 ### Reproduce the testing results with pytorch-lightning
+You need to set up the testing subsets of ScanNet and MegaDepth first. We create symlinks from the previously downloaded datasets to `data/{{dataset}}/test`.
+```shell
+# set up symlinks
+ln -s /path/to/scannet-1500-testset/* /path/to/LoFTR/data/scannet/test
+ln -s /path/to/megadepth-1500-testset/* /path/to/LoFTR/data/megadepth/test
+```
 ```shell
 conda activate loftr
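As a quick sanity check after creating the symlinks, something like the following confirms the test subsets are visible under the relative roots used by the updated configs. A minimal sketch, assuming it is run from the LoFTR repository root; it is not part of the commit.

```python
# Illustrative check: the symlinked test subsets should resolve and be non-empty.
from pathlib import Path

for root in [Path("data/scannet/test"), Path("data/megadepth/test")]:
    entries = list(root.iterdir()) if root.exists() else []
    print(f"{root}: {'OK' if entries else 'MISSING/EMPTY'} ({len(entries)} entries)")
```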
@@ -142,7 +149,7 @@ For visualizing the results, please refer to `notebooks/visualize_dump_results.ipynb`
 <br/>
-### Image pair info for training on ScanNet
+<!-- ### Image pair info for training on ScanNet
 You can download the data at [here](https://drive.google.com/file/d/1fC2BezUSsSQy7_H65A0ZfrYK0RB3TXXj/view?usp=sharing).
 <details>
@@ -175,7 +182,7 @@ Out[19]: 1684276
 `data['name']` is the image pair info, organized as [`scene_id`, `seq_id`, `image0_id`, `image1_id`].
 `data['score']` is the overlapping score defined in [SuperGlue](https://arxiv.org/pdf/1911.11763) (Page 12).
-</details>
+</details> -->
 ## Training

@@ -1,5 +1,5 @@
-0022_0.1_0.3.npz
-0015_0.1_0.3.npz
-0015_0.3_0.5.npz
-0022_0.3_0.5.npz
-0022_0.5_0.7.npz
+0022_0.1_0.3
+0015_0.1_0.3
+0015_0.3_0.5
+0022_0.3_0.5
+0022_0.5_0.7
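The `.npz` suffix is dropped from the scene-list entries; presumably the data module now appends the extension when joining each name with `TEST_NPZ_ROOT`. A hedged sketch of that resolution logic — the helper name is illustrative, not from the repository:

```python
import os.path as osp

def resolve_npz_paths(list_path, npz_root):
    """Illustrative: read bare scene names and append the .npz extension."""
    with open(list_path) as f:
        names = [line.strip() for line in f if line.strip()]
    return [osp.join(npz_root, f"{name}.npz") for name in names]

# e.g. resolve_npz_paths("assets/megadepth_test_1500_scene_info/megadepth_test_1500.txt",
#                        "assets/megadepth_test_1500_scene_info")
```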

@@ -3,9 +3,9 @@ from configs.data.base import cfg
 TEST_BASE_PATH = "assets/megadepth_test_1500_scene_info"
 cfg.DATASET.TEST_DATA_SOURCE = "MegaDepth"
-cfg.DATASET.TEST_DATA_ROOT = "/data/MegaDepth/megadepth_test_1500"
+cfg.DATASET.TEST_DATA_ROOT = "data/megadepth/test"
 cfg.DATASET.TEST_NPZ_ROOT = f"{TEST_BASE_PATH}"
 cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/megadepth_test_1500.txt"
 cfg.DATASET.MGDPT_IMG_RESIZE = 840
-cfg.DATASET.MIN_OVERLAP_SCORE = 0.0
+cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0

@@ -3,9 +3,9 @@ from configs.data.base import cfg
 TEST_BASE_PATH = "assets/scannet_test_1500"
 cfg.DATASET.TEST_DATA_SOURCE = "ScanNet"
-cfg.DATASET.TEST_DATA_ROOT = "/data/scannet/scannet_test_1500"
+cfg.DATASET.TEST_DATA_ROOT = "data/scannet/test"
 cfg.DATASET.TEST_NPZ_ROOT = f"{TEST_BASE_PATH}"
 cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/scannet_test.txt"
 cfg.DATASET.TEST_INTRINSIC_PATH = f"{TEST_BASE_PATH}/intrinsics.npz"
-cfg.DATASET.MIN_OVERLAP_SCORE = 0.0
+cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0
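Since both test configs only mutate the shared `cfg` imported from `configs.data.base`, the new relative roots can be inspected directly. A minimal sketch, assuming the repository root is on `PYTHONPATH`; not part of the commit:

```python
# Illustrative check of the updated ScanNet test config values.
from configs.data.scannet_test_1500 import cfg

print(cfg.DATASET.TEST_DATA_ROOT)          # expected: "data/scannet/test"
print(cfg.DATASET.MIN_OVERLAP_SCORE_TEST)  # expected: 0.0
```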

@@ -1 +0,0 @@
-/mnt/lustre/share/3dv/dataset/scannet/scannet_1500_testset

@@ -0,0 +1,3 @@
+*
+*/
+!.gitignore

@@ -37,8 +37,8 @@ ln -s /path/to/scannet_indices/* /path/to/LoFTR/data/scannet/index
 # megadepth
 # -- # train and test dataset (train and test share the same dataset)
-ln -s /path/to/megadepth/Undistorted_SfM/* /path/to/LoFTR/data/megadepth/train
-ln -s /path/to/megadepth/Undistorted_SfM/* /path/to/LoFTR/data/megadepth/test
+ln -s /path/to/megadepth/Undistorted_SfM /path/to/LoFTR/data/megadepth/train
+ln -s /path/to/megadepth/Undistorted_SfM /path/to/LoFTR/data/megadepth/test
 # -- # dataset indices
 ln -s /path/to/megadepth_indices/* /path/to/LoFTR/data/megadepth/index
 ```

File diff suppressed because one or more lines are too long (notebooks/demo_single_pair.ipynb)

File diff suppressed because one or more lines are too long (notebooks/visualize_dump_results.ipynb)

@@ -8,7 +8,7 @@ export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
 cd $PROJECT_DIR
 data_cfg_path="configs/data/scannet_test_1500.py"
-main_cfg_path="configs/loftr/loftr_ds.py"
+main_cfg_path="configs/loftr/indoor/loftr_ds.py"
 ckpt_path="weights/indoor_ds.ckpt"
 dump_dir="dump/loftr_ds_indoor"
 profiler_name="inference"

@@ -8,7 +8,7 @@ export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
 cd $PROJECT_DIR
 data_cfg_path="configs/data/scannet_test_1500.py"
-main_cfg_path="configs/loftr/loftr_ot.py"
+main_cfg_path="configs/loftr/indoor/loftr_ot.py"
 ckpt_path="weights/indoor_ot.ckpt"
 dump_dir="dump/loftr_ot_indoor"
 profiler_name="inference"

@@ -8,7 +8,7 @@ export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
 cd $PROJECT_DIR
 data_cfg_path="configs/data/megadepth_test_1500.py"
-main_cfg_path="configs/loftr/loftr_ds.py"
+main_cfg_path="configs/loftr/outdoor/loftr_ds.py"
 ckpt_path="weights/outdoor_ds.ckpt"
 dump_dir="dump/loftr_ds_outdoor"
 profiler_name="inference"

@@ -8,7 +8,7 @@ export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
 cd $PROJECT_DIR
 data_cfg_path="configs/data/megadepth_test_1500.py"
-main_cfg_path="configs/loftr/loftr_ot.py"
+main_cfg_path="configs/loftr/outdoor/loftr_ot.py"
 ckpt_path="weights/outdoor_ot.ckpt"
 dump_dir="dump/loftr_ot_outdoor"
 profiler_name="inference"
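For reference, the four updated `main_cfg_path` values imply the main configs are now organized per scene type:

```
configs/loftr/
├── indoor/
│   ├── loftr_ds.py
│   └── loftr_ot.py
└── outdoor/
    ├── loftr_ds.py
    └── loftr_ot.py
```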

@@ -99,6 +99,7 @@ _CN.DATASET.MGDPT_DF = 8
 ############## Trainer ##############
 _CN.TRAINER = CN()
+_CN.TRAINER.WORLD_SIZE = 1
 _CN.TRAINER.CANONICAL_BS = 64
 _CN.TRAINER.CANONICAL_LR = 6e-3
 _CN.TRAINER.SCALING = None  # this will be calculated automatically
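The new `WORLD_SIZE` entry feeds the automatic scaling implied by `CANONICAL_BS`/`CANONICAL_LR`. A hedged sketch of how that scaling is typically computed; the exact logic lives in the training entry point and may differ:

```python
# Illustrative linear LR scaling against the canonical batch size.
# The field names mirror the config, but this computation is a sketch,
# not the repository's exact code.
def compute_lr_scaling(world_size, batch_size_per_gpu,
                       canonical_bs=64, canonical_lr=6e-3):
    true_batch_size = world_size * batch_size_per_gpu
    scaling = true_batch_size / canonical_bs
    return scaling, canonical_lr * scaling

scaling, true_lr = compute_lr_scaling(world_size=4, batch_size_per_gpu=2)
print(scaling, true_lr)  # 0.125, 0.00075
```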

@@ -75,13 +75,13 @@ class MultiSceneDataModule(pl.LightningDataModule):
         self.train_loader_params = {
             'batch_size': args.batch_size,
             'num_workers': args.num_workers,
-            'pin_memory': args.pin_memory,
+            'pin_memory': getattr(args, 'pin_memory', True)
         }
         self.val_loader_params = {
             'batch_size': 1,
             'shuffle': False,
             'num_workers': args.num_workers,
-            'pin_memory': args.pin_memory,
+            'pin_memory': getattr(args, 'pin_memory', True)
         }
         self.test_loader_params = {
             'batch_size': 1,
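Reading `pin_memory` with `getattr(..., True)` keeps the data module working when a launch script's argument parser does not define `--pin_memory`. A small sketch of the behaviour; the `Namespace` objects here are illustrative only:

```python
from argparse import Namespace

# Older test scripts may not expose --pin_memory at all.
args_without_flag = Namespace(batch_size=1, num_workers=4)
args_with_flag = Namespace(batch_size=1, num_workers=4, pin_memory=False)

print(getattr(args_without_flag, 'pin_memory', True))  # True: falls back to the default
print(getattr(args_with_flag, 'pin_memory', True))     # False: explicit value wins
```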

@@ -16,10 +16,6 @@ def _compute_conf_thresh(data):
 # --- VISUALIZATION --- #
-def plot_keypoints(axes, kpts0, kpts1, color='w', ps=2):
-    axes[0].scatter(kpts0[:, 0], kpts0[:, 1], c=color, s=ps)
-    axes[1].scatter(kpts1[:, 0], kpts1[:, 1], c=color, s=ps)
 def make_matching_figure(
         img0, img1, mkpts0, mkpts1, color,
@@ -38,8 +34,8 @@ def make_matching_figure(
     if kpts0 is not None:
         assert kpts1 is not None
-        # plot_keypoints(axes, kpts0, kpts1, color='k', ps=4)
-        plot_keypoints(axes, kpts0, kpts1, color='w', ps=2)
+        axes[0].scatter(kpts0[:, 0], kpts0[:, 1], c='w', s=2)
+        axes[1].scatter(kpts1[:, 0], kpts1[:, 1], c='w', s=2)
     # draw matches
     if mkpts0.shape[0] != 0 and mkpts1.shape[0] != 0:
@@ -98,7 +94,7 @@ def _make_evaluation_figure(data, b_id, alpha='dynamic'):
     color = error_colormap(epi_errs, conf_thr, alpha=alpha)
     text = [
-        f'Matches {len(kpts0)}',
+        f'#Matches {len(kpts0)}',
         f'Precision({conf_thr:.2e}) ({100 * precision:.1f}%): {n_correct}/{len(kpts0)}',
         f'Recall({conf_thr:.2e}) ({100 * recall:.1f}%): {n_correct}/{n_gt_matches}'
     ]
@@ -115,15 +111,12 @@ def _make_confidence_figure(data, b_id):
 def make_matching_figures(data, config, mode='evaluation'):
     """ Make matching figures for a batch.
     Args:
         data (Dict): a batch updated by PL_LoFTR.
         config (Dict): matcher config
     Returns:
         figures (Dict[str, List[plt.figure]]
-    TODO:
-        - confidence mode plotting
-        - parallel plotting
-        - evaluation mode & confidence mode at the same time
     """
     assert mode in ['evaluation', 'confidence']  # 'confidence'
     figures = {mode: []}
