Merge pull request #25021 from asmorkalov:as/multiview_calib_generator_ext

Multiview calibration generator improvements
Alexander Smorkalov authored 1 year ago, committed by GitHub
commit a0df2f5328
Files changed:
  1. apps/python-calibration-generator/calibration_generator.py (34 changed lines)
  2. apps/python-calibration-generator/utils.py (18 changed lines)

apps/python-calibration-generator/calibration_generator.py

@@ -2,14 +2,21 @@
 # It is subject to the license terms in the LICENSE file found in the top-level directory
 # of this distribution and at http://opencv.org/license.html.
 # The script generates synthetic data for multi-camera calibration assessment
+# Input: cameras configuration. See config_cv_test.yaml
+# Output: generated object points (3d), image points (2d) for calibration and
+# board poses ground truth (R, t) for check
 import argparse
 import numpy as np
 import math
 import yaml
 from drawer import animation2D, animation3D
-from utils import RandGen, insideImage, eul2rot, saveKDRT, areAllInsideImage, insideImageMask, projectCamera, export2JSON
+from utils import RandGen, insideImage, eul2rot, saveKDRT, areAllInsideImage, insideImageMask, projectCamera, export2JSON, writeMatrix
 from pathlib import Path
 from board import CheckerBoard
+import os
+import json

 class Camera:
     def __init__(self, idx, img_width, img_height, fx_limit, euler_limit, t_limit, is_fisheye, fy_deviation=None, skew=None,
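
Note (illustration, not part of the diff): the new header comments point to config_cv_test.yaml. A minimal sketch of the configuration keys that main() reads further below (cfg['BOARD'][...], cfg['MAX_FRAMES'], cfg['SEED'], cfg['MAX_RANDOM_ITERS'], cfg['NAME']); the values are made-up placeholders and the camera section consumed by getCamerasFromCfg is omitted:

# Sketch only: a config with the keys used by main(); values are placeholders.
import yaml

cfg_text = """
NAME: cv_test
SEED: 0
MAX_FRAMES: 50
MAX_RANDOM_ITERS: 10000
BOARD:
  WIDTH: 9
  HEIGHT: 7
  SQUARE_LEN: 0.08
  EULER_LIMIT: [[-20, 20], [-20, 20], [-20, 20]]   # per-axis [min, max], units assumed
  T_LIMIT: [[-1.0, 1.0], [-1.0, 1.0], [2.0, 4.0]]  # per-axis translation range, placeholder
  T_ORIGIN: [0.0, 0.0, 3.0]                        # placeholder board origin
"""
cfg = yaml.safe_load(cfg_text)
print(cfg['BOARD']['WIDTH'], cfg['BOARD']['SQUARE_LEN'], cfg['MAX_FRAMES'])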
@@ -185,6 +192,8 @@ def generateAll(cameras, board, num_frames, rand_gen, MAX_RAND_ITERS=10000, save
     points_2d, points_3d = [], []
     valid_frames_per_camera = np.zeros(len(cameras))
     MIN_FRAMES_PER_CAM = int(num_frames * 0.1)
+    R_used = []
+    t_used = []
     for frame in range(MAX_RAND_ITERS):
         R_board = eul2rot([ rand_gen.randRange(board.euler_limit[0][0], board.euler_limit[0][1]),
                             rand_gen.randRange(board.euler_limit[1][0], board.euler_limit[1][1]),
@@ -212,7 +221,9 @@ def generateAll(cameras, board, num_frames, rand_gen, MAX_RAND_ITERS=10000, save
         """
         for cam_idx in range(len(cameras)):
-            if not board.isProjectionValid(cam_points_2d[cam_idx]):
+            # Check whether the board is in front of the image
+            pt_3d = cameras[cam_idx].R @ pts_board + cameras[cam_idx].t
+            if not board.isProjectionValid(cam_points_2d[cam_idx]) or np.min(pt_3d[2]) < 1e-3:
                 cam_points_2d[cam_idx] = -np.ones_like(cam_points_2d[cam_idx])
             elif cameras[cam_idx].noise_scale_img_diag is not None:
                 cam_points_2d[cam_idx] += np.random.normal(0, cameras[cam_idx].img_diag * cameras[cam_idx].noise_scale_img_diag, cam_points_2d[cam_idx].shape)
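
Note on the depth test added above (illustration, not part of the diff): pt_3d = R @ pts_board + t expresses the board corners in the camera frame, so a minimum z below about 1e-3 means at least one corner lies behind or on the image plane and the projection is discarded. A toy check with made-up numbers:

# Sketch only: the cheirality check from the hunk above on a single toy point.
import numpy as np

R = np.eye(3)                                 # toy camera rotation
t = np.array([[0.0], [0.0], [-5.0]])          # toy translation pushing the point behind the camera
pts_board = np.array([[0.0], [0.0], [3.0]])   # one board point as a 3x1 column

pt_3d = R @ pts_board + t                     # board point in camera coordinates
print(np.min(pt_3d[2]) < 1e-3)                # True -> this pose would be rejected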
@@ -233,6 +244,9 @@ def generateAll(cameras, board, num_frames, rand_gen, MAX_RAND_ITERS=10000, save
         points_2d.append(np.stack(cam_points_2d))
         points_3d.append(pts_board)
+        R_used.append(R_board)
+        t_used.append(R_board @ (board.t_origin - points_board_mean) + points_board_mean + t_board)
+
         if len(points_2d) >= num_frames and (valid_frames_per_camera >= MIN_FRAMES_PER_CAM).all():
             print('tried samples', frame)
             break
@@ -244,7 +258,7 @@ def generateAll(cameras, board, num_frames, rand_gen, MAX_RAND_ITERS=10000, save
     if save_3d_animation is not None: animation3D(board, cameras, points_3d, save_3d_animation, VIDEOS_FPS, VIDEOS_DPI, MAX_FRAMES)
     print('number of found frames', len(points_2d))
-    return np.stack(points_2d), np.stack(points_3d)
+    return np.stack(points_2d), np.stack(points_3d), np.stack(R_used), np.stack(t_used)

 def createConfigFile(fname, params):
     file = open(fname, 'w')
@@ -313,7 +327,7 @@ def main(cfg_name, save_folder):
     checkerboard = CheckerBoard(cfg['BOARD']['WIDTH'], cfg['BOARD']['HEIGHT'], cfg['BOARD']['SQUARE_LEN'], cfg['BOARD']['EULER_LIMIT'], cfg['BOARD']['T_LIMIT'], cfg['BOARD']['T_ORIGIN'])
     cameras = getCamerasFromCfg(cfg)
-    points_2d, points_3d = generateAll(cameras, checkerboard, cfg['MAX_FRAMES'], RandGen(cfg['SEED']), cfg['MAX_RANDOM_ITERS'], save_folder+'plots_projections.mp4', save_folder+'board_cameras.mp4')
+    points_2d, points_3d, R_used, t_used = generateAll(cameras, checkerboard, cfg['MAX_FRAMES'], RandGen(cfg['SEED']), cfg['MAX_RANDOM_ITERS'], save_folder+'plots_projections.mp4', save_folder+'board_cameras.mp4')

     for i in range(len(cameras)):
         print('Camera', i)
@@ -328,6 +342,18 @@ def main(cfg_name, save_folder):
     export2JSON(checkerboard.pattern, points_2d, imgs_width_height, is_fisheye, save_folder+'opencv_sample_'+cfg['NAME']+'.json')
     saveKDRT(cameras, save_folder+'gt.txt')
+    file = open(save_folder + "gt.txt", "a")
+    for i in range(R_used.shape[0]):
+        writeMatrix(file, 'R_%d' % i, R_used[i])
+        writeMatrix(file, 'T_%d' % i, t_used[i])
+
+    poses = dict()
+    for idx in range(len(R_used)):
+        poses['frame_%d' % idx] = {'R': R_used[idx].tolist(), 'T': t_used[idx].tolist()}
+    with open(os.path.join(save_folder, "gt_poses.json"), 'wt') as gt:
+        gt.write(json.dumps(poses, indent=4))

 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--cfg', type=str, required=True, help='path to config file, e.g., config_cv_test.yaml')
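
Usage illustration (not part of the change): the hunks above append the per-frame board poses to gt.txt and also dump them to gt_poses.json. A sketch of reading gt_poses.json back and scoring a hypothetical estimated rotation R_est against the ground truth:

# Sketch only: consume the gt_poses.json written by the generator above.
# `R_est` is a placeholder standing in for a calibration result.
import json
import numpy as np

with open('gt_poses.json', 'rt') as f:
    poses = json.load(f)

for name, pose in poses.items():              # keys look like 'frame_0', 'frame_1', ...
    R_gt = np.array(pose['R'])                # 3x3 ground-truth board rotation
    t_gt = np.array(pose['T'])                # ground-truth board translation
    R_est = np.eye(3)                         # placeholder estimate
    # Angle of the relative rotation R_gt^T @ R_est, in degrees
    cos_a = np.clip((np.trace(R_gt.T @ R_est) - 1.0) / 2.0, -1.0, 1.0)
    print(name, 'rotation error [deg]:', np.degrees(np.arccos(cos_a)), 't_gt:', t_gt.ravel())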

apps/python-calibration-generator/utils.py

@@ -46,25 +46,29 @@ def insideImage(pts, w, h):
 def areAllInsideImage(pts, w, h):
     return insideImageMask(pts, w, h).all()

-def writeMatrix(file, M):
+def writeMatrix(file, label, M):
+    file.write("%s:\n" % label)
     for i in range(M.shape[0]):
         for j in range(M.shape[1]):
             file.write(str(M[i,j]) + ('\n' if j == M.shape[1]-1 else ' '))

 def saveKDRT(cameras, fname):
     file = open(fname, 'w')
-    for cam in cameras:
-        writeMatrix(file, cam.K)
-        writeMatrix(file, cam.distortion)
-        writeMatrix(file, cam.R)
-        writeMatrix(file, cam.t)
+    for idx, cam in enumerate(cameras):
+        file.write("camera_%d:\n" % idx)
+        writeMatrix(file, "K", cam.K)
+        writeMatrix(file, "distortion", cam.distortion)
+        writeMatrix(file, "R", cam.R)
+        writeMatrix(file, "T", cam.t)

 def export2JSON(pattern_points, image_points, image_sizes, is_fisheye, json_file):
     image_points = image_points.transpose(1,0,3,2)
     image_points_list = [[] for i in range(len(image_sizes))]
     for c in range(len(image_points)):
         for f in range(len(image_points[c])):
-            if areAllInsideImage(image_points[c][f], image_sizes[c][0], image_sizes[c][1]):
+            if insideImage(image_points[c][f].T, image_sizes[c][0], image_sizes[c][1]) >= 4:
+                mask = np.logical_not(insideImageMask(image_points[c][f].T, image_sizes[c][0], image_sizes[c][1]))
+                image_points[c][f][mask] = -1.
                 image_points_list[c].append(image_points[c][f].tolist())
             else:
                 image_points_list[c].append([])
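
For reference (illustration, not part of the diff): a preview of the text layout produced by the relabelled saveKDRT/writeMatrix pair above; the numbers are placeholders and a 1x5 distortion row is assumed:

# Sketch only: mirrors the updated writeMatrix to preview the gt.txt layout.
import io
import numpy as np

def writeMatrix(file, label, M):
    file.write("%s:\n" % label)
    for i in range(M.shape[0]):
        for j in range(M.shape[1]):
            file.write(str(M[i, j]) + ('\n' if j == M.shape[1] - 1 else ' '))

buf = io.StringIO()
buf.write("camera_0:\n")
writeMatrix(buf, "K", np.array([[1000., 0., 640.], [0., 1000., 360.], [0., 0., 1.]]))
writeMatrix(buf, "distortion", np.zeros((1, 5)))   # assumed 5-coefficient model
print(buf.getvalue())
# camera_0:
# K:
# 1000.0 0.0 640.0
# 0.0 1000.0 360.0
# 0.0 0.0 1.0
# distortion:
# 0.0 0.0 0.0 0.0 0.0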
