How to visualize a COLMAP export [images.txt] in Blender?
Posted: 2021-11-11 13:39:03

I have a COLMAP export of camera poses in a file named "images.txt":
# Image list with two lines of data per image:
# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
# POINTS2D[] as (X, Y, POINT3D_ID)
# Number of images: 2, mean observations per image: 2
1 0.851773 0.0165051 0.503764 -0.142941 -0.737434 1.02973 3.74354 1 P1180141.JPG
2362.39 248.498 58396 1784.7 268.254 59027 1784.7 268.254 -1
2 0.851773 0.0165051 0.503764 -0.142941 -0.737434 1.02973 3.74354 1 P1180142.JPG
1190.83 663.957 23056 1258.77 640.354 59070
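
For reference (not part of the original post), images.txt stores two lines per registered image: the first holds the pose (IMAGE_ID, QW QX QY QZ, TX TY TZ, CAMERA_ID, NAME) and the second the POINTS2D[] observations. A minimal sketch of separating the two, assuming the whole file fits in memory and every image has a non-empty POINTS2D[] line:

with open("images.txt") as f:
    rows = [l.strip() for l in f if l.strip() and not l.startswith("#")]
pose_rows = rows[0::2]    # IMAGE_ID QW QX QY QZ TX TY TZ CAMERA_ID NAME
point_rows = rows[1::2]   # X Y POINT3D_ID triplets for each image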
I then want to import it into Blender, so I wrote the code below. First I read the text and split each pose line into its quaternion and translation. Then I compute the optical center as -R^T * t and the camera rotation as R^T.
colmap_pose = r"/Users/chunibyo/Desktop/images.txt"
rc_pose = r'flight_log.csv'
image_list_file = r'select.bat'
pose_list_file = r'blender.py'
import math
import numpy as np
import open3d as o3d
def euler_from_quaternion(x, y, z, w):
    """
    Convert a quaternion into euler angles (roll, pitch, yaw)
    roll is rotation around x in radians (counterclockwise)
    pitch is rotation around y in radians (counterclockwise)
    yaw is rotation around z in radians (counterclockwise)
    """
    t0 = +2.0 * (w * x + y * z)
    t1 = +1.0 - 2.0 * (x * x + y * y)
    roll_x = math.atan2(t0, t1)

    t2 = +2.0 * (w * y - z * x)
    t2 = +1.0 if t2 > +1.0 else t2
    t2 = -1.0 if t2 < -1.0 else t2
    pitch_y = math.asin(t2)

    t3 = +2.0 * (w * z + x * y)
    t4 = +1.0 - 2.0 * (y * y + z * z)
    yaw_z = math.atan2(t3, t4)

    return roll_x, pitch_y, yaw_z  # in radians
def quaternion_rotation_matrix(q0, q1, q2, q3):
    # w, x, y, z
    """
    Convert a quaternion into a full three-dimensional rotation matrix.

    Input
    :param Q: A 4 element array representing the quaternion (q0,q1,q2,q3)

    Output
    :return: A 3x3 element matrix representing the full 3D rotation matrix.
             This rotation matrix converts a point in the local reference
             frame to a point in the global reference frame.
    """
    # First row of the rotation matrix
    r00 = 2 * (q0 * q0 + q1 * q1) - 1
    r01 = 2 * (q1 * q2 - q0 * q3)
    r02 = 2 * (q1 * q3 + q0 * q2)

    # Second row of the rotation matrix
    r10 = 2 * (q1 * q2 + q0 * q3)
    r11 = 2 * (q0 * q0 + q2 * q2) - 1
    r12 = 2 * (q2 * q3 - q0 * q1)

    # Third row of the rotation matrix
    r20 = 2 * (q1 * q3 - q0 * q2)
    r21 = 2 * (q2 * q3 + q0 * q1)
    r22 = 2 * (q0 * q0 + q3 * q3) - 1

    # 3x3 rotation matrix
    rot_matrix = np.array([[r00, r01, r02],
                           [r10, r11, r12],
                           [r20, r21, r22]])
    return rot_matrix
def rotmat2qvec(R):
    Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
    K = np.array([
        [Rxx - Ryy - Rzz, 0, 0, 0],
        [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
        [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
        [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
    eigvals, eigvecs = np.linalg.eigh(K)
    qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
    if qvec[0] < 0:
        qvec *= -1
    return qvec
def qvec2rotmat(qvec):
    # w, x, y, z
    return np.array([
        [1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2,
         2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
         2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
        [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
         1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2,
         2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
        [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
         2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
         1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2]])
def main():
    with open(colmap_pose) as f:
        lines = f.readlines()

    rc_lines = []
    image_list = []
    pose_list = []
    point_cloud = []
    accuracy = 10

    for index, temp_line in enumerate(lines):
        line = temp_line.strip()
        # pose lines end with the image name; compare case-insensitively
        # so names such as P1180141.JPG are matched as well
        if not line.startswith('#') and line.lower().endswith(('.png', '.jpg')):
            temp = line.split(' ')
            qw = float(temp[1])
            qx = float(temp[2])
            qy = float(temp[3])
            qz = float(temp[4])
            tx = float(temp[5])
            ty = float(temp[6])
            tz = float(temp[7])
            name = temp[9]

            # COLMAP stores the world-to-camera pose; invert it to get
            # the optical center (-R^T t) and camera-to-world rotation (R^T)
            rotation_matrix = qvec2rotmat([qw, qx, qy, qz])
            optical_center = -rotation_matrix.T @ np.array([tx, ty, tz])
            tx = float(optical_center[0])
            ty = float(optical_center[1])
            tz = float(optical_center[2])

            _qw, _qx, _qy, _qz = rotmat2qvec(rotation_matrix.T)
            roll_x, pitch_y, yaw_z = euler_from_quaternion(_qx, _qy, _qz, _qw)

            # (rc_lines / flight_log.csv is built here but never written out in this snippet)
            rc_lines.append(
                f"{name},{tx},{ty},{tz},{accuracy},{accuracy},{accuracy},"
                f"{math.degrees(yaw_z)},{math.degrees(pitch_y)},{math.degrees(roll_x)},"
                f"{accuracy},{accuracy},{accuracy}\n")
            image_list.append(f"xcopy {name} select\n")

            # f"camera_object{index}.rotation_euler = Euler(({yaw_z}, {pitch_y}, {roll_x}), 'ZYX')\n" \
            # f"camera_object{index}.rotation_quaternion = Quaternion(({qw}, {qx}, {qy}, {qz}))\n" \
            blender_script = f"camera_data{index} = bpy.data.cameras.new(name='{name}')\n" \
                             f"camera_object{index} = bpy.data.objects.new('{name}', camera_data{index})\n" \
                             f"bpy.context.scene.collection.objects.link(camera_object{index})\n" \
                             f"camera_object{index}.location = ({tx}, {ty}, {tz})\n" \
                             f"camera_object{index}.rotation_euler = Euler(({yaw_z}, {pitch_y}, {roll_x}), 'ZYX')\n" \
                             f"camera_object{index}.rotation_mode = 'ZYX'\n" \
                             f"bpy.data.cameras['{name}'].lens = 30\n\n"
            pose_list.append(blender_script)
            point_cloud.append([tx, ty, tz])

    # selected images
    with open(image_list_file, 'w') as f:
        f.write('@echo off\nmkdir select\n')
        f.writelines(image_list)

    # blender script
    with open(pose_list_file, 'w') as f:
        f.write('import bpy\nfrom mathutils import Euler, Quaternion\n\n')
        f.writelines(pose_list)

    # ply of camera centers
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(np.array(point_cloud))
    o3d.io.write_point_cloud("pcl.ply", pcd, write_ascii=True)


if __name__ == '__main__':
    main()
But the camera rotations I get are wrong; the original poses and the imported poses are shown in the figures.
Can you help me?
Answer 1: After loading R and T, try this, and use the quaternion as the camera orientation.
from scipy.spatial.transform import Rotation

w2c = np.eye(4)
w2c[:3, :3] = R
w2c[:3, 3] = T
c2w = np.linalg.inv(w2c)
# quaternion (scipy returns it in x, y, z, w order)
q = Rotation.from_matrix(c2w[:3, :3]).as_quat()
# translation
t = c2w[:3, 3]
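
A follow-up sketch (my addition, not part of the original answer): COLMAP follows the OpenCV camera convention (the camera looks down +Z with +Y pointing down), while a Blender camera looks down its local -Z with +Y up, so the camera-to-world rotation typically needs an extra 180-degree flip about the camera's local X axis before it is assigned. Assuming scipy and bpy are available and that a camera object already exists (the object name 'P1180141.JPG' is just a placeholder):

import bpy
import numpy as np
from scipy.spatial.transform import Rotation

# flip from COLMAP/OpenCV camera axes (+Z forward, +Y down)
# to Blender camera axes (-Z forward, +Y up)
flip = np.diag([1.0, -1.0, -1.0])
R_blender = c2w[:3, :3] @ flip

q = Rotation.from_matrix(R_blender).as_quat()  # scipy order: (x, y, z, w)

cam = bpy.data.objects['P1180141.JPG']         # placeholder camera object name
cam.location = tuple(c2w[:3, 3])
cam.rotation_mode = 'QUATERNION'
cam.rotation_quaternion = (q[3], q[0], q[1], q[2])  # Blender order: (w, x, y, z)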