ROS Experiment Notes: Running VINS-Mono on the L515
Posted by gwpscut
A previous post, 《ROS实验笔记之——Intel Realsense l515激光相机的使用》, already ran RTAB-Map on the L515; this post tries to get VINS-Mono running on the L515.
First, VINS-Mono itself needs to be set up and built successfully. If you hit the runtime errors mentioned in the earlier post 《ROS实验笔记之——SLAM无人驾驶初入门》, the fixes described there are still the way to go.
Start by launching the camera driver:
roslaunch realsense2_camera rs_camera.launch
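If the launch cannot find the camera, it may help to first confirm that librealsense detects the device at all (a quick check added here, not part of the original steps):
rs-enumerate-devices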
The corresponding launch file looks like this:
<launch>
<arg name="serial_no" default=""/>
<arg name="usb_port_id" default=""/>
<arg name="device_type" default=""/>
<arg name="json_file_path" default=""/>
<arg name="camera" default="camera"/>
<arg name="tf_prefix" default="$(arg camera)"/>
<arg name="external_manager" default="false"/>
<arg name="manager" default="realsense2_camera_manager"/>
<arg name="output" default="screen"/>
<arg name="respawn" default="false"/>
<arg name="fisheye_width" default="-1"/>
<arg name="fisheye_height" default="-1"/>
<arg name="enable_fisheye" default="false"/>
<arg name="depth_width" default="-1"/>
<arg name="depth_height" default="-1"/>
<arg name="enable_depth" default="true"/>
<arg name="confidence_width" default="-1"/>
<arg name="confidence_height" default="-1"/>
<arg name="enable_confidence" default="true"/>
<arg name="confidence_fps" default="-1"/>
<arg name="infra_width" default="848"/>
<arg name="infra_height" default="480"/>
<arg name="enable_infra" default="false"/>
<arg name="enable_infra1" default="false"/>
<arg name="enable_infra2" default="false"/>
<arg name="infra_rgb" default="false"/>
<arg name="color_width" default="-1"/>
<arg name="color_height" default="-1"/>
<arg name="enable_color" default="true"/>
<arg name="fisheye_fps" default="-1"/>
<arg name="depth_fps" default="-1"/>
<arg name="infra_fps" default="30"/>
<arg name="color_fps" default="-1"/>
<arg name="gyro_fps" default="-1"/>
<arg name="accel_fps" default="-1"/>
<arg name="enable_gyro" default="false"/>
<arg name="enable_accel" default="false"/>
<arg name="enable_pointcloud" default="false"/>
<arg name="pointcloud_texture_stream" default="RS2_STREAM_COLOR"/>
<arg name="pointcloud_texture_index" default="0"/>
<arg name="allow_no_texture_points" default="false"/>
<arg name="ordered_pc" default="false"/>
<arg name="enable_sync" default="false"/>
<arg name="align_depth" default="false"/>
<arg name="publish_tf" default="true"/>
<arg name="tf_publish_rate" default="0"/>
<arg name="filters" default=""/>
<arg name="clip_distance" default="-2"/>
<arg name="linear_accel_cov" default="0.01"/>
<arg name="initial_reset" default="false"/>
<arg name="unite_imu_method" default=""/>
<arg name="topic_odom_in" default="odom_in"/>
<arg name="calib_odom_file" default=""/>
<arg name="publish_odom_tf" default="true"/>
<arg name="stereo_module/exposure/1" default="7500"/>
<arg name="stereo_module/gain/1" default="16"/>
<arg name="stereo_module/exposure/2" default="1"/>
<arg name="stereo_module/gain/2" default="16"/>
<group ns="$(arg camera)">
<include file="$(find realsense2_camera)/launch/includes/nodelet.launch.xml">
<arg name="tf_prefix" value="$(arg tf_prefix)"/>
<arg name="external_manager" value="$(arg external_manager)"/>
<arg name="manager" value="$(arg manager)"/>
<arg name="output" value="$(arg output)"/>
<arg name="respawn" value="$(arg respawn)"/>
<arg name="serial_no" value="$(arg serial_no)"/>
<arg name="usb_port_id" value="$(arg usb_port_id)"/>
<arg name="device_type" value="$(arg device_type)"/>
<arg name="json_file_path" value="$(arg json_file_path)"/>
<arg name="enable_pointcloud" value="$(arg enable_pointcloud)"/>
<arg name="pointcloud_texture_stream" value="$(arg pointcloud_texture_stream)"/>
<arg name="pointcloud_texture_index" value="$(arg pointcloud_texture_index)"/>
<arg name="enable_sync" value="$(arg enable_sync)"/>
<arg name="align_depth" value="$(arg align_depth)"/>
<arg name="fisheye_width" value="$(arg fisheye_width)"/>
<arg name="fisheye_height" value="$(arg fisheye_height)"/>
<arg name="enable_fisheye" value="$(arg enable_fisheye)"/>
<arg name="depth_width" value="$(arg depth_width)"/>
<arg name="depth_height" value="$(arg depth_height)"/>
<arg name="enable_depth" value="$(arg enable_depth)"/>
<arg name="confidence_width" value="$(arg confidence_width)"/>
<arg name="confidence_height" value="$(arg confidence_height)"/>
<arg name="enable_confidence" value="$(arg enable_confidence)"/>
<arg name="confidence_fps" value="$(arg confidence_fps)"/>
<arg name="color_width" value="$(arg color_width)"/>
<arg name="color_height" value="$(arg color_height)"/>
<arg name="enable_color" value="$(arg enable_color)"/>
<arg name="infra_width" value="$(arg infra_width)"/>
<arg name="infra_height" value="$(arg infra_height)"/>
<arg name="enable_infra" value="$(arg enable_infra)"/>
<arg name="enable_infra1" value="$(arg enable_infra1)"/>
<arg name="enable_infra2" value="$(arg enable_infra2)"/>
<arg name="infra_rgb" value="$(arg infra_rgb)"/>
<arg name="fisheye_fps" value="$(arg fisheye_fps)"/>
<arg name="depth_fps" value="$(arg depth_fps)"/>
<arg name="infra_fps" value="$(arg infra_fps)"/>
<arg name="color_fps" value="$(arg color_fps)"/>
<arg name="gyro_fps" value="$(arg gyro_fps)"/>
<arg name="accel_fps" value="$(arg accel_fps)"/>
<arg name="enable_gyro" value="$(arg enable_gyro)"/>
<arg name="enable_accel" value="$(arg enable_accel)"/>
<arg name="publish_tf" value="$(arg publish_tf)"/>
<arg name="tf_publish_rate" value="$(arg tf_publish_rate)"/>
<arg name="filters" value="$(arg filters)"/>
<arg name="clip_distance" value="$(arg clip_distance)"/>
<arg name="linear_accel_cov" value="$(arg linear_accel_cov)"/>
<arg name="initial_reset" value="$(arg initial_reset)"/>
<arg name="unite_imu_method" value="$(arg unite_imu_method)"/>
<arg name="topic_odom_in" value="$(arg topic_odom_in)"/>
<arg name="calib_odom_file" value="$(arg calib_odom_file)"/>
<arg name="publish_odom_tf" value="$(arg publish_odom_tf)"/>
<arg name="stereo_module/exposure/1" value="$(arg stereo_module/exposure/1)"/>
<arg name="stereo_module/gain/1" value="$(arg stereo_module/gain/1)"/>
<arg name="stereo_module/exposure/2" value="$(arg stereo_module/exposure/2)"/>
<arg name="stereo_module/gain/2" value="$(arg stereo_module/gain/2)"/>
<arg name="allow_no_texture_points" value="$(arg allow_no_texture_points)"/>
<arg name="ordered_pc" value="$(arg ordered_pc)"/>
</include>
</group>
</launch>
List the topics:
rostopic list
/camera/color/camera_info
/camera/color/image_raw
/camera/color/image_raw/compressed
/camera/color/image_raw/compressed/parameter_descriptions
/camera/color/image_raw/compressed/parameter_updates
/camera/color/image_raw/compressedDepth
/camera/color/image_raw/compressedDepth/parameter_descriptions
/camera/color/image_raw/compressedDepth/parameter_updates
/camera/color/image_raw/theora
/camera/color/image_raw/theora/parameter_descriptions
/camera/color/image_raw/theora/parameter_updates
/camera/depth/camera_info
/camera/depth/image_rect_raw
/camera/depth/image_rect_raw/compressed
/camera/depth/image_rect_raw/compressed/parameter_descriptions
/camera/depth/image_rect_raw/compressed/parameter_updates
/camera/depth/image_rect_raw/compressedDepth
/camera/depth/image_rect_raw/compressedDepth/parameter_descriptions
/camera/depth/image_rect_raw/compressedDepth/parameter_updates
/camera/depth/image_rect_raw/theora
/camera/depth/image_rect_raw/theora/parameter_descriptions
/camera/depth/image_rect_raw/theora/parameter_updates
/camera/extrinsics/depth_to_color
/camera/l500_depth_sensor/parameter_descriptions
/camera/l500_depth_sensor/parameter_updates
/camera/motion_module/parameter_descriptions
/camera/motion_module/parameter_updates
/camera/realsense2_camera_manager/bond
/camera/rgb_camera/parameter_descriptions
/camera/rgb_camera/parameter_updates
/diagnostics
/rosout
/rosout_agg
/tf
/tf_static
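A quick way to scan the list for IMU-related streams (a convenience filter added here, not in the original post):
rostopic list | grep -E 'imu|gyro|accel'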
Nothing IMU-related shows up, so change the launch command to:
roslaunch realsense2_camera rs_camera.launch \
    align_depth:=true \
    unite_imu_method:="linear_interpolation" \
    enable_gyro:=true \
    enable_accel:=true
rostopic list
/camera/accel/imu_info
/camera/align_to_color/parameter_descriptions
/camera/align_to_color/parameter_updates
/camera/aligned_depth_to_color/camera_info
/camera/aligned_depth_to_color/image_raw
/camera/aligned_depth_to_color/image_raw/compressed
/camera/aligned_depth_to_color/image_raw/compressed/parameter_descriptions
/camera/aligned_depth_to_color/image_raw/compressed/parameter_updates
/camera/aligned_depth_to_color/image_raw/compressedDepth
/camera/aligned_depth_to_color/image_raw/compressedDepth/parameter_descriptions
/camera/aligned_depth_to_color/image_raw/compressedDepth/parameter_updates
/camera/aligned_depth_to_color/image_raw/theora
/camera/aligned_depth_to_color/image_raw/theora/parameter_descriptions
/camera/aligned_depth_to_color/image_raw/theora/parameter_updates
/camera/color/camera_info
/camera/color/image_raw
/camera/color/image_raw/compressed
/camera/color/image_raw/compressed/parameter_descriptions
/camera/color/image_raw/compressed/parameter_updates
/camera/color/image_raw/compressedDepth
/camera/color/image_raw/compressedDepth/parameter_descriptions
/camera/color/image_raw/compressedDepth/parameter_updates
/camera/color/image_raw/theora
/camera/color/image_raw/theora/parameter_descriptions
/camera/color/image_raw/theora/parameter_updates
/camera/depth/camera_info
/camera/depth/image_rect_raw
/camera/depth/image_rect_raw/compressed
/camera/depth/image_rect_raw/compressed/parameter_descriptions
/camera/depth/image_rect_raw/compressed/parameter_updates
/camera/depth/image_rect_raw/compressedDepth
/camera/depth/image_rect_raw/compressedDepth/parameter_descriptions
/camera/depth/image_rect_raw/compressedDepth/parameter_updates
/camera/depth/image_rect_raw/theora
/camera/depth/image_rect_raw/theora/parameter_descriptions
/camera/depth/image_rect_raw/theora/parameter_updates
/camera/extrinsics/depth_to_color
/camera/gyro/imu_info
/camera/imu
/camera/l500_depth_sensor/parameter_descriptions
/camera/l500_depth_sensor/parameter_updates
/camera/motion_module/parameter_descriptions
/camera/motion_module/parameter_updates
/camera/realsense2_camera_manager/bond
/camera/rgb_camera/parameter_descriptions
/camera/rgb_camera/parameter_updates
/diagnostics
/rosout
/rosout_agg
/tf
/tf_static
Now the IMU shows up, split across two sensors:
"/camera/gyro/imu_info" for the gyroscope (angular velocity)
"/camera/accel/imu_info" for the accelerometer (linear acceleration)
At the same time there is a combined "/camera/imu" topic (thanks to unite_imu_method), so nothing else should need to change.
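To confirm the merged stream is actually publishing, and at what rate, the following quick checks can be used (my own sanity checks, not part of the original write-up):
rostopic hz /camera/imu
rostopic echo -n 1 /camera/imu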
The IMU and camera streams should also be synchronized:
roslaunch realsense2_camera rs_camera.launch \
    align_depth:=true \
    unite_imu_method:="linear_interpolation" \
    enable_gyro:=true \
    enable_accel:=true \
    enable_sync:=true
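One way to eyeball whether the image and IMU stamps end up on the same clock after enabling sync (again my own check, not from the original post):
rostopic echo -n 1 /camera/color/image_raw/header/stamp
rostopic echo -n 1 /camera/imu/header/stamp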
Next, set up VINS-Mono. Starting from realsense_color_config.yaml, first change the subscribed topics:
#common parameters
# imu_topic: "/camera/imu/data_raw"
# image_topic: "/camera/color/image_raw"
imu_topic: "/camera/imu"
image_topic: "/camera/color/image_raw"
The camera intrinsics can either be taken from the camera_info topic or calibrated yourself. The factory camera_info can be read with the command below, although it may differ somewhat from the true values:
rostopic echo /camera/color/camera_info
Next comes the transform between the camera and the IMU:
# Extrinsic parameter between IMU and Camera.
estimate_extrinsic: 0 # 0 Have an accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam, don't change it.
# 1 Have an initial guess about extrinsic parameters. We will optimize around your initial guess.
# 2 Don't know anything about extrinsic parameters. You don't need to give R,T. We will try to calibrate it. Do some rotation movement at beginning.
#If you choose 0 or 1, you should write down the following matrix.
The IMU-camera extrinsics are best calibrated offline with Kalibr; alternatively, set estimate_extrinsic to 1 or 2 and let the estimator calibrate and refine them itself. Here it is set to 2.
The RealSense L515 is said to be hardware-synchronized, so online estimation of the camera-IMU time offset should not be necessary (although both Kalibr calibration and online estimation report an offset of roughly -0.06 s).
#unsynchronization parameters
estimate_td: 0 # online estimate time offset between camera and imu
td: 0.000 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)
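Given the roughly -0.06 s offset reported above, another option (my own suggestion; the -0.060 seed is only a hypothetical starting value taken from that observation) would be to let VINS estimate the offset online:
estimate_td: 1 # online estimate time offset between camera and imu
td: -0.060 # initial guess, roughly the offset seen with Kalibr / online estimation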
The shutter type also has to be set; here it is configured as global shutter:
#rolling shutter parameters
rolling_shutter: 0 # 0: global shutter camera, 1: rolling shutter camera
rolling_shutter_tr: 0 # unit: s. rolling shutter read out time per frame (from data sheet).
Now everything can be run (note that unite_imu_method is switched to "copy" here):
roslaunch realsense2_camera rs_camera.launch \
    align_depth:=true \
    unite_imu_method:="copy" \
    enable_gyro:=true \
    enable_accel:=true \
    enable_sync:=true
roslaunch vins_estimator realsense_color.launch
roslaunch vins_estimator vins_rviz.launch
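To make the parameter tuning below repeatable, it can be convenient to record the raw streams once and replay them while adjusting the config (my own suggestion; topic names as above):
rosbag record -O l515_vins.bag /camera/imu /camera/color/image_raw /camera/color/camera_info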
It did not really work at first... after switching to another computer it finally ran, but after moving around for a while the estimate drifts badly; see the video below.
(video: vins)
%YAML:1.0
#common parameters
imu_topic: "/camera/imu"
image_topic: "/camera/color/image_raw"
output_path: "/home/tony-ws1/output/"
#camera calibration
model_type: PINHOLE
camera_name: camera
image_width: 1280
image_height: 720
distortion_parameters:
   k1: 9.2615504465028850e-02
   k2: -1.8082438825995681e-01
   p1: -6.5484100374765971e-04
   p2: -3.5829351558557421e-04
projection_parameters:
   fx: 6.0970550296798035e+02
   fy: 6.0909579671294716e+02
   cx: 3.1916667152289227e+02
   cy: 2.3558360480225772e+02
# Extrinsic parameter between IMU and Camera.
estimate_extrinsic: 2 # 0 Have an accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam, don't change it.
# 1 Have an initial guess about extrinsic parameters. We will optimize around your initial guess.
# 2 Don't know anything about extrinsic parameters. You don't need to give R,T. We will try to calibrate it. Do some rotation movement at beginning.
#If you choose 0 or 1, you should write down the following matrix.
#Rotation from camera frame to imu frame, imu^R_cam
extrinsicRotation: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 0.99964621,  0.01105994,  0.02418954,
          -0.01088975,  0.9999151,  -0.00715601,
          -0.02426663,  0.00689006,  0.99968178]
#Translation from camera frame to imu frame, imu^T_cam
extrinsicTranslation: !!opencv-matrix
   rows: 3
   cols: 1
   dt: d
   data: [0.07494282, -0.01077138, -0.00641822]
#feature tracker parameters
max_cnt: 150 # max feature number in feature tracking
min_dist: 25 # min distance between two features
freq: 10 # frequency (Hz) of publishing the tracking result. At least 10 Hz for good estimation. If set to 0, the frequency will be the same as the raw image
F_threshold: 1.0 # RANSAC threshold (pixel)
show_track: 1 # publish tracking image as topic
equalize: 0 # if the image is too dark or too bright, turn on equalization to find enough features
fisheye: 0 # if using a fisheye lens, turn this on. A circular mask will be loaded to remove noisy edge points
#optimization parameters
max_solver_time: 0.04 # max solver iteration time (ms), to guarantee real time
max_num_iterations: 8 # max solver iterations, to guarantee real time
keyframe_parallax: 10.0 # keyframe selection threshold (pixel)
#imu parameters       The more accurate parameters you provide, the better performance
acc_n: 0.1 # accelerometer measurement noise standard deviation. #0.2
gyr_n: 0.01 # gyroscope measurement noise standard deviation. #0.05
acc_w: 0.0002 # accelerometer bias random walk noise standard deviation. #0.02
gyr_w: 2.0e-5 # gyroscope bias random walk noise standard deviation. #4.0e-5
g_norm: 9.805 # gravity magnitude
#loop closure parameters
loop_closure: 1 # start loop closure
fast_relocalization: 1 # useful in real-time and large project
load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path'
pose_graph_save_path: "/home/tony-ws1/output/pose_graph/" # save and load path
#unsynchronization parameters
estimate_td: 1 # online estimate time offset between camera and imu
td: 0.000 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)
#rolling shutter parameters
rolling_shutter: 1 # 0: global shutter camera, 1: rolling shutter camera
rolling_shutter_tr: 0.033 # unit: s. rolling shutter read out time per frame (from data sheet).
#visualization parameters
save_image: 1 # save image in pose graph for visualization purposes; you can disable this by setting it to 0
visualize_imu_forward: 0 # output IMU forward propagation to achieve low-latency, high-frequency results
visualize_camera_size: 0.4 # size of camera marker in RVIZ
Even with this running, the estimate still drifts, so properly calibrating the camera seems essential. I did not know what the individual parameters meant until I found an explanation through this link (https://github.com/HKPolyU-UAV/FLVIS). Below is the output of /camera/color/camera_info:
header:
  seq: 21
  stamp:
    secs: 1632650051
    nsecs: 413622856
  frame_id: "camera_color_optical_frame"
height: 720
width: 1280
distortion_model: "plumb_bob"
D: [0.14218834042549133, -0.48622673749923706, 0.0013360617449507117, 0.0007612911867909133, 0.444832980632782]
K: [905.8331298828125, 0.0, 649.684814453125, 0.0, 905.6519775390625, 352.701416015625, 0.0, 0.0, 1.0]
R: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
P: [905.8331298828125, 0.0, 649.684814453125, 0.0, 0.0, 905.6519775390625, 352.701416015625, 0.0, 0.0, 0.0, 1.0, 0.0]
binning_x: 0
binning_y: 0
roi:
  x_offset: 0
  y_offset: 0
  height: 0
  width: 0
  do_rectify: False
---
image_width: 1280
image_height: 720
cam0_intrinsics: [905.8331298828125, 905.6519775390625, 649.684814453125, 352.701416015625] # fx fy cx cy
cam0_distortion_coeffs: [0.14218834042549133, -0.48622673749923706, 0.0013360617449507117, 0.0007612911867909133] # k1 k2 p1 p2
(So the lens distortion model has five parameters in total, k1, k2, k3, p1, p2, that need calibrating; together with the intrinsic matrix M1 they form the camera intrinsics to be calibrated. OpenCV outputs the distortion parameters in the order k1, k2, p1, p2, k3, since k3 is the least important.)
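For reference, here is how the camera_info fields map into the VINS-Mono pinhole config used below (assuming the plumb_bob convention shown above; k3 = D[4] is simply dropped because the config only takes k1, k2, p1, p2):
# K = [fx, 0, cx,  0, fy, cy,  0, 0, 1]  ->  fx = K[0], fy = K[4], cx = K[2], cy = K[5]
# D = [k1, k2, p1, p2, k3]               ->  k1 = D[0], k2 = D[1], p1 = D[2], p2 = D[3]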
Updating the config with these calibrated values gives:
%YAML:1.0
#common parameters
imu_topic: "/camera/imu"
image_topic: "/camera/color/image_raw"
output_path: "/home/tony-ws1/output/"
#camera calibration
model_type: PINHOLE
camera_name: camera
image_width: 1280
image_height: 720
distortion_parameters:
   k1: 0.14218834042549133
   k2: -0.48622673749923706
   p1: 0.0013360617449507117
   p2: 0.0007612911867909133
projection_parameters:
   fx: 905.8331298828125
   fy: 905.6519775390625
   cx: 649.684814453125
   cy: 352.701416015625
# Extrinsic parameter between IMU and Camera.
estimate_extrinsic: 2 # 0 Have an accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam, don't change it.
# 1 Have an initial guess about extrinsic parameters. We will optimize around your initial guess.
# 2 Don't know anything about extrinsic parameters. You don't need to give R,T. We will try to calibrate it. Do some rotation movement at beginning.
#If you choose 0 or 1, you should write down the following matrix.
#Rotation from camera frame to imu frame, imu^R_cam
extrinsicRotation: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 0.99964621,  0.01105994,  0.02418954,
          -0.01088975,  0.9999151,  -0.00715601,
          -0.02426663,  0.00689006,  0.99968178]
#Translation from camera frame to imu frame, imu^T_cam
extrinsicTranslation: !!opencv-matrix
   rows: 3
   cols: 1
   dt: d
   data: [0.07494282, -0.01077138, -0.00641822]
#feature tracker parameters
max_cnt: 150 # max feature number in feature tracking
min_dist: 25 # min distance between two features
freq: 10 # frequency (Hz) of publishing the tracking result. At least 10 Hz for good estimation. If set to 0, the frequency will be the same as the raw image
F_threshold: 1.0 # RANSAC threshold (pixel)
show_track: 1 # publish tracking image as topic
equalize: 0 # if the image is too dark or too bright, turn on equalization to find enough features
fisheye: 0 # if using a fisheye lens, turn this on. A circular mask will be loaded to remove noisy edge points
#optimization parameters
max_solver_time: 0.04 # max solver iteration time (ms), to guarantee real time
max_num_iterations: 8 # max solver iterations, to guarantee real time
keyframe_parallax: 10.0 # keyframe selection threshold (pixel)
#imu parameters       The more accurate parameters you provide, the better performance
acc_n: 0.1 # accelerometer measurement noise standard deviation. #0.2
gyr_n: 0.01 # gyroscope measurement noise standard deviation. #0.05
acc_w: 0.0002 # accelerometer bias random walk noise standard deviation. #0.02
gyr_w: 2.0e-5 # gyroscope bias random walk noise standard deviation. #4.0e-5
g_norm: 9.805 # gravity magnitude
#loop closure parameters
loop_closure: 1 # start loop closure
fast_relocalization: 1 # useful in real-time and large project
load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path'
pose_graph_save_path: "/home/tony-ws1/output/pose_graph/" # save and load path
#unsynchronization parameters
estimate_td: 1 # online estimate time offset between camera and imu
td: 0.000 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)
#rolling shutter parameters
rolling_shutter: 1 # 0: global shutter camera, 1: rolling shutter camera
rolling_shutter_tr: 0.033 # unit: s. rolling shutter read out time per frame (from data sheet).
#visualization parameters
save_image: 1 # save image in pose graph for visualization purposes; you can disable this by setting it to 0
visualize_imu_forward: 0 # output IMU forward propagation to achieve low-latency, high-frequency results
visualize_camera_size: 0.4 # size of camera marker in RVIZ
It still does not really work... the IMU drifts badly.
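When drift like this shows up, one basic check (my own suggestion, not from the original post) is to watch the IMU while the camera sits completely still; readings that wander a lot at rest point to IMU noise/bias or timing problems rather than to the VINS parameters:
rostopic echo /camera/imu/linear_acceleration
rostopic echo /camera/imu/angular_velocity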