import pyzed.sl as sl
import numpy as np
import cv2
# GLViewer comes from the ZED positional tracking Python sample (ogl_viewer/tracking_viewer.py)
import ogl_viewer.tracking_viewer as gl


def process_svo(file_path):
    # Create a ZED camera object
    zed = sl.Camera()

    # Initialize the parameters for opening the SVO file
    init_params = sl.InitParameters()
    init_params.set_from_svo_file(file_path)
    init_params.svo_real_time_mode = False  # Disable real-time mode for better processing
    init_params.coordinate_units = sl.UNIT.METER
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Z_UP  # Set a coordinate system for tracking
    init_params.depth_mode = sl.DEPTH_MODE.ULTRA

    # Open the SVO file
    status = zed.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(f"Failed to open SVO file: {status}")
        return

    # Enable positional tracking
    tracking_params = sl.PositionalTrackingParameters()
    tracking_params.set_gravity_as_origin = False
    tracking_params.enable_area_memory = True
    tracking_params.enable_pose_smoothing = False
    tracking_params.enable_imu_fusion = True
    tracking_params.mode = sl.POSITIONAL_TRACKING_MODE.GEN_2
    # tracking_params.area_file_path = "test.area"
    status = zed.enable_positional_tracking(tracking_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(f"Failed to enable positional tracking: {status}")
        zed.close()
        return

    tracked_pose = []
    trans_pose = []

    camera_info = zed.get_camera_information()
    viewer = gl.GLViewer()
    viewer.init(camera_info.camera_model)

    # Loop through frames in the SVO file
    while viewer.is_available():
        zed_pose = sl.Pose()
        if zed.grab() == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the camera relative to the world frame
            state = zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
            tracking_status = zed.get_positional_tracking_status()
            if state == sl.POSITIONAL_TRACKING_STATE.OK:
                translation_left_to_center = zed.get_camera_information().camera_configuration.calibration_parameters.get_camera_baseline() / 2
                # Retrieve and transform the pose data into a new frame located at the center of the camera
                # (transform_pose is a user-side helper, see the sketch after this listing)
                transformation = [translation_left_to_center, -0.085, -0.11, np.pi, 0, 0]
                transformed_pose = transform_pose(zed_pose.pose_data(sl.Transform()), transformation)

                # Display translation and timestamp
                py_translation = sl.Translation()
                tx = zed_pose.get_translation(py_translation).get()[0]
                ty = zed_pose.get_translation(py_translation).get()[1]
                tz = zed_pose.get_translation(py_translation).get()[2]
                trans_tx = transformed_pose.get_translation().get()[0]
                trans_ty = transformed_pose.get_translation().get()[1]
                trans_tz = transformed_pose.get_translation().get()[2]
                # print("Translation: tx: {0}, ty: {1}, tz: {2}, timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))

                # Display orientation quaternion
                py_orientation = sl.Orientation()
                ox = zed_pose.get_orientation(py_orientation).get()[0]
                oy = zed_pose.get_orientation(py_orientation).get()[1]
                oz = zed_pose.get_orientation(py_orientation).get()[2]
                ow = zed_pose.get_orientation(py_orientation).get()[3]
                trans_ox = transformed_pose.get_orientation().get()[0]
                trans_oy = transformed_pose.get_orientation().get()[1]
                trans_oz = transformed_pose.get_orientation().get()[2]
                trans_ow = transformed_pose.get_orientation().get()[3]
                # print("Orientation: ox: {0}, oy: {1}, oz: {2}, ow: {3}\n".format(ox, oy, oz, ow))

                timestamp = zed_pose.timestamp.get_nanoseconds()
                rotation = zed_pose.get_rotation_vector()
                translation = zed_pose.get_translation(py_translation)
                text_rotation = str((round(rotation[0], 2), round(rotation[1], 2), round(rotation[2], 2)))
                text_translation = str((round(translation.get()[0], 2), round(translation.get()[1], 2), round(translation.get()[2], 2)))

                tracked_pose.append([timestamp, tx, ty, tz, ox, oy, oz, ow])
                trans_pose.append([timestamp, trans_tx, trans_ty, trans_tz, trans_ox, trans_oy, trans_oz, trans_ow])
                viewer.updateData(zed_pose.pose_data(sl.Transform()), text_translation, text_rotation, tracking_status)
            else:
                print("Tracking Failed")
                break
        else:
            break

    tracked_pose = np.array(tracked_pose)
    trans_pose = np.array(trans_pose)

    # Save the spatial memory (area map) once playback is finished
    code = zed.save_area_map('test.area')
    print(repr(zed.get_area_export_state()))
    if code == sl.ERROR_CODE.SUCCESS:
        print("Area map saved")

    # Close the camera and release resources
    viewer.exit()
    zed.close()
    cv2.destroyAllWindows()

    return tracked_pose, trans_pose
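For completeness: transform_pose above is not an SDK call but a small user-side helper that shifts the pose into a new reference frame. A rough sketch of such a helper, adapted from the frame-shift pattern in the Stereolabs positional tracking sample, is below; it is illustrative only, and the set_euler_angles call and the matrix indexing are assumptions to verify against the pyzed API.

def transform_pose(pose, transformation):
    # Illustrative only: shift a pose into a new reference frame
    # transformation = [tx, ty, tz, rx, ry, rz]: translation in meters, Euler angles in radians
    tx, ty, tz, rx, ry, rz = transformation
    frame_shift = sl.Transform()
    frame_shift.set_identity()
    frame_shift.set_euler_angles(rx, ry, rz)  # assumption: angles interpreted as radians
    frame_shift[0, 3] = tx
    frame_shift[1, 3] = ty
    frame_shift[2, 3] = tz
    # Pose(new frame) = M^-1 * Pose(camera frame) * M, as in the Stereolabs sample
    frame_shift_inv = sl.Transform()
    frame_shift_inv.init_matrix(frame_shift)
    frame_shift_inv.inverse()
    new_pose = sl.Transform()
    new_pose.init_matrix(frame_shift_inv * pose * frame_shift)
    return new_pose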
Preliminary Checks
Description
When trying to save the area map with the GEN_2 positional tracking mode through the Python SDK, print(repr(zed.get_area_export_state())) prints SUCCESS, but the area file is not actually written to the specified directory. When I use the GEN_1 positional tracking mode, the area map is saved properly in the specified directory. Note: I am playing back a recorded SVO file while doing the positional tracking.
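For reference, since save_area_map() runs the export asynchronously, the export state can be polled before closing the camera; a minimal sketch of such a check is below (the RUNNING member of sl.AREA_EXPORTING_STATE and the timeout loop are assumptions on my part, not taken from the code above).

import os
import time

def save_and_verify_area_map(zed, path="test.area", timeout_s=5.0):
    # save_area_map() only starts the export; poll the export state before closing the camera
    zed.save_area_map(path)
    start = time.time()
    while zed.get_area_export_state() == sl.AREA_EXPORTING_STATE.RUNNING and time.time() - start < timeout_s:
        time.sleep(0.1)
    print(repr(zed.get_area_export_state()))
    # Check on disk as well, since in this case the reported state and the file disagree
    print("File exists:", os.path.isfile(path))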
Steps to Reproduce
Expected Result
Area Map is saved in the specified directory
Actual Result
Area map is not saved, despite SUCCESS being printed by print(repr(zed.get_area_export_state()))
ZED Camera model
ZED Mini
Environment
Anything else?
Thank you in advance for helping!