Closed Praneet1997 closed 5 years ago
Camera:d435i SDK latest and pyrealsense2
My python script is working if I stream only IMU data or only Image data but not together
import pyrealsense2 as rs import numpy as np import cv2 import time import ros
def initialize_camera(): p = rs.pipeline() conf = rs.config() CAM_WIDTH, CAM_HEIGHT, CAM_FPS = 848,480,15 conf.enable_stream(rs.stream.depth, CAM_WIDTH, CAM_HEIGHT, rs.format.z16, CAM_FPS) conf.enable_stream(rs.stream.color, CAM_WIDTH, CAM_HEIGHT, rs.format.rgb8, CAM_FPS) conf.enable_stream(rs.stream.infrared,1, CAM_WIDTH, CAM_HEIGHT, rs.format.y8, CAM_FPS) conf.enable_stream(rs.stream.infrared,2, CAM_WIDTH, CAM_HEIGHT, rs.format.y8, CAM_FPS) conf.enable_stream(rs.stream.accel) conf.enable_stream(rs.stream.gyro) prof = p.start(conf) device = prof.get_device() depth_sensor = device.query_sensors()[0] depth_sensor.set_option(rs.option.laser_power, 0) return p
def gyro_data(gyro): return np.asarray([gyro.x, gyro.y, gyro.z])
def accel_data(accel): return np.asarray([accel.x, accel.y, accel.z])
t0 = time.time() p = initialize_camera() print "ok done" print time.time() - t0, "seconds elapsed"
try:
while True:
frames = p.wait_for_frames()
accel=accel_data(frames[0].as_motion_frame().get_motion_data())
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
infra_frame1 = frames.get_infrared_frame(1)
infra_frame2 = frames.get_infrared_frame(2)
time.sleep(0.1)
gyro= gyro_data(frames[1].as_motion_frame().get_motion_data())
depth_image=np.asanyarray(depth_frame.get_data())
color_image=np.asanyarray(color_frame.get_data())
infra_image1=np.asanyarray(infra_frame1.get_data())
infra_image2=np.asanyarray(infra_frame2.get_data())
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
images = np.hstack( (infra_image1,infra_image2))
images2=np.hstack((depth_colormap,color_image))
print("accelerometer: ", accel)
print("gyro: ", gyro)
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', images)
cv2.namedWindow('RealSense2', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense2', images2)
cv2.imshow('realsense3',depth_image)
cv2.waitKey(1)
finally:
p.stop()
It ends with error "RuntimeError: null pointer passed for argument "frame_ref"
@Praneet1997 hello, The frameset content is dynamically allocated, and one must not make assumptions regarding a specific frame within the container as appears in the snippet:
accel=accel_data(frames[0].as_motion_frame().get_motion_data()) gyro= gyro_data(frames[1].as_motion_frame().get_motion_data())
The envisioned approach is a dynamic discovery while iterating over the frames in the frameset
object.
frames = pipe.wait_for_frames()
for frame in frames:
if frame.is_depth():
...
if frame.is_motion():
#check if Accel or Gyro
...
if frame.is_pose():
...
In addition there are also auxiliary functions available for certain frame types, e.g. frameset.get_depth_frame()
See T265 example as a reference
there is no attribute of frame object as is_motion or is depth
AttributeError: 'pyrealsense2.pyrealsense2.frame' object has no attribute 'is_depth'
As you can see in the documentation for the frame class using the builtin help()
command, or online here the correct names for these functions is is_depth_frame
, is_motion_frame
, etc
I also seem to be running into this problem. I've tried to implement what @ev-mp said about iterating over the frames and dynamically retrieving the data, but I still get the "RuntimeError: null pointer passed for argument "frame_ref"" error. I'm using the following code:
import pyrealsense2 as rs import numpy as np import cv2
pipeline = rs.pipeline() config = rs.config() config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30) config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30) config.enable_stream(rs.stream.accel, rs.format.motion_xyz32f, 250) config.enable_stream(rs.stream.gyro, rs.format.motion_xyz32f, 200)
def gyro_data(gyro): return np.asarray([gyro.x, gyro.y, gyro.z])
def accel_data(accel): return np.asarray([accel.x, accel.y, accel.z])
pipeline.start(config) try: while True: frames = pipeline.wait_for_frames()
for frame in frames:
if frame.is_motion_frame():
accel = accel_data(frames[0].as_motion_frame().get_motion_data())
gyro = gyro_data(frames[1].as_motion_frame().get_motion_data())
if frame.is_depth_frame():
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()`
finally: pipeline.stop()
Can anybody see what the problem is? If the problem is in the lines: accel = accel_data(frames[0].as_motion_frame().get_motion_data()) gyro = gyro_data(frames[1].as_motion_frame().get_motion_data()) what would I do different to retrieve the IMU data?
I'm using the D435i camera on a Jetson TX1 running ubuntu 16.04.
Thank you!
by using frames[0].as_motion_frame()
and frames[1].as_motion_frame()
you are making potentially incorrect assumptions about where in the frames object the motion frame is.
Try adapting the following instead:
for frame in frames:
if frame.is_motion_frame():
accel = accel_data(frame.as_motion_frame().get_motion_data())
Thank you @lramati, that solved the problem. Much appreciated.
HI @Praneet1997,
If no additional support for this topic, will close it.
Before opening a new issue, we wanted to provide you with some useful suggestions (Click "Preview" above for a better view):
All users are welcomed to report bugs, ask questions, suggest or request enhancements and generally feel free to open new issue, even if they haven't followed any of the suggestions above :)
Issue Description
<Describe your issue / question / feature request / etc..>