powersimmani / example_3d_reconstruction_pykinect2

depth to 3D coordinate mapping example code in pykinect2
4 stars 0 forks source link

I input the color and depth images, but the code can't run successfully — can you help me? #1

Open lixryjz opened 5 months ago

lixryjz commented 5 months ago

I want to do a segmentation and then generate a point cloud. I read the color image and depth image saved by the program and feed them into the code below to generate the point cloud, but the program cannot run normally. The code is as follows:

import cv2 import numpy as np from ultralytics import YOLO

import open3d as o3d from pykinect2 import PyKinectV2 from pykinect2 import PyKinectRuntime

import code,os,struct import ctypes,_ctypes

# Initialize the Kinect sensor (depth stream only).
# NOTE(review): in the original paste this comment lost its leading '#'
# and was written in Chinese ("初始化Kinect传感器" = "initialize Kinect sensor").
kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)

def addchanel(color, alpha_value=0):
    """Append a constant alpha channel to a color image.

    Parameters
    ----------
    color : np.ndarray
        H x W x C image (typically BGR from OpenCV).
    alpha_value : int, optional
        Fill value for the new alpha channel (default 0).

    Returns
    -------
    np.ndarray
        H x W x (C+1) image with the alpha channel appended, same dtype
        as the input.
    """
    h, w = color.shape[:2]
    a_channel = np.full((h, w), alpha_value, dtype=color.dtype)
    # np.dstack concatenates along the channel axis, producing the same
    # result as cv2.merge((color, a_channel)) while keeping this pure
    # helper free of the cv2 dependency.
    return np.dstack((color, a_channel))

def rgb2float(r, g, b, a=0):
    """Pack 8-bit R, G, B values into a single float32 (PCL packed-RGB).

    The bytes 0x00RRGGBB are reinterpreted as an IEEE-754 float, which is
    the encoding the Point Cloud Library expects for packed colors.

    NOTE: the original code declared a spurious ``self`` parameter on this
    module-level function but called it with only (r, g, b), silently
    shifting every argument by one; ``self`` has been removed.
    """
    return struct.unpack('f', struct.pack('i', r << 16 | g << 8 | b))[0]


def point_cloud(depth_frame, color_frame, Float_RGB_type):
    """Map a Kinect v2 depth frame to camera-space 3D points with colors.

    Parameters
    ----------
    depth_frame : np.ndarray
        Depth image; its size must match what the Kinect offers as a depth
        frame (424 * 512 = 217088 pixels).
    color_frame : np.ndarray
        BGRA color image reshapeable to (1080, 1920, 4).
    Float_RGB_type : bool
        True packs each color into one float (PCL style, slow); False
        keeps per-channel BGR triples.

    Returns
    -------
    tuple[np.ndarray, np.ndarray]
        (position_data, color_data) for the points whose mapped color
        coordinates fall inside the color image.

    Raises
    ------
    RuntimeError
        If the Kinect mapper reports a non-zero error state.
    """
    color_frame = color_frame.flatten()
    L = depth_frame.size  # 424 * 512 = 217088 for a Kinect v2 depth frame

    # C-style array type holding one CameraSpacePoint per depth pixel.
    TYPE_CameraSpacePoint_Array = PyKinectV2._CameraSpacePoint * L
    csps = TYPE_CameraSpacePoint_Array()
    # C-style pointer to the flattened 1-D depth data (mapper needs uint16).
    ptr_depth = np.ctypeslib.as_ctypes(depth_frame.flatten().astype(np.uint16))

    # Calculate camera-space coordinates; 0 means success, anything else failed.
    error_state = kinect._mapper.MapDepthFrameToCameraSpace(L, ptr_depth, L, csps)
    if error_state:
        # The original code raised a bare string (a TypeError in Python 3)
        # and left "+ str(error_state)" as dead code on the next line.
        raise RuntimeError(
            "Could not map depth frame to camera space! " + str(error_state))

    # Convert the ctypes array back into an (L, 3) numpy array of XYZ floats.
    pf_csps = ctypes.cast(csps, ctypes.POINTER(ctypes.c_float))
    position_data = np.copy(np.ctypeslib.as_array(pf_csps, shape=(L, 3)))

    color_frame = np.reshape(color_frame, (1080, 1920, 4))

    # For every camera-space point, find where it lands in the color image.
    color_position = np.zeros(shape=(L, 2), dtype=np.float64)
    for index in range(L):
        a = kinect._mapper.MapCameraPointToColorSpace(csps[index])
        color_position[index][0] = a.y
        color_position[index][1] = a.x

    # Reserve valid points: drop -inf / out-of-range mapper results.
    # NOTE(review): 1799.4 looks like it should be 1919.4 for a 1920-wide
    # color image — confirm; the smaller bound is safe but crops columns.
    x_range = np.logical_and(color_position[:, 1] >= 0, color_position[:, 1] <= 1799.4)
    y_range = np.logical_and(color_position[:, 0] >= 0, color_position[:, 0] <= 1079.4)
    color_pos_range = np.logical_and(x_range, y_range)

    position_data = position_data[color_pos_range]
    color_mapper = np.rint(color_position[color_pos_range]).astype(int)

    # Float_RGB_type=True is for the Point Cloud Library (PCL).
    if Float_RGB_type:
        # Coloring with float casting is slow (> 1.7 s in the original).
        color_data = np.asarray(
            [rgb2float(color_frame[y][x][2], color_frame[y][x][1], color_frame[y][x][0])
             for y, x in color_mapper])
    else:
        color_data = np.asarray([color_frame[y][x][:3] for y, x in color_mapper])

    # Release the large ctypes buffers before returning.
    del pf_csps, csps, ptr_depth, TYPE_CameraSpacePoint_Array
    return position_data, color_data

if __name__ == '__main__':
    # NOTE(review): the original paste read "if name == 'main':" — the
    # markdown rendering stripped the double underscores, so the guard
    # raised NameError at runtime.
    pcd = o3d.io.read_point_cloud("test.ply")
    # IMREAD_UNCHANGED keeps the alpha channel if color.png has one.
    color = cv2.imread("color.png", cv2.IMREAD_UNCHANGED)
    depth = cv2.imread("depth.png", 0)  # 0 = grayscale
    position_data, color_data = point_cloud(depth, color, False)
    pcd.points = o3d.utility.Vector3dVector(position_data)
    # o3d.visualization.draw_geometries([pcd])
    # color_data is BGR; flip to RGB and scale to [0, 1] for Open3D.
    pcd.colors = o3d.utility.Vector3dVector(
        np.flip(color_data.astype(np.float64) / 255, axis=1))

the picture as follow: color color_1

lixryjz commented 5 months ago

When I debug, I find this problem: MapDepthFrameToCameraSpace returns all -inf values — why?