microsoft / Azure-Kinect-Sensor-SDK

A cross platform (Linux and Windows) user mode SDK to read data from your Azure Kinect device.
https://Azure.com/Kinect
MIT License

How to use gravity direction to align with point cloud y axis? #1799

Closed TitansWhale closed 2 years ago

TitansWhale commented 2 years ago

I am trying to use the gravity direction to align the point cloud with the y axis, but the coordinate system of the IMU (accelerometer) seems to be slightly offset from the coordinate system of the depth camera. To test this idea I placed the camera on the ground facing a vertical wall and used Open3D to render the point cloud of the wall. There is always some inclination angle between the aligned wall and the coordinate axes (a sketch for measuring that tilt follows the code below).

[image: Open3D rendering of the wall point cloud, showing the residual tilt]

```python
import numpy as np
from typing import List
import pykinect_azure as pykinect  # https://github.com/ibaiGorordo/pyKinectAzure
import cv2
import open3d as o3d


def to_depth_space(acc_direct: List):
    # Map the accelerometer axes onto the depth camera axes (hardcoded swap)
    in_x, in_y, in_z = acc_direct[0], acc_direct[1], acc_direct[2]
    x = -in_y
    y = in_z
    z = -in_x
    return np.array([x, y, z])


def get_change_mat(acc: List):
    # Build an orthonormal basis whose y axis is the gravity direction
    y = to_depth_space(acc)
    z = np.array([0, 0, 1])
    x = np.cross(y, z)
    z = np.cross(x, y)

    x = x / np.linalg.norm(x)
    y = y / np.linalg.norm(y)
    z = z / np.linalg.norm(z)
    return np.array([x, y, z])


def get_change_xyz(xyzs_set, acc):
    # Re-center the cloud on one reference point, then rotate it into the
    # gravity-aligned frame
    mat = get_change_mat(acc)
    root_xyz = xyzs_set[250 * 250]
    ret = xyzs_set - root_xyz
    ret = np.matmul(ret, mat.T)
    return ret


def main():
    # Initialize the library; if the library is not found, add the library
    # path as an argument
    pykinect.initialize_libraries()

    # Modify camera configuration
    device_config = pykinect.default_configuration
    device_config.color_format = pykinect.K4A_IMAGE_FORMAT_COLOR_BGRA32
    device_config.color_resolution = pykinect.K4A_COLOR_RESOLUTION_720P
    device_config.depth_mode = pykinect.K4A_DEPTH_MODE_NFOV_2X2BINNED

    # Start device
    device = pykinect.start_device(config=device_config)

    cv2.namedWindow('Transformed color', cv2.WINDOW_NORMAL)

    # Discard the first few captures
    for i in range(10):
        device.update()

    while True:
        # Get capture and the latest accelerometer sample
        capture = device.update()
        acc = device.update_imu().acc

        # Get the 3D point cloud and rotate it into the gravity-aligned frame
        ret, points = capture.get_pointcloud()
        points = get_change_xyz(points, acc)

        # Get the color image in the depth camera axis
        ret, color_image = capture.get_transformed_color_image()
        if not ret:
            continue

        cv2.imshow('Transformed color', color_image)

        # Press q key to stop
        if cv2.waitKey(1) == ord('q'):
            break

        coor = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1000, origin=[0, 0, 0])
        pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points))
        o3d.visualization.draw_geometries([pcd, coor])


if __name__ == "__main__":
    main()
```
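To put a number on the misalignment instead of eyeballing the render: RANSAC-fit a plane to the already-rotated wall points and measure how far the fitted normal leans out of the horizontal plane (after a correct alignment, a vertical wall's normal should be perpendicular to the y axis). A minimal sketch; `wall_tilt_deg` is a helper name of my own, and it assumes `points` is the N×3 millimeter array produced by `get_change_xyz` above:

```python
import numpy as np
import open3d as o3d


def wall_tilt_deg(points: np.ndarray) -> float:
    """RANSAC-fit a plane to the gravity-aligned wall points and return the
    residual tilt: the angle between the fitted wall normal and the
    horizontal (x-z) plane, in degrees. Zero means perfectly vertical."""
    pcd = o3d.geometry.PointCloud(
        o3d.utility.Vector3dVector(np.asarray(points, dtype=np.float64))
    )
    plane, _ = pcd.segment_plane(
        distance_threshold=10.0,  # 10 mm; SDK point clouds are in millimeters
        ransac_n=3,
        num_iterations=1000,
    )
    n = np.asarray(plane[:3])
    n /= np.linalg.norm(n)
    # n[1] is the normal's component along the gravity (y) axis
    return float(np.degrees(np.arcsin(abs(n[1]))))
```

Logging this per frame makes it easy to tell whether the tilt is constant (pointing to a calibration/extrinsics problem) or fluctuating (pointing to IMU noise).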

TitansWhale commented 2 years ago

[image]

TitansWhale commented 2 years ago

Solved: I had not converted the gravity direction into the coordinate system of the depth camera using the camera extrinsic parameters. The floor detector sample shows how to do this: https://github.com/microsoft/Azure-Kinect-Samples/tree/master/body-tracking-samples/floor_detector_sample
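For anyone who lands here: the step the floor detector sample performs is rotating the raw accelerometer sample into the depth camera frame with the factory extrinsics, instead of a hardcoded axis swap like `to_depth_space` above. A rough sketch of that step; how to read the 3×3 rotation out of the calibration through pykinect_azure is an assumption on my part (in the C SDK it is `calibration.extrinsics[K4A_CALIBRATION_TYPE_ACCEL][K4A_CALIBRATION_TYPE_DEPTH].rotation`):

```python
import numpy as np


def gravity_in_depth_space(acc_sample, R_accel_to_depth):
    """Rotate a raw accelerometer sample into the depth camera frame.

    acc_sample:        length-3 accelerometer reading, in the IMU frame.
    R_accel_to_depth:  3x3 rotation from the accelerometer frame to the
                       depth camera frame, taken from the factory
                       calibration (see lead-in above).
    """
    R = np.asarray(R_accel_to_depth, dtype=np.float64).reshape(3, 3)
    g = R @ np.asarray(acc_sample, dtype=np.float64)
    # Note: at rest the accelerometer measures the reaction to gravity, so
    # depending on the convention you want, the vector may need negating.
    return g / np.linalg.norm(g)


def gravity_aligned_rotation(g_depth):
    """Build an orthonormal basis whose y axis is the measured gravity
    direction; same idea as get_change_mat above, minus the hardcoded swap."""
    y = g_depth / np.linalg.norm(g_depth)
    x = np.cross(y, np.array([0.0, 0.0, 1.0]))
    x /= np.linalg.norm(x)
    z = np.cross(x, y)
    z /= np.linalg.norm(z)
    return np.array([x, y, z])
```

Replacing `to_depth_space` with this extrinsics-based rotation should remove the constant offset between the IMU axes and the depth camera axes.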