nutonomy / nuscenes-devkit

The devkit of the nuScenes dataset.
https://www.nuScenes.org

How is the map gt labeled? Mismatch between gt and lidar points? #909

Closed weimengchuan closed 1 year ago

weimengchuan commented 1 year ago

Q1: May I ask which coordinate system the map is labeled in?

Q2: I find a mismatch between the gt and the lidar points in the visualization after transforming both into the 3D global coordinate frame. Is this expected?

I transform the lidar points of all frames in scene-0001 from the 3D lidar coordinate frame to the 3D global coordinate frame using l2e and e2g. Then I lift the map gt in scene-0001 from the 2D global coordinate frame to 3D by adding z = 0. As shown in the figure, the red line (a divider) is not perfectly aligned with the actual divider points (rendered in white near the red line).

[Image: visgt_reflectance, zoomed-in view]
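The per-sweep transform described above boils down to the following minimal sketch, using the devkit's transform_matrix helper (sd here is a hypothetical name for one lidar sample_data record; this is an illustration, not the exact code used below):

import numpy as np
from pyquaternion import Quaternion
from nuscenes.utils.geometry_utils import transform_matrix

def lidar_to_global(nusc, sd):
    # Build the 4x4 lidar->ego (calibrated sensor) and ego->global
    # (ego pose) homogeneous matrices, then compose them.
    cs = nusc.get('calibrated_sensor', sd['calibrated_sensor_token'])
    pose = nusc.get('ego_pose', sd['ego_pose_token'])
    l2e = transform_matrix(cs['translation'], Quaternion(cs['rotation']))
    e2g = transform_matrix(pose['translation'], Quaternion(pose['rotation']))
    return e2g @ l2e  # global <- ego <- lidar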

Here is the complete visualization result:

[Image: visgt_reflectance, full view]

Thanks!

whyekit-motional commented 1 year ago

@weimengchuan the basemap is in the global coordinate frame
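For example, get_records_in_patch takes (x_min, y_min, x_max, y_max) directly in global/map coordinates, so no extra transform is needed on your side (a minimal sketch with a placeholder dataroot and patch):

from nuscenes.map_expansion.map_api import NuScenesMap

# Placeholder dataroot and patch; the patch coordinates are in meters
# in the global/map frame.
nusc_map = NuScenesMap(dataroot='/data/sets/nuscenes', map_name='singapore-onenorth')
records = nusc_map.get_records_in_patch((300, 1000, 500, 1200), ['road_divider', 'lane_divider'])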

As for the renderings you have shown above, pls include a (working) code snippet to reproduce them

weimengchuan commented 1 year ago

> @weimengchuan the basemap is in the global coordinate frame
>
> As for the renderings you have shown above, pls include a (working) code snippet to reproduce them

Hello, as indicated by the red arrow, there is a misalignment between the green gt divider and the white lidar divider points. What is the reason for this misalignment?

[Image: demo, with a red arrow marking the misalignment]

In this figure, the color of each point is set to its reflectance ([r, g, b] = [reflectance, reflectance, reflectance]), and the color of the gt lines is set to green. In addition, I only keep points with reflectance between 0.25 × 255 and 0.35 × 255 for a clearer visualization. You can reproduce the result by running the following code.

Thanks in advance!

import os
import open3d as o3d
import numpy as np
from pyquaternion import Quaternion
from nuscenes.nuscenes import NuScenes
from nuscenes.map_expansion.map_api import NuScenesMap

data_root_dir = '/media/samsung/samsung/datasets/Nuscenes/tar_files/trainval/'
version = 'v1.0-trainval'
map_name = 'singapore-onenorth'
layer_names = ['road_divider', 'lane_divider']

def get_all_lidar_data_in_one_scene(data_root_dir, nusc, scene_token):
    # Walk backwards through the scene's lidar sample_data chain (key frames
    # and intermediate sweeps), starting from the last sample.
    last_sample_token = nusc.get('scene', scene_token)['last_sample_token']
    sample = nusc.get('sample', last_sample_token)
    sample_data = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
    sample_data_lst = []
    while True:
        sample_data_lst.append(sample_data)
        if sample_data['prev'] == '':
            break
        sample_data = nusc.get('sample_data', sample_data['prev'])

    lidar_data_lst = []
    for sample_data in sample_data_lst:
        lidar_path = os.path.join(data_root_dir, sample_data['filename'])
        token = sample_data['token']
        sample_token = sample_data['sample_token']
        ts = sample_data['timestamp']
        calibrated_sensor_record = nusc.get("calibrated_sensor", sample_data["calibrated_sensor_token"])
        ego_pose_record = nusc.get("ego_pose", sample_data["ego_pose_token"])
        # l2e: lidar -> ego (sensor calibration); e2g: ego -> global (ego pose).
        l2e_t = np.array(calibrated_sensor_record["translation"])
        l2e_r = Quaternion(calibrated_sensor_record["rotation"]).rotation_matrix
        e2g_t = np.array(ego_pose_record["translation"])
        e2g_r = Quaternion(ego_pose_record["rotation"]).rotation_matrix
        lidar_data = {
            "lidar_path": lidar_path,
            "token": token,
            "sample_token": sample_token,
            "l2e_t": l2e_t,
            "l2e_r": l2e_r,
            "e2g_t": e2g_t,
            "e2g_r": e2g_r,
            "ts": ts,  # us
        }
        lidar_data_lst.append(lidar_data)
    return lidar_data_lst[::-1]  # restore chronological order

def get_transformation(lidar_data_lst):
    # Compose lidar->ego and ego->global into a single lidar->global
    # transform, so that p_global = R @ p_lidar + T.
    transformation_lst = []
    for lidar_data in lidar_data_lst:
        l2e_r = lidar_data['l2e_r']
        l2e_t = lidar_data['l2e_t']
        e2g_r = lidar_data['e2g_r']
        e2g_t = lidar_data['e2g_t']
        R = e2g_r @ l2e_r
        T = e2g_r @ l2e_t + e2g_t
        transformation_lst.append({'R': R, 'T': T})
    return transformation_lst

def stitch(lidar_data_lst, transformation_lst):
    # Load each sweep (x, y, z, intensity, ring index) and map xyz into the
    # global frame, then concatenate all sweeps.
    points_lst = []
    for lidar_data, transformation in zip(lidar_data_lst, transformation_lst):
        points = np.fromfile(lidar_data['lidar_path'], dtype=np.float32).reshape(-1, 5)
        points[:, :3] = points[:, :3] @ transformation['R'].T + transformation['T']
        points_lst.append(points)
    points = np.concatenate(points_lst)
    return points

def reserve_ground(points, dz=2.):
    # Keep only points within +/- dz/2 of z = 0, i.e. roughly at ground
    # level in the global frame.
    in_range_flags = (
        (points[:, 2] >= -dz / 2)
        & (points[:, 2] <= dz / 2)
    )
    return points[in_range_flags]

def filter_by_reflectance(points, r_limit=(0.25, 0.35)):
    # nuScenes stores intensity in [0, 255]; keep a narrow band (lane paint
    # is highly reflective) and renormalize it to [0, 1] for use as a color.
    lo, hi = (r * 255 for r in r_limit)
    reflectance = points[:, 3]
    points = points[(reflectance >= lo) & (reflectance <= hi)]
    points[:, 3] = (points[:, 3] - points[:, 3].min()) / (points[:, 3].max() - points[:, 3].min())
    return points

def get_points_in_one_scene(data_root_dir, i):
    nusc = NuScenes(version=version, dataroot=data_root_dir, verbose=True)
    scene_token = nusc.scene[i]['token']
    lidar_data_lst = get_all_lidar_data_in_one_scene(data_root_dir, nusc, scene_token)
    transformation_lst = get_transformation(lidar_data_lst)
    points = stitch(lidar_data_lst, transformation_lst)
    points = reserve_ground(points)
    points = filter_by_reflectance(points)
    return points

def get_gt(data_root_dir, map_name, patch, layer_names):
    # `patch` is the rectangular query region (x_min, y_min, x_max, y_max)
    # in global/map coordinates.
    nusc_map = NuScenesMap(dataroot=data_root_dir, map_name=map_name)
    records = nusc_map.get_records_in_patch(patch, layer_names)
    gts = []
    for layer in layer_names:
        for token in records[layer]:
            record = nusc_map.get(layer, token)
            line = nusc_map.extract_line(record['line_token'])
            gts.append(np.array(line.xy).T)
    return gts

def vis3d(points, gts):
    lines_pcd_lst = []
    for gt in gts:
        n_gt = len(gt)
        GT = np.hstack([gt, np.zeros((n_gt, 1))])  # z=0
        lines = np.stack([np.arange(n_gt - 1), np.arange(1, n_gt)]).T
        color = [[0.0, 1.0, 0.0]] * len(lines)  # Open3D expects RGB in [0, 1]
        lines_pcd = o3d.geometry.LineSet()
        lines_pcd.lines = o3d.utility.Vector2iVector(lines)
        lines_pcd.colors = o3d.utility.Vector3dVector(color)
        lines_pcd.points = o3d.utility.Vector3dVector(GT)
        lines_pcd_lst.append(lines_pcd)

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points[:, :3])
    # Gray color per point from the normalized reflectance.
    color = points[:, 3][None].repeat(3, axis=0).T
    pcd.colors = o3d.utility.Vector3dVector(color)

    v = o3d.visualization.Visualizer()
    v.create_window()
    render_option = v.get_render_option()
    render_option.background_color = np.array([0, 0, 0])
    v.add_geometry(pcd)
    for lines_pcd in lines_pcd_lst:
        v.add_geometry(lines_pcd)

    v.run()
    v.destroy_window()

if __name__ == '__main__':
    # Get stitched lidar points:
    # for all sweeps of the given scene, transform the points from the lidar
    # coordinate frame to the global coordinate frame, then concatenate them
    # into one array.
    points = get_points_in_one_scene(data_root_dir, 0)

    # Get the patch:
    # the (x_min, y_min, x_max, y_max) bounding box of the stitched points.
    patch = (points[:, 0].min(), points[:, 1].min(), points[:, 0].max(), points[:, 1].max())

    # Get gts within the patch.
    gts = get_gt(data_root_dir, map_name, patch, layer_names)

    # Visualize points & gts in the global coordinate frame.
    vis3d(points, gts)
whyekit-motional commented 1 year ago

@weimengchuan thanks for the very nice code snippet! I don't see anything wrong with the way you have produced the renderings

My guess is that the (slight) mismatch observed above between the map annotations and the accumulated lidar point cloud is due to slight errors in the lidar's localization over the course of the scene
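If you want to put a rough number on that, something like the following sketch could help (illustrative only, not from the devkit; it reuses the points and gts arrays from your snippet, and shapely is already a devkit dependency):

import numpy as np
from shapely.geometry import LineString, Point

def median_divider_offset(points, gts, max_dist=2.0):
    # For each high-reflectance lidar point, take the 2D distance to the
    # closest map divider polyline; the median over points that lie near
    # some divider approximates the map/lidar misalignment in meters.
    lines = [LineString(gt) for gt in gts]
    dists = []
    for x, y in points[:, :2]:
        d = min(line.distance(Point(x, y)) for line in lines)
        if d < max_dist:  # ignore points that belong to no divider
            dists.append(d)
    return float(np.median(dists))

# Subsample for speed; the loop is O(n_points * n_lines), e.g.:
# print(median_divider_offset(points[::100], gts))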

weimengchuan commented 1 year ago

Thanks a lot!