I made my own point cloud dataset (using a RoboSense RSLiDAR-16), which contains ".bin" and ".txt" files. When I started to train with PointPillars, the bug occurred as follows.
`Traceback (most recent call last):
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 122, in build_from_cfg
obj = obj_cls(**args) # type: ignore
File "/home/chenyang/mmdetection3d/mmdet3d/datasets/det3d_dataset.py", line 129, in init
super().init(
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/dataset/base_dataset.py", line 250, in init
self.full_init()
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/dataset/base_dataset.py", line 310, in full_init
self.data_bytes, self.data_address = self._serialize_data()
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/dataset/base_dataset.py", line 772, in _serialize_data
data_bytes = np.concatenate(data_list)
File "<__array_function__ internals>", line 200, in concatenate
ValueError: need at least one array to concatenate
Traceback (most recent call last):
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 122, in build_from_cfg
obj = obj_cls(*args) # type: ignore
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/dataset/dataset_wrapper.py", line 211, in init
self.dataset = DATASETS.build(dataset)
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/registry.py", line 548, in build
return self.build_func(cfg, args, **kwargs, registry=self)
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 144, in build_from_cfg
raise type(e)(
ValueError: class MyDataset in mmdet3d/datasets/my_dataset.py: need at least one array to concatenate
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "tools/train.py", line 135, in
main()
File "tools/train.py", line 131, in main
runner.train()
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1687, in train
self._train_loop = self.build_train_loop(
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1486, in build_train_loop
loop = EpochBasedTrainLoop(
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/runner/loops.py", line 44, in init
super().init(runner, dataloader)
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/runner/base_loop.py", line 26, in init
self.dataloader = runner.build_dataloader(
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1346, in build_dataloader
dataset = DATASETS.build(dataset_cfg)
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/registry.py", line 548, in build
return self.build_func(cfg, *args, **kwargs, registry=self)
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 144, in build_from_cfg
raise type(e)(
ValueError: class RepeatDataset in mmengine/dataset/dataset_wrapper.py: class MyDataset in mmdet3d/datasets/my_dataset.py: need at least one array to concatenate
`
custom.py:
`# dataset settings
dataset_type = 'MyDataset'
data_root = 'data/my_data/'
class_names = ['pedestrian', 'bicycle', 'car'] # replace with your dataset class
point_cloud_range = [-40, -40, -2, 70.4, 40, 2] # adjust according to your dataset
input_modality = dict(use_lidar=True, use_camera=False)
metainfo = dict(classes=class_names)
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4, # replace with your point cloud data dimension
use_dim=4), # replace with the actual dimension used in training and inference
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
with_label_3d=True),
dict(
type='ObjectNoise',
num_try=100,
translation_std=[1.0, 1.0, 0.5],
global_rot_range=[0.0, 0.0],
rot_range=[-0.78539816, 0.78539816]),
dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.78539816, 0.78539816],
scale_ratio_range=[0.95, 1.05]),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'),
dict(
type='Pack3DDetInputs',
keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']
)
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4, # replace with your point cloud data dimension
use_dim=4),
dict(type='Pack3DDetInputs', keys=['points'])
]
eval_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
dict(type='Pack3DDetInputs', keys=['points']),
]
train_dataloader = dict(
batch_size=6,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='my_data_infos_train.pkl', # specify your training pkl info
data_prefix=dict(pts='points'),
pipeline=train_pipeline,
modality=input_modality,
test_mode=False,
metainfo=metainfo,
box_type_3d='LiDAR')))
val_dataloader = dict(
batch_size=1,
num_workers=1,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(pts='points'),
ann_file='my_data_infos_val.pkl', # specify your validation pkl info
pipeline=test_pipeline,
modality=input_modality,
test_mode=True,
metainfo=metainfo,
box_type_3d='LiDAR'))
val_evaluator = dict(
type='KittiMetric',
ann_file=data_root + 'my_data_infos_val.pkl', # specify your validation pkl info
metric='bbox')
test_dataloader = dict(
batch_size=1,
num_workers=1,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(pts='points'),
ann_file='my_data_infos_val.pkl', # specify your validation pkl info
pipeline=test_pipeline,
modality=input_modality,
test_mode=True,
metainfo=metainfo,
box_type_3d='LiDAR'))
test_evaluator = dict(
type='KittiMetric',
ann_file=data_root + 'my_data_infos_val.pkl', # specify your validation pkl info
metric='bbox')`
pointpillars_hv_secfpn_custom.py:
`base = [
'../base/models/pointpillars_hv_secfpn_kitti.py',
'../base/datasets/custom.py',
'../base/schedules/cyclic-40e.py', '../base/default_runtime.py'
]
voxel_size = [0.16, 0.16, 4] # adjust according to your dataset
point_cloud_range = [-50, -40, -2, 70, 40, 2] # adjust according to your dataset
model = dict(
type='VoxelNet',
data_preprocessor=dict(
type='Det3DDataPreprocessor',
voxel=True,
voxel_layer=dict(
max_num_points=32,
point_cloud_range=point_cloud_range,
voxel_size=voxel_size,
max_voxels=(16000, 40000))),
voxel_encoder=dict(
type='PillarFeatureNet',
in_channels=4,
feat_channels=[64],
with_distance=False,
voxel_size=voxel_size,
point_cloud_range=point_cloud_range),
the `output_shape` should be adjusted according to `point_cloud_range`
Checklist
Describe the bug
I made my own point cloud dataset (using a RoboSense RSLiDAR-16), which contains ".bin" and ".txt" files. When I started to train with PointPillars, the bug occurred as follows.
`Traceback (most recent call last): File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 122, in build_from_cfg obj = obj_cls(**args) # type: ignore File "/home/chenyang/mmdetection3d/mmdet3d/datasets/det3d_dataset.py", line 129, in init super().init( File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/dataset/base_dataset.py", line 250, in init self.full_init() File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/dataset/base_dataset.py", line 310, in full_init self.data_bytes, self.data_address = self._serialize_data() File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/dataset/base_dataset.py", line 772, in _serialize_data data_bytes = np.concatenate(data_list) File "<__array_function__ internals>", line 200, in concatenate ValueError: need at least one array to concatenate
Traceback (most recent call last): File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 122, in build_from_cfg obj = obj_cls(*args) # type: ignore File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/dataset/dataset_wrapper.py", line 211, in init self.dataset = DATASETS.build(dataset) File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/registry.py", line 548, in build return self.build_func(cfg, args, **kwargs, registry=self) File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 144, in build_from_cfg raise type(e)( ValueError: class
MyDataset
in mmdet3d/datasets/my_dataset.py: need at least one array to concatenate

During handling of the above exception, another exception occurred:
Traceback (most recent call last): File "tools/train.py", line 135, in
main()
File "tools/train.py", line 131, in main
runner.train()
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1687, in train
self._train_loop = self.build_train_loop(
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1486, in build_train_loop
loop = EpochBasedTrainLoop(
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/runner/loops.py", line 44, in init
super().init(runner, dataloader)
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/runner/base_loop.py", line 26, in init
self.dataloader = runner.build_dataloader(
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1346, in build_dataloader
dataset = DATASETS.build(dataset_cfg)
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/registry.py", line 548, in build
return self.build_func(cfg, *args, **kwargs, registry=self)
File "/home/chenyang/anaconda3/envs/mmlab3d/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 144, in build_from_cfg
raise type(e)(
ValueError: class
RepeatDataset
in mmengine/dataset/dataset_wrapper.py: class MyDataset
in mmdet3d/datasets/my_dataset.py: need at least one array to concatenate `custom.py:
`# dataset settings dataset_type = 'MyDataset' data_root = 'data/my_data/' class_names = ['pedestrian', 'bicycle', 'car'] # replace with your dataset class point_cloud_range = [-40, -40, -2, 70.4, 40, 2] # adjust according to your dataset input_modality = dict(use_lidar=True, use_camera=False) metainfo = dict(classes=class_names)
train_pipeline = [ dict( type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, # replace with your point cloud data dimension use_dim=4), # replace with the actual dimension used in training and inference dict( type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), dict( type='ObjectNoise', num_try=100, translation_std=[1.0, 1.0, 0.5], global_rot_range=[0.0, 0.0], rot_range=[-0.78539816, 0.78539816]), dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), dict( type='GlobalRotScaleTrans', rot_range=[-0.78539816, 0.78539816], scale_ratio_range=[0.95, 1.05]), dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), dict(type='PointShuffle'), dict( type='Pack3DDetInputs',
keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']
] test_pipeline = [ dict( type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, # replace with your point cloud data dimension use_dim=4), dict(type='Pack3DDetInputs', keys=['points']) ] eval_pipeline = [ dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='Pack3DDetInputs', keys=['points']), ] train_dataloader = dict( batch_size=6, num_workers=4, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=True), dataset=dict( type='RepeatDataset', times=2, dataset=dict( type=dataset_type, data_root=data_root, ann_file='my_data_infos_train.pkl', # specify your training pkl info data_prefix=dict(pts='points'), pipeline=train_pipeline, modality=input_modality, test_mode=False, metainfo=metainfo, box_type_3d='LiDAR'))) val_dataloader = dict( batch_size=1, num_workers=1, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, data_root=data_root, data_prefix=dict(pts='points'), ann_file='my_data_infos_val.pkl', # specify your validation pkl info pipeline=test_pipeline, modality=input_modality, test_mode=True, metainfo=metainfo, box_type_3d='LiDAR')) val_evaluator = dict( type='KittiMetric', ann_file=data_root + 'my_data_infos_val.pkl', # specify your validation pkl info metric='bbox') test_dataloader = dict( batch_size=1, num_workers=1, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, data_root=data_root, data_prefix=dict(pts='points'), ann_file='my_data_infos_val.pkl', # specify your validation pkl info pipeline=test_pipeline, modality=input_modality, test_mode=True, metainfo=metainfo, box_type_3d='LiDAR')) test_evaluator = dict( type='KittiMetric', ann_file=data_root + 'my_data_infos_val.pkl', # specify your validation pkl info metric='bbox')`
pointpillars_hv_secfpn_custom.py:
`base = [ '../base/models/pointpillars_hv_secfpn_kitti.py', '../base/datasets/custom.py', '../base/schedules/cyclic-40e.py', '../base/default_runtime.py' ] voxel_size = [0.16, 0.16, 4] # adjust according to your dataset point_cloud_range = [-50, -40, -2, 70, 40, 2] # adjust according to your dataset model = dict( type='VoxelNet', data_preprocessor=dict( type='Det3DDataPreprocessor', voxel=True, voxel_layer=dict( max_num_points=32, point_cloud_range=point_cloud_range, voxel_size=voxel_size, max_voxels=(16000, 40000))), voxel_encoder=dict( type='PillarFeatureNet', in_channels=4, feat_channels=[64], with_distance=False, voxel_size=voxel_size, point_cloud_range=point_cloud_range),
the `output_shape` should be adjusted according to `point_cloud_range`
Reproduction I used command as follow.
@lindahua