======== Loaded model from checkpoint ========
Traceback (most recent call last):
File "/usr/local/lib/python3.10/runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/lib/python3.10/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/inferrer.py", line 265, in <module>
inferrer.infer(
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/inferrer.py", line 210, in infer
results = self.infer_single(
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/inferrer.py", line 155, in infer_single
planes = self.model.forward_planes(image, source_camera)
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/models/generator.py", line 91, in forward_planes
planes = self.transformer(image_feats, camera_embeddings)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/models/transformer.py", line 126, in forward
x = layer(x, image_feats, camera_embeddings)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/models/transformer.py", line 79, in forward
x = x + self.self_attn(before_sa, before_sa, before_sa)[0]
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/activation.py", line 1160, in forward
return torch._native_multi_head_attention(
TypeError: _native_multi_head_attention(): argument 'qkv_bias' (position 7) must be Tensor, not NoneType
$ python -m lrm.inferrer --model_name openlrm-base-obj-1.0 --source_image ./assets/sample_input/owl.png --export_video
======== Loaded model from checkpoint ======== Traceback (most recent call last): File "/usr/local/lib/python3.10/runpy.py", line 196, in _run_module_as_main return _run_code(code, main_globals, None, File "/usr/local/lib/python3.10/runpy.py", line 86, in _run_code exec(code, run_globals) File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/inferrer.py", line 265, in <module>
inferrer.infer(
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/inferrer.py", line 210, in infer
results = self.infer_single(
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/inferrer.py", line 155, in infer_single
planes = self.model.forward_planes(image, source_camera)
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/models/generator.py", line 91, in forward_planes
planes = self.transformer(image_feats, camera_embeddings)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/models/transformer.py", line 126, in forward
x = layer(x, image_feats, camera_embeddings)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/mnt/bn/dika/mlx/workspace/project/OpenLRM/lrm/models/transformer.py", line 79, in forward
x = x + self.self_attn(before_sa, before_sa, before_sa)[0]
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/activation.py", line 1160, in forward
return torch._native_multi_head_attention(
TypeError: _native_multi_head_attention(): argument 'qkv_bias' (position 7) must be Tensor, not NoneType