An error occurs while generating the atlas in stage 2 (texture generation). The relevant log and traceback are pasted below:
[INFO] Using xatlas to perform UV unwrapping, may take a while ...
--- Generated Atlas ---
Utilization: 71.477982%
Charts: 11139
Size: 4521x4485
Traceback (most recent call last):
File "/mnt/sdb/humannorm/launch.py", line 237, in <module>
main(args, extras)
File "/mnt/sdb/humannorm/launch.py", line 180, in main
trainer.fit(system, datamodule=dm, ckpt_path=cfg.resume)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 538, in fit
call._call_and_handle_interrupt(
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py", line 47, in _call_and_handle_interrupt
return trainer_fn(*args, **kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 574, in _fit_impl
self._run(model, ckpt_path=ckpt_path)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 981, in _run
results = self._run_stage()
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 1025, in _run_stage
self.fit_loop.run()
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/loops/fit_loop.py", line 205, in run
self.advance()
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/loops/fit_loop.py", line 363, in advance
self.epoch_loop.run(self._data_fetcher)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/loops/training_epoch_loop.py", line 140, in run
self.advance(data_fetcher)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/loops/training_epoch_loop.py", line 250, in advance
batch_output = self.automatic_optimization.run(trainer.optimizers[0], batch_idx, kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py", line 190, in run
self._optimizer_step(batch_idx, closure)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py", line 268, in _optimizer_step
call._call_lightning_module_hook(
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py", line 167, in _call_lightning_module_hook
output = fn(*args, **kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/core/module.py", line 1306, in optimizer_step
optimizer.step(closure=optimizer_closure)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/core/optimizer.py", line 153, in step
step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/strategies/strategy.py", line 238, in optimizer_step
return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/plugins/precision/amp.py", line 78, in optimizer_step
closure_result = closure()
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py", line 144, in __call__
self._result = self.closure(*args, **kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py", line 129, in closure
step_output = self._step_fn()
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py", line 317, in _training_step
training_step_output = call._call_strategy_hook(trainer, "training_step", *kwargs.values())
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py", line 319, in _call_strategy_hook
output = fn(*args, **kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/pytorch_lightning/strategies/strategy.py", line 390, in training_step
return self.lightning_module.training_step(*args, **kwargs)
File "/mnt/sdb/humannorm/threestudio/systems/humannorm.py", line 71, in training_step
out = self(batch)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/mnt/sdb/humannorm/threestudio/systems/humannorm.py", line 39, in forward
render_out = self.renderer(**batch, render_rgb=self.cfg.texture)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/mnt/sdb/humannorm/threestudio/models/renderers/nvdiff_rasterizer.py", line 146, in forward
rgb_fg = self.material(
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/mnt/sdb/conda/envs/hunorm/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/mnt/sdb/humannorm/threestudio/models/materials/pbr_material.py", line 73, in forward
perturb_normal = (material[..., 5:8] * 2 - 1) + torch.tensor(
RuntimeError: The size of tensor a (0) must match the size of tensor b (3) at non-singleton dimension 1
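The mismatch comes from an empty channel slice. `material[..., 5:8]` selects the bump-normal channels, and the error message says that slice has size 0, which means the material feature tensor reaching `pbr_material.py` has at most 5 channels (presumably albedo 3 + metallic 1 + roughness 1). A zero-width slice cannot be broadcast against the 3-vector `[0, 0, 1]`. A minimal sketch that reproduces the exact error, assuming a 5-channel material tensor:

```python
import torch

# Assume the geometry network only emits 5 feature channels:
# albedo (3) + metallic (1) + roughness (1), with no bump channels.
material = torch.rand(1024, 5)

# Slicing past the last channel silently yields an empty tensor:
empty_slice = material[..., 5:8]
print(empty_slice.shape)  # torch.Size([1024, 0])

# Broadcasting shape (1024, 0) against (3,) then fails with:
# "RuntimeError: The size of tensor a (0) must match the size of
#  tensor b (3) at non-singleton dimension 1"
perturb_normal = (empty_slice * 2 - 1) + torch.tensor([0.0, 0.0, 1.0])
```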
Please explain why the material's bump-normal slice cannot be added to the normal offset during texture generation, and how to fix the error above. I suspect the fix involves editing humannorm-texture-coarse.yaml, but I am not sure which fields to change (a guess follows below).
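For what it's worth, upstream threestudio's `PBRMaterial` guards this addition behind a `use_bump` config flag and expects 8 feature channels when it is enabled. Assuming HumanNorm follows the same layout (an assumption; verify against the local `pbr_material.py` and yaml), the fix would be either raising the geometry's feature width to 8 or disabling bump shading. Below is a sketch of the expected channel layout with a defensive check; the field names `n_feature_dims` and `use_bump` come from threestudio and may differ in HumanNorm's config:

```python
import torch
import torch.nn.functional as F

def pbr_material_sketch(material: torch.Tensor, use_bump: bool = True):
    # Channel layout assumed from upstream threestudio's PBRMaterial:
    # albedo (0:3), metallic (3), roughness (4), bump normal (5:8).
    albedo = material[..., 0:3]
    metallic = material[..., 3:4]
    roughness = material[..., 4:5]
    perturb_normal = None
    if use_bump:
        # This is the line the traceback points at; it only works when the
        # geometry network emits at least 8 feature channels.
        if material.shape[-1] < 8:
            raise ValueError(
                f"bump shading needs 8 feature channels, got {material.shape[-1]}; "
                "either raise the geometry's feature width (e.g. n_feature_dims: 8) "
                "or disable bump shading (e.g. use_bump: false) in the yaml"
            )
        perturb_normal = (material[..., 5:8] * 2 - 1) + torch.tensor(
            [0.0, 0.0, 1.0], dtype=material.dtype, device=material.device
        )
        perturb_normal = F.normalize(perturb_normal, dim=-1)
    return albedo, metallic, roughness, perturb_normal
```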