AIGODLIKE / ComfyUI-ToonCrafter

This project enables ToonCrafter to be used within ComfyUI.
Apache License 2.0
294 stars 9 forks source link

Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions #19

Open lord-lethris opened 1 month ago

lord-lethris commented 1 month ago

I am getting the following error when running ToonCrafter:

CUDA error: named symbol not found
Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.

File "D:\apps\SD-WebUI\ComfyUI\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "D:\apps\SD-WebUI\ComfyUI\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "D:\apps\SD-WebUI\ComfyUI\execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "D:\apps\SD-WebUI\ComfyUI\custom_nodes\ComfyUI-ToonCrafter\__init__.py", line 200, in get_image
batch_samples = batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_steps=steps, ddim_eta=eta, cfg_scale=cfg_scale, hs=hs, callback=cb)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter\ToonCrafter\scripts\evaluation\funcs.py", line 65, in batch_ddim_sampling
samples, _ = ddim_sampler.sample(S=ddim_steps,
File "D:\apps\Python\Python310\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\models\samplers\ddim.py", line 113, in sample
samples, intermediates = self.ddim_sampling(conditioning, size,
File "D:\apps\Python\Python310\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\models\samplers\ddim.py", line 182, in ddim_sampling
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
File "D:\apps\Python\Python310\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\models\samplers\ddim.py", line 221, in p_sample_ddim
e_t_cond = self.model.apply_model(x, t, c, **kwargs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\models\ddpm3d.py", line 755, in apply_model
x_recon = self.model(x_noisy, t, **cond, **kwargs)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\models\ddpm3d.py", line 1274, in forward
out = self.diffusion_model(xc, t, context=cc, **kwargs)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\modules\networks\openaimodel3d.py", line 584, in forward
h = module(h, emb, context=context, batch_size=b)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\modules\networks\openaimodel3d.py", line 41, in forward
x = layer(x, context)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\modules\attention.py", line 304, in forward
x = block(x, context=context, **kwargs)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\modules\attention.py", line 239, in forward
return checkpoint(self._forward, input_tuple, self.parameters(), self.checkpoint)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\common.py", line 94, in checkpoint
return func(*inputs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\modules\attention.py", line 243, in _forward
x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None, mask=mask) + x
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\apps\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\apps/SD-WebUI/ComfyUI/custom_nodes/ComfyUI-ToonCrafter/ToonCrafter\lvdm\modules\attention.py", line 175, in efficient_forward
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=None)
File "D:\apps\Python\Python310\lib\site-packages\xformers\ops\fmha\__init__.py", line 247, in memory_efficient_attention
return _memory_efficient_attention(
File "D:\apps\Python\Python310\lib\site-packages\xformers\ops\fmha\__init__.py", line 365, in _memory_efficient_attention
return _memory_efficient_attention_forward(
File "D:\apps\Python\Python310\lib\site-packages\xformers\ops\fmha\__init__.py", line 385, in _memory_efficient_attention_forward
out, *_ = op.apply(inp, needs_gradient=False)
File "D:\apps\Python\Python310\lib\site-packages\xformers\ops\fmha\cutlass.py", line 202, in apply
return cls.apply_bmhk(inp, needs_gradient=needs_gradient)
File "D:\apps\Python\Python310\lib\site-packages\xformers\ops\fmha\cutlass.py", line 266, in apply_bmhk
out, lse, rng_seed, rng_offset = cls.OPERATOR(
File "D:\apps\Python\Python310\lib\site-packages\torch\_ops.py", line 755, in __call__
return self._op(*args, **(kwargs or {}))