Closed longzy1 closed 1 month ago
Which torch version?
Which torch version?
pip show torch Name: torch Version: 2.4.1
是这个版本的torch请问是高了还是低了呢
Have you tried with the latest version? Some changes I did fixed this for someone else.
Have you tried with the latest version? Some changes I did fixed this for someone else.
感谢您的回复,升级了插件之后解决了报错问题,现在可以用576分辨率转完十几秒了 谢谢!
升级之后已解决
Traceback (most recent call last): File "/root/ComfyUI/execution.py", line 323, in execute output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "/root/ComfyUI/execution.py", line 198, in get_output_data return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "/root/ComfyUI/execution.py", line 169, in _map_node_over_list process_inputs(input_dict, i) File "/root/ComfyUI/execution.py", line 158, in process_inputs results.append(getattr(obj, func)(**inputs)) File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/nodes.py", line 156, in loadmodel samples = sample_video(model, device, inp, arg, verbose=True) File "/root/ComfyUI/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context return func(*args, **kwargs) File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/inference/sample_func.py", line 85, in sample_video x0, intermediates = sample( File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/inference/sample_func.py", line 265, in sample denoised = denoise( File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/inference/sample_func.py", line 413, in denoise denoised = model.denoiser(model_forward, x_in, s_in, cond_in, **additional_model_inputs) File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl return forward_call(*args, **kwargs) File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/sgm/modules/diffusionmodules/denoiser.py", line 37, in forward network(input * c_in, c_noise, cond, **additional_model_inputs) * c_out File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/inference/sample_func.py", line 409, in <lambda>
model_forward = lambda inp, c_noise, cond, add: hacked(model, step, inp, c_noise, cond, add)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/inference/model_hack.py", line 66, in forward
out = model.apply_model(x_in, c_noise, cond_in, additional_model_inputs)
File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/models/csvd.py", line 984, in apply_model
controls = self.control_model(
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/models/csvd.py", line 833, in forward
h = module(
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/sgm/modules/diffusionmodules/openaimodel.py", line 98, in forward
x = layer(
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/sgm/modules/video_attention.py", line 312, in forward
x = block(
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/sgm/modules/attention.py", line 968, in forward
return checkpoint(self._forward, x, context)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/_compile.py", line 31, in inner
return disable_fn(*args, **kwargs)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 600, in _fn
return fn(*args, **kwargs)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/utils/checkpoint.py", line 481, in checkpoint
return CheckpointFunction.apply(function, preserve, *args)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/autograd/function.py", line 574, in apply
return super().apply(*args, **kwargs) # type: ignore[misc]
File "/root/ComfyUI/lib/python3.10/site-packages/torch/utils/checkpoint.py", line 255, in forward
outputs = run_function(*args)
File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/sgm/modules/attention.py", line 977, in _forward
out1 = self.attn1(
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1616, in _call_impl
hook_result = hook(self, args, result)
File "/root/ComfyUI/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/inference/model_hack.py", line 142, in hook
out = self.reference_attn_forward(module, inputs, outputs)
File "/root/ComfyUI/custom_nodes/ComfyUI-LVCDWrapper/inference/model_hack.py", line 221, in reference_attn_forward
out = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias)
RuntimeError: No available kernel. Aborting execution.