I am getting this error when I try to use AnimateDiff:
Error completing request
Arguments: ('task(nz4au6pbx6i78te)', '((best quality)), ((masterpiece)), ((realistic)), long highlighted hair, cybergirl, futuristic silver armor suit, confident stance, high-resolution, living room, smiling, head tilted', 'worst quality', [], 20, 'DPM++ 2M Karras', 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], <gradio.routes.Request object at 0x0000014AC7070B50>, 0, False, '', 0.8, -1, False, -1, 0, 0, 0, <scripts.animatediff_ui.AnimateDiffProcess object at 0x0000014AC70A94B0>, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x0000014AC853C2B0>, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x0000014AC859CFD0>, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x0000014AC859C7F0>, False, False, 'positive', 'comma', 0, False, False, '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50) {}
Traceback (most recent call last):
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\txt2img.py", line 55, in txt2img
processed = processing.process_images(p)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\processing.py", line 732, in process_images
res = process_images_inner(p)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\extensions\sd-webui-animatediff\scripts\animatediff_cn.py", line 118, in hacked_processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\processing.py", line 867, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\processing.py", line 1140, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\sd_samplers_common.py", line 261, in launch_sampling
return func()
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, extra_params_kwargs))
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\extensions\sd-webui-animatediff\scripts\animatediff_infv2v.py", line 250, in mm_cfg_forward
x_out = mm_sd_forward(self, x_in, sigma_in, cond_in, image_cond_in, make_condition_dict) # hook
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\extensions\sd-webui-animatediff\scripts\animatediff_infv2v.py", line 160, in mm_sd_forward
out = self.inner_model(
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\sd_hijack_utils.py", line 17, in <lambda>
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\sd_hijack_utils.py", line 28, in __call__
return self.__orig_func(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward
out = self.diffusion_model(x, t, context=cc)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\modules\sd_unet.py", line 91, in UNetModel_forward
return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 797, in forward
h = module(h, emb, context)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 86, in forward
x = layer(x)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\extensions\sd-webui-animatediff\motion_module.py", line 107, in forward
return self.temporal_transformer(input_tensor, encoder_hidden_states, attention_mask)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\extensions\sd-webui-animatediff\motion_module.py", line 173, in forward
hidden_states = block(hidden_states, encoder_hidden_states=encoder_hidden_states, video_length=video_length)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\extensions\sd-webui-animatediff\motion_module.py", line 237, in forward
hidden_states = attention_block(
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\extensions\sd-webui-animatediff\motion_module.py", line 596, in forward
hidden_states = self._memory_efficient_attention(query, key, value, attention_mask, optimizer_name)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\extensions\sd-webui-animatediff\motion_module.py", line 494, in _memory_efficient_attention
hidden_states = xformers.ops.memory_efficient_attention(
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\xformers\ops\fmha__init.py", line 192, in memory_efficient_attention
return _memory_efficient_attention(
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\xformers\ops\fmha__init__.py", line 290, in _memory_efficient_attention
return _memory_efficient_attention_forward(
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\xformers\ops\fmha__init__.py", line 310, in _memory_efficient_attentionforward
out, * = op.apply(inp, needs_gradient=False)
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\xformers\ops\fmha\cutlass.py", line 175, in apply
out, lse, rng_seed, rng_offset = cls.OPERATOR(
File "C:\Automatic1111builds\SD16 AnimateDiff\stable-diffusion-webui\venv\lib\site-packages\torch_ops.py", line 502, in call__
return self._op(*args, **kwargs or {})
RuntimeError: CUDA error: invalid configuration argument
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
Compile with TORCH_USE_CUDA_DSA to enable device-side assertions.
This is what the Automatic1111 webui itself displays:
RuntimeError: CUDA error: invalid configuration argument CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect. For debugging consider passing CUDA_LAUNCH_BLOCKING=1. Compile with TORCH_USE_CUDA_DSA to enable device-side assertions.
I have a fresh install of Automatic1111 with only the ControlNet and AnimateDiff extensions installed. I am using an RTX 4090 card.
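In case it helps narrow this down, here is a minimal standalone check I could run with the venv's Python. It is only a sketch: it sets CUDA_LAUNCH_BLOCKING=1 as the error message suggests and calls xformers.ops.memory_efficient_attention directly; the tensor shapes are my rough guess at what the AnimateDiff temporal attention passes (latent pixels as the batch dimension, 16 frames as the sequence length), not the exact values from the failing run above.

```python
# Hypothetical repro sketch, not the exact call AnimateDiff makes.
import os
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"  # synchronous CUDA errors, as the error message suggests

import torch
import xformers
import xformers.ops

# Guessed shapes (batch, seq_len, heads, head_dim): for the temporal attention the batch
# is roughly (cond + uncond) * latent_h * latent_w and seq_len is the frame count.
batch, frames, heads, head_dim = 2 * 64 * 64, 16, 8, 40

q = torch.randn(batch, frames, heads, head_dim, device="cuda", dtype=torch.float16)
k = torch.randn_like(q)
v = torch.randn_like(q)

out = xformers.ops.memory_efficient_attention(q, k, v)
torch.cuda.synchronize()
print("ok:", out.shape, "| torch", torch.__version__, "| xformers", xformers.__version__)
```

If that call fails on its own with the same "invalid configuration argument" error, I assume it would point at the torch/xformers combination in the venv rather than at the extension itself, but I am not sure.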
Please help me figure out how to resolve this.
Thank you.