continue-revolution / sd-webui-animatediff

AnimateDiff for AUTOMATIC1111 Stable Diffusion WebUI

[Bug]: RuntimeError: CUDA error: invalid configuration argument. Note: the Python runtime raised an exception; check the troubleshooting page. CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect. For debugging consider passing CUDA_LAUNCH_BLOCKING=1. Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions. #328

Closed · seanching closed this issue 12 months ago

seanching commented 12 months ago

Is there an existing issue for this?

Have you read FAQ on README?

What happened?

Generating in txt2img with AnimateDiff enabled fails with `RuntimeError: CUDA error: invalid configuration argument`. The error is raised from `xformers.ops.memory_efficient_attention`, which the AnimateDiff motion module calls in `motion_module.py` (`_memory_efficient_attention`). The full arguments dump and traceback are in the console logs below.
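To help isolate the failure, here is a minimal sketch (not part of the extension; the tensor sizes are illustrative assumptions) that calls `xformers.ops.memory_efficient_attention` directly with a temporal-attention-like layout, the same call the traceback ends in. If this also raises "invalid configuration argument", the problem is likely in the torch/xformers/CUDA setup rather than in AnimateDiff itself.

```python
# Minimal repro sketch. Assumption: run with the webui's own Python environment
# so the same torch and xformers builds are used. The shape follows the
# (batch * spatial tokens, frames, channels) layout that temporal attention
# flattens to, but the exact numbers here are illustrative.
import torch
import xformers.ops

device = torch.device("cuda")
dtype = torch.float16

b, frames, channels = 4096, 16, 64  # illustrative sizes, not the real ones
q = torch.randn(b, frames, channels, device=device, dtype=dtype)
k = torch.randn(b, frames, channels, device=device, dtype=dtype)
v = torch.randn(b, frames, channels, device=device, dtype=dtype)

# Same call the traceback ends in (motion_module.py -> memory_efficient_attention)
out = xformers.ops.memory_efficient_attention(q, k, v)
print("xformers memory_efficient_attention OK:", tuple(out.shape), out.dtype)
```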

Steps to reproduce the problem

  1. Open the txt2img tab
  2. Enable AnimateDiff
  3. Click Generate

What should have happened?

The images should be generated correctly.

Commit where the problem happens

run animatediff,

What browsers do you use to access the UI?

Microsoft Edge

Command Line Arguments

No

Console logs

*** Error completing request
*** Arguments: ('task(ofzx89k6t0cqwjj)', 'tree, ', '', [], 20, 'DPM++ 2M Karras', 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], <gradio.routes.Request object at 0x0000022F093AEC50>, 0, False, '', 0.8, -1, False, -1, 0, 0, 0, False, 'MultiDiffusion', False, True, 1024, 1024, 96, 96, 48, 4, 'None', 2, False, 10, 1, 1, 64, False, False, False, False, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 3072, 256, True, True, True, False, <scripts.animatediff_ui.AnimateDiffProcess object at 0x0000022F093AC4C0>, UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), 'NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\nALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\nINS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\nIND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0\nINALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0\nMIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0\nOUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0\nOUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1\nOUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1\nALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5', True, 0, 'values', '0,0.25,0.5,0.75,1', 'Block ID', 'IN05-OUT05', 'none', '', '0.5,1', 'BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11', 1.0, 'black', '20', False, 'ATTNDEEPON:IN05-OUT05:attn:1\n\nATTNDEEPOFF:IN05-OUT05:attn:0\n\nPROJDEEPOFF:IN05-OUT05:proj:0\n\nXYZ:::1', False, False, False, False, 0, None, [], 0, False, [], [], False, 0, 1, False, False, 0, None, [], -2, False, [], False, 0, None, None, False, False, 'positive', 'comma', 0, False, False, '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50, 'NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\nALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\nINS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\nIND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0\nINALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0\nMIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0\nOUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0\nOUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1\nOUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1\nALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5', True, 0, 'values', '0,0.25,0.5,0.75,1', 'Block ID', 'IN05-OUT05', 'none', '', '0.5,1', 
'BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11', 1.0, 'black', '20', False, 'ATTNDEEPON:IN05-OUT05:attn:1\n\nATTNDEEPOFF:IN05-OUT05:attn:0\n\nPROJDEEPOFF:IN05-OUT05:proj:0\n\nXYZ:::1', False, False) {}
    Traceback (most recent call last):
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\call_queue.py", line 57, in f
        res = list(func(*args, **kwargs))
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\call_queue.py", line 36, in f
        res = func(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\txt2img.py", line 55, in txt2img
        processed = processing.process_images(p)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\processing.py", line 732, in process_images
        res = process_images_inner(p)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\extensions\sd-webui-animatediff\scripts\animatediff_cn.py", line 118, in hacked_processing_process_images_hijack
        return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\processing.py", line 867, in process_images_inner
        samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\processing.py", line 1140, in sample
        samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\sd_samplers_kdiffusion.py", line 235, in sample
        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\sd_samplers_common.py", line 261, in launch_sampling
        return func()
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\sd_samplers_kdiffusion.py", line 235, in <lambda>
        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
        return func(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
        denoised = model(x, sigmas[i] * s_in, **extra_args)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\extensions\sd-webui-animatediff\scripts\animatediff_infv2v.py", line 250, in mm_cfg_forward
        x_out = mm_sd_forward(self, x_in, sigma_in, cond_in, image_cond_in, make_condition_dict) # hook
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\extensions\sd-webui-animatediff\scripts\animatediff_infv2v.py", line 160, in mm_sd_forward
        out = self.inner_model(
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
        eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
        return self.inner_model.apply_model(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\sd_hijack_utils.py", line 17, in <lambda>
        setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\sd_hijack_utils.py", line 28, in __call__
        return self.__orig_func(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
        x_recon = self.model(x_noisy, t, **cond)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward
        out = self.diffusion_model(x, t, context=cc)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\modules\sd_unet.py", line 91, in UNetModel_forward
        return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 797, in forward
        h = module(h, emb, context)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 86, in forward
        x = layer(x)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\extensions\sd-webui-animatediff\motion_module.py", line 112, in forward
        return self.temporal_transformer(input_tensor, encoder_hidden_states, attention_mask)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\extensions\sd-webui-animatediff\motion_module.py", line 178, in forward
        hidden_states = block(hidden_states, encoder_hidden_states=encoder_hidden_states, video_length=video_length)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\extensions\sd-webui-animatediff\motion_module.py", line 242, in forward
        hidden_states = attention_block(
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\extensions\sd-webui-animatediff\motion_module.py", line 601, in forward
        hidden_states = self._memory_efficient_attention(query, key, value, attention_mask, optimizer_name)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\extensions\sd-webui-animatediff\motion_module.py", line 499, in _memory_efficient_attention
        hidden_states = xformers.ops.memory_efficient_attention(
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\xformers\ops\fmha\__init__.py", line 193, in memory_efficient_attention
        return _memory_efficient_attention(
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\xformers\ops\fmha\__init__.py", line 291, in _memory_efficient_attention
        return _memory_efficient_attention_forward(
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\xformers\ops\fmha\__init__.py", line 311, in _memory_efficient_attention_forward
        out, *_ = op.apply(inp, needs_gradient=False)
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\xformers\ops\fmha\cutlass.py", line 186, in apply
        out, lse, rng_seed, rng_offset = cls.OPERATOR(
      File "G:\sd-webui-aki\sd-webui-aki-v4.4\python\lib\site-packages\torch\_ops.py", line 502, in __call__
        return self._op(*args, **kwargs or {})
    RuntimeError: CUDA error: invalid configuration argument
Note: the Python runtime raised an exception. Check the troubleshooting page.
    CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
    For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
    Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
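As the message itself says, CUDA kernel errors can be reported at a later API call, so the line the trace blames may not be the one that actually failed. Setting `CUDA_LAUNCH_BLOCKING=1` before the CUDA context is created makes the error surface at the real failure point. A minimal sketch of the standalone form (in the webui the variable would have to be set in the launcher's environment before it starts):

```python
# Sketch: CUDA_LAUNCH_BLOCKING must be set before torch initializes its CUDA
# context, otherwise it has no effect. With it set, a failing kernel launch
# raises at the call that actually failed instead of at a later API call.
import os
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

import torch

x = torch.randn(8, device="cuda")
print(x.sum().item())  # any CUDA op now reports launch errors synchronously
```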

Additional information

No response

wandrzej commented 12 months ago

Same issue here. I started seeing this after some updates.

continue-revolution commented 12 months ago

Read one of the pinned issues.