Closed Remy33f closed 8 months ago
I had the same issue; adding --always-gpu seems to fix it for me.
please do not use --always-gpu I attempted a fix at https://github.com/lllyasviel/stable-diffusion-webui-forge/commit/9c31b0ddcba42afcbda310b46750decd33b6ea2e please try again and see if it is working
I can confirm it's working now. Thanks!
@adnanT11 do not close too soon. I added another better fix and please test again
thanks in advance!
I tested again after updating, and it's giving the same error as before: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
thanks. I updated and used old fix. Please update and close this issue
@adnanT11 do not close too soon. I added another better fix and please test again
thanks in advance!
I used update.bat and restarted, but the issue remains. Do I need to download the code one more time? Thanks
Please update again, because I used the old fix in the last commit.
Please close this issue if fixed
Please close this issue if fixed
Thanks! It works now.
It's fixed for me, but when enabling animatediff (https://github.com/continue-revolution/sd-forge-animatediff), the problem still persists:
Traceback (most recent call last):
File "C:\AI\A1111\webui\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "C:\AI\A1111\webui\modules\call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "C:\AI\A1111\webui\modules\txt2img.py", line 110, in txt2img
processed = processing.process_images(p)
File "C:\AI\A1111\webui\modules\processing.py", line 749, in process_images
res = process_images_inner(p)
File "C:\AI\A1111\webui\modules\processing.py", line 920, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "C:\AI\A1111\webui\modules\processing.py", line 1275, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "C:\AI\A1111\webui\modules\sd_samplers_kdiffusion.py", line 251, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "C:\AI\A1111\webui\modules\sd_samplers_common.py", line 260, in launch_sampling
return func()
File "C:\AI\A1111\webui\modules\sd_samplers_kdiffusion.py", line 251, in <lambda>
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "C:\AI\A1111\system\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "C:\AI\A1111\webui\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI\A1111\webui\modules\sd_samplers_cfg_denoiser.py", line 179, in forward
denoised = forge_sampler.forge_sample(self, denoiser_params=denoiser_params,
File "C:\AI\A1111\webui\modules_forge\forge_sampler.py", line 82, in forge_sample
denoised = sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options, seed)
File "C:\AI\A1111\webui\ldm_patched\modules\samplers.py", line 282, in sampling_function
cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond_, x, timestep, model_options)
File "C:\AI\A1111\webui\ldm_patched\modules\samplers.py", line 251, in calc_cond_uncond_batch
output = model_options['model_function_wrapper'](model.apply_model, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks)
File "C:\AI\A1111\webui\extensions\sd-forge-animatediff\scripts\animatediff_infv2v.py", line 132, in mm_sd_forward out = apply_model(
File "C:\AI\A1111\webui\ldm_patched\modules\model_base.py", line 85, in apply_model
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI\A1111\webui\ldm_patched\ldm\modules\diffusionmodules\openaimodel.py", line 860, in forward
h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
File "C:\AI\A1111\webui\ldm_patched\ldm\modules\diffusionmodules\openaimodel.py", line 57, in forward_timestep_embed
x = modifier(x, 'after', layer, layer_index, ts, transformer_options)
File "C:\AI\A1111\webui\extensions\sd-forge-animatediff\scripts\animatediff_mm.py", line 82, in mm_block_modifier
return self.mm.down_blocks[mm_idx0].motion_modules[mm_idx1](x)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI\A1111\webui\extensions\sd-forge-animatediff\motion_module.py", line 127, in forward
return self.temporal_transformer(x)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI\A1111\webui\extensions\sd-forge-animatediff\motion_module.py", line 185, in forward
hidden_states = block(hidden_states)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI\A1111\webui\extensions\sd-forge-animatediff\motion_module.py", line 239, in forward
hidden_states = attention_block(norm_hidden_states) + hidden_states
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI\A1111\webui\extensions\sd-forge-animatediff\motion_module.py", line 329, in forward
x = self.pos_encoder(x)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI\A1111\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI\A1111\webui\extensions\sd-forge-animatediff\motion_module.py", line 264, in forward
x = x + self.pe[:, :x.size(1)]
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
DPM++ 2M Karras is working fine, but when I try to use Euler A, I get this same error:
*** Error completing request | 0/30 [00:00<?, ?it/s]
*** Arguments: ('task(n6q4gm5cjrftp90)', <gradio.routes.Request object at 0x000001D76C8617E0>, '1girl,', 'lowres, bad hands, missing fingers, duplicate, bad anatomy, fused fingers, bad quality, worst quality, extra fingers, clone, cloned face, monochrome, ', [], 30, 'Euler a', 1, 1, 8.5, 1216, 832, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], 0, False, '', 0.8, -1, False, -1, 0, 0, 0, UiControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_input_gallery=[], generated_image=None, mask_image=None, enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced'), UiControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_input_gallery=[], generated_image=None, mask_image=None, enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced'), UiControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_input_gallery=[], generated_image=None, mask_image=None, enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced'), False, 1.3, 1.4, 0.9, 0.2, False, 256, 2, 0, False, False, 3, 2, 0, 0.35, True, 'bicubic', 'bicubic', False, 0.5, 2, False, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
Traceback (most recent call last):
File "D:\webui_forge\webui\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "D:\webui_forge\webui\modules\call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "D:\webui_forge\webui\modules\txt2img.py", line 110, in txt2img
processed = processing.process_images(p)
File "D:\webui_forge\webui\modules\processing.py", line 749, in process_images
res = process_images_inner(p)
File "D:\webui_forge\webui\modules\processing.py", line 920, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "D:\webui_forge\webui\modules\processing.py", line 1275, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "D:\webui_forge\webui\modules\sd_samplers_kdiffusion.py", line 251, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "D:\webui_forge\webui\modules\sd_samplers_common.py", line 260, in launch_sampling
return func()
File "D:\webui_forge\webui\modules\sd_samplers_kdiffusion.py", line 251, in <lambda>
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "D:\webui_forge\system\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\webui_forge\webui\repositories\k-diffusion\k_diffusion\sampling.py", line 149, in sample_euler_ancestral
d = to_d(x, sigmas[i], denoised)
File "D:\webui_forge\webui\repositories\k-diffusion\k_diffusion\sampling.py", line 48, in to_d
return (x - denoised) / utils.append_dims(sigma, x.ndim)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
DPM++ 2M Karras is working fine, but when I try to use Euler A, I get this same error:
*** Error completing request | 0/30 [00:00<?, ?it/s] *** Arguments: ('task(n6q4gm5cjrftp90)', <gradio.routes.Request object at 0x000001D76C8617E0>, '1girl,', 'lowres, bad hands, missing fingers, duplicate, bad anatomy, fused fingers, bad quality, worst quality, extra fingers, clone, cloned face, monochrome, ', [], 30, 'Euler a', 1, 1, 8.5, 1216, 832, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], 0, False, '', 0.8, -1, False, -1, 0, 0, 0, UiControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_input_gallery=[], generated_image=None, mask_image=None, enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced'), UiControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_input_gallery=[], generated_image=None, mask_image=None, enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced'), UiControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_input_gallery=[], generated_image=None, mask_image=None, enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced'), False, 1.3, 1.4, 0.9, 0.2, False, 256, 2, 0, False, False, 3, 2, 0, 0.35, True, 'bicubic', 'bicubic', False, 0.5, 2, False, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {} Traceback (most recent call last): File 
"D:\webui_forge\webui\modules\call_queue.py", line 57, in f res = list(func(*args, **kwargs)) File "D:\webui_forge\webui\modules\call_queue.py", line 36, in f res = func(*args, **kwargs) File "D:\webui_forge\webui\modules\txt2img.py", line 110, in txt2img processed = processing.process_images(p) File "D:\webui_forge\webui\modules\processing.py", line 749, in process_images res = process_images_inner(p) File "D:\webui_forge\webui\modules\processing.py", line 920, in process_images_inner samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) File "D:\webui_forge\webui\modules\processing.py", line 1275, in sample samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) File "D:\webui_forge\webui\modules\sd_samplers_kdiffusion.py", line 251, in sample samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) File "D:\webui_forge\webui\modules\sd_samplers_common.py", line 260, in launch_sampling return func() File "D:\webui_forge\webui\modules\sd_samplers_kdiffusion.py", line 251, in <lambda> samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) File "D:\webui_forge\system\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File "D:\webui_forge\webui\repositories\k-diffusion\k_diffusion\sampling.py", line 149, in sample_euler_ancestral d = to_d(x, sigmas[i], denoised) File "D:\webui_forge\webui\repositories\k-diffusion\k_diffusion\sampling.py", line 48, in to_d return (x - denoised) / utils.append_dims(sigma, x.ndim) RuntimeError: Expected all tensors to be on the same 
device, but found at least two devices, cuda:0 and cpu!
I updated and it's fixed.
Checklist
What happened?
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument index in method wrapper_CUDA__index_select)
Steps to reproduce the problem
I installed Forge WebUI and generated the first image; then the error message appeared.
What should have happened?
It should choose the correct GPU.
What browsers do you use to access the UI ?
Microsoft Edge
Sysinfo
sysinfo-2024-02-06-11-11.json
Console logs
Additional information
1660 Super/32Gb Ram/Ryzen 7