Closed: tonymacx86PRO closed this issue 1 year ago
Hello, I'm hitting the same error. Did you solve it?
No, it is not solved
This could simply mean that your GPU doesn't have enough memory for the training. Increase accumulate_grad_batches in the yaml file to something >1, and reduce batch_size accordingly so that batch_size * accumulate_grad_batches equals the original batch size.
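A minimal sketch of what that change could look like; the exact key paths are assumptions about this repo's config layout, so adapt them to the yaml you are actually using:

```yaml
# Hypothetical config fragment: keep batch_size * accumulate_grad_batches
# equal to the original batch size (assumed to be 8 here) so the effective
# batch size is unchanged while per-step memory goes down.
data:
  params:
    batch_size: 2              # was 8; smaller per-step memory footprint
lightning:
  trainer:
    accumulate_grad_batches: 4 # 2 * 4 == 8, same effective batch size
```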
Mm, not enough memory. See the diffusers repo for a more memory-efficient version.
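If you go the diffusers route, a sketch like the following keeps peak VRAM much lower by loading in fp16 and slicing attention. The model id and pipeline class are assumptions based on the publicly released image-variations weights, not something confirmed in this thread:

```python
import torch
from diffusers import StableDiffusionImageVariationPipeline

# Assumed Hugging Face model id for the image-variations checkpoint.
pipe = StableDiffusionImageVariationPipeline.from_pretrained(
    "lambdalabs/sd-image-variations-diffusers",
    torch_dtype=torch.float16,      # fp16 weights roughly halve parameter memory
)
pipe.enable_attention_slicing()     # lower peak VRAM at some speed cost
pipe = pipe.to("cuda")
```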
Traceback (most recent call last):
File "C:\Users\Coder\Documents\sd-exp\scripts\gradio_variations.py", line 143, in <module>
fire.Fire(run_demo)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\fire\core.py", line 141, in Fire
component_trace = _Fire(component, args, parsed_flag_args, context, name)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\fire\core.py", line 466, in _Fire
component, remaining_args = _CallAndUpdateTrace(
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\fire\core.py", line 681, in _CallAndUpdateTrace
component = fn(*varargs, **kwargs)
File "C:\Users\Coder\Documents\sd-exp\scripts\gradio_variations.py", line 110, in run_demo
model = load_model_from_config(config, ckpt, device=device)
File "c:\users\coder\documents\sd-exp\scripts\image_variations.py", line 35, in load_model_from_config
model.to(device)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\pytorch_lightning\core\mixins\device_dtype_mixin.py", line 109, in to
return super().to(*args, **kwargs)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 927, in to
return self._apply(convert)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 579, in _apply
module._apply(fn)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 579, in _apply
module._apply(fn)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 579, in _apply
module._apply(fn)
[Previous line repeated 5 more times]
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 602, in _apply
param_applied = fn(param)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 925, in convert
return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
RuntimeError: CUDA out of memory. Tried to allocate 16.00 MiB (GPU 0; 12.00 GiB total capacity; 11.19 GiB already allocated; 0 bytes free; 11.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
(sd-exp) C:\Users\Coder\Documents\sd-exp>python scripts/gradio_variations.py
Loading model from models/ldm/stable-diffusion-v1/sd-clip-vit-l14-img-embed_ema_only.ckpt
LatentDiffusion: Running in eps-prediction mode
DiffusionWrapper has 859.52 M params.
Keeping EMAs of 688.
making attention of type 'vanilla' with 512 in_channels
Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
making attention of type 'vanilla' with 512 in_channels
Traceback (most recent call last):
File "C:\Users\Coder\Documents\sd-exp\scripts\gradio_variations.py", line 145, in <module>
fire.Fire(run_demo)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\fire\core.py", line 141, in Fire
component_trace = _Fire(component, args, parsed_flag_args, context, name)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\fire\core.py", line 466, in _Fire
component, remaining_args = _CallAndUpdateTrace(
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\fire\core.py", line 681, in _CallAndUpdateTrace
component = fn(*varargs, **kwargs)
File "C:\Users\Coder\Documents\sd-exp\scripts\gradio_variations.py", line 112, in run_demo
model = load_model_from_config(config, ckpt, device=device)
File "c:\users\coder\documents\sd-exp\scripts\image_variations.py", line 35, in load_model_from_config
model.to(device)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\pytorch_lightning\core\mixins\device_dtype_mixin.py", line 109, in to
return super().to(*args, **kwargs)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 927, in to
return self._apply(convert)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 579, in _apply
module._apply(fn)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 579, in _apply
module._apply(fn)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 579, in _apply
module._apply(fn)
[Previous line repeated 5 more times]
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 602, in _apply
param_applied = fn(param)
File "C:\Users\Coder\AppData\Roaming\Python\Python310\site-packages\torch\nn\modules\module.py", line 925, in convert
return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
RuntimeError: CUDA out of memory. Tried to allocate 16.00 MiB (GPU 0; 12.00 GiB total capacity; 11.19 GiB already allocated; 0 bytes free; 11.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
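As the error text itself suggests, when reserved memory is much larger than allocated memory you can try capping the allocator's split size. A small sketch follows; the 128 MiB value is just an example starting point, and the variable has to be in place before CUDA is first used:

```python
import os

# Must be set before the first CUDA allocation (or exported in the shell
# before launching the script); 128 is an example value, not a recommendation.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

import torch  # import torch only after setting the env var, to be safe
```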