CUDA out of memory on win10
jeffreyrobeson · issue closed 8 months ago
Traceback (most recent call last):
  File "E:\software\magic-animate\Python\lib\site-packages\gradio\routes.py", line 488, in run_predict
    output = await app.get_blocks().process_api(
  File "E:\software\magic-animate\Python\lib\site-packages\gradio\blocks.py", line 1431, in process_api
    result = await self.call_function(
  File "E:\software\magic-animate\Python\lib\site-packages\gradio\blocks.py", line 1103, in call_function
    prediction = await anyio.to_thread.run_sync(
  File "E:\software\magic-animate\Python\lib\site-packages\anyio\to_thread.py", line 33, in run_sync
    return await get_asynclib().run_sync_in_worker_thread(
  File "E:\software\magic-animate\Python\lib\site-packages\anyio\_backends\_asyncio.py", line 877, in run_sync_in_worker_thread
    return await future
  File "E:\software\magic-animate\Python\lib\site-packages\anyio\_backends\_asyncio.py", line 807, in run
    result = context.run(func, *args)
  File "E:\software\magic-animate\Python\lib\site-packages\gradio\utils.py", line 707, in wrapper
    response = f(*args, **kwargs)
  File "E:\software\magic-animate\magicanimate\demo\gradio_animate.py", line 24, in animate
    return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale)
  File "E:\software\magic-animate\magicanimate\demo\animate.py", line 157, in __call__
    sample = self.pipeline(
  File "E:\software\magic-animate\Python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "E:\software\magic-animate\magicanimate\magicanimate\pipelines\pipeline_animation.py", line 697, in __call__
    down_block_res_samples, mid_block_res_sample = self.controlnet(
  File "E:\software\magic-animate\Python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "E:\software\magic-animate\magicanimate\magicanimate\models\controlnet.py", line 529, in forward
    sample, res_samples = downsample_block(
  File "E:\software\magic-animate\Python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "E:\software\magic-animate\Python\lib\site-packages\diffusers\models\unet_2d_blocks.py", line 1086, in forward
    hidden_states = attn(
  File "E:\software\magic-animate\Python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "E:\software\magic-animate\Python\lib\site-packages\diffusers\models\transformer_2d.py", line 315, in forward
    hidden_states = block(
  File "E:\software\magic-animate\Python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "E:\software\magic-animate\Python\lib\site-packages\diffusers\models\attention.py", line 248, in forward
    ff_output = self.ff(norm_hidden_states, scale=lora_scale)
  File "E:\software\magic-animate\Python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "E:\software\magic-animate\Python\lib\site-packages\diffusers\models\attention.py", line 307, in forward
    hidden_states = module(hidden_states, scale)
  File "E:\software\magic-animate\Python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "E:\software\magic-animate\Python\lib\site-packages\diffusers\models\attention.py", line 356, in forward
    return hidden_states * self.gelu(gate)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 160.00 MiB (GPU 0; 8.00 GiB total capacity; 14.25 GiB already allocated; 0 bytes free; 14.50 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
Hi, please use a GPU with more memory.
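The error message itself points at the allocator hint `max_split_size_mb` via `PYTORCH_CUDA_ALLOC_CONF`. Below is a minimal sketch of how that could be set before the demo initializes CUDA; the `128` value is an illustrative assumption, not a tested recommendation, and it only mitigates fragmentation. Since the run is already trying to allocate roughly 14 GiB, an 8 GiB card will most likely still run out of memory, so the maintainer's advice above stands.

```python
# Sketch (assumption): set the allocator config before the first CUDA allocation,
# e.g. at the very top of the script that launches the Gradio demo.
import os

# max_split_size_mb:128 is an example value; tune or omit as needed.
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128")

import torch  # imported after setting the env var so the CUDA allocator picks it up
```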