44%|████████████████████████████████████ | 11/25 [02:48<03:34, 15.30s/it]
Traceback (most recent call last):
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\gradio\routes.py", line 488, in run_predict
output = await app.get_blocks().process_api(
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\gradio\blocks.py", line 1431, in process_api
result = await self.call_function(
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\gradio\blocks.py", line 1103, in call_function
prediction = await anyio.to_thread.run_sync(
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\anyio\to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\anyio\_backends\_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\anyio\_backends\_asyncio.py", line 807, in run
result = context.run(func, *args)
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\gradio\utils.py", line 707, in wrapper
response = f(*args, **kwargs)
File "V:\_ANIMATION\MAGIC_ANIMATE\magic-animate-for-windows\demo\gradio_animate_gpu_1.py", line 22, in animate
return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale)
File "V:\_ANIMATION\MAGIC_ANIMATE\magic-animate-for-windows\demo\animate_gpu_1.py", line 164, in __call__
sample = self.pipeline(
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "V:\_ANIMATION\MAGIC_ANIMATE\magic-animate-for-windows\magicanimate\pipelines\pipeline_animation.py", line 738, in __call__
pred = self.unet(
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "V:\_ANIMATION\MAGIC_ANIMATE\magic-animate-for-windows\magicanimate\models\unet_controlnet.py", line 462, in forward
sample = upsample_block(
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "V:\_ANIMATION\MAGIC_ANIMATE\magic-animate-for-windows\magicanimate\models\unet_3d_blocks.py", line 653, in forward
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "V:\_ANIMATION\MAGIC_ANIMATE\magic-animate-for-windows\magicanimate\models\attention.py", line 136, in forward
hidden_states = block(
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "V:\_ANIMATION\MAGIC_ANIMATE\magic-animate-for-windows\magicanimate\models\mutual_self_attention.py", line 272, in hacked_basic_transformer_inner_forward
hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\diffusers\models\attention.py", line 307, in forward
hidden_states = module(hidden_states, scale)
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\NGXCRYPT-2ND\.conda\envs\m_animate_for_win\lib\site-packages\diffusers\models\attention.py", line 356, in forward
return hidden_states * self.gelu(gate)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 320.00 MiB (GPU 0; 10.00 GiB total capacity; 8.34 GiB already allocated; 0 bytes free; 9.12 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
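
The final line reports that 8.34 GiB of the card's 10 GiB were already allocated when a further 320 MiB request failed, and the message itself suggests trying max_split_size_mb via PYTORCH_CUDA_ALLOC_CONF to reduce fragmentation. A minimal sketch of that suggestion is below; the 128 MiB split size is only an illustrative value, and the variable must be set before torch initializes the CUDA allocator (i.e. before the first import of torch in the process), otherwise it has no effect.

    import os

    # Assumption: this runs before the first "import torch" in the process,
    # so the CUDA caching allocator picks up the setting. The value 128 is
    # an example, not a recommendation from the traceback.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

    import torch

The same variable can instead be set in the Windows shell before launching the demo (set PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:128). Note that this only helps when reserved memory greatly exceeds allocated memory; here the allocation itself is close to the card's capacity, so the workload may simply not fit in 10 GiB without further memory-saving measures.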