Traceback (most recent call last):
  File "/usr/local/lib/python3.8/dist-packages/gradio/routes.py", line 275, in predict
    output = await app.blocks.process_api(body, username, session_state)
  File "/usr/local/lib/python3.8/dist-packages/gradio/blocks.py", line 274, in process_api
    predictions = await run_in_threadpool(block_fn.fn, *processed_input)
  File "/usr/local/lib/python3.8/dist-packages/starlette/concurrency.py", line 41, in run_in_threadpool
    return await anyio.to_thread.run_sync(func, *args)
  File "/usr/local/lib/python3.8/dist-packages/anyio/to_thread.py", line 31, in run_sync
    return await get_asynclib().run_sync_in_worker_thread(
  File "/usr/local/lib/python3.8/dist-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
    return await future
  File "/usr/local/lib/python3.8/dist-packages/anyio/_backends/_asyncio.py", line 867, in run
    result = context.run(func, *args)
  File "/usr/local/lib/python3.8/dist-packages/gradio/interface.py", line 500, in <lambda>
    lambda *args: self.run_prediction(args)[0]
  File "/usr/local/lib/python3.8/dist-packages/gradio/interface.py", line 682, in run_prediction
    prediction = predict_fn(*processed_input)
  File "aet_demo.py", line 60, in transfer
    src_model = SSLDualLightningModule(config).load_from_checkpoint(
  File "/root/ssl_speech_restoration/lightning_module.py", line 623, in __init__
    super().__init__(config)
  File "/root/ssl_speech_restoration/lightning_module.py", line 307, in __init__
    self.vocoder = load_vocoder(config)
  File "/root/ssl_speech_restoration/utils.py", line 44, in load_vocoder
    vocoder.load_state_dict(torch.load(config["general"]["hifigan_path"])["generator"])
  File "/usr/local/lib/python3.8/dist-packages/torch/serialization.py", line 608, in load
    return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
  File "/usr/local/lib/python3.8/dist-packages/torch/serialization.py", line 787, in _legacy_load
    result = unpickler.load()
  File "/usr/local/lib/python3.8/dist-packages/torch/serialization.py", line 743, in persistent_load
    deserialized_objects[root_key] = restore_location(obj, location)
  File "/usr/local/lib/python3.8/dist-packages/torch/serialization.py", line 175, in default_restore_location
    result = fn(storage, location)
  File "/usr/local/lib/python3.8/dist-packages/torch/serialization.py", line 151, in _cuda_deserialize
    device = validate_cuda_device(location)
  File "/usr/local/lib/python3.8/dist-packages/torch/serialization.py", line 135, in validate_cuda_device
    raise RuntimeError('Attempting to deserialize object on a CUDA '
RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.
This happens when I upload a sample audio file in Spanish.
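Following the hint in the RuntimeError, a likely workaround is to pass map_location to torch.load so the HiFi-GAN weights, which were saved on a CUDA device, are remapped onto the CPU. Below is a minimal sketch of the change to the failing call in ssl_speech_restoration/utils.py (line 44 of the traceback); config and vocoder are assumed to be the objects already built inside load_vocoder, and only the torch.load call changes.

import torch

# Sketch of a CPU-safe load for the HiFi-GAN checkpoint, per the error message above.
# "config" and "vocoder" are assumed to come from the surrounding load_vocoder() code.
checkpoint = torch.load(
    config["general"]["hifigan_path"],
    map_location=torch.device("cpu"),  # remap CUDA-saved storages onto the CPU
)
vocoder.load_state_dict(checkpoint["generator"])

If the demo should still use a GPU when one is present, map_location could instead be chosen conditionally, e.g. torch.device("cuda" if torch.cuda.is_available() else "cpu").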