`2023-10-07 04:36:26.238 | INFO | lama_cleaner.helper:load_jit_model:102 - Loading model from: C:\Users\空灵/.cache\torch\hub\checkpoints\big-lama.pt
2023-10-07 04:36:27.126 | ERROR | lama_cleaner.helper:handle_error:89 - Failed to load model C:\Users\空灵/.cache\torch\hub\checkpoints\big-lama.pt,please submit an issue at https://github.com/Sanster/lama-cleaner/issues and include a screenshot of the error:
open file failed because of errno 2 on fopen: No such file or directory, file path: C:\Users\空灵/.cache\torch\hub\checkpoints\big-lama.pt
Traceback (most recent call last):
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\helper.py", line 104, in load_jit_model
model = torch.jit.load(model_path, map_location="cpu").to(device)
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\torch\jit\_serialization.py", line 162, in load
cpp_module = torch._C.import_ir_module(cu, str(f), map_location, _extra_files, _restore_shapes) # type: ignore[call-arg]
RuntimeError: open file failed because of errno 2 on fopen: No such file or directory, file path: C:\Users\空灵/.cache\torch\hub\checkpoints\big-lama.pt
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\gradio\routes.py", line 488, in run_predict
output = await app.get_blocks().process_api(
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 1431, in process_api
result = await self.call_function(
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 1103, in call_function
prediction = await anyio.to_thread.run_sync(
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\anyio\to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 807, in run
result = context.run(func, *args)
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\gradio\utils.py", line 707, in wrapper
response = f(*args, **kwargs)
File "D:\AI\stable-diffusion-webui\extensions\sd-webui-inpaint-anything\ia_threading.py", line 165, in wrapper
res = func(*args, **kwargs)
File "D:\AI\stable-diffusion-webui\extensions\sd-webui-inpaint-anything\ia_threading.py", line 125, in wrapper
res = func(*args, **kwargs)
File "D:\AI\stable-diffusion-webui\extensions\sd-webui-inpaint-anything\scripts\inpaint_anything.py", line 511, in run_cleaner
model = ModelManager(name=cleaner_model_id, device=devices.device)
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\model_manager.py", line 44, in __init__
self.model = self.init_model(name, device, **kwargs)
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\model_manager.py", line 51, in init_model
model = models[name](device, **kwargs)
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\model\base.py", line 32, in __init__
self.init_model(device, **kwargs)
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\model\lama.py", line 27, in init_model
self.model = load_jit_model(LAMA_MODEL_URL, device, LAMA_MODEL_MD5).eval()
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\helper.py", line 106, in load_jit_model
handle_error(model_path, model_md5, e)
File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\helper.py", line 93, in handle_error
exit(-1)
File "C:\Users\空灵\AppData\Local\Programs\Python\Python310\lib\_sitebuiltins.py", line 26, in __call__
raise SystemExit(code)
SystemExit: -1
`
`2023-10-07 04:36:26.238 | INFO | lama_cleaner.helper:load_jit_model:102 - Loading model from: C:\Users\空灵/.cache\torch\hub\checkpoints\big-lama.pt 2023-10-07 04:36:27.126 | ERROR | lama_cleaner.helper:handle_error:89 - Failed to load model C:\Users\空灵/.cache\torch\hub\checkpoints\big-lama.pt,please submit an issue at https://github.com/Sanster/lama-cleaner/issues and include a screenshot of the error: open file failed because of errno 2 on fopen: No such file or directory, file path: C:\Users\空灵/.cache\torch\hub\checkpoints\big-lama.pt Traceback (most recent call last): File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\helper.py", line 104, in load_jit_model model = torch.jit.load(model_path, map_location="cpu").to(device) File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\torch\jit_serialization.py", line 162, in load cpp_module = torch._C.import_ir_module(cu, str(f), map_location, _extra_files, _restore_shapes) # type: ignore[call-arg] RuntimeError: open file failed because of errno 2 on fopen: No such file or directory, file path: C:\Users\空灵/.cache\torch\hub\checkpoints\big-lama.pt
During handling of the above exception, another exception occurred:
Traceback (most recent call last): File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\gradio\routes.py", line 488, in run_predict output = await app.get_blocks().process_api( File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 1431, in process_api result = await self.call_function( File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 1103, in call_function prediction = await anyio.to_thread.run_sync( File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\anyio\to_thread.py", line 33, in run_sync return await get_asynclib().run_sync_in_worker_thread( File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\anyio_backends_asyncio.py", line 877, in run_sync_in_worker_thread return await future File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\anyio_backends_asyncio.py", line 807, in run result = context.run(func, args) File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\gradio\utils.py", line 707, in wrapper response = f(args, kwargs) File "D:\AI\stable-diffusion-webui\extensions\sd-webui-inpaint-anything\ia_threading.py", line 165, in wrapper res = func(*args, *kwargs) File "D:\AI\stable-diffusion-webui\extensions\sd-webui-inpaint-anything\ia_threading.py", line 125, in wrapper res = func(args, kwargs) File "D:\AI\stable-diffusion-webui\extensions\sd-webui-inpaint-anything\scripts\inpaint_anything.py", line 511, in run_cleaner model = ModelManager(name=cleaner_model_id, device=devices.device) File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\model_manager.py", line 44, in init self.model = self.init_model(name, device, kwargs) File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\model_manager.py", line 51, in init_model model = models[name](device, kwargs) File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\model\base.py", line 32, in init self.init_model(device, **kwargs) File 
"D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\model\lama.py", line 27, in init_model self.model = load_jit_model(LAMA_MODEL_URL, device, LAMA_MODEL_MD5).eval() File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\helper.py", line 106, in load_jit_model handle_error(model_path, model_md5, e) File "D:\AI\stable-diffusion-webui\venv\lib\site-packages\lama_cleaner\helper.py", line 93, in handle_error exit(-1) File "C:\Users\空灵\AppData\Local\Programs\Python\Python310\lib_sitebuiltins.py", line 26, in call raise SystemExit(code) SystemExit: -1 `