Open idreamerhx opened 1 year ago
hi,
Ubuntu 22.04.2 LTS \n \l
commit 0f12c71a120341c41d659594f9579d65de8e8d8e (HEAD -> main, origin/main, origin/HEAD) Author: Chenfei Wu cqwuchenfei@163.com Date: Thu Mar 23 00:36:10 2023 +0800
Update README.md
command: python visual_chatgpt.py --load ImageCaptioning_cuda:0,ImageEditing_cuda:0,VisualQuestionAnswering_cuda:0,Text2Image_cuda:0
/opt/visual-chatgpt# python visual_chatgpt.py --load ImageCaptioning_cuda:0,ImageEditing_cuda:0,VisualQuestionAnswering_cuda:0,Text2Image_cuda:0 Initializing VisualChatGPT, load_dict={'ImageCaptioning': 'cuda:0', 'ImageEditing': 'cuda:0', 'VisualQuestionAnswering': 'cuda:0', 'Text2Image': 'cuda:0'} Initializing ImageCaptioning to cuda:0 Initializing ImageEditing to cuda:0 Initializing MaskFormer to cuda:0 text_encoder/model.safetensors not found Fetching 15 files: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 15/15 [00:00<00:00, 48210.39it/s] ╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/diffusers/models/modeling_utils.py:101 │ │ in load_state_dict │ │ │ │ 98 │ """ │ │ 99 │ try: │ │ 100 │ │ if os.path.basename(checkpoint_file) == _add_variant(WEIGHTS_NAME, variant): │ │ ❱ 101 │ │ │ return torch.load(checkpoint_file, map_location="cpu") │ │ 102 │ │ else: │ │ 103 │ │ │ return safetensors.torch.load_file(checkpoint_file, device="cpu") │ │ 104 │ except Exception as e: │ │ │ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/torch/serialization.py:705 in load │ │ │ │ 702 │ │ │ # If we want to actually tail call to torch.jit.load, we need to │ │ 703 │ │ │ # reset back to the original position. 
│ │ 704 │ │ │ orig_position = opened_file.tell() │ │ ❱ 705 │ │ │ with _open_zipfile_reader(opened_file) as opened_zipfile: │ │ 706 │ │ │ │ if _is_torchscript_zip(opened_zipfile): │ │ 707 │ │ │ │ │ warnings.warn("'torch.load' received a zip file that looks like a To │ │ 708 │ │ │ │ │ │ │ │ " dispatching to 'torch.jit.load' (call 'torch.jit.loa │ │ │ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/torch/serialization.py:242 in init │ │ │ │ 239 │ │ 240 class _open_zipfile_reader(_opener): │ │ 241 │ def init(self, name_or_buffer) -> None: │ │ ❱ 242 │ │ super(_open_zipfile_reader, self).init(torch._C.PyTorchFileReader(name_or_bu │ │ 243 │ │ 244 │ │ 245 class _open_zipfile_writer_file(_opener): │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ RuntimeError: PytorchStreamReader failed reading zip archive: failed finding central directory
During handling of the above exception, another exception occurred:
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/diffusers/models/modeling_utils.py:107 │ │ in load_state_dict │ │ │ │ 104 │ except Exception as e: │ │ 105 │ │ try: │ │ 106 │ │ │ with open(checkpoint_file) as f: │ │ ❱ 107 │ │ │ │ if f.read().startswith("version"): │ │ 108 │ │ │ │ │ raise OSError( │ │ 109 │ │ │ │ │ │ "You seem to have cloned a repository without having git-lfs ins │ │ 110 │ │ │ │ │ │ "git-lfs and run git lfs install followed by git lfs pull in │ │ │ │ /usr/lib/python3.10/codecs.py:322 in decode │ │ │ │ 319 │ def decode(self, input, final=False): │ │ 320 │ │ # decode input (taking the buffer into account) │ │ 321 │ │ data = self.buffer + input │ │ ❱ 322 │ │ (result, consumed) = self._buffer_decode(data, self.errors, final) │ │ 323 │ │ # keep undecoded input until the next call │ │ 324 │ │ self.buffer = data[consumed:] │ │ 325 │ │ return result │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ UnicodeDecodeError: 'utf-8' codec can't decode byte 0x80 in position 64: invalid start byte
git lfs install
git lfs pull
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /opt/visual-chatgpt/visual_chatgpt.py:1050 in │ │ │ │ 1047 │ parser.add_argument('--load', type=str, default="ImageCaptioning_cuda:0,Text2Image_c │ │ 1048 │ args = parser.parse_args() │ │ 1049 │ loaddict = {e.split('')[0].strip(): e.split('_')[1].strip() for e in args.load.spl │ │ ❱ 1050 │ bot = ConversationBot(load_dict=load_dict) │ │ 1051 │ with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo: │ │ 1052 │ │ chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT") │ │ 1053 │ │ state = gr.State([]) │ │ │ │ /opt/visual-chatgpt/visual_chatgpt.py:986 in init │ │ │ │ 983 │ │ self.models = {} │ │ 984 │ │ # Load Basic Foundation Models │ │ 985 │ │ for class_name, device in load_dict.items(): │ │ ❱ 986 │ │ │ self.models[class_name] = globals()class_name │ │ 987 │ │ │ │ 988 │ │ # Load Template Foundation Models │ │ 989 │ │ for class_name, module in globals().items(): │ │ │ │ /opt/visual-chatgpt/visual_chatgpt.py:219 in init │ │ │ │ 216 │ │ self.mask_former = MaskFormer(device=self.device) │ │ 217 │ │ self.revision = 'fp16' if 'cuda' in device else None │ │ 218 │ │ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 │ │ ❱ 219 │ │ self.inpaint = StableDiffusionInpaintPipeline.from_pretrained( │ │ 220 │ │ │ "runwayml/stable-diffusion-inpainting", revision=self.revision, torch_dtype= │ │ 221 │ │ │ 222 │ @prompts(name="Remove Something From The Photo", │ │ │ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/diffusers/pipelines/pipeline_utils.py:94 │ │ 4 in from_pretrained │ │ │ │ 941 │ │ │ │ │ │ 942 │ │ │ │ # check if the module is in a subdirectory │ │ 943 │ │ │ │ if os.path.isdir(os.path.join(cached_folder, name)): │ │ ❱ 944 │ │ │ │ │ loaded_sub_model = load_method(os.path.join(cached_folder, name), │ │ 945 │ │ │ │ else: │ │ 946 │ │ │ │ │ # else load from the root directory │ │ 947 │ │ │ │ │ loaded_sub_model = 
load_method(cached_folder, loading_kwargs) │ │ │ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/diffusers/models/modeling_utils.py:563 │ │ in from_pretrained │ │ │ │ 560 │ │ │ │ # if device_map is None, load the state dict and move the params from me │ │ 561 │ │ │ │ if device_map is None: │ │ 562 │ │ │ │ │ param_device = "cpu" │ │ ❱ 563 │ │ │ │ │ state_dict = load_state_dict(model_file, variant=variant) │ │ 564 │ │ │ │ │ # move the params from meta device to cpu │ │ 565 │ │ │ │ │ missing_keys = set(model.state_dict().keys()) - set(state_dict.keys( │ │ 566 │ │ │ │ │ if len(missing_keys) > 0: │ │ │ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/diffusers/models/modeling_utils.py:119 │ │ in load_state_dict │ │ │ │ 116 │ │ │ │ │ │ "model. Make sure you have saved the model properly." │ │ 117 │ │ │ │ │ ) from e │ │ 118 │ │ except (UnicodeDecodeError, ValueError): │ │ ❱ 119 │ │ │ raise OSError( │ │ 120 │ │ │ │ f"Unable to load weights from checkpoint file for '{checkpoint_file}' " │ │ 121 │ │ │ │ f"at '{checkpoint_file}'. " │ │ 122 │ │ │ │ "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please s │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ OSError: Unable to load weights from checkpoint file for '/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/afeee10def38be19995784bcc811882409d066e5/unet/diffusion_pytorch_model.bin' at '/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/afeee10def38be19995784bcc811882409d066e5/unet/diffusion_pytorch_model.bin'. If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True.
(pyvenv-visual-chatgpt) (ubu22x86-visual-chatgpt) :/opt/visual-chatgpt# ll /root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/afeee10def38be19995784bcc811882409d066e5/unet/diffusion_pytorch_model.bin lrwxrwxrwx 1 root root 145 Mar 24 19:28 /root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/afeee10def38be19995784bcc811882409d066e5/unet/diffusion_pytorch_model.bin -> /root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/blobs/7b6d4fcca2f74a7ef4740139989c590dcd6d2f1e01cf2774624d01ac2671f58b (pyvenv-visual-chatgpt) (ubu22x86-visual-chatgpt) :/opt/visual-chatgpt# ll /root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/blobs/7b6d4fcca2f74a7ef4740139989c590dcd6d2f1e01cf2774624d01ac2671f58b -rw-r--r-- 1 root root 1003446047 Mar 24 19:28 /root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/blobs/7b6d4fcca2f74a7ef4740139989c590dcd6d2f1e01cf2774624d01ac2671f58b
hi,
Ubuntu 22.04.2 LTS \n \l
commit 0f12c71a120341c41d659594f9579d65de8e8d8e (HEAD -> main, origin/main, origin/HEAD) Author: Chenfei Wu cqwuchenfei@163.com Date: Thu Mar 23 00:36:10 2023 +0800
command: python visual_chatgpt.py --load ImageCaptioning_cuda:0,ImageEditing_cuda:0,VisualQuestionAnswering_cuda:0,Text2Image_cuda:0
/opt/visual-chatgpt# python visual_chatgpt.py --load ImageCaptioning_cuda:0,ImageEditing_cuda:0,VisualQuestionAnswering_cuda:0,Text2Image_cuda:0 Initializing VisualChatGPT, load_dict={'ImageCaptioning': 'cuda:0', 'ImageEditing': 'cuda:0', 'VisualQuestionAnswering': 'cuda:0', 'Text2Image': 'cuda:0'} Initializing ImageCaptioning to cuda:0 Initializing ImageEditing to cuda:0 Initializing MaskFormer to cuda:0 text_encoder/model.safetensors not found Fetching 15 files: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 15/15 [00:00<00:00, 48210.39it/s] ╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/diffusers/models/modeling_utils.py:101 │ │ in load_state_dict │ │ │ │ 98 │ """ │ │ 99 │ try: │ │ 100 │ │ if os.path.basename(checkpoint_file) == _add_variant(WEIGHTS_NAME, variant): │ │ ❱ 101 │ │ │ return torch.load(checkpoint_file, map_location="cpu") │ │ 102 │ │ else: │ │ 103 │ │ │ return safetensors.torch.load_file(checkpoint_file, device="cpu") │ │ 104 │ except Exception as e: │ │ │ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/torch/serialization.py:705 in load │ │ │ │ 702 │ │ │ # If we want to actually tail call to torch.jit.load, we need to │ │ 703 │ │ │ # reset back to the original position. 
│ │ 704 │ │ │ orig_position = opened_file.tell() │ │ ❱ 705 │ │ │ with _open_zipfile_reader(opened_file) as opened_zipfile: │ │ 706 │ │ │ │ if _is_torchscript_zip(opened_zipfile): │ │ 707 │ │ │ │ │ warnings.warn("'torch.load' received a zip file that looks like a To │ │ 708 │ │ │ │ │ │ │ │ " dispatching to 'torch.jit.load' (call 'torch.jit.loa │ │ │ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/torch/serialization.py:242 in init │ │ │ │ 239 │ │ 240 class _open_zipfile_reader(_opener): │ │ 241 │ def init(self, name_or_buffer) -> None: │ │ ❱ 242 │ │ super(_open_zipfile_reader, self).init(torch._C.PyTorchFileReader(name_or_bu │ │ 243 │ │ 244 │ │ 245 class _open_zipfile_writer_file(_opener): │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ RuntimeError: PytorchStreamReader failed reading zip archive: failed finding central directory
During handling of the above exception, another exception occurred:
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/diffusers/models/modeling_utils.py:107 │ │ in load_state_dict │ │ │ │ 104 │ except Exception as e: │ │ 105 │ │ try: │ │ 106 │ │ │ with open(checkpoint_file) as f: │ │ ❱ 107 │ │ │ │ if f.read().startswith("version"): │ │ 108 │ │ │ │ │ raise OSError( │ │ 109 │ │ │ │ │ │ "You seem to have cloned a repository without having git-lfs ins │ │ 110 │ │ │ │ │ │ "git-lfs and run
git lfs install
followed by `git lfs pull`
in │ │ │ │ /usr/lib/python3.10/codecs.py:322 in decode │ │ │ │ 319 │ def decode(self, input, final=False): │ │ 320 │ │ # decode input (taking the buffer into account) │ │ 321 │ │ data = self.buffer + input │ │ ❱ 322 │ │ (result, consumed) = self._buffer_decode(data, self.errors, final) │ │ 323 │ │ # keep undecoded input until the next call │ │ 324 │ │ self.buffer = data[consumed:] │ │ 325 │ │ return result │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ UnicodeDecodeError: 'utf-8' codec can't decode byte 0x80 in position 64: invalid start byteDuring handling of the above exception, another exception occurred:
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /opt/visual-chatgpt/visual_chatgpt.py:1050 in <module> │
│ │
│ 1047 │ parser.add_argument('--load', type=str, default="ImageCaptioning_cuda:0,Text2Image_c │
│ 1048 │ args = parser.parse_args() │
│ 1049 │ load_dict = {e.split('_')[0].strip(): e.split('_')[1].strip() for e in args.load.spl │
│ ❱ 1050 │ bot = ConversationBot(load_dict=load_dict) │
│ 1051 │ with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo: │
│ 1052 │ │ chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT") │
│ 1053 │ │ state = gr.State([]) │
│ │
│ /opt/visual-chatgpt/visual_chatgpt.py:986 in __init__ │
│ │
│ 983 │ │ self.models = {} │
│ 984 │ │ # Load Basic Foundation Models │
│ 985 │ │ for class_name, device in load_dict.items(): │
│ ❱ 986 │ │ │ self.models[class_name] = globals()[class_name](device=device) │
│ 987 │ │ │
│ 988 │ │ # Load Template Foundation Models │
│ 989 │ │ for class_name, module in globals().items(): │
│ │
│ /opt/visual-chatgpt/visual_chatgpt.py:219 in __init__ │
│ │
│ 216 │ │ self.mask_former = MaskFormer(device=self.device) │
│ 217 │ │ self.revision = 'fp16' if 'cuda' in device else None │
│ 218 │ │ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 │
│ ❱ 219 │ │ self.inpaint = StableDiffusionInpaintPipeline.from_pretrained( │
│ 220 │ │ │ "runwayml/stable-diffusion-inpainting", revision=self.revision, torch_dtype= │
│ 221 │ │
│ 222 │ @prompts(name="Remove Something From The Photo", │
│ │
│ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/diffusers/pipelines/pipeline_utils.py:94 │
│ 4 in from_pretrained │
│ │
│ 941 │ │ │ │ │
│ 942 │ │ │ │ # check if the module is in a subdirectory │
│ 943 │ │ │ │ if os.path.isdir(os.path.join(cached_folder, name)): │
│ ❱ 944 │ │ │ │ │ loaded_sub_model = load_method(os.path.join(cached_folder, name), │
│ 945 │ │ │ │ else: │
│ 946 │ │ │ │ │ # else load from the root directory │
│ 947 │ │ │ │ │ loaded_sub_model = load_method(cached_folder, loading_kwargs) │
│ │
│ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/diffusers/models/modeling_utils.py:563 │
│ in from_pretrained │
│ │
│ 560 │ │ │ │ # if device_map is None, load the state dict and move the params from me │
│ 561 │ │ │ │ if device_map is None: │
│ 562 │ │ │ │ │ param_device = "cpu" │
│ ❱ 563 │ │ │ │ │ state_dict = load_state_dict(model_file, variant=variant) │
│ 564 │ │ │ │ │ # move the params from meta device to cpu │
│ 565 │ │ │ │ │ missing_keys = set(model.state_dict().keys()) - set(state_dict.keys( │
│ 566 │ │ │ │ │ if len(missing_keys) > 0: │
│ │
│ /opt/pyvenv-visual-chatgpt/lib/python3.10/site-packages/diffusers/models/modeling_utils.py:119 │
│ in load_state_dict │
│ │
│ 116 │ │ │ │ │ │ "model. Make sure you have saved the model properly." │
│ 117 │ │ │ │ │ ) from e │
│ 118 │ │ except (UnicodeDecodeError, ValueError): │
│ ❱ 119 │ │ │ raise OSError( │
│ 120 │ │ │ │ f"Unable to load weights from checkpoint file for '{checkpoint_file}' " │
│ 121 │ │ │ │ f"at '{checkpoint_file}'. " │
│ 122 │ │ │ │ "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please s │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
OSError: Unable to load weights from checkpoint file for '/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/afeee10def38be19995784bcc811882409d066e5/unet/diffusion_pytorch_model.bin' at
'/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/afeee10def38be19995784bcc811882409d066e5/unet/diffusion_pytorch_model.bin'. If you tried to load a PyTorch model from a TF 2.0 checkpoint, please
set from_tf=True.
(pyvenv-visual-chatgpt) (ubu22x86-visual-chatgpt) :/opt/visual-chatgpt# ll /root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/afeee10def38be19995784bcc811882409d066e5/unet/diffusion_pytorch_model.bin lrwxrwxrwx 1 root root 145 Mar 24 19:28 /root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/afeee10def38be19995784bcc811882409d066e5/unet/diffusion_pytorch_model.bin -> /root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/blobs/7b6d4fcca2f74a7ef4740139989c590dcd6d2f1e01cf2774624d01ac2671f58b (pyvenv-visual-chatgpt) (ubu22x86-visual-chatgpt) :/opt/visual-chatgpt# ll /root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/blobs/7b6d4fcca2f74a7ef4740139989c590dcd6d2f1e01cf2774624d01ac2671f58b -rw-r--r-- 1 root root 1003446047 Mar 24 19:28 /root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/blobs/7b6d4fcca2f74a7ef4740139989c590dcd6d2f1e01cf2774624d01ac2671f58b