wxj630 / visual-chatgpt-zh

A Chinese-language version of visual-chatgpt
Apache License 2.0

Summary of the problems I ran into, looking for solutions #10

Open MiaoJYWayne opened 1 year ago

MiaoJYWayne commented 1 year ago

I ran into these three problems when running the project. I'd really appreciate it if someone could help me solve them, thanks a lot.

Traceback (most recent call last):

  /data/miaojinyang-slurm/anaconda3/envs/visgpt/lib/python3.8/site-packages/diffusers/models/modeling_utils.py:103 in load_state_dict

    100         if os.path.basename(checkpoint_file) == _add_variant(WEIGHTS_N
    101             return torch.load(checkpoint_file, map_location="cpu")
    102         else:
  ❱ 103             return safetensors.torch.load_file(checkpoint_file, device
    104     except Exception as e:
    105         try:
    106             with open(checkpoint_file) as f:

  /data/miaojinyang-slurm/anaconda3/envs/visgpt/lib/python3.8/site-packages/safetensors/torch.py:101 in load_file

     98     result = {}
     99     with safe_open(filename, framework="pt", device=device) as f:
    100         for k in f.keys():
  ❱ 101             result[k] = f.get_tensor(k)
    102     return result
    103
    104

RuntimeError: shape '[10240, 1280]' is invalid for input of size 5784003

During handling of the above exception, another exception occurred:

Traceback (most recent call last):

  /data/miaojinyang-slurm/anaconda3/envs/visgpt/lib/python3.8/site-packages/diffusers/models/modeling_utils.py:107 in load_state_dict

    104     except Exception as e:
    105         try:
    106             with open(checkpoint_file) as f:
  ❱ 107                 if f.read().startswith("version"):
    108                     raise OSError(
    109                         "You seem to have cloned a repository without
    110                         "git-lfs and run git lfs install followed by

  /data/miaojinyang-slurm/anaconda3/envs/visgpt/lib/python3.8/codecs.py:322 in decode

    319     def decode(self, input, final=False):
    320         # decode input (taking the buffer into account)
    321         data = self.buffer + input
  ❱ 322         (result, consumed) = self._buffer_decode(data, self.errors, f
    323         # keep undecoded input until the next call
    324         self.buffer = data[consumed:]
    325         return result

UnicodeDecodeError: 'utf-8' codec can't decode byte 0xdc in position 0: invalid continuation byte

During handling of the above exception, another exception occurred:

Traceback (most recent call last):

  /data/miaojinyang-slurm/visual-chatgpt-zh/visual_chatgpt_zh.py:164 in <module>

    161     pretrained_model_dir = args.pretrained_model_dir
    162
    163     load_dict = {e.split('_')[0].strip(): e.split('_')[1].strip() for
  ❱ 164     bot = ConversationBot(load_dict=load_dict, pretrained_model_dir=pr
    165     with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as d
    166         chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT"
    167         state = gr.State([])

  /data/miaojinyang-slurm/visual-chatgpt-zh/visual_chatgpt_zh.py:98 in __init__

     95
     96         self.models = dict()
     97         for class_name, device in load_dict.items():
  ❱  98             self.models[class_name] = globals()[class_name](device=dev
     99
    100         self.tools = []
    101         for class_name, instance in self.models.items():

  /data/miaojinyang-slurm/visual-chatgpt-zh/modules/text2img.py:8 in __init__

      5         print("Initializing Text2Image to %s" % device)
      6         self.device = device
      7         self.torch_dtype = torch.float16 if 'cuda' in device else torch
  ❱   8         self.pipe = StableDiffusionPipeline.from_pretrained(f"{pretrain
      9                                                             torch_dtype
     10         self.pipe.to(device)
     11         self.a_prompt = 'best quality, extremely detailed'

  /data/miaojinyang-slurm/anaconda3/envs/visgpt/lib/python3.8/site-packages/diffusers/pipelines/pipeline_utils.py:944 in from_pretrained

    941
    942                 # check if the module is in a subdirectory
    943                 if os.path.isdir(os.path.join(cached_folder, name)):
  ❱ 944                     loaded_sub_model = load_method(os.path.join(cache
    945                 else:
    946                     # else load from the root directory
    947                     loaded_sub_model = load_method(cached_folder, l

  /data/miaojinyang-slurm/anaconda3/envs/visgpt/lib/python3.8/site-packages/diffusers/models/modeling_utils.py:563 in from_pretrained

    560                 # if device_map is None, load the state dict and move
    561                 if device_map is None:
    562                     param_device = "cpu"
  ❱ 563                     state_dict = load_state_dict(model_file, variant=v
    564                     # move the params from meta device to cpu
    565                     missing_keys = set(model.state_dict().keys()) - se
    566                     if len(missing_keys) > 0:

  /data/miaojinyang-slurm/anaconda3/envs/visgpt/lib/python3.8/site-packages/diffusers/models/modeling_utils.py:119 in load_state_dict

    116                         "model. Make sure you have saved the model pro
    117                     ) from e
    118         except (UnicodeDecodeError, ValueError):
  ❱ 119             raise OSError(
    120                 f"Unable to load weights from checkpoint file for '{ch
    121                 f"at '{checkpoint_file}'. "
    122                 "If you tried to load a PyTorch model from a TF 2.0 ch

OSError: Unable to load weights from checkpoint file for './model//stable-diffusion-v1-5/unet/diffusion_pytorch_model.safetensors' at './model//stable-diffusion-v1-5/unet/diffusion_pytorch_model.safetensors'. If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True.

srun: error: gpu01: task 0: Exited with exit code 1
srun: launch/slurm: _step_signal: Terminating StepId=87329.0
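
A note for whoever picks this up: the final OSError is just diffusers' generic fallback message. The chain starts with the RuntimeError from safetensors, which means the UNet weight file could not be parsed, and the git-lfs hint visible in the second traceback suggests the checkpoint under ./model/stable-diffusion-v1-5 is either an LFS pointer or a partial download. Below is a minimal diagnostic sketch, not part of visual-chatgpt-zh; the path is taken from the error message above and may need adjusting.

# Hypothetical diagnostic snippet (not from the repo): check whether the UNet weight
# file named in the error message is a git-lfs pointer or a truncated download.
import os

path = "./model/stable-diffusion-v1-5/unet/diffusion_pytorch_model.safetensors"

size_mb = os.path.getsize(path) / (1024 * 1024)
print(f"{path}: {size_mb:.1f} MiB")  # the real SD v1.5 UNet checkpoint is on the order of gigabytes

with open(path, "rb") as f:
    head = f.read(64)

if head.startswith(b"version https://git-lfs"):
    # Only the LFS pointer was cloned; the actual weights were never downloaded.
    print("git-lfs pointer file -> run `git lfs install` and `git lfs pull` inside the model repo")
elif size_mb < 100:
    print("file is far too small -> re-download the stable-diffusion-v1-5 checkpoint")
else:
    print("size looks plausible -> the file may still be corrupted; try re-downloading it")

If the file turns out to be a pointer or only a few MB, re-fetching the weights should clear all three exceptions at once, since they are a single chain triggered by the same file.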