When I run the command python -m llava.serve.cli --model-path liuhaotian/llava-v1.5-7b --image-file 'https://llava-vl.github.io/static/images/view.jpg' --load-4bit, as described in the llava project's CLI Inference instructions, the ASSISTANT produces no output. I added print() calls to inspect the relevant tensors; the specific printout is shown in the screenshot. The input_ids look normal, but output_ids is always just tensor([[1, 2]]). I don't know why this happens, and I have tried many things without finding a solution. Does anyone know what causes this, and how I can fix it so that CLI Inference works normally? Any help would be greatly appreciated!
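For context, here is a small standalone check I can run to see what those two token IDs decode to (this is just my own debugging sketch, not part of llava/serve/cli.py, and it assumes the tokenizer shipped with liuhaotian/llava-v1.5-7b is the standard Llama tokenizer). If they turn out to be the BOS/EOS special tokens, it would mean generate() emits the end-of-sequence token immediately after the start token:

    # Debugging sketch (not part of cli.py): decode the two IDs seen in output_ids
    # to check whether they are the tokenizer's special BOS/EOS tokens.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("liuhaotian/llava-v1.5-7b", use_fast=False)
    print(tok.convert_ids_to_tokens([1, 2]))                    # for a Llama tokenizer: ['<s>', '</s>']
    print("bos:", tok.bos_token_id, "eos:", tok.eos_token_id)   # expected: bos 1, eos 2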
NOTICE: This is llava/serve/cli.py with my added print() calls:
import argparse
import torch
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path
from PIL import Image
import requests
from io import BytesIO
from transformers import TextStreamer
def load_image(image_file):
    if image_file.startswith('http://') or image_file.startswith('https://'):
        response = requests.get(image_file)
        image = Image.open(BytesIO(response.content)).convert('RGB')
    else:
        image = Image.open(image_file).convert('RGB')
    return image
def main(args):
    # Model initialization
    disable_torch_init()

    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device)

    # Pick the conversation template based on the model name
    if "llama-2" in model_name.lower():
        conv_mode = "llava_llama_2"
    elif "mistral" in model_name.lower():
        conv_mode = "mistral_instruct"
    elif "v1.6-34b" in model_name.lower():
        conv_mode = "chatml_direct"
    elif "v1" in model_name.lower():
        conv_mode = "llava_v1"
    elif "mpt" in model_name.lower():
        conv_mode = "mpt"
    else:
        conv_mode = "llava_v0"

    if args.conv_mode is not None and conv_mode != args.conv_mode:
        print('[WARNING] the auto-inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
    else:
        args.conv_mode = conv_mode

    conv = conv_templates[args.conv_mode].copy()
    if "mpt" in model_name.lower():
        roles = ('user', 'assistant')
    else:
        roles = conv.roles

    image = load_image(args.image_file)
    image_size = image.size
    # Similar operation as in model_worker.py
    image_tensor = process_images([image], image_processor, model.config)
    if type(image_tensor) is list:
        image_tensor = [image.to(model.device, dtype=torch.float16) for image in image_tensor]
    else:
        image_tensor = image_tensor.to(model.device, dtype=torch.float16)

    while True:
        try:
            inp = input(f"{roles[0]}: ")
        except EOFError:
            inp = ""
        if not inp:
            print("exit...")
            break

        print(f"{roles[1]}: ", end="")

        if image is not None:
            # First message: prepend the image token(s)
            if model.config.mm_use_im_start_end:
                inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + inp
            else:
                inp = DEFAULT_IMAGE_TOKEN + '\n' + inp
            image = None

        conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
        print("Input IDs:", input_ids)  # debug print I added
        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor,
                image_sizes=[image_size],
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                max_new_tokens=args.max_new_tokens,
                streamer=streamer,
                use_cache=True)
        print("Output IDs:", output_ids)  # debug print I added

        outputs = tokenizer.decode(output_ids[0], skip_special_tokens=True).strip()
        print("Decoded output:", outputs)  # debug print I added
        conv.messages[-1][-1] = outputs
        print(outputs)

        if args.debug:
            print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-file", type=str, required=True)
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--conv-mode", type=str, default=None)
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--max-new-tokens", type=int, default=512)
    parser.add_argument("--load-8bit", action="store_true")
    parser.add_argument("--load-4bit", action="store_true")
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    main(args)