waltonfuture opened 3 weeks ago
Below is an example of running AIDC-AI/Ovis1.5-Llama3-8B on two GPUs. The explicit device_map keeps the visual tokenizer, the visual token embedding (vte), and the LLM's embedding layer together on GPU 0, so the tensors they exchange stay on one device:
import torch
from PIL import Image
from transformers import AutoModelForCausalLM
device_map = {
    "visual_tokenizer": 0,
    "vte": 0,
    "llm.model.embed_tokens": 0,
    "llm.model.norm": 0,
    "llm.lm_head": 0,
    # decoder layers 0-13 on GPU 0, layers 14-31 on GPU 1
    "llm.model.layers.0": 0,
    "llm.model.layers.1": 0,
    "llm.model.layers.2": 0,
    "llm.model.layers.3": 0,
    "llm.model.layers.4": 0,
    "llm.model.layers.5": 0,
    "llm.model.layers.6": 0,
    "llm.model.layers.7": 0,
    "llm.model.layers.8": 0,
    "llm.model.layers.9": 0,
    "llm.model.layers.10": 0,
    "llm.model.layers.11": 0,
    "llm.model.layers.12": 0,
    "llm.model.layers.13": 0,
    "llm.model.layers.14": 1,
    "llm.model.layers.15": 1,
    "llm.model.layers.16": 1,
    "llm.model.layers.17": 1,
    "llm.model.layers.18": 1,
    "llm.model.layers.19": 1,
    "llm.model.layers.20": 1,
    "llm.model.layers.21": 1,
    "llm.model.layers.22": 1,
    "llm.model.layers.23": 1,
    "llm.model.layers.24": 1,
    "llm.model.layers.25": 1,
    "llm.model.layers.26": 1,
    "llm.model.layers.27": 1,
    "llm.model.layers.28": 1,
    "llm.model.layers.29": 1,
    "llm.model.layers.30": 1,
    "llm.model.layers.31": 1
}
# load model
model = AutoModelForCausalLM.from_pretrained("AIDC-AI/Ovis1.5-Llama3-8B",
                                             torch_dtype=torch.bfloat16,
                                             multimodal_max_length=8192,
                                             device_map=device_map,
                                             trust_remote_code=True)
text_tokenizer = model.get_text_tokenizer()
visual_tokenizer = model.get_visual_tokenizer()
conversation_formatter = model.get_conversation_formatter()
# enter image path and prompt
image_path = input("Enter image path: ")
image = Image.open(image_path)
text = input("Enter prompt: ")
query = f'<image>\n{text}'
prompt, input_ids = conversation_formatter.format_query(query)
# batchify and move to cuda:0, where embed_tokens lives per the device_map
input_ids = torch.unsqueeze(input_ids, dim=0).cuda()
attention_mask = torch.ne(input_ids, text_tokenizer.pad_token_id).cuda()
# preprocess the image on the visual tokenizer's own device and dtype
pixel_values = [visual_tokenizer.preprocess_image(image).to(
    dtype=visual_tokenizer.dtype, device=visual_tokenizer.device)]
# generate output
with torch.inference_mode():
    gen_kwargs = dict(
        max_new_tokens=1024,
        do_sample=False,
        top_p=None,
        top_k=None,
        temperature=None,
        repetition_penalty=None,
        eos_token_id=model.generation_config.eos_token_id,
        pad_token_id=text_tokenizer.pad_token_id,
        use_cache=True
    )
    output_ids = model.generate(input_ids, pixel_values=pixel_values, attention_mask=attention_mask, **gen_kwargs)[0]
    output = text_tokenizer.decode(output_ids, skip_special_tokens=True)
    print(f'Output: {output}')
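Writing out all 32 layer entries by hand is error-prone; the same map can be built in a loop. A minimal sketch that reproduces the map above (the uneven 14/18 split presumably leaves headroom on GPU 0 for the visual tokenizer and embeddings):

# build the same device_map programmatically
device_map = {
    "visual_tokenizer": 0,
    "vte": 0,
    "llm.model.embed_tokens": 0,
    "llm.model.norm": 0,
    "llm.lm_head": 0,
}
for i in range(32):  # Llama3-8B has 32 decoder layers
    device_map[f"llm.model.layers.{i}"] = 0 if i < 14 else 1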
What's the script for Ovis1.5-Gemma2-9B?
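I haven't tried the Gemma2 variant, but the same script should carry over with only the repo id and the layer split changed. A minimal sketch, assuming the AIDC-AI/Ovis1.5-Gemma2-9B wrapper exposes the same module names as above and that its Gemma2 backbone has 42 decoder layers (worth verifying against the checkpoint's config.json):

import torch
from transformers import AutoModelForCausalLM

num_layers = 42  # assumed Gemma2-9B decoder depth; check config.json
device_map = {
    "visual_tokenizer": 0,
    "vte": 0,
    "llm.model.embed_tokens": 0,
    "llm.model.norm": 0,
    "llm.lm_head": 0,
}
for i in range(num_layers):
    # first half of the layers on GPU 0, the rest on GPU 1
    device_map[f"llm.model.layers.{i}"] = 0 if i < num_layers // 2 else 1

model = AutoModelForCausalLM.from_pretrained("AIDC-AI/Ovis1.5-Gemma2-9B",
                                             torch_dtype=torch.bfloat16,
                                             multimodal_max_length=8192,
                                             device_map=device_map,
                                             trust_remote_code=True)

The rest of the script (tokenizers, preprocessing, generation) should be unchanged.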
I want to use multiple GPUs for inference, and I load the model with device_map='auto'. However, I always run into this error: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0!
Can you help me with that? Thanks a lot!
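That error usually means Accelerate's automatic split put modules that exchange tensors (for example the visual tokenizer / vte and the LLM's embedding layer) on different GPUs. A quick way to see the placement 'auto' chose, before pinning the offending modules together with an explicit device_map like the one above (hf_device_map is set whenever the model is loaded with a device_map):

# inspect where device_map='auto' placed each module
for name, device in model.hf_device_map.items():
    print(f"{name} -> {device}")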