Mrzhang-dada opened 11 months ago
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def generate_one_completion(prompt: str):
    torch.set_default_device("cuda")
    model = AutoModelForCausalLM.from_pretrained("//phi-1", trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained("//phi-1", trust_remote_code=True)

    inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)
    # outputs = model.generate(**inputs, max_length=200, max_new_tokens=430)
    outputs = model.generate(**inputs, max_length=200, temperature=0.8, do_sample=True)
    completion = tokenizer.batch_decode(outputs)[0]
    return completion
```
This is my model-output code. No matter what value I set for `num_samples_per_task`, it returns the same answer for every question.
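One likely culprit in the call above: `max_length` counts the prompt tokens as well as the generated ones, so a long HumanEval-style prompt can leave `generate` with almost no room to sample, and the handful of tokens it does produce may come out identical across runs. Below is a minimal sketch of budgeting by new tokens and drawing several independent samples per prompt in one call. This is a sketch under assumptions, not the harness's actual code: the `microsoft/phi-1` checkpoint name, the `generate_samples` helper, and the 430-token budget are illustrative stand-ins.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.set_default_device("cuda")
# "microsoft/phi-1" is an assumed checkpoint name; substitute your local path.
model = AutoModelForCausalLM.from_pretrained("microsoft/phi-1", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1", trust_remote_code=True)


def generate_samples(prompt: str, num_samples: int = 5) -> list[str]:
    inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)
    outputs = model.generate(
        **inputs,
        max_new_tokens=430,                   # budget by *new* tokens, not prompt + new
        do_sample=True,                       # enable sampling so outputs can differ
        temperature=0.8,
        num_return_sequences=num_samples,     # several independent samples per call
        pad_token_id=tokenizer.eos_token_id,  # silences the missing-pad-token warning
    )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)
```

Loading the model and tokenizer once at module level also avoids paying the full `from_pretrained` cost on every task, which the original function does.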
I also tried wrapping the prompt in triple quotes before tokenizing; the rest of the function is unchanged:

```python
inputs = tokenizer("'''" + prompt + "'''", return_tensors="pt", return_attention_mask=False)
```

It still returns the same answer for each question, regardless of `num_samples_per_task`.
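Either way, a quick way to confirm whether sampling is active at all is to generate a few completions for a single prompt and compare them directly. A small check along these lines, reusing the hypothetical `generate_samples` helper from the sketch above:

```python
prompt = 'def add(a: int, b: int) -> int:\n    """Return the sum of a and b."""\n'
samples = generate_samples(prompt, num_samples=5)
print(f"{len(set(samples))} distinct completions out of {len(samples)}")
```

If every sample is byte-for-byte identical even with `do_sample=True`, the next thing to rule out is a fixed random seed, for example a `torch.manual_seed` call elsewhere in the evaluation harness.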