t46 / fukuro-researcher


Reconsider where tokenization happens so we can support research on proposed prompt techniques #20

Open · t46 opened this issue 2 weeks ago

t46 commented 2 weeks ago

Right now the tokenizer is hidden away inside prepare_dataset. But if we want to add a prompt, it has to be added to the raw text, and for that it is more convenient to do it before tokenization. Along with adding a tokenize_dataset function, it might even be best to handle all of this in experiment.py.
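As a rough sketch, the proposed tokenize_dataset helper could look like this (the "text" column name and the map settings are assumptions, not existing code):

import datasets

def tokenize_dataset(dataset: datasets.Dataset, tokenizer, max_length: int) -> datasets.Dataset:
    def tokenize_fn(examples):
        # By this point any prompt has already been added to the raw text
        return tokenizer(examples["text"], truncation=True, max_length=max_length)
    # Drop the raw columns so the DataLoader only sees tokenized fields
    return dataset.map(tokenize_fn, batched=True, remove_columns=dataset.column_names)

run_inference would then call it explicitly: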

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

# tokenize_dataset and collate_fn are the helpers discussed above
def run_inference(self, test_dataset: datasets.Dataset):
    # Tokenize here, after any prompt edits have been made to the raw text
    test_dataset = tokenize_dataset(test_dataset, self.tokenizer, self.tokenizer.model_max_length)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)

    all_outputs = []
    self.model.eval()
    with torch.no_grad():
        for batch in tqdm(test_loader, desc="Evaluating"):
            input_ids = batch['input_ids'].to(self.device)

            # Decode the tokenized input back to raw text just to build the
            # prompt: exactly the round trip that early tokenization forces
            original_input = self.tokenizer.decode(input_ids[0], skip_special_tokens=True)

            # Step-by-step (chain-of-thought) reasoning prompt; the base prompt
            # is kept separate so later steps can rebuild the full context
            step_prompt = f"Given the following input:\n{original_input}\n\nThink step by step:\n"
            inputs = self.tokenizer(step_prompt + "1.", return_tensors="pt").input_ids.to(self.device)

            full_output = ""
            for step in range(1, 6):  # Generate 5 reasoning steps
                outputs = self.model.generate(
                    inputs,
                    max_length=self.tokenizer.model_max_length,
                    num_return_sequences=1,
                    do_sample=True,
                    temperature=0.7,
                    top_p=0.9
                )
                # generate() has no stop_token argument; instead, decode only
                # the newly generated tokens and cut at the first newline
                new_tokens = outputs[0][inputs.shape[1]:]
                step_output = self.tokenizer.decode(new_tokens, skip_special_tokens=True).split("\n")[0]
                full_output += f"{step}.{step_output}\n"

                # Rebuild the full context for the next step
                next_step = f"{step + 1}."
                inputs = self.tokenizer(step_prompt + full_output + next_step, return_tensors="pt").input_ids.to(self.device)

            # Add final conclusion step
            conclusion_prompt = "Therefore, the final answer is:"
            inputs = self.tokenizer(step_prompt + full_output + conclusion_prompt, return_tensors="pt").input_ids.to(self.device)
            outputs = self.model.generate(
                inputs,
                max_length=self.tokenizer.model_max_length,
                num_return_sequences=1,
                do_sample=True,
                temperature=0.7,
                top_p=0.9
            )
            # Again decode only the newly generated tokens
            conclusion = self.tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
            full_output += f"Conclusion: {conclusion}"

            all_outputs.append(full_output)

    return all_outputs
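
Going the experiment.py route, the prompt would be spliced into the raw text before tokenize_dataset is ever called, which would remove the decode-and-re-tokenize round trip inside run_inference. A rough sketch of that flow; add_prompt, raw_test_dataset, and the "text" column are hypothetical names, not existing code:

import datasets

def add_prompt(dataset: datasets.Dataset, template: str) -> datasets.Dataset:
    # Edit the raw text while it is still text; tokenize afterwards
    return dataset.map(lambda ex: {"text": template.format(ex["text"])})

# In experiment.py: prompt first, tokenize second
prompted = add_prompt(raw_test_dataset, "Given the following input:\n{}\n\nThink step by step:\n1.")
tokenized = tokenize_dataset(prompted, tokenizer, tokenizer.model_max_length)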