aws-samples / amazon-bedrock-workshop

This is a workshop designed for Amazon Bedrock, a foundation model service.
https://catalog.us-east-1.prod.workshops.aws/workshops/a4bdb007-5600-4368-81c5-ff5b4154f518/en-US/20-intro
MIT No Attribution

00_Chatbot_Claude ValueError: Error: Prompt must alternate between ' Human:' and ' Assistant:'. #181

Closed · deki closed this 7 months ago

deki commented 7 months ago

Running print_ww(conversation.predict(input="Give me a few tips on how to start a new garden.")) in 00_Chatbot_Claude.ipynb fails with

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[7], line 1
----> 1 print_ww(conversation.predict(input="Give me a few tips on how to start a new garden."))

File /opt/conda/lib/python3.10/site-packages/langchain/chains/llm.py:257, in LLMChain.predict(self, callbacks, **kwargs)
    242 def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
    243     """Format prompt with kwargs and pass to LLM.
    244 
    245     Args:
   (...)
    255             completion = llm.predict(adjective="funny")
    256     """
--> 257     return self(kwargs, callbacks=callbacks)[self.output_key]

File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:312, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
    310 except BaseException as e:
    311     run_manager.on_chain_error(e)
--> 312     raise e
    313 run_manager.on_chain_end(outputs)
    314 final_outputs: Dict[str, Any] = self.prep_outputs(
    315     inputs, outputs, return_only_outputs
    316 )

File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:306, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
    299 run_manager = callback_manager.on_chain_start(
    300     dumpd(self),
    301     inputs,
    302     name=run_name,
    303 )
    304 try:
    305     outputs = (
--> 306         self._call(inputs, run_manager=run_manager)
    307         if new_arg_supported
    308         else self._call(inputs)
    309     )
    310 except BaseException as e:
    311     run_manager.on_chain_error(e)

File /opt/conda/lib/python3.10/site-packages/langchain/chains/llm.py:93, in LLMChain._call(self, inputs, run_manager)
     88 def _call(
     89     self,
     90     inputs: Dict[str, Any],
     91     run_manager: Optional[CallbackManagerForChainRun] = None,
     92 ) -> Dict[str, str]:
---> 93     response = self.generate([inputs], run_manager=run_manager)
     94     return self.create_outputs(response)[0]

File /opt/conda/lib/python3.10/site-packages/langchain/chains/llm.py:103, in LLMChain.generate(self, input_list, run_manager)
    101 """Generate LLM result from inputs."""
    102 prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
--> 103 return self.llm.generate_prompt(
    104     prompts,
    105     stop,
    106     callbacks=run_manager.get_child() if run_manager else None,
    107     **self.llm_kwargs,
    108 )

File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:509, in BaseLLM.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    501 def generate_prompt(
    502     self,
    503     prompts: List[PromptValue],
   (...)
    506     **kwargs: Any,
    507 ) -> LLMResult:
    508     prompt_strings = [p.to_string() for p in prompts]
--> 509     return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)

File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:658, in BaseLLM.generate(self, prompts, stop, callbacks, tags, metadata, run_name, **kwargs)
    643         raise ValueError(
    644             "Asked to cache, but no cache found at `langchain.cache`."
    645         )
    646     run_managers = [
    647         callback_manager.on_llm_start(
    648             dumpd(self),
   (...)
    656         )
    657     ]
--> 658     output = self._generate_helper(
    659         prompts, stop, run_managers, bool(new_arg_supported), **kwargs
    660     )
    661     return output
    662 if len(missing_prompts) > 0:

File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:546, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
    544     for run_manager in run_managers:
    545         run_manager.on_llm_error(e)
--> 546     raise e
    547 flattened_outputs = output.flatten()
    548 for manager, flattened_output in zip(run_managers, flattened_outputs):

File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:533, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
    523 def _generate_helper(
    524     self,
    525     prompts: List[str],
   (...)
    529     **kwargs: Any,
    530 ) -> LLMResult:
    531     try:
    532         output = (
--> 533             self._generate(
    534                 prompts,
    535                 stop=stop,
    536                 # TODO: support multiple run managers
    537                 run_manager=run_managers[0] if run_managers else None,
    538                 **kwargs,
    539             )
    540             if new_arg_supported
    541             else self._generate(prompts, stop=stop)
    542         )
    543     except BaseException as e:
    544         for run_manager in run_managers:

File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:1053, in LLM._generate(self, prompts, stop, run_manager, **kwargs)
   1050 new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
   1051 for prompt in prompts:
   1052     text = (
-> 1053         self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
   1054         if new_arg_supported
   1055         else self._call(prompt, stop=stop, **kwargs)
   1056     )
   1057     generations.append([Generation(text=text)])
   1058 return LLMResult(generations=generations)

File /opt/conda/lib/python3.10/site-packages/langchain/llms/bedrock.py:383, in Bedrock._call(self, prompt, stop, run_manager, **kwargs)
    380         completion += chunk.text
    381     return completion
--> 383 return self._prepare_input_and_invoke(prompt=prompt, stop=stop, **kwargs)

File /opt/conda/lib/python3.10/site-packages/langchain/llms/bedrock.py:225, in BedrockBase._prepare_input_and_invoke(self, prompt, stop, run_manager, **kwargs)
    223 provider = self._get_provider()
    224 params = {**_model_kwargs, **kwargs}
--> 225 input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
    226 body = json.dumps(input_body)
    227 accept = "application/json"

File /opt/conda/lib/python3.10/site-packages/langchain/llms/bedrock.py:76, in LLMInputOutputAdapter.prepare_input(cls, provider, prompt, model_kwargs)
     74 input_body = {**model_kwargs}
     75 if provider == "anthropic":
---> 76     input_body["prompt"] = _human_assistant_format(prompt)
     77 elif provider == "ai21":
     78     input_body["prompt"] = prompt

File /opt/conda/lib/python3.10/site-packages/langchain/llms/bedrock.py:45, in _human_assistant_format(input_text)
     43         count += 1
     44     else:
---> 45         raise ValueError(ALTERNATION_ERROR)
     46 if input_text[i : i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT:
     47     if count % 2 == 1:

ValueError: Error: Prompt must alternate between '

Human:' and '

Assistant:'.
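
For anyone hitting this before upgrading: the anthropic branch of LangChain's Bedrock adapter passes the prompt through _human_assistant_format(), which requires the text to start with '\n\nHuman:' and then strictly alternate with '\n\nAssistant:'. ConversationChain's default prompt does not follow that shape, hence the ValueError. A minimal workaround sketch is below; the model id, template wording, and client setup are assumptions for illustration, not the workshop's exact notebook code.

import boto3
from langchain.llms.bedrock import Bedrock
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

# One Human turn and one Assistant turn; the running conversation history is
# embedded inside the Human turn so the alternation check is satisfied.
claude_prompt = PromptTemplate(
    input_variables=["history", "input"],
    template=(
        "\n\nHuman: The following is a friendly conversation between a human and an AI.\n"
        "Current conversation:\n{history}\n"
        "Next message from the human: {input}"
        "\n\nAssistant:"
    ),
)

boto3_bedrock = boto3.client("bedrock-runtime")  # region/credentials assumed to be configured
claude_llm = Bedrock(model_id="anthropic.claude-v2", client=boto3_bedrock)  # model id is an assumption

conversation = ConversationChain(
    llm=claude_llm,
    prompt=claude_prompt,
    memory=ConversationBufferMemory(),
    verbose=True,
)
print(conversation.predict(input="Give me a few tips on how to start a new garden."))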

rsgrewal-aws commented 7 months ago

I could not reproduce this error after running the pip installs from the setup notebook.
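
For context, the dependency upgrade in the setup notebook is roughly the following; the exact pinned versions live in the workshop repo, this is only a sketch of pulling a recent boto3/langchain whose Bedrock integration formats Claude prompts itself:

%pip install --upgrade --quiet boto3 botocore langchain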

deki commented 7 months ago

I can confirm the issue no longer occurs with the latest version, so I'm closing this.