langchain-ai / langchain

🦜🔗 Build context-aware reasoning applications
https://python.langchain.com
MIT License
92.4k stars 14.78k forks source link

Malformed input request: string [ Observation] does not match pattern ^(\|+|User:)$ #18565

Closed aqiao closed 6 months ago

aqiao commented 6 months ago

Checked other resources

Example Code


from langchain.agents import AgentExecutor, create_react_agent
from langchain.tools import tool
from langchain.llms.bedrock import Bedrock
import boto3
from langchain_core.prompts import PromptTemplate
from langchain import hub

# ReAct-style prompt for a tool-using agent; the placeholders {tools},
# {tool_names}, {input} and {agent_scratchpad} are filled in by
# create_react_agent / the AgentExecutor at run time.
react_prompt_template = """
Answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {input}
Thought:{agent_scratchpad}
"""
# prompt = hub.pull("hwchase17/react")
# Let PromptTemplate infer the input variables from the template itself.
# The original passed input_variables=["input"], but the template also
# references {tools}, {tool_names} and {agent_scratchpad}; declaring an
# incomplete input_variables list breaks template validation/formatting.
prompt = PromptTemplate.from_template(react_prompt_template)

# Minimal demo tool for the agent to call.
# NOTE: the docstring below is not just documentation — @tool uses it as the
# tool's description, which the agent's LLM reads when deciding what to call.
@tool
def say_hi(name: str) -> str:
    """Say hi to the world"""
    return f"hi {name}"

def specify_bedrock_titan_llm():
    """Build and return a Bedrock LLM wrapper for the Titan Text Express model."""
    client = boto3.client(
        service_name="bedrock-runtime",
        region_name="us-east-1",
    )
    return Bedrock(
        model_id="amazon.titan-text-express-v1",
        client=client,
        model_kwargs={'temperature': 0},
    )

if __name__ == '__main__':
    # Wire the Titan LLM, the single demo tool, and the ReAct prompt into
    # an executor, then run one query.
    bedrock_llm = specify_bedrock_titan_llm()
    tools = [say_hi]
    react_agent = create_react_agent(bedrock_llm, tools, prompt)
    executor = AgentExecutor(
        agent=react_agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,
    )
    output = executor.invoke({"input": "call say_hi function and return the result"})
    print(output)

Error Message and Stack Trace (if applicable)

Traceback (most recent call last): File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_community/llms/bedrock.py", line 543, in _prepare_input_and_invoke_stream response = self.client.invoke_model_with_response_stream(**request_options) File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/botocore/client.py", line 553, in _api_call return self._make_api_call(operation_name, kwargs) File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/botocore/client.py", line 1009, in _make_api_call raise error_class(parsed_response, operation_name) botocore.errorfactory.ValidationException: An error occurred (ValidationException) when calling the InvokeModelWithResponseStream operation: Malformed input request: string [ Observation] does not match pattern ^(|+|User:)$, please reformat your input and try again.

During handling of the above exception, another exception occurred:

Traceback (most recent call last): File "/Users/aqiao/Learning/bedrock/langchain-agent/demo2.py", line 58, in result = agent_executor.invoke({"input": "call say_hi function and return the result"}) File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/chains/base.py", line 163, in invoke raise e File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/chains/base.py", line 153, in invoke self._call(inputs, run_manager=run_manager) File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/agents/agent.py", line 1391, in _call next_step_output = self._take_next_step( File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/agents/agent.py", line 1097, in _take_next_step [ File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/agents/agent.py", line 1097, in [ File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/agents/agent.py", line 1125, in _iter_next_step output = self.agent.plan( File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/agents/agent.py", line 387, in plan for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}): File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 2446, in stream yield from self.transform(iter([input]), config, kwargs) File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 2433, in transform yield from self._transform_stream_with_config( File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 1513, in _transform_stream_with_config chunk: Output = context.run(next, iterator) # type: ignore File 
"/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 2397, in _transform for output in final_pipeline: File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 1051, in transform for chunk in input: File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 4173, in transform yield from self.bound.transform( File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 1061, in transform yield from self.stream(final, config, kwargs) File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 452, in stream raise e File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 436, in stream for chunk in self._stream( File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_community/llms/bedrock.py", line 546, in _prepare_input_and_invoke_stream raise ValueError(f"Error raised by bedrock service: {e}") ValueError: Error raised by bedrock service: An error occurred (ValidationException) when calling the InvokeModelWithResponseStream operation: Malformed input request: string [ Observation] does not match pattern ^(|+|User:)$, please reformat your input and try again.

Description

I'm using langchain (0.1.10) to interact with AWS Titan Text G1, following the langchain official demo.

Here is the prompt template


react_prompt_template="""
Answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {input}
Thought:{agent_scratchpad}
"""

Here is the tool say_hi definition

@tool
def say_hi(name: str) -> str:
    """Say hi to the world"""
    return f"hi {name}"

When running the code, it raised below exception

ValueError: Error raised by bedrock service: An error occurred (ValidationException) when calling the InvokeModelWithResponseStream operation: Malformed input request: string [
Observation] does not match pattern ^(\|+|User:)$, please reformat your input and try again.

System Info

langchain 0.1.10 aws Titan Text G1 langchain agent

aqiao commented 6 months ago

Reference issue : https://github.com/langchain-ai/langchain/issues/16840

Hi all, after adding 'textGenerationConfig': {"stopSequences": "Observation"} to model_kwargs, the error was fixed. However, it raised another exception, as shown below

Traceback (most recent call last):
  File "/Users/aqiao/Learning/bedrock/langchain-agent/demo2.py", line 58, in <module>
    result = agent_executor.invoke({"input": "User: call say_hi function and return the result\nBot:"})
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/chains/base.py", line 163, in invoke
    raise e
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/chains/base.py", line 153, in invoke
    self._call(inputs, run_manager=run_manager)
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/agents/agent.py", line 1391, in _call
    next_step_output = self._take_next_step(
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/agents/agent.py", line 1097, in _take_next_step
    [
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/agents/agent.py", line 1097, in <listcomp>
    [
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/agents/agent.py", line 1125, in _iter_next_step
    output = self.agent.plan(
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain/agents/agent.py", line 387, in plan
    for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}):
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 2446, in stream
    yield from self.transform(iter([input]), config, **kwargs)
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 2433, in transform
    yield from self._transform_stream_with_config(
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 1513, in _transform_stream_with_config
    chunk: Output = context.run(next, iterator)  # type: ignore
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 2397, in _transform
    for output in final_pipeline:
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 1051, in transform
    for chunk in input:
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 4173, in transform
    yield from self.bound.transform(
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 1061, in transform
    yield from self.stream(final, config, **kwargs)
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 452, in stream
    raise e
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 436, in stream
    for chunk in self._stream(
  File "/Users/aqiao/PycharmProjects/pythonProject/venv/lib/python3.10/site-packages/langchain_community/llms/bedrock.py", line 546, in _prepare_input_and_invoke_stream
    raise ValueError(f"Error raised by bedrock service: {e}")
ValueError: Error raised by bedrock service: An error occurred (ValidationException) when calling the InvokeModelWithResponseStream operation: Malformed input request: 2 schema violations found, please reformat your input and try again.

Here is my prompt, following the langchain official doc

react_prompt_template="""
Answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {input}
Thought:{agent_scratchpad}
"""

I passed input value as below

result = agent_executor.invoke({"input": "User: call say_hi function and return the result\nBot:"})

Here is my complete code

from langchain.agents import AgentExecutor, create_react_agent
from langchain.tools import tool
from langchain.llms.bedrock import Bedrock
import boto3
from langchain_core.prompts import PromptTemplate
from langchain import hub

# ReAct-style prompt for a tool-using agent; the placeholders {tools},
# {tool_names}, {input} and {agent_scratchpad} are filled in by
# create_react_agent / the AgentExecutor at run time.
react_prompt_template = """
Answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {input}
Thought:{agent_scratchpad}
"""
# prompt = hub.pull("hwchase17/react")
# Let PromptTemplate infer the input variables from the template itself.
# The original passed input_variables=["input"], but the template also
# references {tools}, {tool_names} and {agent_scratchpad}; declaring an
# incomplete input_variables list breaks template validation/formatting.
prompt = PromptTemplate.from_template(react_prompt_template)

# Minimal demo tool for the agent to call.
# NOTE: the docstring below is not just documentation — @tool uses it as the
# tool's description, which the agent's LLM reads when deciding what to call.
@tool
def say_hi(name: str) -> str:
    """Say hi to the world"""
    return f"hi {name}"

def specify_bedrock_titan_llm():
    """Build and return a Bedrock LLM wrapper for the Titan Text Express model."""
    bedrock_client = boto3.client(
        service_name="bedrock-runtime",
        region_name="us-east-1",
    )
    # https://github.com/langchain-ai/langchain/issues/16840
    # LangChain's Bedrock adapter for the "amazon" provider already nests
    # model_kwargs under "textGenerationConfig" when building the request
    # body, so wrapping them in a 'textGenerationConfig' key here produced a
    # doubly-nested config — the "2 schema violations" error. Pass the
    # generation parameters flat instead, and note that stopSequences must be
    # a JSON list, not a plain string. Titan additionally only accepts stop
    # strings matching ^(\|+|User:)$ (hence the original "[ Observation]
    # does not match pattern" error) — TODO confirm against the current
    # Bedrock Titan request schema.
    bedrock_llm = Bedrock(
        model_id="amazon.titan-text-express-v1",
        client=bedrock_client,
        model_kwargs={'temperature': 0, 'stopSequences': ['User:']}
    )
    return bedrock_llm

if __name__ == '__main__':
    # Wire the Titan LLM, the single demo tool, and the ReAct prompt into
    # an executor, then run one query.
    bedrock_llm = specify_bedrock_titan_llm()
    tools = [say_hi]
    react_agent = create_react_agent(bedrock_llm, tools, prompt)
    executor = AgentExecutor(
        agent=react_agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,
    )
    output = executor.invoke({"input": "User: call say_hi function and return the result\nBot:"})
    print(output)

any suggestion on this?

Fares-Tabet commented 5 months ago

Hey! I'm running into the exact same issue — can you tell me how you resolved it? Thanks!

Sanzid88888 commented 4 months ago

@Fares-Tabet do you find any solution for this? I'm running into the same issue

Sanzid88888 commented 4 months ago

@aqiao do you find any solution for this? I'm running into the same issue

aqiao commented 4 months ago

@aqiao do you find any solution for this? I'm running into the same issue

Hi, I changed the LLM to an OpenAI model, and it works.