Open tkmptech opened 6 days ago
@tkmptech there's something wrong with your tool schema. I cannot help unless you provide the tool schema that you are using. This validation error usually occurs when the AI calls the tool with a schema different from the one you have specified.
@tool
def GetNewQuestion(category) -> str:
    """Fetch a new interview question based on the provided category."""
    # Some models hand the argument back as {"category": ...} rather than a
    # bare string; unwrap it, falling back to 'general' when the key is absent.
    topic = (
        category.get("category", "general")
        if isinstance(category, dict)
        else category
    )
    return f"What is your experience with {topic}?"
def main():
    """Drive the interview chat: set up the UI, the LLM, and the tools, then run the loop."""
    # UI bootstrap yields the interviewer persona and the working folder.
    persona, N_folder = init_ui()

    # The model endpoint is supplied interactively via a Streamlit text box.
    model_url = st.text_input("Enter the model URL:")
    llm = ChatOpenAI(
        openai_api_base=model_url,
        openai_api_key="your-api-key-here",
        model_name="gpt-3.5-turbo",
        max_retries=1,
        openai_organization="your-organization-id-here",
    )

    # Force every model call to route through the single question-fetching tool.
    tools = [GetNewQuestion]
    llm = llm.bind_tools(tools, tool_choice="GetNewQuestion")

    chat_bot = CHATBOT(persona, llm, tools)
    handle_chat(chat_bot)
    # Offer to persist the transcript once the session ends.
    save_chat_history()
This is my tool schema and the main function that handles the whole chat; at the end it calls "invoke_tool_or_model".
I get an error like this:
Traceback (most recent call last):
File "C:\Users\m.rahamneh\Documents\MiTool\Minerets.QueryGenerator\app.py", line 42, in <module>
query_generator.generate_query(user_request)
File "C:\Users\m.rahamneh\Documents\MiTool\Minerets.QueryGenerator\core\sql_agent.py", line 107, in generate_query
response = self.agent_executor.invoke(
File "C:\Users\m.rahamneh\Documents\MiTool\Minerets.SQLGenerator\.sqlgenenv\lib\site-packages\langchain\chains\base.py", line 170, in invoke
raise e
File "C:\Users\m.rahamneh\Documents\MiTool\Minerets.SQLGenerator\.sqlgenenv\lib\site-packages\langchain\chains\base.py", line 165, in invoke
final_outputs: Dict[str, Any] = self.prep_outputs(
File "C:\Users\m.rahamneh\Documents\MiTool\Minerets.SQLGenerator\.sqlgenenv\lib\site-packages\langchain\chains\base.py", line 466, in prep_outputs
self.memory.save_context(inputs, outputs)
File "C:\Users\m.rahamneh\Documents\MiTool\Minerets.SQLGenerator\.sqlgenenv\lib\site-packages\langchain\memory\chat_memory.py", line 57, in save_context
[HumanMessage(content=input_str), AIMessage(content=output_str)]
File "C:\Users\m.rahamneh\Documents\MiTool\Minerets.SQLGenerator\.sqlgenenv\lib\site-packages\langchain_core\messages\human.py", line 56, in __init__
super().__init__(content=content, **kwargs)
File "C:\Users\m.rahamneh\Documents\MiTool\Minerets.SQLGenerator\.sqlgenenv\lib\site-packages\langchain_core\messages\base.py", line 76, in __init__
super().__init__(content=content, **kwargs)
File "C:\Users\m.rahamneh\Documents\MiTool\Minerets.SQLGenerator\.sqlgenenv\lib\site-packages\langchain_core\load\serializable.py", line 110, in __init__
super().__init__(*args, **kwargs)
File "C:\Users\m.rahamneh\Documents\MiTool\Minerets.SQLGenerator\.sqlgenenv\lib\site-packages\pydantic\main.py", line 212, in __init__
validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
pydantic_core._pydantic_core.ValidationError: 5 validation errors for HumanMessage
content.str
Input should be a valid string [type=string_type, input_value=[SystemMessage(content='\..., response_metadata={})], input_type=list]
For further information visit https://errors.pydantic.dev/2.9/v/string_type
content.list[union[str,dict[any,any]]].0.str
Input should be a valid string [type=string_type, input_value=SystemMessage(content='\n...}, response_metadata={}), input_type=SystemMessage]
For further information visit https://errors.pydantic.dev/2.9/v/string_type
content.list[union[str,dict[any,any]]].0.dict[any,any]
Input should be a valid dictionary [type=dict_type, input_value=SystemMessage(content='\n...}, response_metadata={}), input_type=SystemMessage]
For further information visit https://errors.pydantic.dev/2.9/v/dict_type
content.list[union[str,dict[any,any]]].1.str
Input should be a valid string [type=string_type, input_value=HumanMessage(content='wha...}, response_metadata={}), input_type=HumanMessage]
For further information visit https://errors.pydantic.dev/2.9/v/string_type
content.list[union[str,dict[any,any]]].1.dict[any,any]
Input should be a valid dictionary [type=dict_type, input_value=HumanMessage(content='wha...}, response_metadata={}), input_type=HumanMessage]
For further information visit https://errors.pydantic.dev/2.9/v/dict_type
def generate_query(self, input):
    """Run the SQL agent on *input* and return the executor's response.

    Args:
        input: The user's request as a plain string.

    Returns:
        Whatever ``self.agent_executor.invoke`` returns (typically a dict
        containing at least the ``"input"`` key and an ``"output"`` key).

    Note:
        The agent executor's memory records the ``"input"`` value as the
        ``content`` of a ``HumanMessage`` in ``save_context``, so it MUST be a
        plain string.  The previous version passed a *list* of
        ``SystemMessage``/``HumanMessage`` objects here, which is exactly what
        raised the pydantic ``ValidationError`` ("Input should be a valid
        string") shown in the traceback.  System instructions belong in the
        agent's prompt template, configured once when the executor is built —
        not re-injected into the per-call input.
    """
    response = self.agent_executor.invoke({"input": input})
    return response
I encounter the same issue. The Llama 3.1 model returns the correct format, except that it comes back as a dict instead of a JSON-formatted string, and then the Pydantic parsing fails. I have already tried prompting the model to change this, but with no success so far.
I use HF TGI as Inference API.
Checked other resources
Example Code
The Following code:
Error Message and Stack Trace (if applicable)
The JSON response raises the error inside "tool_calls", where the expected format is not decoded.
Description
System Info
System Information
Package Information
Optional packages not installed
Other Dependencies