from llm_axe import models, agents, selenium_hybrid_reader

# Chat model served by a local Ollama instance.
chat_model = models.OllamaChat(model="llama3:instruct")

# Internet-capable agent; web pages are fetched through the Selenium hybrid reader.
web_agent = agents.OnlineAgent(chat_model, custom_site_reader=selenium_hybrid_reader)

# Prior conversation turns handed back to the agent.
# NOTE(review): passing this non-empty history triggers an ollama
# RequestError ("image must be bytes, ...") — an empty history avoids it.
chat_history = [
    {'role': 'user', 'content': 'what time is it?'},
    {'role': 'assistant', 'content': 'Based on information from the internet, as of Thursday, 20 June 2024, it is currently 17:45 in Taiwan.'},
]
# chat_history = []

answer = web_agent.search('what time is it?', history=chat_history)
print(answer)
Output:
Traceback (most recent call last):
File "/home/yoni/myllama/test.py", line 9, in <module>
human_resp = online_agent.search('what time is it?',history=history)
File "/home/yoni/.local/lib/python3.10/site-packages/llm_axe/agents.py", line 577, in search
response = final_responder.ask(user_prompt, history)
File "/home/yoni/.local/lib/python3.10/site-packages/llm_axe/agents.py", line 64, in ask
response = self.llm.ask(prompts, temperature=self.temperature, format=self.format)
File "/home/yoni/.local/lib/python3.10/site-packages/llm_axe/models.py", line 21, in ask
return self._ollama.chat(model=self._model, messages=prompts, format=format, options={"temperature": temperature})["message"]["content"]
File "/home/yoni/.local/lib/python3.10/site-packages/ollama/_client.py", line 178, in chat
message['images'] = [_encode_image(image) for image in images]
File "/home/yoni/.local/lib/python3.10/site-packages/ollama/_client.py", line 178, in <listcomp>
message['images'] = [_encode_image(image) for image in images]
File "/home/yoni/.local/lib/python3.10/site-packages/ollama/_client.py", line 673, in _encode_image
raise RequestError('image must be bytes, path-like object, or file-like object')
ollama._types.RequestError: image must be bytes, path-like object, or file-like object
Making the history list empty (`[]`) makes the error disappear.
The code I use to reproduce the bug is shown above; its output follows:
Making the history list empty (`[]`) makes the error disappear.