Closed: 52THANOS closed this issue 5 months ago
To use llama3 in an agent when the model does not support function calling, you can override the predict_and_call and apredict_and_call methods to handle that case. Here is an example:
from llama_index.core.llms.llm import LLM
from llama_index.core.base.llms.types import ChatMessage, ChatResponse
from llama_index.core.tools.calling import call_tool_with_selection, acall_tool_with_selection
from llama_index.core.tools.types import BaseTool
from llama_index.core.chat_engine.types import AgentChatResponse
from llama_index.core.llms.llm import ToolSelection
from typing import List, Optional, Union, Any
import asyncio


class CustomLLM(LLM):
    def predict_and_call(
        self,
        tools: List["BaseTool"],
        user_msg: Optional[Union[str, ChatMessage]] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        verbose: bool = False,
        allow_parallel_tool_calls: bool = False,
        **kwargs: Any,
    ) -> "AgentChatResponse":
        if not self.metadata.is_function_calling_model:
            # Handle the case where function calling is not supported
            return super().predict_and_call(
                tools,
                user_msg=user_msg,
                chat_history=chat_history,
                verbose=verbose,
                **kwargs,
            )

        response = self.chat_with_tools(
            tools,
            user_msg,
            chat_history=chat_history,
            verbose=verbose,
            allow_parallel_tool_calls=allow_parallel_tool_calls,
            **kwargs,
        )
        tool_calls = self.get_tool_calls_from_response(response)
        tool_outputs = [
            call_tool_with_selection(tool_call, tools, verbose=verbose)
            for tool_call in tool_calls
        ]
        if allow_parallel_tool_calls:
            output_text = "\n\n".join(
                [tool_output.content for tool_output in tool_outputs]
            )
            return AgentChatResponse(response=output_text, sources=tool_outputs)
        else:
            if len(tool_outputs) > 1:
                raise ValueError(
                    "Invalid: got multiple tool calls but allow_parallel_tool_calls is False"
                )
            return AgentChatResponse(
                response=tool_outputs[0].content, sources=tool_outputs
            )

    async def apredict_and_call(
        self,
        tools: List["BaseTool"],
        user_msg: Optional[Union[str, ChatMessage]] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        verbose: bool = False,
        allow_parallel_tool_calls: bool = False,
        **kwargs: Any,
    ) -> "AgentChatResponse":
        if not self.metadata.is_function_calling_model:
            # Handle the case where function calling is not supported
            return await super().apredict_and_call(
                tools,
                user_msg=user_msg,
                chat_history=chat_history,
                verbose=verbose,
                **kwargs,
            )

        response = await self.achat_with_tools(
            tools,
            user_msg,
            chat_history=chat_history,
            verbose=verbose,
            allow_parallel_tool_calls=allow_parallel_tool_calls,
            **kwargs,
        )
        tool_calls = self.get_tool_calls_from_response(response)
        tool_tasks = [
            acall_tool_with_selection(tool_call, tools, verbose=verbose)
            for tool_call in tool_calls
        ]
        tool_outputs = await asyncio.gather(*tool_tasks)
        if allow_parallel_tool_calls:
            output_text = "\n\n".join(
                [tool_output.content for tool_output in tool_outputs]
            )
            return AgentChatResponse(response=output_text, sources=tool_outputs)
        else:
            if len(tool_outputs) > 1:
                raise ValueError(
                    "Invalid: got multiple tool calls but allow_parallel_tool_calls is False"
                )
            return AgentChatResponse(
                response=tool_outputs[0].content, sources=tool_outputs
            )
This code handles the case where llama3 does not support function calling by falling back to the default behavior [1].
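As an illustrative aside (not part of the original reply), the is_function_calling_model flag checked above comes from the LLM's metadata. A minimal sketch of constructing LLMMetadata with that flag, assuming the defaults are acceptable for the other fields:

from llama_index.core.llms import LLMMetadata

# Hypothetical metadata for a model with no native function calling API;
# predict_and_call above branches on this flag.
meta = LLMMetadata(
    context_window=8192,
    num_output=256,
    is_function_calling_model=False,
    model_name="llama3",
)

if not meta.is_function_calling_model:
    print(f"{meta.model_name}: taking the non-function-calling fallback path")

A custom LLM subclass would typically return an object like this from its metadata property.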
It did not work.
ImportError: cannot import name 'PromptHelper' from partially initialized module 'llama_index.core.indices.prompt_helper' (most likely due to a circular import) (D:\Anaconda\envs\MiniCPM-V\Lib\site-packages\llama_index\core\indices\prompt_helper.py)
The ImportError related to PromptHelper from the partially initialized module llama_index.core.indices.prompt_helper is due to a circular import. To resolve this, you can modify the import statements to use local imports within the functions or methods that require them. This approach delays the import until the function is called, thus avoiding the circular import issue.
Here is an example of how you can modify the import:

# Instead of this at the top of the file:
# from llama_index.core.indices.prompt_helper import PromptHelper

# Use this inside the function or method where PromptHelper is needed:
def some_function():
    from llama_index.core.indices.prompt_helper import PromptHelper
    # Your code using PromptHelper
By moving the import statement inside the function, you ensure that the module is fully initialized before the import is attempted, thus resolving the circular import issue [1].
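As a small illustration (not from the original reply), the same deferral can be combined with a TYPE_CHECKING import so type hints still resolve; build_helper is a hypothetical function, and the sketch assumes PromptHelper can be constructed with its defaults:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only evaluated by type checkers, never at runtime,
    # so it cannot trigger a circular import.
    from llama_index.core.indices.prompt_helper import PromptHelper


def build_helper() -> "PromptHelper":
    # The runtime import is deferred until the function is actually called.
    from llama_index.core.indices.prompt_helper import PromptHelper

    return PromptHelper()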
Your code does not work. I overrode the function in CustomLLM but it is still not working.
To resolve the ImportError related to PromptHelper from the partially initialized module llama_index.core.indices.prompt_helper, ensure that PromptHelper is correctly imported from the llama_index.core.indices.prompt_helper module. Here is an example of how you can use PromptHelper in your code:
def some_function():
    from llama_index.core.indices.prompt_helper import PromptHelper
    # Your code using PromptHelper
If you are still encountering issues, it might be due to an installation or environment setup problem. Ensure that the llama_index package is correctly installed and accessible in your Python environment.
Additionally, to use llama3 in an agent without function calling support, you can follow the standard procedure to initialize and use the llama3 model, ensuring that you do not use any function calling features that are not supported. Here is a basic example:
from llama_index import llama3
# Initialize the llama3 model
model = llama3.Model()
# Use the model in your agent
response = model.generate("Your prompt here")
print(response)
For more advanced prompt handling, you can refer to the BasePromptTemplate class and its derived classes such as PromptTemplate, ChatPromptTemplate, and SelectorPromptTemplate in the llama_index.core.prompts.base module. These classes provide a structured way to manage and format prompts, which can be useful for complex prompt scenarios. Here is an example of how you might define and use a PromptTemplate:
from llama_index.core.prompts.base import PromptTemplate
# Define a prompt template
template = PromptTemplate(
    template="Hello, {name}!",
    prompt_type="custom",
)
# Format the prompt with variables
formatted_prompt = template.format(name="Alice")
print(formatted_prompt) # Output: Hello, Alice!
By using these classes, you can manage prompt variables, apply functions to variables, and format prompts in a consistent manner [1][2].
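For completeness, here is a minimal sketch of the ChatPromptTemplate mentioned above (an editor's example, not from the original reply; it assumes the llama-index 0.10.x API, where message_templates is the constructor argument and format_messages fills in the variables):

from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.prompts import ChatPromptTemplate

# Build a chat-style prompt from message templates with a {text} variable
chat_template = ChatPromptTemplate(
    message_templates=[
        ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
        ChatMessage(role=MessageRole.USER, content="Summarize the following text: {text}"),
    ]
)

# Fill in the variable to get concrete chat messages
messages = chat_template.format_messages(text="LlamaIndex is a data framework for LLMs.")
print(messages)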
ValueError: Model name llama3 does not support function calling API.
To resolve the ValueError: Model name llama3 does not support function calling API issue and properly use llama3 in an agent without function calling support, you need to ensure that the llama3 model is used in a way that does not involve function calling. This can be done by formatting the prompt without invoking any function mappings.
Here is an example of how you can format a prompt using the PromptTemplate class without function calling:
from llama_index.core.prompts.base import PromptTemplate
# Define your template
template_str = "Your prompt template here with variables like {var1} and {var2}"
# Create a PromptTemplate instance
prompt_template = PromptTemplate(template=template_str)
# Define the variables to be used in the template
variables = {
    "var1": "value1",
    "var2": "value2",
}
# Format the prompt without using function mappings
formatted_prompt = prompt_template.format(**variables)
print(formatted_prompt)
In this example, the PromptTemplate class is used to create a prompt template, and the format method is called with the necessary variables. This ensures that the prompt is formatted without invoking any function mappings, thus avoiding the ValueError related to function calling API support [1].
Additionally, note that the FunctionCallingAgentWorker class requires an LLM with function calling capabilities. It checks whether the LLM supports function calling and raises a ValueError if it does not:
class FunctionCallingAgentWorker(BaseAgentWorker):
    """Function calling agent worker."""

    def __init__(
        self,
        tools: List[BaseTool],
        llm: FunctionCallingLLM,
        prefix_messages: List[ChatMessage],
        verbose: bool = False,
        max_function_calls: int = 5,
        callback_manager: Optional[CallbackManager] = None,
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
        allow_parallel_tool_calls: bool = True,
    ) -> None:
        """Init params."""
        if not llm.metadata.is_function_calling_model:
            raise ValueError(
                f"Model name {llm.model} does not support function calling API. "
            )
        self._llm = llm
        self._verbose = verbose
        self._max_function_calls = max_function_calls
        self.prefix_messages = prefix_messages
        self.callback_manager = callback_manager or self._llm.callback_manager
        self.allow_parallel_tool_calls = allow_parallel_tool_calls

        if len(tools) > 0 and tool_retriever is not None:
            raise ValueError("Cannot specify both tools and tool_retriever")
        elif len(tools) > 0:
            self._get_tools = lambda _: tools
        elif tool_retriever is not None:
            tool_retriever_c = cast(ObjectRetriever[BaseTool], tool_retriever)
            self._get_tools = lambda message: tool_retriever_c.retrieve(message)
        else:
            # no tools
            self._get_tools = lambda _: []
Make sure that the LLM you are using is compatible with the FunctionCallingAgentWorker class, or use an alternative agent class that does not require function calling support [2].
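As an illustration of acting on that check (an editor's sketch, not from the thread; build_agent is a hypothetical helper and the import paths assume llama-index 0.10.x):

from llama_index.core.agent import AgentRunner, FunctionCallingAgentWorker, ReActAgent


def build_agent(llm, tools):
    # Use the native function calling worker only when the LLM advertises support,
    # otherwise fall back to a prompt-driven ReAct agent.
    if llm.metadata.is_function_calling_model:
        worker = FunctionCallingAgentWorker.from_tools(tools, llm=llm)
        return AgentRunner(worker)
    return ReActAgent.from_tools(tools, llm=llm)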
    return func(self, *args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\Anaconda\envs\MiniCPM-V\Lib\site-packages\llama_index\core\agent\function_calling\step.py", line 246, in run_step
    response = self._llm.chat_with_tools(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\Anaconda\envs\MiniCPM-V\Lib\site-packages\llama_index\core\llms\custom.py", line 97, in chat_with_tools
    raise NotImplementedError("chat_with_tools is not supported by default.")
NotImplementedError: chat_with_tools is not supported by default.
The NotImplementedError you're encountering indicates that the chat_with_tools method is not supported by default in the FunctionCallingLLM class. To resolve this, you need to ensure that your implementation does not call methods that are not supported by the llama3 model.
Here is a revised approach to handle the scenario where function calling is not supported:
1. Override the predict_and_call and apredict_and_call methods to avoid calling chat_with_tools and handle the response directly.
2. Ensure that your agent does not use the FunctionCallingAgentWorker class if the model does not support function calling.
Here is an example of how you can modify your CustomLLM class:
from llama_index.core.llms.llm import LLM
from llama_index.core.base.llms.types import ChatMessage, ChatResponse
from llama_index.core.chat_engine.types import AgentChatResponse
from typing import List, Optional, Union, Any
import asyncio


class CustomLLM(LLM):
    def predict_and_call(
        self,
        tools: List["BaseTool"],
        user_msg: Optional[Union[str, ChatMessage]] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        verbose: bool = False,
        allow_parallel_tool_calls: bool = False,
        **kwargs: Any,
    ) -> "AgentChatResponse":
        if not self.metadata.is_function_calling_model:
            # Handle the case where function calling is not supported
            response = self.predict(user_msg, chat_history, **kwargs)
            return AgentChatResponse(response=response)

        # Your existing implementation for function calling models
        # ...

    async def apredict_and_call(
        self,
        tools: List["BaseTool"],
        user_msg: Optional[Union[str, ChatMessage]] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        verbose: bool = False,
        allow_parallel_tool_calls: bool = False,
        **kwargs: Any,
    ) -> "AgentChatResponse":
        if not self.metadata.is_function_calling_model:
            # Handle the case where function calling is not supported
            response = await self.apredict(user_msg, chat_history, **kwargs)
            return AgentChatResponse(response=response)

        # Your existing implementation for function calling models
        # ...
In this example, the predict_and_call and apredict_and_call methods are overridden to handle cases where function calling is not supported by directly calling the predict and apredict methods, respectively.
Additionally, ensure that you are not using the FunctionCallingAgentWorker class if your model does not support function calling. Instead, use a different agent class that does not require function calling capabilities.
By following this approach, you can avoid the NotImplementedError and properly use the llama3 model in your agent without function calling support [1].
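Purely as an editor's illustration of the general technique (not part of the original replies): models without a native function calling API are usually driven by prompting them to emit a tool call as JSON and parsing it application-side. The tool names, prompt, and helpers below are hypothetical, and a hard-coded string stands in for the model output so the sketch runs on its own:

import json
from typing import Any, Callable, Dict

# Hypothetical tool registry: plain callables keyed by name.
TOOLS: Dict[str, Callable[..., Any]] = {
    "multiply": lambda a, b: a * b,
    "add": lambda a, b: a + b,
}


def make_prompt(question: str) -> str:
    # Instruction asking the model to answer with a JSON tool call.
    return (
        "You can call one of these tools: multiply(a, b), add(a, b).\n"
        'Reply with JSON only, e.g. {"tool": "multiply", "args": {"a": 2, "b": 3}}.\n'
        f"Question: {question}"
    )


def run_tool_from_text(llm_output: str) -> Any:
    # Parse the JSON the model produced and dispatch to the matching tool.
    call = json.loads(llm_output)
    return TOOLS[call["tool"]](**call["args"])


prompt = make_prompt("What is 6 times 7?")  # this would be sent to the LLM
# Stand-in for a real model response, so the example runs without an LLM.
fake_llm_output = '{"tool": "multiply", "args": {"a": 6, "b": 7}}'
print(run_tool_from_text(fake_llm_output))  # 42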
@52THANOS dosu is going off the rails. Just use a ReAct agent:
from llama_index.core.agent import ReActAgent
from llama_index.llms.ollama import Ollama

agent = ReActAgent.from_tools(tools, llm=Ollama(model="llama-3", request_timeout=3600.0))
Or, if you want to use the worker:
from llama_index.core.agent import AgentRunner, ReActAgentWorker
worker = ReActAgentWorker.from_tools(tools, llm=Ollama(...))
agent = AgentRunner(worker)
Just note that open-source LLMs are usually kind of bad at being agents, so your experience may vary
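For reference, a slightly fuller sketch of the ReAct route (an editor's example, not from the thread; it assumes llama-index 0.10.x with the llama-index-llms-ollama package installed and a local Ollama server that has the llama3 model pulled; the multiply tool is just an illustration):

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.ollama import Ollama


def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the result."""
    return a * b


# Wrap a plain Python function as a tool the agent can call.
multiply_tool = FunctionTool.from_defaults(fn=multiply)

# ReAct drives tools through prompting, so no function calling API is required.
llm = Ollama(model="llama3", request_timeout=3600.0)
agent = ReActAgent.from_tools([multiply_tool], llm=llm, verbose=True)

print(agent.chat("What is 6 times 7? Use the multiply tool."))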
Question
How can I use llama3 in an agent? It seems it does not support function calling.