def build_session_query(query, user_id):
    """Build the chat-completion message list for a user, including history.

    Example result:
        [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who won the world series in 2020?"},
            {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
            {"role": "user", "content": "Where was it played?"}
        ]

    :param query: the new user query content
    :param user_id: id of the user the query comes from
    :return: the conversation message list with the new query appended
    """
    session = user_session.get(user_id, [])
    if not session:
        # First message of this session: seed the history with the system prompt.
        system_prompt = model_conf(const.OPEN_AI).get("character_desc", "")
        session.append({'role': 'system', 'content': system_prompt})
        user_session[user_id] = session
        # Added by Jean 2023/07/08
        # Load user-defined functions into the global namespace so GPT
        # function calling can dispatch to them later.
        functions = model_conf(const.OPEN_AI).get("functions", None)
        if functions:
            log.info("[CHATGPT] functions={}", functions)
            try:
                # SECURITY: exec() runs arbitrary code from the configured
                # script path — only point the "functions" config at trusted
                # files. `with` ensures the file handle is closed (the
                # original `open(...).read()` leaked it).
                with open(functions) as script:
                    exec(script.read(), globals())
            except Exception as e:
                log.exception(e)
    session.append({'role': 'user', 'content': query})
    return session
# For access to OpenAI API
import openai
import json
import os
import sys
# For gRPC call to Melbourne outliers as a trusted third party data supplier
# Need to append the path as they're in different directory
sys.path.append( '/home/jean/scripts' )
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
# Metadata plugin that attaches the server's authentication header to
# every gRPC call made over the channel.
class GrpcAuth(grpc.AuthMetadataPlugin):
    """Adds an auth key as call metadata on each gRPC request."""

    def __init__(self, key):
        self._key = key

    def __call__(self, context, callback):
        # 'rpc-auth-header' is the authentication header name defined by
        # the server; the callback delivers it as the call metadata.
        metadata = (('rpc-auth-header', self._key),)
        callback(metadata, None)
# Build a TLS-secured, authenticated gRPC channel to the Melbourne
# house-pricing outlier service and create the client stub.
# NOTE(review): the CA certificate is read from a hard-coded absolute
# path — confirm it exists on the deployment host.
with open('/home/jean/scripts/ca.crt', 'rb') as f:
    creds = grpc.ssl_channel_credentials(f.read())
# Composite channel credentials: SSL plus the per-call access key.
# The host name must match the server certificate's subject.
channel = grpc.secure_channel(
    'jeanye.cn:50051',
    grpc.composite_channel_credentials(
        creds,
        grpc.metadata_call_credentials(
            GrpcAuth('right_access_key')
            # A wrong key will make the calls fail.
            # GrpcAuth('wrong_access_key')
        )
    )
)
stub = helloworld_pb2_grpc.GreeterStub(channel)
# Manual smoke test for the gRPC call (left commented out):
# print("Outlier client received: \n")
# for outlier in stub.GetOutliers(helloworld_pb2.MelbourneRequest(algo='cat', threshold=65)):
#     print(outlier)
# Fetch Melbourne house-pricing outliers via gRPC with a given algorithm
# and threshold.
def get_Melbourne_Outliers(algo='cat', threshold=65):
    """Get the outliers of house pricing in Melbourne city.

    :param algo: name of the outlier-detection algorithm the server runs
    :param threshold: percentage threshold for filtering (65 means 65%)
    :return: JSON string — list of {"row", "origin", "predict", "se"} dicts
    """
    request = helloworld_pb2.MelbourneRequest(algo=algo, threshold=threshold)
    # Convert each protobuf message into a plain dict so it can be
    # serialized into the JSON string handed back to GPT.
    outliers = [
        {
            "row": outlier.row,
            "origin": outlier.origin,
            "predict": outlier.predict,
            "se": outlier.se,
        }
        for outlier in stub.GetOutliers(request)
    ]
    return json.dumps(outliers)
# JSON schema describing the local function for GPT function calling.
# Both parameters are optional: the caller fills in defaults (algo="cat",
# threshold=65) when GPT omits them, so they must NOT be listed in
# "required" — listing them there contradicted the defaults and forced
# the model to always supply both.
functions = [
    {
        "name": "get_Melbourne_Outliers",
        "description": "Get the outliers of house pricing in Melbourne city",
        "parameters": {
            "type": "object",
            "properties": {
                "algo": {
                    "type": "string",
                    "description": "The algorithm name to find outliers, default is cat"
                },
                "threshold": {
                    "type": "integer",
                    "description": "The threshold of percentage to filter outliers, default is 65, means 65%"
                },
            },
            # No required parameters — defaults are applied by the caller.
            "required": [],
        },
    }
]
# Dispatch table mapping the function name GPT returns to the local
# callable. Only one function in this example, but you can have multiple.
available_functions = {
    "get_Melbourne_Outliers": get_Melbourne_Outliers,
}
# One round trip to the OpenAI chat-completion API with function calling.
def run_conversation(messages, functions):
    """Send the conversation plus function schemas to GPT and return the
    raw API response."""
    request_kwargs = {
        # Model comes from config; fall back to gpt-3.5-turbo when unset.
        # (Previously pinned to gpt-3.5-turbo-0613 / gpt-4-0613.)
        "model": model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo",
        "messages": messages,
        "max_tokens": model_conf(const.OPEN_AI).get("conversation_max_tokens", 1024),
        "temperature": model_conf(const.OPEN_AI).get("temperature", 0.75),
        "functions": functions,
        "function_call": "auto",  # "auto" is the default; explicit for clarity
    }
    return openai.ChatCompletion.create(**request_kwargs)
def reply_text_function(messages):
    """Answer a conversation, letting GPT call registered local functions.

    :param messages: list of chat message dicts; mutated in place — the
        assistant reply (and any function-call exchange) is appended
    :return: (messages, used_token) — the extended history and the total
        token count reported for the final completion
    """
    # Step 1: send the conversation and available function schemas to GPT.
    response = run_conversation(messages, functions)
    response_message = response["choices"][0]["message"]
    # Step 2: check whether GPT wants to call a function.
    if response_message.get("function_call"):
        # Step 3: call the requested function locally.
        # NOTE: the arguments JSON from the model may not always be valid;
        # json.loads will raise in that case.
        function_name = response_message["function_call"]["name"]
        function_to_call = available_functions[function_name]  # fixed typo'd name
        function_args = json.loads(response_message["function_call"]["arguments"])
        # The model may omit (or null out) optional args — apply defaults.
        if function_args.get("algo") is None:
            function_args["algo"] = "cat"
        if function_args.get("threshold") is None:
            function_args["threshold"] = 65
        function_response = function_to_call(
            algo=function_args["algo"],
            threshold=function_args["threshold"],
        )
        # Step 4: feed the assistant's call and the function result back to
        # GPT for the final, natural-language answer.
        messages.append(response_message)  # assistant's function-call turn
        messages.append(
            {
                "role": "function",
                "name": function_name,
                "content": function_response,
            }
        )  # function response turn
        second_response = run_conversation(messages, functions)
    else:
        # No function call requested; the first response is final.
        second_response = response
    # Keep the full conversation history for future turns.
    messages.append(second_response['choices'][0]['message'])
    # Track the total token usage of the whole dialogue too.
    used_token = second_response['usage']['total_tokens']
    return messages, used_token
# Will be commented out in the production environment.
# # Test the conversation function with function call
# messages = [{"role": "user", "content": "What're the top 3 outliers of house pricing in Melbourne city?"}]
# response, used_token = reply_text_function(messages)
# print(response)
运行效果:
![GPTFunctionCall-3](https://github.com/zhayujie/bot-on-anything/assets/54383348/d1000552-4979-4284-8b4f-2e875db4449b)
具体可以参阅OpenAI的文档函数调用功能及其它更新,以及我的这篇介绍文章ChatGPT函数调用功能测试。
1、修改了config.json,增加了一个参数functions,指出用户自定义函数的入口脚本。
2、修改了 /model/openai/chatgpt_model.py,检测该配置,如果没有该配置项,则按原有的程序执行;如果有则做两处操作: 1)在
Session.build_session_query(query, user_id)
中加载用户自定义函数脚本:2)在
reply_text(self, query, user_id, retry_count=0)
中判断是否有配置用户自定义函数,用if else分开处理,没有的话就照旧。3、在用户自定义函数脚本中实现对用户自定义函数的调用,不同的自定义函数可以写不同的脚本,实现不同的功能,只需实现
reply_text(self, query, user_id, retry_count=0)
函数中调用的response, used_token = reply_text_function(query.copy())
接口。具体可以参阅OpenAI的文档函数调用功能及其它更新,以及我的这篇介绍文章ChatGPT函数调用功能测试。以下是本例的实现: