Closed long202258 closed 8 months ago
There is a models.py
file that lists all models supported by g4f
and their corresponding providers.
You can replace gpt-3.5-turbo-16k
with any model from this file — pick the one you want by its variable name,
e.g. gpt-4-32k-0613.
Since that model is defined with best_provider = gpt_4.best_provider,
the providers of the gpt_4
model will be used, namely Bing and Liaobots
.
If, however, in addition to the model, it is necessary to specify the provider, then this topic was discussed here https://github.com/xtekky/gpt4free/issues/1539
# GPT-3.5 as well, but every provider here supports long requests/responses.
gpt_35_long = Model(
    name='gpt-3.5-turbo',
    base_provider='openai',
    best_provider=RetryProvider([
        FreeGpt,
        You,
        Chatgpt4Online,
        ChatgptDemoAi,
        ChatgptNext,
        ChatgptDemo,
        Gpt6,
    ]),
)
# Default GPT-3.5 entry; providers are tried in order until one succeeds.
gpt_35_turbo = Model(
    name='gpt-3.5-turbo',
    base_provider='openai',
    best_provider=RetryProvider([
        OpenaiChat,
        GptGo,
        You,
        GptForLove,
        ChatBase,
        Chatgpt4Online,
    ]),
)
# GPT-4 entry; Bing is tried first, then Liaobots.
gpt_4 = Model(
    name='gpt-4',
    base_provider='openai',
    best_provider=RetryProvider([
        Bing,
        Liaobots,
    ]),
)
# GPT-4 Turbo is served by a single provider (no retry chain).
gpt_4_turbo = Model(
    name='gpt-4-turbo',
    base_provider='openai',
    best_provider=Bing,
)
# Meta Llama 2 chat models, hosted via Hugging Face-style model ids.
llama2_7b = Model(
    name="meta-llama/Llama-2-7b-chat-hf",
    base_provider='huggingface',
    best_provider=RetryProvider([Llama2, DeepInfra]),
)

llama2_13b = Model(
    name="meta-llama/Llama-2-13b-chat-hf",
    base_provider='huggingface',
    best_provider=RetryProvider([Llama2, DeepInfra]),
)

llama2_70b = Model(
    name="meta-llama/Llama-2-70b-chat-hf",
    base_provider="huggingface",
    best_provider=RetryProvider([Llama2, DeepInfra, HuggingChat, PerplexityLabs]),
)
# Code Llama instruct variants.
codellama_34b_instruct = Model(
    name="codellama/CodeLlama-34b-Instruct-hf",
    base_provider="huggingface",
    best_provider=RetryProvider([HuggingChat, PerplexityLabs, DeepInfra]),
)

codellama_70b_instruct = Model(
    name="codellama/CodeLlama-70b-Instruct-hf",
    base_provider="huggingface",
    best_provider=DeepInfra,
)
# Mistral AI models.
mixtral_8x7b = Model(
    name="mistralai/Mixtral-8x7B-Instruct-v0.1",
    base_provider="huggingface",
    best_provider=RetryProvider([DeepInfra, HuggingChat, PerplexityLabs]),
)

mistral_7b = Model(
    name="mistralai/Mistral-7B-Instruct-v0.1",
    base_provider="huggingface",
    best_provider=RetryProvider([DeepInfra, HuggingChat, PerplexityLabs]),
)
# Miscellaneous community models.
dolphin_mixtral_8x7b = Model(
    name="cognitivecomputations/dolphin-2.6-mixtral-8x7b",
    base_provider="huggingface",
    best_provider=DeepInfra,
)

lzlv_70b = Model(
    name="lizpreciatior/lzlv_70b_fp16_hf",
    base_provider="huggingface",
    best_provider=DeepInfra,
)

airoboros_70b = Model(
    name="deepinfra/airoboros-70b",
    base_provider="huggingface",
    best_provider=DeepInfra,
)

airoboros_l2_70b = Model(
    name="jondurbin/airoboros-l2-70b-gpt4-1.4.1",
    base_provider="huggingface",
    best_provider=DeepInfra,
)

openchat_35 = Model(
    name="openchat/openchat_3.5",
    base_provider="huggingface",
    best_provider=RetryProvider([DeepInfra, HuggingChat]),
)
# Google Gemini — `bard` and `palm` are kept as aliases of the same model.
gemini = bard = palm = Model(
    name='gemini',
    base_provider='google',
    best_provider=Gemini,
)

claude_v2 = Model(
    name='claude-v2',
    base_provider='anthropic',
    best_provider=RetryProvider([FreeChatgpt, Vercel]),
)
# Context-window / dated variants of GPT-3.5 and GPT-4. Each one reuses the
# provider chain of its base model rather than declaring its own list.
gpt_35_turbo_16k = Model(
    name='gpt-3.5-turbo-16k',
    base_provider='openai',
    best_provider=gpt_35_long.best_provider,
)

gpt_35_turbo_16k_0613 = Model(
    name='gpt-3.5-turbo-16k-0613',
    base_provider='openai',
    best_provider=gpt_35_long.best_provider,
)

gpt_35_turbo_0613 = Model(
    name='gpt-3.5-turbo-0613',
    base_provider='openai',
    best_provider=gpt_35_turbo.best_provider,
)

gpt_4_0613 = Model(
    name='gpt-4-0613',
    base_provider='openai',
    best_provider=gpt_4.best_provider,
)

gpt_4_32k = Model(
    name='gpt-4-32k',
    base_provider='openai',
    best_provider=gpt_4.best_provider,
)

gpt_4_32k_0613 = Model(
    name='gpt-4-32k-0613',
    base_provider='openai',
    best_provider=gpt_4.best_provider,
)
# Gemini Pro with a long retry chain of third-party frontends.
gemini_pro = Model(
    name='gemini-pro',
    base_provider='google',
    best_provider=RetryProvider([
        FreeChatgpt,
        GeminiProChat,
        GeminiProGBeCo,
        GeminiProFreeChatGPT,
        GeminiProTop,
        GeminiProCCFgptCN,
    ]),
)

# Inflection's Pi assistant.
pi = Model(
    name='pi',
    base_provider='inflection',
    best_provider=Pi,
)
有一个文件,其中包含所有支持的模型及其相应的提供程序的列表。您可以从此文件中替换任何模型 - 从变量中选择您想要的模型,例如 并且由于该模型对应变量 ,因此将选择与该模型对应的提供者,即 。但是,如果除了模型之外,还需要指定提供程序,则此处讨论此主题 #1539
`models.py`, `g4f`, `gpt-3.5-turbo-16k`, `name`, `gpt-4-32k-0613`, `best_provider = gpt_4.best_provider`, `gpt_4`, `Bing, Liaobots`
# GPT-3.5 too, but all providers supports long requests and responses gpt_35_long = Model( name = 'gpt-3.5-turbo', base_provider = 'openai', best_provider = RetryProvider([ FreeGpt, You, Chatgpt4Online, ChatgptDemoAi, ChatgptNext, ChatgptDemo, Gpt6, ]) ) # GPT-3.5 / GPT-4 gpt_35_turbo = Model( name = 'gpt-3.5-turbo', base_provider = 'openai', best_provider = RetryProvider([ OpenaiChat, GptGo, You, GptForLove, ChatBase, Chatgpt4Online, ]) ) gpt_4 = Model( name = 'gpt-4', base_provider = 'openai', best_provider = RetryProvider([ Bing, Liaobots, ]) ) gpt_4_turbo = Model( name = 'gpt-4-turbo', base_provider = 'openai', best_provider = Bing ) llama2_7b = Model( name = "meta-llama/Llama-2-7b-chat-hf", base_provider = 'huggingface', best_provider = RetryProvider([Llama2, DeepInfra]) ) llama2_13b = Model( name = "meta-llama/Llama-2-13b-chat-hf", base_provider = 'huggingface', best_provider = RetryProvider([Llama2, DeepInfra]) ) llama2_70b = Model( name = "meta-llama/Llama-2-70b-chat-hf", base_provider = "huggingface", best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat, PerplexityLabs]) ) codellama_34b_instruct = Model( name = "codellama/CodeLlama-34b-Instruct-hf", base_provider = "huggingface", best_provider = RetryProvider([HuggingChat, PerplexityLabs, DeepInfra]) ) codellama_70b_instruct = Model( name = "codellama/CodeLlama-70b-Instruct-hf", base_provider = "huggingface", best_provider = DeepInfra ) # Mistral mixtral_8x7b = Model( name = "mistralai/Mixtral-8x7B-Instruct-v0.1", base_provider = "huggingface", best_provider = RetryProvider([DeepInfra, HuggingChat, PerplexityLabs]) ) mistral_7b = Model( name = "mistralai/Mistral-7B-Instruct-v0.1", base_provider = "huggingface", best_provider = RetryProvider([DeepInfra, HuggingChat, PerplexityLabs]) ) # Misc models dolphin_mixtral_8x7b = Model( name = "cognitivecomputations/dolphin-2.6-mixtral-8x7b", base_provider = "huggingface", best_provider = DeepInfra ) lzlv_70b = Model( name = 
"lizpreciatior/lzlv_70b_fp16_hf", base_provider = "huggingface", best_provider = DeepInfra ) airoboros_70b = Model( name = "deepinfra/airoboros-70b", base_provider = "huggingface", best_provider = DeepInfra ) airoboros_l2_70b = Model( name = "jondurbin/airoboros-l2-70b-gpt4-1.4.1", base_provider = "huggingface", best_provider = DeepInfra ) openchat_35 = Model( name = "openchat/openchat_3.5", base_provider = "huggingface", best_provider = RetryProvider([DeepInfra, HuggingChat]) ) # Bard gemini = bard = palm = Model( name = 'gemini', base_provider = 'google', best_provider = Gemini ) claude_v2 = Model( name = 'claude-v2', base_provider = 'anthropic', best_provider = RetryProvider([FreeChatgpt, Vercel]) ) gpt_35_turbo_16k = Model( name = 'gpt-3.5-turbo-16k', base_provider = 'openai', best_provider = gpt_35_long.best_provider ) gpt_35_turbo_16k_0613 = Model( name = 'gpt-3.5-turbo-16k-0613', base_provider = 'openai', best_provider = gpt_35_long.best_provider ) gpt_35_turbo_0613 = Model( name = 'gpt-3.5-turbo-0613', base_provider = 'openai', best_provider = gpt_35_turbo.best_provider ) gpt_4_0613 = Model( name = 'gpt-4-0613', base_provider = 'openai', best_provider = gpt_4.best_provider ) gpt_4_32k = Model( name = 'gpt-4-32k', base_provider = 'openai', best_provider = gpt_4.best_provider ) gpt_4_32k_0613 = Model( name = 'gpt-4-32k-0613', base_provider = 'openai', best_provider = gpt_4.best_provider ) gemini_pro = Model( name = 'gemini-pro', base_provider = 'google', best_provider = RetryProvider([FreeChatgpt, GeminiProChat, GeminiProGBeCo, GeminiProFreeChatGPT, GeminiProTop, GeminiProCCFgptCN]) ) pi = Model( name = 'pi', base_provider = 'inflection', best_provider = Pi )
I don't understand. Can you give me a specific code example that uses GPT-4?
根本用不了,浪费时间
"""Minimal client example: query a local g4f API server through its
OpenAI-compatible /v1/chat/completions endpoint and print the reply."""
import requests

url = "http://localhost:1337/v1/chat/completions"
body = {
    "model": "gpt-3.5-turbo-16k",
    "stream": False,
    "messages": [
        # Bug fix: a question asked by the human must carry role "user".
        # Role "assistant" marks the model's own previous replies, so some
        # backends ignore or mishandle a conversation that ends with it.
        {"role": "user", "content": "What can you do?"}
    ],
}

# A timeout prevents the script from hanging forever if the local
# server is down or stalls; raise_for_status surfaces HTTP errors
# instead of silently printing nothing.
response = requests.post(url, json=body, timeout=120)
response.raise_for_status()

for choice in response.json().get('choices', []):
    print(choice.get('message', {}).get('content', ''))
使用这个示例代码是没问题的,请问"model": "gpt-3.5-turbo-16k", 这个可以用其他的GPT4的模型吗?还有生成字数怎么限制 感谢好心人指点,祝2024财源广进,步步高升