Good idea. If things go smoothly I can have it ready by tomorrow.
Bro, I wrote one in Python and ran into a problem, not sure if you hit it too. I'm not very familiar with JS, so I can't follow your code. When sending messages, a single message works fine, but anything more than one message comes back as a 400 error. For example, this fails:
{'model': 'gpt-4o-mini', 'messages': [{'role': 'system', 'content': 'hahaha'}, {'role': 'user', 'content': '在吗'}]}
while this works:
{'model': 'gpt-4o-mini', 'messages': [{'role': 'user', 'content': '在吗'}]}
So I can only send one message at a time.
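I don't know the root cause, but from the behaviour above it looks like the endpoint may only tolerate a single message per request. If that guess holds, one possible workaround (just a sketch; flatten_messages is a hypothetical helper, not part of either project) is to collapse the whole history into one user message before sending:

def flatten_messages(messages):
    # Hypothetical workaround: collapse the system/user/assistant history into a
    # single user message, assuming the API rejects multi-message payloads.
    text = "\n".join(f"{m['role']}: {m['content']}" for m in messages)
    return [{"role": "user", "content": text}]

# e.g. pass flatten_messages(messages) into the chat request instead of the raw list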
import requests
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import StreamingResponse
import httpx
import json
import asyncio

app = FastAPI()

STATUS_URL = "https://duckduckgo.com/duckchat/v1/status"
CHAT_URL = "https://duckduckgo.com/duckchat/v1/chat"


def get_vqd():
    headers = {
        "accept": "*/*",
        "accept-language": "en-US,en;q=0.9",
        "sec-ch-ua": "\"Not_A Brand\";v=\"99\", \"Google Chrome\";v=\"109\", \"Chromium\";v=\"109\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\"",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
        "x-vqd-accept": "1"
    }
    response = requests.get(STATUS_URL, headers=headers)
    return response.headers.get("x-vqd-4")


async def fetch_duckduckgo_response(messages, model="gpt-4o-mini", retries=3):
    data = {
        "model": model,
        "messages": messages
    }
    for attempt in range(retries):
        vqd = get_vqd()  # Get a fresh VQD for each attempt
        headers = {
            'accept': 'text/event-stream',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': 'https://duckduckgo.com',
            'referer': 'https://duckduckgo.com/',
            'sec-ch-ua': '"Not_A Brand";v="99", "Google Chrome";v="109", "Chromium";v="109"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
            'x-vqd-4': vqd
        }
        try:
            async with httpx.AsyncClient(timeout=30.0) as client:
                async with client.stream('POST', CHAT_URL, headers=headers, json=data) as response:
                    if response.status_code == 200:
                        async for line in response.aiter_lines():
                            if line.startswith("data: "):
                                yield line[6:]  # Remove "data: " prefix
                        return  # Successful response, exit the function
                    else:
                        print(f"Error response: {response.status_code}")
                        # A streamed response must be read before .text is available
                        await response.aread()
                        print(response.text)
        except Exception as e:
            print(f"Error occurred: {str(e)}")
        if attempt < retries - 1:
            wait_time = 2 ** attempt  # Exponential backoff
            print(f"Retrying in {wait_time} seconds...")
            await asyncio.sleep(wait_time)
    print("All retries failed")
    yield json.dumps({"error": "Failed to get response after multiple attempts"})
def parse_duckduckgo_response(response):
    try:
        data = json.loads(response)
        return data.get("message", "")
    except json.JSONDecodeError:
        return ""


@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    try:
        data = await request.json()
        model = data.get("model")
        messages = data.get("messages", [])
        for message in messages:
            print("User message:", message)

        async def generate():
            full_response = ""
            print(f"{model}:", end='', flush=True)
            async for chunk in fetch_duckduckgo_response(messages, model=model):
                content = parse_duckduckgo_response(chunk)
                if content:
                    full_response += content
                    print(f"{content}", end='', flush=True)
                    yield f"data: {json.dumps({'choices': [{'delta': {'content': content}}]})}\n\n"
            yield f"data: {json.dumps({'choices': [{'delta': {'content': ''}, 'finish_reason': 'stop'}]})}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(generate(), media_type="text/event-stream")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=5002)
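Once that script is running, one way to sanity-check it is with a small OpenAI-style streaming client. This is only a sketch and assumes the server is listening locally on port 5002; it is not part of the script above.

# Hypothetical client for the server above, assuming it runs on localhost:5002
import json
import requests

payload = {
    "model": "gpt-4o-mini",
    "messages": [{"role": "user", "content": "hello"}],
}
with requests.post(
    "http://localhost:5002/v1/chat/completions", json=payload, stream=True
) as resp:
    for raw in resp.iter_lines(decode_unicode=True):
        # The server emits SSE lines: "data: {...}" chunks followed by "data: [DONE]"
        if not raw or not raw.startswith("data: ") or raw == "data: [DONE]":
            continue
        delta = json.loads(raw[6:])["choices"][0]["delta"]
        print(delta.get("content", ""), end="", flush=True)
print()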
chain = [{ "role": "user", content: "在吗" }]
vqd = get_vqd_from_status_url()
response = send(chain, vqd)
vqd = response["x-vqd-4"] # Duckduckgo AI Chat 会返回新的 vqd,从 headers 中获取,如果要继续对话需要用这个 vqd 发送消息
# 此时 chain 是 [{ ... , content: "在吗" }, { "role": "assistant", content: ... }]
# 继续对话
chain.append({ "role": "user", content: ... })
response = send(chain, vqd)
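For what it's worth, a minimal runnable sketch of that vqd-chaining flow in Python, based on the snippet above and the earlier script. get_vqd_from_status_url and send follow the pseudocode names, collect_reply is an illustrative helper I made up, and the headers here are stripped down; real requests may need the full browser-like header set shown in the script above.

import json
import requests

STATUS_URL = "https://duckduckgo.com/duckchat/v1/status"
CHAT_URL = "https://duckduckgo.com/duckchat/v1/chat"

def get_vqd_from_status_url():
    # Fetch an initial vqd token from the status endpoint (x-vqd-4 response header)
    r = requests.get(STATUS_URL, headers={"x-vqd-accept": "1"})
    return r.headers.get("x-vqd-4")

def send(chain, vqd, model="gpt-4o-mini"):
    # POST the whole message chain with the current vqd
    return requests.post(
        CHAT_URL,
        headers={"x-vqd-4": vqd, "content-type": "application/json"},
        json={"model": model, "messages": chain},
    )

def collect_reply(response):
    # Illustrative helper: join the streamed "message" chunks into one string
    reply = ""
    for line in response.text.splitlines():
        if line.startswith("data: ") and line != "data: [DONE]":
            try:
                reply += json.loads(line[6:]).get("message", "")
            except json.JSONDecodeError:
                pass
    return reply

chain = [{"role": "user", "content": "在吗"}]
vqd = get_vqd_from_status_url()
response = send(chain, vqd)
chain.append({"role": "assistant", "content": collect_reply(response)})
vqd = response.headers.get("x-vqd-4")  # reuse the new vqd for the next turn

chain.append({"role": "user", "content": "<next user message>"})  # placeholder
response = send(chain, vqd)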
This project is great. Could it be packaged as a Docker service, OpenAI-compatible like that?