This diff migrates the DeepSeek chat helpers from the synchronous OpenAI client to AsyncOpenAI (every call site becomes awaited and the streaming loops become async for), and doubles the Redis cache TTL in query_flow from 60 s to 120 s.

@@ -1,4 +1,4 @@
-from openai import OpenAI
+from openai import AsyncOpenAI
 from dotenv import load_dotenv
 from typing import AsyncGenerator
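
Why the client swap matters: every function in this file is an async def, and the synchronous client's create() call blocks the whole event loop for the duration of the HTTP request. A minimal self-contained sketch (key and endpoint are placeholders; the real code reads them from settings):

import asyncio
from openai import AsyncOpenAI

async_client = AsyncOpenAI(api_key="sk-...", base_url="https://api.deepseek.com")

async def main() -> None:
    # The await suspends this coroutine instead of blocking the event loop,
    # which is the point of the OpenAI -> AsyncOpenAI migration in this diff.
    response = await async_client.chat.completions.create(
        model="deepseek-chat",
        messages=[{"role": "user", "content": "你好"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())
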
@@ -15,7 +15,7 @@ from datetime import datetime
 
 load_dotenv()
 
-client = OpenAI(api_key=settings.DEEPSEEK_API_KEY, base_url=settings.DEEPSEEK_API_URL)
+async_client = AsyncOpenAI(api_key=settings.DEEPSEEK_API_KEY, base_url=settings.DEEPSEEK_API_URL)
 
 # Classification prompt
 CATEGORY_PROMPT = """你是一个分类助手,请根据用户的问题判断属于以下哪一类:
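
The settings object itself is not shown in this diff; presumably it is an env-backed config fed by the .env file that load_dotenv() reads. A minimal sketch of what it might look like (the shape is an assumption, only the attribute names come from the diff):

import os
from dotenv import load_dotenv

load_dotenv()  # pull DEEPSEEK_* values from a local .env file

class Settings:
    # Attribute names match the diff; the actual settings class is not shown.
    DEEPSEEK_API_KEY: str = os.getenv("DEEPSEEK_API_KEY", "")
    DEEPSEEK_API_URL: str = os.getenv("DEEPSEEK_API_URL", "https://api.deepseek.com")

settings = Settings()
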
@@ -169,7 +169,7 @@ ANSWER_PROMPT = """
 async def classify(msg: str) -> str:
     print(f"Starting classification for message: {msg}")
     try:
-        response = client.chat.completions.create(
+        response = await async_client.chat.completions.create(
             model="deepseek-chat",
             messages=[{"role": "system", "content": CATEGORY_PROMPT}, {"role": "user", "content": msg}]
         )
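
For the non-streaming calls like classify, the only change is the await plus the async client; reading the reply is unchanged. A standalone sketch of the surrounding pattern (the diff does not show the except branch, so the fallback here is assumed):

from openai import AsyncOpenAI

async_client = AsyncOpenAI(api_key="sk-...", base_url="https://api.deepseek.com")
CATEGORY_PROMPT = "你是一个分类助手……"  # the real prompt is defined earlier in the file

async def classify_sketch(msg: str) -> str:
    try:
        response = await async_client.chat.completions.create(
            model="deepseek-chat",
            messages=[
                {"role": "system", "content": CATEGORY_PROMPT},
                {"role": "user", "content": msg},
            ],
        )
        # Response access is identical to the sync client.
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"classification failed: {e}")
        return "unknown"  # assumed fallback label
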
@@ -206,22 +206,19 @@ async def ai_chat_stream(inp: ChatIn, conversation_history: list) -> AsyncGenerator
     print(f"Starting AI chat stream with input: {inp.message}")
     full_response = ""
     try:
-        response = client.chat.completions.create(
+        response = await async_client.chat.completions.create(
             model="deepseek-chat",
             messages=messages,
             stream=True
         )
-        # Handle the synchronous stream asynchronously
         import asyncio
-        for chunk in response:
+        async for chunk in response:
             delta = chunk.choices[0].delta
             if hasattr(delta, "content") and delta.content:
                 full_response += delta.content
                 yield delta.content
-                import sys
-                sys.stdout.flush()
                 # Briefly yield control to avoid blocking
                 await asyncio.sleep(0)
 
         # Generate recommended questions
         if full_response:
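
This hunk is the heart of the change: with the async client, create(..., stream=True) must itself be awaited, and the result is an async iterator consumed with async for, so chunks no longer starve the event loop. A self-contained sketch of the pattern (model name and delta handling as in the diff, everything else illustrative):

import asyncio
from typing import AsyncGenerator
from openai import AsyncOpenAI

async_client = AsyncOpenAI(api_key="sk-...", base_url="https://api.deepseek.com")

async def stream_chat(messages: list) -> AsyncGenerator[str, None]:
    # Awaiting create() returns an async stream; iterate it with async for.
    response = await async_client.chat.completions.create(
        model="deepseek-chat",
        messages=messages,
        stream=True,
    )
    async for chunk in response:
        delta = chunk.choices[0].delta
        if delta.content:  # content can be None on some chunks
            yield delta.content
            await asyncio.sleep(0)  # optional explicit yield point, as in the diff

async def main() -> None:
    async for piece in stream_chat([{"role": "user", "content": "你好"}]):
        print(piece, end="", flush=True)

asyncio.run(main())
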
@@ -255,22 +252,19 @@ async def gen_markdown_stream(msg: str, data: str, language: str, conversation_history: list) -> AsyncGenerator
     print(f"Starting markdown stream with message: {msg} and data: {data}")
     full_response = ""
     try:
-        response = client.chat.completions.create(
+        response = await async_client.chat.completions.create(
             model="deepseek-chat",
             messages=messages,
             stream=True
         )
-        # Handle the synchronous stream asynchronously
         import asyncio
-        for chunk in response:
+        async for chunk in response:
             delta = chunk.choices[0].delta
             if hasattr(delta, "content") and delta.content:
                 full_response += delta.content
                 yield delta.content
-                import sys
-                sys.stdout.flush()
                 # Briefly yield control to avoid blocking
                 await asyncio.sleep(0)
         # Generate recommended questions
         if full_response:
             recommended_questions = await generate_recommended_questions(msg, full_response)
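
Downstream, these async generators can be handed straight to a streaming HTTP response. The framework is not named in the diff, but the Request parameter elsewhere suggests FastAPI/Starlette; a hypothetical wiring:

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

app = FastAPI()

class ChatIn(BaseModel):
    # Assumed shape; the diff only shows inp.message being read.
    message: str

@app.post("/chat")
async def chat(inp: ChatIn):
    # ai_chat_stream is the async generator patched in this diff;
    # conversation-history handling is elided for brevity.
    return StreamingResponse(
        ai_chat_stream(inp, conversation_history=[]),
        media_type="text/event-stream",
    )
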
@@ -299,7 +293,7 @@ async def extract_spot(msg) -> str:
 
     print(f"Starting spot extraction for message: {msg_content}")
     try:
-        response = client.chat.completions.create(
+        response = await async_client.chat.completions.create(
             model="deepseek-chat",
             messages=[{"role": "system", "content": EXTRACT_PROMPT}, {"role": "user", "content": msg_content}]
         )
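
Four hunks in this diff make the same one-line change to a non-streaming call. A reviewer might suggest funneling them through one small helper so the client can only be used one way; purely a suggestion, not part of the diff:

async def deepseek_chat(messages: list, model: str = "deepseek-chat") -> str:
    # Single choke point for non-streaming completions; callers like
    # classify() and extract_spot() would pass their own system prompts.
    response = await async_client.chat.completions.create(
        model=model,
        messages=messages,
    )
    return response.choices[0].message.content
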
@@ -406,7 +400,7 @@ async def query_flow(request: Request, spot: str) -> str:
         result += f"注:全部内容输出完以后,最后输出一段固定内容,内容为:<hr class=\"keliu\" data-id=\"{scenic_id}\"/>;"
 
     try:
-        await redis_client.setex(cache_key, 60, result)
+        await redis_client.setex(cache_key, 120, result)
     except Exception as e:
         print(f"[Redis] 写缓存失败: {e}")
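
The only non-client change in the diff: the per-spot flow result now stays cached for 120 s instead of 60 s. redis_client itself is not shown; presumably an async client along these lines (construction assumed, only the awaited setex() appears in the diff):

import redis.asyncio as redis

redis_client = redis.Redis(host="localhost", port=6379, decode_responses=True)

async def cache_result(cache_key: str, result: str) -> None:
    # SETEX = SET with an expiry; the diff doubles the TTL from 60 s to 120 s.
    await redis_client.setex(cache_key, 120, result)
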
@@ -426,22 +420,19 @@ async def handle_quick_question(inp: ChatIn, question_content: str) -> AsyncGenerator
 
     full_response = ""
     try:
-        response = client.chat.completions.create(
+        response = await async_client.chat.completions.create(
             model="deepseek-chat",
             messages=messages,
             stream=True
         )
-        # Handle the synchronous stream asynchronously
         import asyncio
-        for chunk in response:
+        async for chunk in response:
             delta = chunk.choices[0].delta
             if hasattr(delta, "content") and delta.content:
                 full_response += delta.content
                 yield delta.content
-                import sys
-                sys.stdout.flush()
                 # Briefly yield control to avoid blocking
                 await asyncio.sleep(0)
         # Generate recommended questions
         if full_response:
             recommended_questions = await generate_recommended_questions(inp.message, full_response)
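
Worth noting across all three streaming hunks: the sys.stdout.flush() calls go away (flushing the process's stdout never affected the HTTP response), while await asyncio.sleep(0) stays as an explicit scheduling point. A tiny demonstration of what sleep(0) does:

import asyncio

async def chatty(name: str) -> None:
    for i in range(3):
        print(f"{name}: {i}")
        # sleep(0) suspends just long enough for the event loop to run
        # other ready tasks, which is why the diff keeps it between chunks.
        await asyncio.sleep(0)

async def main() -> None:
    await asyncio.gather(chatty("a"), chatty("b"))  # output interleaves

asyncio.run(main())
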
@@ -478,7 +469,7 @@ async def generate_recommended_questions(user_msg: str, ai_response: str) -> list
     """
 
     try:
-        response = client.chat.completions.create(
+        response = await async_client.chat.completions.create(
             model="deepseek-chat",
             messages=[
                 {"role": "system", "content": "你是一个问题推荐助手,擅长基于对话内容生成相关的延伸问题"},
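
Once every helper is a real coroutine, independent model calls can also run concurrently instead of back-to-back. For example, hypothetically combining two helpers from this file (not in the diff, but now possible):

import asyncio

async def preprocess(msg: str):
    # classify() and extract_spot() are the coroutines patched above;
    # gather() runs both requests concurrently on one event loop.
    category, spot = await asyncio.gather(classify(msg), extract_spot(msg))
    return category, spot
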
@@ -865,7 +856,7 @@ async def extract_multi_scenic(msg) -> list:
 
     print(f"Starting multi scenic extraction for message: {msg_content}")
     try:
-        response = client.chat.completions.create(
+        response = await async_client.chat.completions.create(
             model="deepseek-chat",
             messages=[{"role": "system", "content": MULTI_SCENIC_EXTRACT_PROMPT}, {"role": "user", "content": msg_content}]
         )
@@ -902,7 +893,7 @@ async def query_multi_scenic_flow(request: Request, scenics: list,msg: str) -> str
         comparison_prompt += f"**{result['scenic']}**: {result['data']}\n\n"
 
     try:
-        response = client.chat.completions.create(
+        response = await async_client.chat.completions.create(
             model="deepseek-chat",
             messages=[{"role": "user", "content": comparison_prompt}]
         )
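
The multi-scenic comparison builds one prompt from several per-scenic results; with the async client those per-scenic queries are natural candidates for asyncio.gather as well. A hypothetical sketch (the diff does not show how the results list is currently built, and query_one_scenic is a stand-in name for whatever per-scenic fetch the real code performs):

import asyncio

async def fetch_all_scenic_data(scenics: list) -> list:
    datas = await asyncio.gather(*(query_one_scenic(s) for s in scenics))
    return [{"scenic": s, "data": d} for s, d in zip(scenics, datas)]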