新增对话接口

This commit is contained in:
zcr
2026-03-12 13:13:52 +08:00
parent 7042d428fa
commit a6393df0e3
35 changed files with 843 additions and 1163 deletions

View File

View File

@@ -1,6 +1,6 @@
from langchain_core.prompts import PromptTemplate
from src.server.deep_agent.agents.init_llm import title_llm
from src.server.deep_agent.init_llm import title_llm
def conversation_title(full_conversation):

View File

@@ -0,0 +1,75 @@
import json
from typing import List
from langchain_core.messages import (
HumanMessage,
AIMessage,
ToolMessage,
)
from src.server.deep_agent.init_llm import suggested_llm
def format_messages(messages, max_messages: int = 6) -> str:
    """Render the trailing LangGraph messages as plain prompt text.

    Keeps only the last ``max_messages`` entries and emits one line per
    message: ``User: ...`` for human turns, ``Assistant: ...`` for
    non-empty AI turns, and ``Tool Result: ...`` with the tool output
    truncated to 200 characters so the prompt stays compact.
    """
    rendered: List[str] = []
    for msg in messages[-max_messages:]:
        if isinstance(msg, HumanMessage):
            rendered.append(f"User: {msg.content}")
        elif isinstance(msg, AIMessage):
            # Skip assistant turns with empty content (e.g. pure tool calls).
            if msg.content:
                rendered.append(f"Assistant: {msg.content}")
        elif isinstance(msg, ToolMessage):
            # Tool output can be huge; keep only a short preview.
            preview = str(msg.content)
            if len(preview) > 200:
                preview = preview[:200] + "..."
            rendered.append(f"Tool Result: {preview}")
    return "\n".join(rendered)
async def generate_suggested_questions(
    agent,
    thread_id: str,
    max_messages: int = 6,
) -> List[str]:
    """
    根据当前对话生成3条用户可能继续提问的问题

    Args:
        agent: LangGraph agent exposing ``get_state`` for checkpointed state.
        thread_id: Conversation thread identifier used to look up the state.
        max_messages: How many trailing messages to include in the prompt.

    Returns:
        Up to three suggested follow-up questions (strings). Returns an
        empty list when there is no conversation yet or the LLM output
        cannot be parsed as a JSON array.
    """
    # 获取当前对话state
    state = agent.get_state(
        {"configurable": {"thread_id": thread_id}}
    )
    messages = state.values.get("messages", [])
    if not messages:
        return []
    conversation = format_messages(messages, max_messages)
    prompt = f"""
以下是用户与AI助手的对话
{conversation}
请根据对话内容生成3条用户可能继续提出的问题。
要求:
- 每条一句话
- 语言自然
- 不要解释
- 返回JSON数组
- 尽量与家具设计相关
示例:
["问题1", "问题2", "问题3"]
"""
    result = await suggested_llm.ainvoke(prompt)
    text = result.content.strip()
    # LLMs frequently wrap JSON answers in markdown code fences
    # (```json ... ```); strip them so json.loads can succeed.
    if text.startswith("```"):
        text = text.strip("`").strip()
        if text.lower().startswith("json"):
            text = text[4:].lstrip()
    try:
        questions = json.loads(text)
    except (json.JSONDecodeError, TypeError):
        # Unparseable model output: degrade gracefully, as before.
        return []
    if isinstance(questions, list):
        # Guard against non-string items the model might emit.
        return [q for q in questions if isinstance(q, str)][:3]
    return []

View File

@@ -2,27 +2,11 @@ import os
import json
import re
from typing import Optional, List, Dict
from langchain_qwq import ChatQwen
from langgraph.config import get_stream_writer
from pydantic import BaseModel, Field
from langchain_core.tools import tool
from langchain_core.messages import SystemMessage, HumanMessage
from src.core.config import settings
# =========================
# LLM 初始化
# =========================
llm = ChatQwen(
enable_thinking=False,
model="qwen3.5-flash",
temperature=0.2,
max_tokens=3_000,
timeout=None,
max_retries=2,
api_key=settings.QWEN_API_KEY)
from src.server.deep_agent.init_llm import repoer_llm
# =========================
@@ -109,7 +93,7 @@ async def report_generator(
full_report = ""
try:
report_llm = llm.with_config(
report_llm = repoer_llm.with_config(
callbacks=[]
)
async for chunk in report_llm.astream(