添加额外请求体字段和推理内容请求选项到预设配置以支持deepseek-v4

This commit is contained in:
FuQuan233 2026-04-24 14:36:05 +08:00
parent c2a53175d5
commit d7d6c14b3f
3 changed files with 43 additions and 8 deletions

View file

@ -157,6 +157,8 @@ _✨ 支持多API预设、MCP协议、内置工具、联网搜索、视觉模型
| proxy | 否 | 无 | 请求API时使用的HTTP代理 |
| support_mcp | 否 | False | 是否支持MCP协议 |
| support_image | 否 | False | 是否支持图片输入 |
| extra_body | 否 | {} | 额外的请求体字段,用于兼容不同API的特殊参数 |
| request_with_reasoning_content | 否 | false | 请求中是否包含推理过程内容,部分模型要求进行了工具调用后必须完整回传推理过程给API |
LLMCHAT__MCP_SERVERS同样为一个dict,key为服务器名称,value为服务器配置,配置的格式基本兼容 Claude.app 的配置格式,具体支持如下
@ -184,6 +186,18 @@ LLMCHAT__MCP_SERVERS同样为一个dictkey为服务器名称value配置的
LLMCHAT__PRIVATE_CHAT_PRESET="deepseek-v1"
LLMCHAT__API_PRESETS='
[
{
"name": "deepseek-v4-pro",
"api_key": "sk-your-api-key",
"model_name": "deepseek-v4-pro",
"api_base": "https://api.deepseek.com",
"support_mcp": true,
"support_image": false,
"extra_body": {
"thinking": {"type": "enabled"}
},
"request_with_reasoning_content": true
},
{
"name": "aliyun-deepseek-v3",
"api_key": "sk-your-api-key",

View file

@ -443,6 +443,7 @@ async def process_messages(context_id: int, is_group: bool = True):
"max_tokens": preset.max_tokens,
"temperature": preset.temperature,
"timeout": 60,
"extra_body": preset.extra_body,
}
if preset.support_mcp:
@ -461,10 +462,14 @@ async def process_messages(context_id: int, is_group: bool = True):
# 处理响应并处理工具调用
while preset.support_mcp and message and message.tool_calls:
new_messages.append({
llm_reply: ChatCompletionMessageParam = {
"role": "assistant",
"content": message.content,
"tool_calls": [tool_call.model_dump() for tool_call in message.tool_calls]
})
}
if preset.request_with_reasoning_content:
llm_reply["reasoning_content"] = message.reasoning_content# pyright: ignore[reportGeneralTypeIssues]
# 发送LLM调用工具时的回复一般没有
if message.content:
@ -531,6 +536,9 @@ async def process_messages(context_id: int, is_group: bool = True):
# openai的sdk里的assistant消息暂时没有images字段需要单独处理
llm_reply["images"] = reply_images # pyright: ignore[reportGeneralTypeIssues]
if preset.request_with_reasoning_content:
llm_reply["reasoning_content"] = reasoning_content# pyright: ignore[reportGeneralTypeIssues]
new_messages.append(llm_reply)
# 请求成功后再保存历史记录保证user和assistant穿插防止R1模型报错
@ -540,12 +548,20 @@ async def process_messages(context_id: int, is_group: bool = True):
if state.output_reasoning_content and reasoning_content:
try:
bot = get_bot(str(event.self_id))
if is_group:
await bot.send_group_forward_msg(
group_id=group_id,
messages=build_reasoning_forward_nodes(
bot.self_id, reasoning_content
),
)
else:
await bot.send_private_forward_msg(
user_id=context_id,
messages=build_reasoning_forward_nodes(
bot.self_id, reasoning_content
),
)
except Exception as e:
logger.error(f"合并转发消息发送失败:\n{e!s}\n")

View file

@ -13,6 +13,11 @@ class PresetConfig(BaseModel):
proxy: str = Field("", description="HTTP代理服务器")
support_mcp: bool = Field(False, description="是否支持MCP")
support_image: bool = Field(False, description="是否支持图片输入")
extra_body: dict = Field({}, description="额外的请求体字段用于兼容不同API的特殊参数")
request_with_reasoning_content: bool = Field(
False,
description="请求中是否包含推理过程内容部分模型要求进行了工具调用后必须完整回传推理过程给API"
)
class MCPServerConfig(BaseModel):
"""MCP服务器配置"""