Mirror of https://github.com/FuQuan233/nonebot-plugin-llmchat.git, synced 2026-02-05 03:28:05 +00:00
Compare commits (2 commits): d640f16abe...14fbe3cb3c
| Author | SHA1 | Date |
|---|---|---|
|  | 14fbe3cb3c |  |
|  | efb25f0727 |  |
2 changed files with 28 additions and 8 deletions
README.md (14 changes)
```diff
@@ -24,32 +24,36 @@ _✨ An AI group-chat plugin supporting multiple API presets, the MCP protocol, web search, and vision models_
 ## 📖 Introduction
 
+1. **LLM image replies**
+    - Supports image replies from the latest Gemini 2.5 Flash Image (Nano Banana)
+    - Supports editing images using the conversation context
+
 1. **MCP protocol support**
     - Can connect to all kinds of LLM tools that support the MCP protocol
     - Online search can be enabled by connecting to search MCP servers
     - Compatible with the Claude.app configuration format
 
-2. **Multiple API presets**
+1. **Multiple API presets**
     - Configure several LLM service presets (e.g. different models/API keys)
     - Hot-switch the API configuration at runtime via the `API预设` command
     - Built-in service switch (the preset named `off` disables the service)
 
-3. **Multiple reply triggers**
+1. **Multiple reply triggers**
     - @-mention trigger plus random-probability trigger
     - Handles reply messages
     - Group messages are processed in order, preventing mixed-up replies
 
-4. **Per-group context memory management**
+1. **Per-group context memory management**
     - Conversation history is kept per group (the number of retained entries is configurable)
     - Unprocessed messages are merged automatically to reduce API usage
     - The `记忆清除` command manually resets the conversation context
 
-5. **Split-reply support**
+1. **Split-reply support**
     - Supports multi-part replies (the LLM decides how to reply)
     - Can @-mention group members (inserted by the LLM)
     - Can optionally output the AI's reasoning process (requires model support)
 
-6. **Customizable persona**
+1. **Customizable persona**
     - The group-specific system prompt can be changed dynamically (`/修改设定`)
     - A custom default prompt is supported
```
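The per-group context memory described in item 4 above (bounded history, automatic merging of unprocessed messages, manual reset) can be pictured roughly as the sketch below. All names here (`GroupState`, `MAX_HISTORY`, `group_states`, `clear_memory`) are illustrative assumptions, not the plugin's actual internals.

```python
from __future__ import annotations

from collections import defaultdict, deque

MAX_HISTORY = 20  # hypothetical "retained messages" setting, not the plugin's real option name


class GroupState:
    """Illustrative per-group state: bounded history plus a buffer of unprocessed messages."""

    def __init__(self) -> None:
        self.history: deque[dict] = deque(maxlen=MAX_HISTORY)  # oldest turns drop off automatically
        self.pending: list[str] = []  # messages seen but not yet sent to the API

    def flush_pending(self) -> dict | None:
        """Merge all unprocessed messages into one user turn to cut API usage."""
        if not self.pending:
            return None
        merged = {"role": "user", "content": "\n".join(self.pending)}
        self.pending.clear()
        self.history.append(merged)
        return merged


# one independent state per group id ("per-group" memory)
group_states: defaultdict[int, GroupState] = defaultdict(GroupState)


def clear_memory(group_id: int) -> None:
    """Conceptually what a reset command like `记忆清除` does: wipe one group's context."""
    group_states[group_id].history.clear()


group_states[12345].pending.append("hello")
group_states[12345].flush_pending()
print(list(group_states[12345].history))
```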
```diff
@@ -21,7 +21,7 @@ from nonebot import (
     on_message,
     require,
 )
-from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message
+from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message, MessageSegment
 from nonebot.adapters.onebot.v11.permission import GROUP_ADMIN, GROUP_OWNER
 from nonebot.params import CommandArg
 from nonebot.permission import SUPERUSER
```
```diff
@@ -407,10 +407,18 @@ async def process_messages(group_id: int):
                 or matched_reasoning_content
             )
 
-            new_messages.append({
+            llm_reply: ChatCompletionMessageParam = {
                 "role": "assistant",
                 "content": reply,
-            })
+            }
+
+            reply_images = getattr(response.choices[0].message, "images", None)
+
+            if reply_images:
+                # the assistant message in the OpenAI SDK has no images field yet, so it is set separately
+                llm_reply["images"] = reply_images  # pyright: ignore[reportGeneralTypeIssues]
+
+            new_messages.append(llm_reply)
 
             # save the history only after the request succeeds, so user and assistant messages alternate and the R1 model does not error
             for message in new_messages:
```
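The hunk above builds the assistant history entry first and attaches the nonstandard `images` field only when the model actually returned images, since the OpenAI SDK's typed assistant message has no such field. Below is a minimal, self-contained sketch of that guard; `build_assistant_message` is a hypothetical helper, the response object is faked with `SimpleNamespace`, and the image payload shape follows the `image_url`/`url` structure used in the next hunk.

```python
from types import SimpleNamespace


def build_assistant_message(message) -> dict:
    """Build the history entry, attaching `images` only when the model returned any."""
    llm_reply: dict = {"role": "assistant", "content": message.content}
    # getattr() keeps this safe for models/SDK objects that have no `images` attribute at all
    images = getattr(message, "images", None)
    if images:
        llm_reply["images"] = images  # nonstandard field; text-only providers never set it
    return llm_reply


# text-only reply: no `images` key is added
print(build_assistant_message(SimpleNamespace(content="hi")))

# image reply, e.g. Gemini 2.5 Flash Image behind an OpenAI-compatible endpoint
print(build_assistant_message(SimpleNamespace(
    content="here you go",
    images=[{"image_url": {"url": "data:image/png;base64,...."}}],
)))
```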
```diff
@@ -432,6 +440,14 @@ async def process_messages(group_id: int):
             assert reply is not None
             await send_split_messages(handler, reply)
 
+            if reply_images:
+                logger.debug(f"API响应 图片数:{len(reply_images)}")
+                for i, image in enumerate(reply_images, start=1):
+                    logger.debug(f"正在发送第{i}张图片")
+                    image_base64 = image["image_url"]["url"].removeprefix("data:image/png;base64,")
+                    image_msg = MessageSegment.image(base64.b64decode(image_base64))
+                    await handler.send(image_msg)
+
         except Exception as e:
             logger.opt(exception=e).error(f"API请求失败 群号:{group_id}")
             await handler.send(Message(f"服务暂时不可用,请稍后再试\n{e!s}"))
```
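The images arrive as base64 data URLs, and the code above strips the fixed `data:image/png;base64,` prefix before decoding the payload. The sketch below shows the same decoding step in a slightly more general form, splitting at the first comma so MIME types other than PNG would also decode; `data_url_to_bytes` is an illustrative helper, not part of the plugin.

```python
import base64


def data_url_to_bytes(url: str) -> bytes:
    """Decode a base64 data URL (e.g. "data:image/png;base64,...") into raw bytes.

    Splitting at the first comma handles any MIME type, whereas stripping the fixed
    "data:image/png;base64," prefix (as the diff does) assumes the model returns PNG.
    """
    header, _, payload = url.partition(",")
    if not header.startswith("data:") or "base64" not in header:
        raise ValueError(f"not a base64 data URL: {header!r}")
    return base64.b64decode(payload)


# round-trip demo with a made-up payload (not a real image)
fake_png = b"\x89PNG\r\n\x1a\n" + b"fake"
url = "data:image/png;base64," + base64.b64encode(fake_png).decode()
assert data_url_to_bytes(url) == fake_png
print(len(data_url_to_bytes(url)), "bytes decoded")
```

In the diff itself, the decoded bytes are passed straight to `MessageSegment.image`, which accepts raw image bytes.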