diff --git a/README.md b/README.md
index 8bedf4a..198d121 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@
# nonebot-plugin-llmchat
-_✨ 支持多API预设、MCP协议、联网搜索的AI群聊插件 ✨_
+_✨ 支持多API预设、MCP协议、联网搜索、视觉模型的AI群聊插件 ✨_
@@ -17,38 +17,43 @@ _✨ 支持多API预设、MCP协议、联网搜索的AI群聊插件 ✨_
-
+
+
## 📖 介绍
+1. **支持LLM回复图片**
+ - 支持最新 Gemini 2.5 Flash Image (Nano Banana) 的图片回复
+ - 支持图片上下文修改
+
1. **支持MCP协议**
- 可以连接各种支持MCP协议的LLM工具
- 通过连接一些搜索MCP服务器可以实现在线搜索
- 兼容 Claude.app 的配置格式
-2. **多API预设支持**
+1. **多API预设支持**
- 可配置多个LLM服务预设(如不同模型/API密钥)
- 支持运行时通过`API预设`命令热切换API配置
- 内置服务开关功能(预设名为`off`时停用)
-3. **多种回复触发方式**
+1. **多种回复触发方式**
- @触发 + 随机概率触发
- 支持处理回复消息
- 群聊消息顺序处理,防止消息错乱
-4. **分群聊上下文记忆管理**
+1. **分群聊上下文记忆管理**
- 分群聊保留对话历史记录(可配置保留条数)
- 自动合并未处理消息,降低API用量
- 支持`记忆清除`命令手动重置对话上下文
-5. **分段回复支持**
+1. **分段回复支持**
- 支持多段式回复(由LLM决定如何回复)
- 可@群成员(由LLM插入)
- 可选输出AI的思维过程(需模型支持)
-6. **可自定义性格**
+1. **可自定义性格**
- 可动态修改群组专属系统提示词(`/修改设定`)
- 支持自定义默认提示词
@@ -107,6 +112,8 @@ _✨ 支持多API预设、MCP协议、联网搜索的AI群聊插件 ✨_
| LLMCHAT__DEFAULT_PRESET | 否 | off | 默认使用的预设名称,配置为off则为关闭 |
| LLMCHAT__RANDOM_TRIGGER_PROB | 否 | 0.05 | 默认随机触发概率 [0, 1] |
| LLMCHAT__DEFAULT_PROMPT | 否 | 你的回答应该尽量简洁、幽默、可以使用一些语气词、颜文字。你应该拒绝回答任何政治相关的问题。 | 默认提示词 |
+| LLMCHAT__BLACKLIST_USER_IDS | 否 | [] | 黑名单用户ID列表,机器人将不会处理黑名单用户的消息 |
+| LLMCHAT__IGNORE_PREFIXES | 否 | [] | 需要忽略的消息前缀列表,匹配到这些前缀的消息不会处理 |
| LLMCHAT__MCP_SERVERS | 否 | {} | MCP服务器配置,具体见下表 |
其中LLMCHAT__API_PRESETS为一个列表,每项配置有以下的配置项
@@ -119,6 +126,8 @@ _✨ 支持多API预设、MCP协议、联网搜索的AI群聊插件 ✨_
| max_tokens | 否 | 2048 | 最大响应token数 |
| temperature | 否 | 0.7 | 生成温度 |
| proxy | 否 | 无 | 请求API时使用的HTTP代理 |
+| support_mcp | 否 | False | 是否支持MCP协议 |
+| support_image | 否 | False | 是否支持图片输入 |
LLMCHAT__MCP_SERVERS同样为一个dict,key为服务器名称,value配置的格式基本兼容 Claude.app 的配置格式,具体支持如下
@@ -151,10 +160,18 @@ LLMCHAT__MCP_SERVERS同样为一个dict,key为服务器名称,value配置的
"proxy": "http://10.0.0.183:7890"
},
{
- "name": "deepseek-r1",
+ "name": "deepseek-v1",
"api_key": "sk-your-api-key",
- "model_name": "deepseek-reasoner",
- "api_base": "https://api.deepseek.com"
+ "model_name": "deepseek-chat",
+ "api_base": "https://api.deepseek.com",
+ "support_mcp": true
+ },
+ {
+        "name": "some-vision-model",
+        "api_key": "sk-your-api-key",
+        "model_name": "some-vision-model",
+        "api_base": "https://some-vision-model.com/api",
+ "support_image": true
}
]
LLMCHAT__MCP_SERVERS='
@@ -195,4 +212,4 @@ LLMCHAT__MCP_SERVERS同样为一个dict,key为服务器名称,value配置的
### 效果图

-
\ No newline at end of file
+
diff --git a/nonebot_plugin_llmchat/__init__.py b/nonebot_plugin_llmchat/__init__.py
old mode 100644
new mode 100755
index 879953a..e89e485
--- a/nonebot_plugin_llmchat/__init__.py
+++ b/nonebot_plugin_llmchat/__init__.py
@@ -1,10 +1,12 @@
import asyncio
+import base64
from collections import defaultdict, deque
from datetime import datetime
import json
import os
import random
import re
+import ssl
import time
from typing import TYPE_CHECKING
@@ -19,7 +21,7 @@ from nonebot import (
on_message,
require,
)
-from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message
+from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message, MessageSegment
from nonebot.adapters.onebot.v11.permission import GROUP_ADMIN, GROUP_OWNER
from nonebot.params import CommandArg
from nonebot.permission import SUPERUSER
@@ -37,11 +39,14 @@ require("nonebot_plugin_apscheduler")
from nonebot_plugin_apscheduler import scheduler
if TYPE_CHECKING:
- from openai.types.chat import ChatCompletionMessageParam
+ from openai.types.chat import (
+ ChatCompletionContentPartParam,
+ ChatCompletionMessageParam,
+ )
__plugin_meta__ = PluginMetadata(
name="llmchat",
- description="支持多API预设、MCP协议、联网搜索的AI群聊插件",
+ description="支持多API预设、MCP协议、联网搜索、视觉模型、Nano Banana(生图模型)的AI群聊插件",
usage="""@机器人 + 消息 开启对话""",
type="application",
homepage="https://github.com/FuQuan233/nonebot-plugin-llmchat",
@@ -160,6 +165,16 @@ async def is_triggered(event: GroupMessageEvent) -> bool:
if state.preset_name == "off":
return False
+ # 黑名单用户
+ if event.user_id in plugin_config.blacklist_user_ids:
+ return False
+
+ # 忽略特定前缀的消息
+ msg_text = event.get_plaintext().strip()
+ for prefix in plugin_config.ignore_prefixes:
+ if msg_text.startswith(prefix):
+ return False
+
state.past_events.append(event)
# 原有@触发条件
@@ -176,7 +191,7 @@ async def is_triggered(event: GroupMessageEvent) -> bool:
# 消息处理器
handler = on_message(
rule=Rule(is_triggered),
- priority=10,
+ priority=99,
block=False,
)
@@ -197,6 +212,46 @@ async def handle_message(event: GroupMessageEvent):
task.add_done_callback(tasks.discard)
tasks.add(task)
+async def process_images(event: GroupMessageEvent) -> list[str]:
+ base64_images = []
+ for segement in event.get_message():
+ if segement.type == "image":
+ image_url = segement.data.get("url") or segement.data.get("file")
+ if image_url:
+ try:
+ # 处理高版本 httpx 的 [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] 报错
+ ssl_context = ssl.create_default_context()
+ ssl_context.check_hostname = False
+ ssl_context.verify_mode = ssl.CERT_NONE
+ ssl_context.set_ciphers("DEFAULT@SECLEVEL=2")
+
+ # 下载图片并将图片转换为base64
+ async with httpx.AsyncClient(verify=ssl_context) as client:
+ response = await client.get(image_url, timeout=10.0)
+ if response.status_code != 200:
+ logger.error(f"下载图片失败: {image_url}, 状态码: {response.status_code}")
+ continue
+ image_data = response.content
+ base64_data = base64.b64encode(image_data).decode("utf-8")
+ base64_images.append(base64_data)
+ except Exception as e:
+ logger.error(f"处理图片时出错: {e}")
+ logger.debug(f"共处理 {len(base64_images)} 张图片")
+ return base64_images
+
+async def send_split_messages(message_handler, content: str):
+ """
+ 将消息按分隔符分段并发送
+ """
+ logger.info(f"准备发送分段消息,分段数:{len(content.split(''))}")
+ for segment in content.split(""):
+ # 跳过空消息
+ if not segment.strip():
+ continue
+ segment = segment.strip() # 删除前后多余的换行和空格
+ await asyncio.sleep(2) # 避免发送过快
+ logger.debug(f"发送消息分段 内容:{segment[:50]}...") # 只记录前50个字符避免日志过大
+ await message_handler.send(Message(segment))
async def process_messages(group_id: int):
state = group_states[group_id]
@@ -252,14 +307,28 @@ async def process_messages(group_id: int):
{"role": "system", "content": systemPrompt}
]
+ while len(state.history) > 0 and state.history[0]["role"] != "user":
+ state.history.popleft()
+
messages += list(state.history)[-plugin_config.history_size * 2 :]
# 没有未处理的消息说明已经被处理了,跳过
if state.past_events.__len__() < 1:
break
+ content: list[ChatCompletionContentPartParam] = []
+
# 将机器人错过的消息推送给LLM
- content = ",".join([format_message(ev) for ev in state.past_events])
+ past_events_snapshot = list(state.past_events)
+ for ev in past_events_snapshot:
+ text_content = format_message(ev)
+ content.append({"type": "text", "text": text_content})
+
+ # 将消息中的图片转成 base64
+ if preset.support_image:
+ base64_images = await process_images(ev)
+ for base64_image in base64_images:
+ content.append({"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}})
new_messages: list[ChatCompletionMessageParam] = [
{"role": "user", "content": content}
@@ -301,7 +370,7 @@ async def process_messages(group_id: int):
# 发送LLM调用工具时的回复,一般没有
if message.content:
- await handler.send(Message(message.content))
+ await send_split_messages(handler, message.content)
# 处理每个工具调用
for tool_call in message.tool_calls:
@@ -317,7 +386,7 @@ async def process_messages(group_id: int):
new_messages.append({
"role": "tool",
"tool_call_id": tool_call.id,
- "content": str(result.content)
+ "content": str(result)
})
# 将工具调用的结果交给 LLM
@@ -338,10 +407,18 @@ async def process_messages(group_id: int):
or matched_reasoning_content
)
- new_messages.append({
+ llm_reply: ChatCompletionMessageParam = {
"role": "assistant",
"content": reply,
- })
+ }
+
+ reply_images = getattr(response.choices[0].message, "images", None)
+
+ if reply_images:
+ # openai的sdk里的assistant消息暂时没有images字段,需要单独处理
+ llm_reply["images"] = reply_images # pyright: ignore[reportGeneralTypeIssues]
+
+ new_messages.append(llm_reply)
# 请求成功后再保存历史记录,保证user和assistant穿插,防止R1模型报错
for message in new_messages:
@@ -361,20 +438,15 @@ async def process_messages(group_id: int):
logger.error(f"合并转发消息发送失败:\n{e!s}\n")
assert reply is not None
- logger.info(
- f"准备发送回复消息 群号:{group_id} 消息分段数:{len(reply.split(''))}"
- )
- for r in reply.split(""):
- # 似乎会有空消息的情况导致string index out of range异常
- if len(r) == 0 or r.isspace():
- continue
- # 删除前后多余的换行和空格
- r = r.strip()
- await asyncio.sleep(2)
- logger.debug(
- f"发送消息分段 内容:{r[:50]}..."
- ) # 只记录前50个字符避免日志过大
- await handler.send(Message(r))
+ await send_split_messages(handler, reply)
+
+ if reply_images:
+ logger.debug(f"API响应 图片数:{len(reply_images)}")
+ for i, image in enumerate(reply_images, start=1):
+ logger.debug(f"正在发送第{i}张图片")
+ image_base64 = image["image_url"]["url"].removeprefix("data:image/png;base64,")
+ image_msg = MessageSegment.image(base64.b64decode(image_base64))
+ await handler.send(image_msg)
except Exception as e:
logger.opt(exception=e).error(f"API请求失败 群号:{group_id}")
@@ -411,7 +483,7 @@ async def handle_preset(event: GroupMessageEvent, args: Message = CommandArg()):
edit_preset_handler = on_command(
"修改设定",
- priority=99,
+ priority=1,
block=True,
permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
)
@@ -428,7 +500,7 @@ async def handle_edit_preset(event: GroupMessageEvent, args: Message = CommandAr
reset_handler = on_command(
"记忆清除",
- priority=99,
+ priority=1,
block=True,
permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
)
@@ -445,7 +517,7 @@ async def handle_reset(event: GroupMessageEvent, args: Message = CommandArg()):
set_prob_handler = on_command(
"设置主动回复概率",
- priority=99,
+ priority=1,
block=True,
permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
)
diff --git a/nonebot_plugin_llmchat/config.py b/nonebot_plugin_llmchat/config.py
old mode 100644
new mode 100755
index c802a8d..d658875
--- a/nonebot_plugin_llmchat/config.py
+++ b/nonebot_plugin_llmchat/config.py
@@ -12,6 +12,7 @@ class PresetConfig(BaseModel):
temperature: float = Field(0.7, description="生成温度(0-2]")
proxy: str = Field("", description="HTTP代理服务器")
support_mcp: bool = Field(False, description="是否支持MCP")
+ support_image: bool = Field(False, description="是否支持图片输入")
class MCPServerConfig(BaseModel):
"""MCP服务器配置"""
@@ -42,6 +43,11 @@ class ScopedConfig(BaseModel):
description="默认提示词",
)
mcp_servers: dict[str, MCPServerConfig] = Field({}, description="MCP服务器配置")
+ blacklist_user_ids: set[int] = Field(set(), description="黑名单用户ID列表")
+ ignore_prefixes: list[str] = Field(
+ default_factory=list,
+ description="需要忽略的消息前缀列表,匹配到这些前缀的消息不会处理"
+ )
class Config(BaseModel):
diff --git a/nonebot_plugin_llmchat/mcpclient.py b/nonebot_plugin_llmchat/mcpclient.py
index 7031d34..55e1b44 100644
--- a/nonebot_plugin_llmchat/mcpclient.py
+++ b/nonebot_plugin_llmchat/mcpclient.py
@@ -1,3 +1,4 @@
+import asyncio
from contextlib import AsyncExitStack
from mcp import ClientSession, StdioServerParameters
@@ -64,9 +65,13 @@ class MCPClient:
server_name, real_tool_name = tool_name.split("___")
logger.info(f"正在服务器[{server_name}]上调用工具[{real_tool_name}]")
session = self.sessions[server_name]
- response = await session.call_tool(real_tool_name, tool_args)
+ try:
+ response = await asyncio.wait_for(session.call_tool(real_tool_name, tool_args), timeout=30)
+ except asyncio.TimeoutError:
+ logger.error(f"调用工具[{real_tool_name}]超时")
+ return f"调用工具[{real_tool_name}]超时"
logger.debug(f"工具[{real_tool_name}]调用完成,响应: {response}")
- return response
+ return response.content
def get_friendly_name(self, tool_name: str):
server_name, real_tool_name = tool_name.split("___")
diff --git a/pyproject.toml b/pyproject.toml
index 4875a8f..bb49e2f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "nonebot-plugin-llmchat"
-version = "0.2.1"
+version = "0.3.0"
description = "Nonebot AI group chat plugin supporting multiple API preset configurations"
license = "GPL"
authors = ["FuQuan i@fuquan.moe"]