Compare commits


24 commits
v0.2.0 ... main

Author SHA1 Message Date
d640f16abe 🔖 bump llmchat version 0.2.5 2025-09-01 10:56:31 +08:00
1600cba172 Support ignoring messages with specific prefixes #21 2025-09-01 10:51:30 +08:00
9f81a38d5b 🐛 Extend the MCP timeout to 30 seconds to avoid execution failures 2025-09-01 10:45:18 +08:00
53d57beba3 🔖 bump llmchat version 0.2.4 2025-08-20 12:48:13 +08:00
ea635fd147 🐛 Fix messages being sent to the LLM twice 2025-08-20 12:38:39 +08:00
5014d3014b 🐛 Fix a hang caused by a stuck MCP server 2025-08-20 11:40:54 +08:00
89baec6abc 📘 Update README 2025-05-19 14:17:25 +08:00
19ff0026c0 🐛 Fix "deque mutated during iteration" 2025-05-16 21:43:08 +08:00
52ada66616 🔖 bump llmchat version 0.2.3 2025-05-13 14:02:23 +08:00
cf2d549f02 📘 Update meta information 2025-05-13 14:02:03 +08:00
6c27cf56fa 🐛 Fix commands themselves triggering a reply 2025-05-13 13:43:06 +08:00
3d85ea90ef 🐛 Fix only the last message's images being processed when multiple messages arrive 2025-05-13 13:41:28 +08:00
7edd7c913e 🐛 Fix replies not being split into segments during MCP calls 2025-05-13 11:23:52 +08:00
84d3851936 🐛 Fix image URLs not being found on some protocol adapters 2025-05-12 15:26:39 +08:00
ee2a045116 🔖 bump llmchat version 0.2.2 2025-05-11 15:45:57 +08:00
6f69cc3cff Support a user blacklist #20 2025-05-11 15:42:13 +08:00
ed1b9792e7 📘 Update README 2025-05-11 15:05:26 +08:00
FuQuan 0ddf8e5626 Merge pull request #19 from duolanda/main (support vision models) 2025-05-11 14:51:14 +08:00
duolanda 5e048c9472 ♻️ fix lint problems 2025-05-11 00:41:05 +08:00
duolanda f2d1521158 support vision models 2025-05-10 22:58:44 +08:00
db9794a18a 🐛 Fix an error when the first message in history is not a user message 2025-04-28 20:19:47 +08:00
FuQuan c9c22a8630 📘 Update README 2025-04-27 18:08:50 +08:00
8013df564a 🔖 bump llmchat version 0.2.1 2025-04-27 11:57:34 +08:00
e3973baa37 🐛 Fix assistant messages not being correctly added to the history 2025-04-27 11:56:38 +08:00
5 changed files with 124 additions and 40 deletions

README.md
@@ -8,7 +8,7 @@
 # nonebot-plugin-llmchat
-_✨ AI group-chat plugin with multiple API presets, the MCP protocol, and web search ✨_
+_✨ AI group-chat plugin with multiple API presets, the MCP protocol, web search, and vision models ✨_
 <a href="./LICENSE">
@@ -17,7 +17,8 @@
 <a href="https://pypi.python.org/pypi/nonebot-plugin-llmchat">
   <img src="https://img.shields.io/pypi/v/nonebot-plugin-llmchat.svg" alt="pypi">
 </a>
-<img src="https://img.shields.io/badge/python-3.9+-blue.svg" alt="python">
+<img src="https://img.shields.io/badge/python-3.10+-blue.svg" alt="python">
+<a href="https://deepwiki.com/FuQuan233/nonebot-plugin-llmchat"><img src="https://deepwiki.com/badge.svg" alt="Ask DeepWiki"></a>
 </div>
@@ -107,6 +108,8 @@
 | LLMCHAT__DEFAULT_PRESET | No | off | Name of the default preset; set to off to disable |
 | LLMCHAT__RANDOM_TRIGGER_PROB | No | 0.05 | Default random-trigger probability, in [0, 1] |
 | LLMCHAT__DEFAULT_PROMPT | No | Your replies should be concise and humorous; you may use interjections and kaomoji. You should refuse to answer any politics-related questions. | Default prompt |
+| LLMCHAT__BLACKLIST_USER_IDS | No | [] | Blacklisted user IDs; the bot will not process messages from these users |
+| LLMCHAT__IGNORE_PREFIXES | No | [] | Message prefixes to ignore; messages matching these prefixes are not processed |
 | LLMCHAT__MCP_SERVERS | No | {} | MCP server configuration; see the table below |
 LLMCHAT__API_PRESETS is a list; each entry supports the following options:
@@ -119,6 +122,8 @@
 | max_tokens | No | 2048 | Maximum number of response tokens |
 | temperature | No | 0.7 | Sampling temperature |
 | proxy | No | none | HTTP proxy used for API requests |
+| support_mcp | No | False | Whether the MCP protocol is supported |
+| support_image | No | False | Whether image input is supported |
 LLMCHAT__MCP_SERVERS is likewise a dict keyed by server name; the value format is largely compatible with Claude.app's configuration format, as follows:
@@ -151,10 +156,18 @@
     "proxy": "http://10.0.0.183:7890"
   },
   {
-    "name": "deepseek-r1",
+    "name": "deepseek-v1",
     "api_key": "sk-your-api-key",
-    "model_name": "deepseek-reasoner",
-    "api_base": "https://api.deepseek.com"
+    "model_name": "deepseek-chat",
+    "api_base": "https://api.deepseek.com",
+    "support_mcp": true
+  },
+  {
+    "name": "some-vison-model",
+    "api_key": "sk-your-api-key",
+    "model_name": "some-vison-model",
+    "api_base": "https://some-vison-model.com/api",
+    "support_image": true
   }
 ]
 LLMCHAT__MCP_SERVERS='
@@ -195,4 +208,4 @@
 ### Demo screenshots
 ![](img/mcp_demo.jpg)
 ![](img/demo.png)
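The two new options above are lists, and the README example does not show them; a minimal sketch of how they might be set in NoneBot's .env file (the IDs and prefixes are purely illustrative; list values are written as JSON, as with the plugin's other complex options):

LLMCHAT__BLACKLIST_USER_IDS=[10001, 10002]
LLMCHAT__IGNORE_PREFIXES=["/", "#"]

With this configuration the bot would silently skip any message from users 10001 or 10002 and any message beginning with "/" or "#".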

nonebot_plugin_llmchat/__init__.py (122 changed lines) Normal file → Executable file

@@ -1,10 +1,12 @@
 import asyncio
+import base64
 from collections import defaultdict, deque
 from datetime import datetime
 import json
 import os
 import random
 import re
+import ssl
 import time
 from typing import TYPE_CHECKING
@@ -37,11 +39,14 @@ require("nonebot_plugin_apscheduler")
 from nonebot_plugin_apscheduler import scheduler
 if TYPE_CHECKING:
-    from openai.types.chat import ChatCompletionMessageParam
+    from openai.types.chat import (
+        ChatCompletionContentPartParam,
+        ChatCompletionMessageParam,
+    )
 __plugin_meta__ = PluginMetadata(
     name="llmchat",
-    description="AI group-chat plugin with multiple API presets, the MCP protocol, and web search",
+    description="AI group-chat plugin with multiple API presets, the MCP protocol, web search, and vision models",
     usage="""@the bot + a message to start a conversation""",
     type="application",
     homepage="https://github.com/FuQuan233/nonebot-plugin-llmchat",
@@ -160,6 +165,16 @@ async def is_triggered(event: GroupMessageEvent) -> bool:
     if state.preset_name == "off":
         return False
+    # Blacklisted users
+    if event.user_id in plugin_config.blacklist_user_ids:
+        return False
+    # Ignore messages with specific prefixes
+    msg_text = event.get_plaintext().strip()
+    for prefix in plugin_config.ignore_prefixes:
+        if msg_text.startswith(prefix):
+            return False
     state.past_events.append(event)
     # Original @-mention trigger condition
# 消息处理器 # 消息处理器
handler = on_message( handler = on_message(
rule=Rule(is_triggered), rule=Rule(is_triggered),
priority=10, priority=99,
block=False, block=False,
) )
@@ -197,6 +212,46 @@ async def handle_message(event: GroupMessageEvent):
     task.add_done_callback(tasks.discard)
     tasks.add(task)
+
+async def process_images(event: GroupMessageEvent) -> list[str]:
+    base64_images = []
+    for segement in event.get_message():
+        if segement.type == "image":
+            image_url = segement.data.get("url") or segement.data.get("file")
+            if image_url:
+                try:
+                    # Work around [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] errors from newer httpx versions
+                    ssl_context = ssl.create_default_context()
+                    ssl_context.check_hostname = False
+                    ssl_context.verify_mode = ssl.CERT_NONE
+                    ssl_context.set_ciphers("DEFAULT@SECLEVEL=2")
+                    # Download the image and convert it to base64
+                    async with httpx.AsyncClient(verify=ssl_context) as client:
+                        response = await client.get(image_url, timeout=10.0)
+                        if response.status_code != 200:
+                            logger.error(f"Failed to download image: {image_url}, status code: {response.status_code}")
+                            continue
+                        image_data = response.content
+                        base64_data = base64.b64encode(image_data).decode("utf-8")
+                        base64_images.append(base64_data)
+                except Exception as e:
+                    logger.error(f"Error while processing image: {e}")
+    logger.debug(f"Processed {len(base64_images)} image(s) in total")
+    return base64_images
+
+async def send_split_messages(message_handler, content: str):
+    """Split the content on the <botbr> delimiter and send each segment."""
+    logger.info(f"Preparing to send a split message, segment count: {len(content.split('<botbr>'))}")
+    for segment in content.split("<botbr>"):
+        # Skip empty segments
+        if not segment.strip():
+            continue
+        segment = segment.strip()  # Strip extra leading/trailing newlines and spaces
+        await asyncio.sleep(2)  # Avoid sending too quickly
+        logger.debug(f"Sending message segment, content: {segment[:50]}...")  # Log only the first 50 characters to keep logs small
+        await message_handler.send(Message(segment))
+
 async def process_messages(group_id: int):
     state = group_states[group_id]
@@ -252,14 +307,28 @@ async def process_messages(group_id: int):
         {"role": "system", "content": systemPrompt}
     ]
+    while len(state.history) > 0 and state.history[0]["role"] != "user":
+        state.history.popleft()
     messages += list(state.history)[-plugin_config.history_size * 2 :]
     # If there are no unprocessed messages, they have already been handled; skip
     if state.past_events.__len__() < 1:
         break
+    content: list[ChatCompletionContentPartParam] = []
     # Push the messages the bot missed to the LLM
-    content = ",".join([format_message(ev) for ev in state.past_events])
+    past_events_snapshot = list(state.past_events)
+    for ev in past_events_snapshot:
+        text_content = format_message(ev)
+        content.append({"type": "text", "text": text_content})
+        # Convert images in the message to base64
+        if preset.support_image:
+            base64_images = await process_images(ev)
+            for base64_image in base64_images:
+                content.append({"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}})
     new_messages: list[ChatCompletionMessageParam] = [
         {"role": "user", "content": content}
@@ -268,10 +337,6 @@ async def process_messages(group_id: int):
     logger.debug(
         f"Sending API request, model: {preset.model_name}, history length: {len(messages)}"
     )
-    mcp_client = MCPClient(plugin_config.mcp_servers)
-    await mcp_client.connect_to_servers()
-    available_tools = await mcp_client.get_available_tools()
     client_config = {
         "model": preset.model_name,
@@ -280,7 +345,10 @@ async def process_messages(group_id: int):
         "timeout": 60,
     }
+    mcp_client = MCPClient(plugin_config.mcp_servers)
     if preset.support_mcp:
+        await mcp_client.connect_to_servers()
+        available_tools = await mcp_client.get_available_tools()
         client_config["tools"] = available_tools
     response = await client.chat.completions.create(
@@ -291,10 +359,7 @@ async def process_messages(group_id: int):
     if response.usage is not None:
         logger.debug(f"Received API response, total tokens used: {response.usage.total_tokens}")
-    final_message = []
     message = response.choices[0].message
-    if message.content:
-        final_message.append(message.content)
     # Process the response and handle tool calls
     while preset.support_mcp and message.tool_calls:
@@ -302,6 +367,11 @@ async def process_messages(group_id: int):
         "role": "assistant",
         "tool_calls": [tool_call.model_dump() for tool_call in message.tool_calls]
     })
+    # Send the LLM's reply accompanying the tool call (usually empty)
+    if message.content:
+        await send_split_messages(handler, message.content)
     # Handle each tool call
     for tool_call in message.tool_calls:
         tool_name = tool_call.function.name
@@ -316,7 +386,7 @@ async def process_messages(group_id: int):
     new_messages.append({
         "role": "tool",
         "tool_call_id": tool_call.id,
-        "content": str(result.content)
+        "content": str(result)
     })
     # Hand the tool-call results back to the LLM
@@ -326,8 +396,6 @@ async def process_messages(group_id: int):
     )
     message = response.choices[0].message
-    if message.content:
-        final_message.append(message.content)
     await mcp_client.cleanup()
@@ -339,6 +407,11 @@ async def process_messages(group_id: int):
         or matched_reasoning_content
     )
+    new_messages.append({
+        "role": "assistant",
+        "content": reply,
+    })
     # Save history only after the request succeeds, keeping user and assistant messages interleaved so R1-style models don't error
     for message in new_messages:
         state.history.append(message)
@@ -357,20 +430,7 @@ async def process_messages(group_id: int):
             logger.error(f"Failed to send merged forward message:\n{e!s}\n")
     assert reply is not None
-    logger.info(
-        f"Preparing to send the reply, group: {group_id}, segment count: {len(reply.split('<botbr>'))}"
-    )
-    for r in reply.split("<botbr>"):
-        # Empty segments can apparently occur, causing a "string index out of range" error
-        if len(r) == 0 or r.isspace():
-            continue
-        # Strip extra leading/trailing newlines and spaces
-        r = r.strip()
-        await asyncio.sleep(2)
-        logger.debug(
-            f"Sending message segment, content: {r[:50]}..."
-        )  # Log only the first 50 characters to keep logs small
-        await handler.send(Message(r))
+    await send_split_messages(handler, reply)
 except Exception as e:
     logger.opt(exception=e).error(f"API request failed, group: {group_id}")
@@ -407,7 +467,7 @@ async def handle_preset(event: GroupMessageEvent, args: Message = CommandArg()):
 edit_preset_handler = on_command(
     "修改设定",
-    priority=99,
+    priority=1,
     block=True,
     permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
 )
@@ -424,7 +484,7 @@ async def handle_edit_preset(event: GroupMessageEvent, args: Message = CommandArg()):
 reset_handler = on_command(
     "记忆清除",
-    priority=99,
+    priority=1,
     block=True,
     permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
 )
@@ -441,7 +501,7 @@ async def handle_reset(event: GroupMessageEvent, args: Message = CommandArg()):
 set_prob_handler = on_command(
     "设置主动回复概率",
-    priority=99,
+    priority=1,
     block=True,
     permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
 )
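The new send_split_messages helper centralizes the segment-and-send logic that was previously inlined in process_messages. Its splitting rules are easy to verify in isolation; a minimal standalone sketch (FakeHandler and the plain-string send are illustrative stand-ins, not part of the plugin, which wraps each segment in a OneBot Message):

import asyncio

class FakeHandler:
    """Hypothetical stand-in for a NoneBot matcher; only .send() is used."""
    async def send(self, message: str):
        print(message)

async def send_split(handler: FakeHandler, content: str, delay: float = 0):
    # Same rules as send_split_messages: split on <botbr>,
    # skip empty segments, strip whitespace, pause between sends.
    for segment in content.split("<botbr>"):
        if not segment.strip():
            continue
        await asyncio.sleep(delay)
        await handler.send(segment.strip())

# Prints "first" then "second"; the empty middle segment is dropped.
asyncio.run(send_split(FakeHandler(), "first<botbr><botbr>second"))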

nonebot_plugin_llmchat/config.py (6 changed lines) Normal file → Executable file

@@ -12,6 +12,7 @@ class PresetConfig(BaseModel):
     temperature: float = Field(0.7, description="Sampling temperature (0-2]")
     proxy: str = Field("", description="HTTP proxy server")
     support_mcp: bool = Field(False, description="Whether MCP is supported")
+    support_image: bool = Field(False, description="Whether image input is supported")
 class MCPServerConfig(BaseModel):
     """MCP server configuration"""
@@ -42,6 +43,11 @@ class ScopedConfig(BaseModel):
         description="Default prompt",
     )
     mcp_servers: dict[str, MCPServerConfig] = Field({}, description="MCP server configuration")
+    blacklist_user_ids: set[int] = Field(set(), description="Blacklisted user IDs")
+    ignore_prefixes: list[str] = Field(
+        default_factory=list,
+        description="Message prefixes to ignore; matching messages will not be processed"
+    )
 class Config(BaseModel):

MCP client module (MCPClient)

@@ -1,3 +1,4 @@
+import asyncio
 from contextlib import AsyncExitStack
 from mcp import ClientSession, StdioServerParameters
@@ -64,9 +65,13 @@ class MCPClient:
         server_name, real_tool_name = tool_name.split("___")
         logger.info(f"Calling tool [{real_tool_name}] on server [{server_name}]")
         session = self.sessions[server_name]
-        response = await session.call_tool(real_tool_name, tool_args)
+        try:
+            response = await asyncio.wait_for(session.call_tool(real_tool_name, tool_args), timeout=30)
+        except asyncio.TimeoutError:
+            logger.error(f"Tool call [{real_tool_name}] timed out")
+            return f"Tool call [{real_tool_name}] timed out"
         logger.debug(f"Tool [{real_tool_name}] call finished, response: {response}")
-        return response
+        return response.content
     def get_friendly_name(self, tool_name: str):
         server_name, real_tool_name = tool_name.split("___")
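The 30-second guard added above is the standard asyncio.wait_for pattern for bounding a potentially stuck await (here, an MCP tool call). The same idea in isolation, as a minimal self-contained sketch (call_with_timeout and slow are illustrative, not part of the plugin):

import asyncio

async def call_with_timeout(awaitable, seconds: float = 30):
    """Await `awaitable`, giving up after `seconds` instead of hanging."""
    try:
        return await asyncio.wait_for(awaitable, timeout=seconds)
    except asyncio.TimeoutError:
        return None  # the caller decides how to report the timeout

async def slow():
    await asyncio.sleep(60)
    return "done"

# Prints None after ~1 second instead of blocking for a minute.
print(asyncio.run(call_with_timeout(slow(), seconds=1)))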

pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "nonebot-plugin-llmchat"
-version = "0.2.0"
+version = "0.2.5"
 description = "Nonebot AI group chat plugin supporting multiple API preset configurations"
 license = "GPL"
 authors = ["FuQuan i@fuquan.moe"]