Mirror of https://github.com/FuQuan233/nonebot-plugin-llmchat.git
Synced 2025-09-04 10:20:45 +00:00

Compare commits (24 commits)
Commits:
d640f16abe, 1600cba172, 9f81a38d5b, 53d57beba3, ea635fd147, 5014d3014b,
89baec6abc, 19ff0026c0, 52ada66616, cf2d549f02, 6c27cf56fa, 3d85ea90ef,
7edd7c913e, 84d3851936, ee2a045116, 6f69cc3cff, ed1b9792e7, 0ddf8e5626,
5e048c9472, f2d1521158, db9794a18a, c9c22a8630, 8013df564a, e3973baa37
5 changed files with 124 additions and 40 deletions
README.md (25 lines changed)
```diff
@@ -8,7 +8,7 @@
 # nonebot-plugin-llmchat
 
-_✨ An AI group chat plugin supporting multiple API presets, the MCP protocol, and web search ✨_
+_✨ An AI group chat plugin supporting multiple API presets, the MCP protocol, web search, and vision models ✨_
 
 
 <a href="./LICENSE">
```
```diff
@@ -17,7 +17,8 @@ _✨ An AI group chat plugin supporting multiple API presets, the MCP protocol, and web search ✨_
 <a href="https://pypi.python.org/pypi/nonebot-plugin-llmchat">
     <img src="https://img.shields.io/pypi/v/nonebot-plugin-llmchat.svg" alt="pypi">
 </a>
-<img src="https://img.shields.io/badge/python-3.9+-blue.svg" alt="python">
+<img src="https://img.shields.io/badge/python-3.10+-blue.svg" alt="python">
+<a href="https://deepwiki.com/FuQuan233/nonebot-plugin-llmchat"><img src="https://deepwiki.com/badge.svg" alt="Ask DeepWiki"></a>
 
 </div>
```
```diff
@@ -107,6 +108,8 @@
 | LLMCHAT__DEFAULT_PRESET | No | off | Name of the preset used by default; set to off to disable |
 | LLMCHAT__RANDOM_TRIGGER_PROB | No | 0.05 | Default random-trigger probability, in [0, 1] |
 | LLMCHAT__DEFAULT_PROMPT | No | 你的回答应该尽量简洁、幽默、可以使用一些语气词、颜文字。你应该拒绝回答任何政治相关的问题。 | Default prompt |
+| LLMCHAT__BLACKLIST_USER_IDS | No | [] | Blacklisted user IDs; the bot will not process messages from these users |
+| LLMCHAT__IGNORE_PREFIXES | No | [] | Message prefixes to ignore; messages matching one of these prefixes are not processed |
 | LLMCHAT__MCP_SERVERS | No | {} | MCP server configuration; see the table below |
 
 LLMCHAT__API_PRESETS is a list; each entry supports the following options
```
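For context on how a probability like LLMCHAT__RANDOM_TRIGGER_PROB is typically applied, here is a minimal sketch; the helper name is hypothetical, and the plugin's actual check lives in `is_triggered` in `__init__.py`:

```python
import random

# Hypothetical helper: fire on roughly `prob` of messages, where prob is a
# value in [0, 1] such as the LLMCHAT__RANDOM_TRIGGER_PROB default of 0.05.
def randomly_triggered(prob: float = 0.05) -> bool:
    return random.random() < prob
```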
```diff
@@ -119,6 +122,8 @@
 | max_tokens | No | 2048 | Maximum number of response tokens |
 | temperature | No | 0.7 | Sampling temperature |
 | proxy | No | none | HTTP proxy used when calling the API |
+| support_mcp | No | False | Whether the model supports the MCP protocol |
+| support_image | No | False | Whether the model supports image input |
 
 
 LLMCHAT__MCP_SERVERS is likewise a dict: the keys are server names, and the value format is largely compatible with Claude.app's configuration format; the supported fields are listed below
```
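As a rough illustration of the Claude.app-compatible shape described here, a stdio-style server entry might look like the sketch below. The server name and command are hypothetical, and the exact field set is defined by `MCPServerConfig` in `config.py`:

```python
# Hypothetical LLMCHAT__MCP_SERVERS value, expressed as a Python dict:
# the key is the server name, the value follows the Claude.app-style
# command/args/env layout for a stdio MCP server.
mcp_servers = {
    "fetch": {
        "command": "uvx",              # executable that starts the server
        "args": ["mcp-server-fetch"],  # arguments passed to the command
        "env": {},                     # optional extra environment variables
    },
}
```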
```diff
@@ -151,10 +156,18 @@ LLMCHAT__MCP_SERVERS is likewise a dict keyed by server name
         "proxy": "http://10.0.0.183:7890"
     },
     {
-        "name": "deepseek-r1",
+        "name": "deepseek-v1",
         "api_key": "sk-your-api-key",
-        "model_name": "deepseek-reasoner",
-        "api_base": "https://api.deepseek.com"
+        "model_name": "deepseek-chat",
+        "api_base": "https://api.deepseek.com",
+        "support_mcp": true
+    },
+    {
+        "name": "some-vision-model",
+        "api_key": "sk-your-api-key",
+        "model_name": "some-vision-model",
+        "api_base": "https://some-vision-model.com/api",
+        "support_image": true
     }
 ]
 LLMCHAT__MCP_SERVERS='
```
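For reference, one entry of LLMCHAT__API_PRESETS maps onto the `PresetConfig` model from `config.py` roughly as follows; this is a sketch using only fields that appear in this compare:

```python
from pydantic import BaseModel

# Field names and defaults as they appear in config.py in this compare.
class PresetConfig(BaseModel):
    name: str
    api_key: str
    model_name: str
    api_base: str
    max_tokens: int = 2048
    temperature: float = 0.7
    proxy: str = ""
    support_mcp: bool = False
    support_image: bool = False

preset = PresetConfig(
    name="deepseek-v1",
    api_key="sk-your-api-key",
    model_name="deepseek-chat",
    api_base="https://api.deepseek.com",
    support_mcp=True,
)
```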
```diff
@@ -195,4 +208,4 @@ LLMCHAT__MCP_SERVERS is likewise a dict keyed by server name
 ### Screenshots
 
-![demo image 1](img/img1.png)
+![demo image 1](img/img1.png)
 ![demo image 4](img/img4.png)
```
nonebot_plugin_llmchat/__init__.py (122 lines changed, Normal file → Executable file)
```diff
@@ -1,10 +1,12 @@
 import asyncio
+import base64
 from collections import defaultdict, deque
 from datetime import datetime
 import json
 import os
 import random
 import re
+import ssl
 import time
 from typing import TYPE_CHECKING
 
```
```diff
@@ -37,11 +39,14 @@ require("nonebot_plugin_apscheduler")
 from nonebot_plugin_apscheduler import scheduler
 
 if TYPE_CHECKING:
-    from openai.types.chat import ChatCompletionMessageParam
+    from openai.types.chat import (
+        ChatCompletionContentPartParam,
+        ChatCompletionMessageParam,
+    )
 
 __plugin_meta__ = PluginMetadata(
     name="llmchat",
-    description="AI group chat plugin supporting multiple API presets, the MCP protocol, and web search",
+    description="AI group chat plugin supporting multiple API presets, the MCP protocol, web search, and vision models",
     usage="""@ the bot followed by a message to start chatting""",
     type="application",
     homepage="https://github.com/FuQuan233/nonebot-plugin-llmchat",
```
```diff
@@ -160,6 +165,16 @@ async def is_triggered(event: GroupMessageEvent) -> bool:
     if state.preset_name == "off":
         return False
 
+    # Blacklisted users
+    if event.user_id in plugin_config.blacklist_user_ids:
+        return False
+
+    # Ignore messages with specific prefixes
+    msg_text = event.get_plaintext().strip()
+    for prefix in plugin_config.ignore_prefixes:
+        if msg_text.startswith(prefix):
+            return False
+
     state.past_events.append(event)
 
     # Original @-mention trigger condition
```
```diff
@@ -176,7 +191,7 @@
 # Message handler
 handler = on_message(
     rule=Rule(is_triggered),
-    priority=10,
+    priority=99,
     block=False,
 )
```
```diff
@@ -197,6 +212,46 @@ async def handle_message(event: GroupMessageEvent):
     task.add_done_callback(tasks.discard)
     tasks.add(task)
 
+async def process_images(event: GroupMessageEvent) -> list[str]:
+    base64_images = []
+    for segment in event.get_message():
+        if segment.type == "image":
+            image_url = segment.data.get("url") or segment.data.get("file")
+            if image_url:
+                try:
+                    # Work around [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] errors from newer httpx versions
+                    ssl_context = ssl.create_default_context()
+                    ssl_context.check_hostname = False
+                    ssl_context.verify_mode = ssl.CERT_NONE
+                    ssl_context.set_ciphers("DEFAULT@SECLEVEL=2")
+
+                    # Download the image and convert it to base64
+                    async with httpx.AsyncClient(verify=ssl_context) as client:
+                        response = await client.get(image_url, timeout=10.0)
+                        if response.status_code != 200:
+                            logger.error(f"Failed to download image: {image_url}, status code: {response.status_code}")
+                            continue
+                        image_data = response.content
+                        base64_data = base64.b64encode(image_data).decode("utf-8")
+                        base64_images.append(base64_data)
+                except Exception as e:
+                    logger.error(f"Error while processing image: {e}")
+    logger.debug(f"Processed {len(base64_images)} image(s)")
+    return base64_images
+
+async def send_split_messages(message_handler, content: str):
+    """
+    Split the message on the <botbr> separator and send each segment
+    """
+    logger.info(f"Preparing to send split message, segments: {len(content.split('<botbr>'))}")
+    for segment in content.split("<botbr>"):
+        # Skip empty segments
+        if not segment.strip():
+            continue
+        segment = segment.strip()  # strip extra leading/trailing newlines and spaces
+        await asyncio.sleep(2)  # avoid sending too fast
+        logger.debug(f"Sending message segment, content: {segment[:50]}...")  # log only the first 50 characters to keep logs small
+        await message_handler.send(Message(segment))
+
 async def process_messages(group_id: int):
     state = group_states[group_id]
```
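The splitting behavior of `send_split_messages` can be pinned down with a tiny sketch (illustrative strings only):

```python
# Empty or whitespace-only segments are skipped; the rest are stripped
# before sending, one segment every ~2 seconds.
content = "first part<botbr>\nsecond part\n<botbr><botbr>   "
segments = [s.strip() for s in content.split("<botbr>") if s.strip()]
assert segments == ["first part", "second part"]
```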
```diff
@@ -252,14 +307,28 @@
             {"role": "system", "content": systemPrompt}
         ]
 
         while len(state.history) > 0 and state.history[0]["role"] != "user":
             state.history.popleft()
 
         messages += list(state.history)[-plugin_config.history_size * 2 :]
 
         # No pending messages means they were already handled; skip
         if state.past_events.__len__() < 1:
             break
 
+        content: list[ChatCompletionContentPartParam] = []
+
         # Push the messages the bot missed to the LLM
-        content = ",".join([format_message(ev) for ev in state.past_events])
+        past_events_snapshot = list(state.past_events)
+        for ev in past_events_snapshot:
+            text_content = format_message(ev)
+            content.append({"type": "text", "text": text_content})
+
+            # Convert images in the message to base64
+            if preset.support_image:
+                base64_images = await process_images(ev)
+                for base64_image in base64_images:
+                    content.append({"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}})
 
         new_messages: list[ChatCompletionMessageParam] = [
             {"role": "user", "content": content}
```
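The user message assembled above ends up as OpenAI-style content parts; a sketch of the final payload shape, with made-up sample text and a truncated base64 string:

```python
# One text part per missed message, plus one image_url part per image
# when the preset has support_image enabled.
content = [
    {"type": "text", "text": "Alice: look at this picture"},
    {
        "type": "image_url",
        "image_url": {"url": "data:image/jpeg;base64,/9j/4AAQSkZJRg..."},
    },
]
message = {"role": "user", "content": content}
```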
```diff
@@ -268,10 +337,6 @@
         logger.debug(
             f"Sending API request, model: {preset.model_name}, history size: {len(messages)}"
         )
-        mcp_client = MCPClient(plugin_config.mcp_servers)
-        await mcp_client.connect_to_servers()
-
-        available_tools = await mcp_client.get_available_tools()
 
         client_config = {
             "model": preset.model_name,
```
```diff
@@ -280,7 +345,10 @@
             "timeout": 60,
         }
 
+        mcp_client = MCPClient(plugin_config.mcp_servers)
         if preset.support_mcp:
+            await mcp_client.connect_to_servers()
+            available_tools = await mcp_client.get_available_tools()
             client_config["tools"] = available_tools
 
         response = await client.chat.completions.create(
```
```diff
@@ -291,10 +359,7 @@
         if response.usage is not None:
             logger.debug(f"API response received, tokens used: {response.usage.total_tokens}")
 
-        final_message = []
         message = response.choices[0].message
-        if message.content:
-            final_message.append(message.content)
 
         # Handle the response and process tool calls
         while preset.support_mcp and message.tool_calls:
```
```diff
@@ -302,6 +367,11 @@
                 "role": "assistant",
                 "tool_calls": [tool_call.model_dump() for tool_call in message.tool_calls]
             })
 
+            # Send any reply the LLM produced while calling tools (usually none)
+            if message.content:
+                await send_split_messages(handler, message.content)
+
             # Process each tool call
             for tool_call in message.tool_calls:
                 tool_name = tool_call.function.name
```
```diff
@@ -316,7 +386,7 @@
                 new_messages.append({
                     "role": "tool",
                     "tool_call_id": tool_call.id,
-                    "content": str(result.content)
+                    "content": str(result)
                 })
 
             # Hand the tool call results back to the LLM
```
```diff
@@ -326,8 +396,6 @@
             )
 
             message = response.choices[0].message
-            if message.content:
-                final_message.append(message.content)
 
         await mcp_client.cleanup()
```
```diff
@@ -339,6 +407,11 @@
             or matched_reasoning_content
         )
 
+        new_messages.append({
+            "role": "assistant",
+            "content": reply,
+        })
+
         # Save history only after the request succeeds, keeping user/assistant messages alternating so R1-style models don't error
         for message in new_messages:
             state.history.append(message)
```
```diff
@@ -357,20 +430,7 @@
             logger.error(f"Failed to send merged forward message:\n{e!s}\n")
 
-        assert reply is not None
-        logger.info(
-            f"Preparing to send reply, group: {group_id}, segments: {len(reply.split('<botbr>'))}"
-        )
-        for r in reply.split("<botbr>"):
-            # Empty segments can apparently cause a string index out of range error
-            if len(r) == 0 or r.isspace():
-                continue
-            # Strip extra leading/trailing newlines and spaces
-            r = r.strip()
-            await asyncio.sleep(2)
-            logger.debug(
-                f"Sending message segment, content: {r[:50]}..."
-            )  # log only the first 50 characters to keep logs small
-            await handler.send(Message(r))
+        await send_split_messages(handler, reply)
 
     except Exception as e:
         logger.opt(exception=e).error(f"API request failed, group: {group_id}")
```
```diff
@@ -407,7 +467,7 @@ async def handle_preset(event: GroupMessageEvent, args: Message = CommandArg()):
 
 edit_preset_handler = on_command(
     "修改设定",
-    priority=99,
+    priority=1,
     block=True,
     permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
 )
```
```diff
@@ -424,7 +484,7 @@ async def handle_edit_preset(event: GroupMessageEvent, args: Message = CommandArg()):
 
 reset_handler = on_command(
     "记忆清除",
-    priority=99,
+    priority=1,
     block=True,
     permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
 )
```
```diff
@@ -441,7 +501,7 @@ async def handle_reset(event: GroupMessageEvent, args: Message = CommandArg()):
 
 set_prob_handler = on_command(
     "设置主动回复概率",
-    priority=99,
+    priority=1,
    block=True,
     permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
 )
```
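In NoneBot2, a smaller priority number runs earlier, so after this compare the admin commands (priority=1, block=True) are matched before the chat handler (priority=99, block=False) instead of being swallowed by it. A condensed sketch of the resulting registrations:

```python
from nonebot import on_command, on_message

# Chat handler now yields to commands: runs late and does not block.
handler = on_message(priority=99, block=False)

# Admin commands run first and stop propagation once matched.
edit_preset_handler = on_command("修改设定", priority=1, block=True)
reset_handler = on_command("记忆清除", priority=1, block=True)
```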
nonebot_plugin_llmchat/config.py (6 lines changed, Normal file → Executable file)
```diff
@@ -12,6 +12,7 @@ class PresetConfig(BaseModel):
     temperature: float = Field(0.7, description="Sampling temperature (0-2]")
     proxy: str = Field("", description="HTTP proxy server")
     support_mcp: bool = Field(False, description="Whether MCP is supported")
+    support_image: bool = Field(False, description="Whether image input is supported")
 
 class MCPServerConfig(BaseModel):
     """MCP server configuration"""
```
```diff
@@ -42,6 +43,11 @@ class ScopedConfig(BaseModel):
         description="Default prompt",
     )
     mcp_servers: dict[str, MCPServerConfig] = Field({}, description="MCP server configuration")
+    blacklist_user_ids: set[int] = Field(set(), description="Blacklisted user IDs")
+    ignore_prefixes: list[str] = Field(
+        default_factory=list,
+        description="Message prefixes to ignore; matching messages will not be processed"
+    )
 
 
 class Config(BaseModel):
```
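A quick sketch of how the two new ScopedConfig fields deserialize; pydantic coerces the plain lists supplied via .env into the annotated container types:

```python
from pydantic import BaseModel, Field

# Reduced model containing just the two fields added in this compare.
class ScopedConfig(BaseModel):
    blacklist_user_ids: set[int] = Field(set(), description="Blacklisted user IDs")
    ignore_prefixes: list[str] = Field(default_factory=list)

cfg = ScopedConfig(blacklist_user_ids=[10001, 10002], ignore_prefixes=["/", "#"])
assert 10001 in cfg.blacklist_user_ids and cfg.ignore_prefixes[0] == "/"
```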
nonebot_plugin_llmchat/mcpclient.py

```diff
@@ -1,3 +1,4 @@
+import asyncio
 from contextlib import AsyncExitStack
 
 from mcp import ClientSession, StdioServerParameters
```
```diff
@@ -64,9 +65,13 @@
         server_name, real_tool_name = tool_name.split("___")
         logger.info(f"Calling tool [{real_tool_name}] on server [{server_name}]")
         session = self.sessions[server_name]
-        response = await session.call_tool(real_tool_name, tool_args)
+        try:
+            response = await asyncio.wait_for(session.call_tool(real_tool_name, tool_args), timeout=30)
+        except asyncio.TimeoutError:
+            logger.error(f"Tool call [{real_tool_name}] timed out")
+            return f"Tool call [{real_tool_name}] timed out"
         logger.debug(f"Tool [{real_tool_name}] finished, response: {response}")
-        return response
+        return response.content
 
     def get_friendly_name(self, tool_name: str):
         server_name, real_tool_name = tool_name.split("___")
```
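The timeout pattern introduced here, isolated into a runnable sketch with a toy coroutine and shortened durations:

```python
import asyncio

async def slow_tool() -> str:
    await asyncio.sleep(2)  # stand-in for a long-running MCP tool call
    return "done"

async def main() -> None:
    try:
        # wait_for cancels the inner call and raises TimeoutError at the deadline
        result = await asyncio.wait_for(slow_tool(), timeout=0.5)
    except asyncio.TimeoutError:
        result = "tool call timed out"
    print(result)  # -> "tool call timed out"

asyncio.run(main())
```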
pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "nonebot-plugin-llmchat"
-version = "0.2.0"
+version = "0.2.5"
 description = "Nonebot AI group chat plugin supporting multiple API preset configurations"
 license = "GPL"
 authors = ["FuQuan <i@fuquan.moe>"]
```