Merge pull request #1 from StarHeartHunt/main

♻️ 重构代码
This commit is contained in:
FuQuan233 2025-02-15 16:25:32 +08:00 committed by GitHub
commit 4adf580493
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 1646 additions and 87 deletions

32
.editorconfig Normal file
View file

@@ -0,0 +1,32 @@
# http://editorconfig.org
root = true
[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
# The JSON files contain newlines inconsistently
[*.json]
insert_final_newline = ignore
# Makefiles always use tabs for indentation
[Makefile]
indent_style = tab
# Batch files use tabs for indentation
[*.bat]
indent_style = tab
[*.md]
trim_trailing_whitespace = false
# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_size = 2
[{*.py,*.pyi}]
indent_size = 4

View file

@@ -1,22 +1,31 @@
import aiofiles
from nonebot import get_plugin_config, on_message, logger, on_command, get_driver, require
from nonebot.plugin import PluginMetadata
from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message
from nonebot.adapters.onebot.v11.permission import GROUP_ADMIN, GROUP_OWNER
from nonebot.params import CommandArg
from nonebot.rule import Rule
from nonebot.permission import SUPERUSER
from typing import Dict
from datetime import datetime
from collections import deque
import asyncio
from openai import AsyncOpenAI
from .config import Config, PresetConfig
import time
from collections import deque
from datetime import datetime
import json
import os
import random
import re
import time
from typing import TYPE_CHECKING, Optional
import aiofiles
from nonebot import (
get_driver,
get_plugin_config,
logger,
on_command,
on_message,
require,
)
from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message
from nonebot.adapters.onebot.v11.permission import GROUP_ADMIN, GROUP_OWNER
from nonebot.params import CommandArg
from nonebot.permission import SUPERUSER
from nonebot.plugin import PluginMetadata
from nonebot.rule import Rule
from openai import AsyncOpenAI
from .config import Config, PresetConfig
require("nonebot_plugin_localstore")
import nonebot_plugin_localstore as store
@@ -24,6 +33,11 @@ import nonebot_plugin_localstore as store
require("nonebot_plugin_apscheduler")
from nonebot_plugin_apscheduler import scheduler
if TYPE_CHECKING:
from collections.abc import Iterable
from openai.types.chat import ChatCompletionMessageParam
__plugin_meta__ = PluginMetadata(
name="llmchat",
description="支持多API预设配置的AI群聊插件",
@@ -36,6 +50,16 @@ __plugin_meta__ = PluginMetadata(
pluginConfig = get_plugin_config(Config).llmchat
driver = get_driver()
tasks: set["asyncio.Task"] = set()
def filter_think(content: Optional[str]) -> Optional[str]:
    """Remove every ``<think>...</think>`` block from *content* and trim whitespace.

    ``None`` is passed through unchanged so callers can feed the raw API
    response content in directly, without a separate null check.
    """
    if content is None:
        return None
    think_block = re.compile(r"<think>.*?</think>", re.DOTALL)
    return think_block.sub("", content).strip()
# 初始化群组状态
class GroupState:
@@ -46,10 +70,12 @@ class GroupState:
self.processing = False
self.last_active = time.time()
self.past_events = deque(maxlen=pluginConfig.past_events_size)
self.group_prompt = None
self.group_prompt: Optional[str] = None
self.output_reasoning_content = False
group_states: Dict[int, GroupState] = {}
group_states: dict[int, GroupState] = {}
# 获取当前预设配置
def get_preset(group_id: int) -> PresetConfig:
@@ -59,14 +85,15 @@ def get_preset(group_id: int) -> PresetConfig:
return preset
return pluginConfig.api_presets[0] # 默认返回第一个预设
# 消息格式转换
def format_message(event: GroupMessageEvent) -> Dict:
def format_message(event: GroupMessageEvent) -> str:
text_message = ""
if event.reply != None:
if event.reply is not None:
text_message += f"[回复 {event.reply.sender.nickname} 的消息 {event.reply.message.extract_plain_text()}]\n"
if event.is_tome():
text_message += f"@{list(driver.config.nickname)[0]} "
text_message += f"@{next(iter(driver.config.nickname))} "
for msgseg in event.get_message():
if msgseg.type == "at":
@@ -80,14 +107,15 @@ def format_message(event: GroupMessageEvent) -> Dict:
elif msgseg.type == "text":
text_message += msgseg.data.get("text", "")
message = {
message = {
"SenderNickname": str(event.sender.card or event.sender.nickname),
"SenderUserId": str(event.user_id),
"Message": text_message,
"SendTime" : datetime.fromtimestamp(event.time).isoformat()
"SendTime": datetime.fromtimestamp(event.time).isoformat(),
}
return json.dumps(message, ensure_ascii=False)
async def isTriggered(event: GroupMessageEvent) -> bool:
"""扩展后的消息处理规则"""
@@ -122,10 +150,13 @@ handler = on_message(
block=False,
)
@handler.handle()
async def handle_message(event: GroupMessageEvent):
group_id = event.group_id
logger.debug(f"收到群聊消息 群号:{group_id} 用户:{event.user_id} 内容:{event.get_plaintext()}")
logger.debug(
f"收到群聊消息 群号:{group_id} 用户:{event.user_id} 内容:{event.get_plaintext()}"
)
if group_id not in group_states:
group_states[group_id] = GroupState()
@@ -135,7 +166,10 @@ async def handle_message(event: GroupMessageEvent):
await state.queue.put(event)
if not state.processing:
state.processing = True
asyncio.create_task(process_messages(group_id))
task = asyncio.create_task(process_messages(group_id))
task.add_done_callback(tasks.discard)
tasks.add(task)
async def process_messages(group_id: int):
state = group_states[group_id]
@@ -145,16 +179,17 @@ async def process_messages(group_id: int):
client = AsyncOpenAI(
base_url=preset.api_base,
api_key=preset.api_key,
timeout=pluginConfig.request_timeout
timeout=pluginConfig.request_timeout,
)
logger.info(f"开始处理群聊消息 群号:{group_id} 当前队列长度:{state.queue.qsize()}")
logger.info(
f"开始处理群聊消息 群号:{group_id} 当前队列长度:{state.queue.qsize()}"
)
while not state.queue.empty():
event = await state.queue.get()
logger.debug(f"从队列获取消息 群号:{group_id} 消息ID{event.message_id}")
try:
systemPrompt = (
f'''
systemPrompt = f"""
我想要你帮我在群聊中闲聊大家一般叫你{"".join(list(driver.config.nickname))}我将会在后面的信息中告诉你每条群聊信息的发送者和发送时间你可以直接称呼发送者为他对应的昵称
你的回复需要遵守以下几点规则
- 你可以使用多条消息回复每两条消息之间使用<botbr>分隔<botbr>前后不需要包含额外的换行和空格
@@ -168,12 +203,13 @@ f'''
- 如果你需要思考的话你应该思考尽量少以节省时间
下面是关于你性格的设定如果设定中提到让你扮演某个人或者设定中有提到名字则优先使用设定中的名字
{state.group_prompt or pluginConfig.default_prompt}
'''
)
"""
messages = [{"role": "system", "content": systemPrompt}]
messages: Iterable[ChatCompletionMessageParam] = [
{"role": "system", "content": systemPrompt}
]
messages += list(state.history)[-pluginConfig.history_size:]
messages += list(state.history)[-pluginConfig.history_size :]
# 没有未处理的消息说明已经被处理了,跳过
if state.past_events.__len__() < 1:
@@ -182,51 +218,72 @@ f'''
# 将机器人错过的消息推送给LLM
content = ",".join([format_message(ev) for ev in state.past_events])
logger.debug(f"发送API请求 模型:{preset.model_name} 历史消息数:{len(messages)}")
logger.debug(
f"发送API请求 模型:{preset.model_name} 历史消息数:{len(messages)}"
)
response = await client.chat.completions.create(
model=preset.model_name,
messages=messages + [{"role": "user", "content": content}],
messages=[*messages, {"role": "user", "content": content}],
max_tokens=preset.max_tokens,
temperature=preset.temperature,
timeout=60
timeout=60,
)
logger.debug(f"收到API响应 使用token数{response.usage.total_tokens}")
reply = response.choices[0].message.content
if response.usage is not None:
logger.debug(f"收到API响应 使用token数{response.usage.total_tokens}")
if not state.output_reasoning_content:
reply = filter_think(response.choices[0].message.content)
else:
reply = response.choices[0].message.content
# 请求成功后再保存历史记录保证user和assistant穿插防止R1模型报错
state.history.append({"role": "user", "content": content})
state.past_events.clear()
reasoning_content: str | None = getattr(response.choices[0].message, "reasoning_content", None)
reasoning_content = getattr(
response.choices[0].message, "reasoning_content", None
)
if state.output_reasoning_content and reasoning_content:
await handler.send(Message(reasoning_content))
logger.info(f"准备发送回复消息 群号:{group_id} 消息分段数:{len(reply.split('<botbr>'))}")
assert reply is not None
logger.info(
f"准备发送回复消息 群号:{group_id} 消息分段数:{len(reply.split('<botbr>'))}"
)
for r in reply.split("<botbr>"):
# 删除前后多余的换行和空格
while r[0] == "\n" or r[0] == " ": r = r[1:]
while r[-1] == "\n" or r[0] == " ": r = r[:-1]
while r[0] == "\n" or r[0] == " ":
r = r[1:]
while r[-1] == "\n" or r[0] == " ":
r = r[:-1]
await asyncio.sleep(2)
logger.debug(f"发送消息分段 内容:{r[:50]}...") # 只记录前50个字符避免日志过大
logger.debug(
f"发送消息分段 内容:{r[:50]}..."
) # 只记录前50个字符避免日志过大
await handler.send(Message(r))
# 添加助手回复到历史
state.history.append({
"role": "assistant",
"content": reply,
})
state.history.append(
{
"role": "assistant",
"content": reply,
}
)
except Exception as e:
logger.error(f"API请求失败 群号:{group_id} 错误:{str(e)}", exc_info=True)
await handler.send(Message(f"服务暂时不可用,请稍后再试\n{str(e)}"))
logger.error(f"API请求失败 群号:{group_id} 错误:{e!s}", exc_info=True)
await handler.send(Message(f"服务暂时不可用,请稍后再试\n{e!s}"))
finally:
state.queue.task_done()
state.processing = False
# 预设切换命令
preset_handler = on_command("API预设", priority=1, block=True, permission=SUPERUSER)
@preset_handler.handle()
async def handle_preset(event: GroupMessageEvent, args: Message = CommandArg()):
group_id = event.group_id
@@ -237,18 +294,29 @@ async def handle_preset(event: GroupMessageEvent, args: Message = CommandArg()):
if preset_name == "off":
group_states[group_id].preset_name = preset_name
await preset_handler.finish(f"已关闭llmchat")
await preset_handler.finish("已关闭llmchat")
available_presets = {p.name for p in pluginConfig.api_presets}
if preset_name not in available_presets:
await preset_handler.finish(f"当前API预设{group_states[group_id].preset_name}\n可用API预设\n- {'\n- '.join(available_presets)}")
available_presets_str = "\n- ".join(available_presets)
await preset_handler.finish(
f"当前API预设{group_states[group_id].preset_name}\n可用API预设\n- {available_presets_str}"
)
group_states[group_id].preset_name = preset_name
await preset_handler.finish(f"已切换至API预设{preset_name}")
preset_handler = on_command("修改设定", priority=1, block=True, permission=(SUPERUSER|GROUP_ADMIN|GROUP_OWNER))
@preset_handler.handle()
async def handle_preset(event: GroupMessageEvent, args: Message = CommandArg()):
edit_preset_handler = on_command(
"修改设定",
priority=1,
block=True,
permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
)
@edit_preset_handler.handle()
async def handle_edit_preset(event: GroupMessageEvent, args: Message = CommandArg()):
group_id = event.group_id
group_prompt = args.extract_plain_text().strip()
@@ -258,7 +326,15 @@ async def handle_preset(event: GroupMessageEvent, args: Message = CommandArg()):
group_states[group_id].group_prompt = group_prompt
await preset_handler.finish("修改成功")
reset_handler = on_command("记忆清除", priority=99, block=True, permission=(SUPERUSER|GROUP_ADMIN|GROUP_OWNER))
reset_handler = on_command(
"记忆清除",
priority=99,
block=True,
permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
)
@reset_handler.handle()
async def handle_reset(event: GroupMessageEvent, args: Message = CommandArg()):
group_id = event.group_id
@@ -268,12 +344,20 @@ async def handle_reset(event: GroupMessageEvent, args: Message = CommandArg()):
group_states[group_id].past_events.clear()
group_states[group_id].history.clear()
await preset_handler.finish(f"记忆已清空")
await preset_handler.finish("记忆已清空")
# 预设切换命令
preset_handler = on_command("切换思维输出", priority=1, block=True, permission=(SUPERUSER|GROUP_ADMIN|GROUP_OWNER))
@preset_handler.handle()
async def handle_preset(event: GroupMessageEvent, args: Message = CommandArg()):
think_handler = on_command(
"切换思维输出",
priority=1,
block=True,
permission=(SUPERUSER | GROUP_ADMIN | GROUP_OWNER),
)
@think_handler.handle()
async def handle_think(event: GroupMessageEvent, args: Message = CommandArg()):
group_id = event.group_id
if group_id not in group_states:
@@ -294,6 +378,7 @@ data_dir = store.get_plugin_data_dir()
# 获取插件数据文件
data_file = store.get_plugin_data_file("llmchat_state.json")
async def save_state():
"""保存群组状态到文件"""
logger.info(f"开始保存群组状态到文件:{data_file}")
@@ -303,7 +388,7 @@ async def save_state():
"history": list(state.history),
"last_active": state.last_active,
"group_prompt": state.group_prompt,
"output_reasoning_content": state.output_reasoning_content
"output_reasoning_content": state.output_reasoning_content,
}
for gid, state in group_states.items()
}
@@ -312,30 +397,35 @@ async def save_state():
async with aiofiles.open(data_file, "w") as f:
await f.write(json.dumps(data, ensure_ascii=False))
async def load_state():
"""从文件加载群组状态"""
logger.info(f"从文件加载群组状态:{data_file}")
if not os.path.exists(data_file):
return
async with aiofiles.open(data_file, "r") as f:
async with aiofiles.open(data_file) as f:
data = json.loads(await f.read())
for gid, state_data in data.items():
state = GroupState()
state.preset_name = state_data["preset"]
state.history = deque(state_data["history"], maxlen=pluginConfig.history_size)
state.history = deque(
state_data["history"], maxlen=pluginConfig.history_size
)
state.last_active = state_data["last_active"]
state.group_prompt = state_data["group_prompt"]
state.output_reasoning_content = state_data["output_reasoning_content"]
group_states[int(gid)] = state
# 注册生命周期事件
# Lifecycle hook: runs once when the NoneBot driver starts up.
@driver.on_startup
async def init_plugin():
    # Restore persisted group states from disk before any messages are handled.
    logger.info("插件启动初始化")
    await load_state()
    # Save state every 5 minutes
    scheduler.add_job(save_state, 'interval', minutes=5)
    # NOTE(review): the call below duplicates the registration above; this looks
    # like a diff artifact (old single-quote line next to its reformatted
    # replacement) — confirm only one add_job call exists in the actual file.
    scheduler.add_job(save_state, "interval", minutes=5)
@driver.on_shutdown
async def cleanup_plugin():

View file

@@ -1,8 +1,9 @@
from pydantic import BaseModel, Field
from typing import List, Dict, Optional
class PresetConfig(BaseModel):
"""API预设配置"""
name: str = Field(..., description="预设名称(唯一标识)")
api_base: str = Field(..., description="API基础地址")
api_key: str = Field(..., description="API密钥")
@@ -10,15 +11,25 @@ class PresetConfig(BaseModel):
max_tokens: int = Field(2048, description="最大响应token数")
temperature: float = Field(0.7, description="生成温度0-2]")
class ScopedConfig(BaseModel):
    """LLM Chat plugin configuration (everything scoped under the `llmchat` key)."""

    # NOTE(review): this span is a rendered diff — several fields appear twice
    # (old single-line form directly followed by its reformatted replacement).
    # In the actual file only one definition of each field exists; confirm
    # against the repository before relying on this text as-is.
    api_presets: List[PresetConfig] = Field(...,description="API预设列表至少配置1个预设")
    api_presets: list[PresetConfig] = Field(
        ..., description="API预设列表至少配置1个预设"
    )
    # Number of messages kept as LLM conversation context.
    history_size: int = Field(20, description="LLM上下文消息保留数量")
    past_events_size : int = Field(10, description="触发回复时发送的群消息数量")
    past_events_size: int = Field(10, description="触发回复时发送的群消息数量")
    # API request timeout in seconds.
    request_timeout: int = Field(30, description="API请求超时时间")
    # "off" disables the plugin until a preset is selected.
    default_preset: str = Field("off", description="默认使用的预设名称")
    random_trigger_prob: float = Field(0.05, ge=0.0, le=1.0, description="随机触发概率0-1]")
    default_prompt: str = Field("你的回答应该尽量简洁、幽默、可以使用一些语气词、颜文字。你应该拒绝回答任何政治相关的问题。", description="默认提示词")
    random_trigger_prob: float = Field(
        0.05, ge=0.0, le=1.0, description="随机触发概率0-1]"
    )
    default_prompt: str = Field(
        "你的回答应该尽量简洁、幽默、可以使用一些语气词、颜文字。你应该拒绝回答任何政治相关的问题。",
        description="默认提示词",
    )
class Config(BaseModel):
    """Top-level plugin config model: nests all settings under one scope."""

    # All plugin options live under the `llmchat` key in the NoneBot config.
    llmchat: ScopedConfig

1372
poetry.lock generated Normal file

File diff suppressed because it is too large Load diff

View file

@@ -19,6 +19,60 @@ nonebot-plugin-apscheduler = "^0.5.0"
nonebot-adapter-onebot = "^2.0.0"
nonebot-plugin-localstore = "^0.7.3"
[tool.poetry.group.dev.dependencies]
ruff = "^0.8.0"
nonemoji = "^0.1.2"
pre-commit = "^4.0.0"
[tool.ruff]
line-length = 130
target-version = "py39"
[tool.ruff.format]
line-ending = "lf"
[tool.ruff.lint]
select = [
"F", # Pyflakes
"W", # pycodestyle warnings
"E", # pycodestyle errors
"I", # isort
"UP", # pyupgrade
"ASYNC", # flake8-async
"C4", # flake8-comprehensions
"T10", # flake8-debugger
"T20", # flake8-print
"PYI", # flake8-pyi
"PT", # flake8-pytest-style
"Q", # flake8-quotes
"TID", # flake8-tidy-imports
"TC", # flake8-type-checking
"RUF", # Ruff-specific rules
]
ignore = [
"E402", # module-import-not-at-top-of-file
"UP037", # quoted-annotation
"RUF001", # ambiguous-unicode-character-string
"RUF002", # ambiguous-unicode-character-docstring
"RUF003", # ambiguous-unicode-character-comment
]
[tool.ruff.lint.isort]
force-sort-within-sections = true
[tool.ruff.lint.pyupgrade]
keep-runtime-typing = true
[tool.pyright]
pythonVersion = "3.9"
pythonPlatform = "All"
defineConstant = { PYDANTIC_V2 = true }
executionEnvironments = [{ root = "./" }]
typeCheckingMode = "standard"
reportShadowedImports = false
disableBytesTypePromotions = true
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"