diff --git a/nonebot_plugin_llmchat/__init__.py b/nonebot_plugin_llmchat/__init__.py
index 86281a0..5903779 100644
--- a/nonebot_plugin_llmchat/__init__.py
+++ b/nonebot_plugin_llmchat/__init__.py
@@ -9,6 +9,7 @@ import time
 from typing import TYPE_CHECKING, Optional
 
 import aiofiles
+import httpx
 from nonebot import (
     get_bot,
     get_driver,
@@ -203,11 +204,19 @@ async def process_messages(group_id: int):
     preset = get_preset(group_id)
 
     # 初始化OpenAI客户端
-    client = AsyncOpenAI(
-        base_url=preset.api_base,
-        api_key=preset.api_key,
-        timeout=plugin_config.request_timeout,
-    )
+    if preset.proxy:
+        client = AsyncOpenAI(
+            base_url=preset.api_base,
+            api_key=preset.api_key,
+            timeout=plugin_config.request_timeout,
+            http_client=httpx.AsyncClient(proxy=preset.proxy),
+        )
+    else:
+        client = AsyncOpenAI(
+            base_url=preset.api_base,
+            api_key=preset.api_key,
+            timeout=plugin_config.request_timeout,
+        )
 
     logger.info(
         f"开始处理群聊消息 群号:{group_id} 当前队列长度:{state.queue.qsize()}"
diff --git a/nonebot_plugin_llmchat/config.py b/nonebot_plugin_llmchat/config.py
index c5e4f37..bbb1f66 100644
--- a/nonebot_plugin_llmchat/config.py
+++ b/nonebot_plugin_llmchat/config.py
@@ -10,6 +10,7 @@ class PresetConfig(BaseModel):
     model_name: str = Field(..., description="模型名称")
     max_tokens: int = Field(2048, description="最大响应token数")
     temperature: float = Field(0.7, description="生成温度(0-2]")
+    proxy: Optional[str] = Field(None, description="HTTP代理服务器")
 
 
 class ScopedConfig(BaseModel):