Mirror of https://github.com/LiteyukiStudio/nonebot-plugin-marshoai.git, synced 2024-11-26 23:05:04 +08:00
✍️ Fix an issue where a context backup could still be loaded after a reset when no conversation had taken place; correct wording; update the README; add log output for context backup and restore
This commit is contained in:
parent d04fc34e9b
commit 978d67268d

README.md (14 changed lines)

@@ -8,7 +8,7 @@
 
 # nonebot-plugin-marshoai
 
-_✨ 使用 Azure OpenAI 推理服务的聊天机器人插件 ✨_
+_✨ 使用 OpenAI 标准格式 API 的聊天机器人插件 ✨_
 
 <a href="./LICENSE">
 <img src="https://img.shields.io/github/license/LiteyukiStudio/nonebot-plugin-marshoai.svg" alt="license">
@@ -22,10 +22,10 @@ _✨ 使用 Azure OpenAI 推理服务的聊天机器人插件 ✨_
 
 ## 📖 介绍
 
-通过调用由 Azure OpenAI 驱动,GitHub Models 提供访问的生成式 AI 推理 API 来实现聊天的插件。
+通过调用 OpenAI 标准格式 API(例如由 Azure OpenAI 驱动,GitHub Models 提供访问的生成式 AI 推理 API) 来实现聊天的插件。
 插件内置了猫娘小棉(Marsho)的人物设定,可以进行可爱的聊天!
 *谁不喜欢回复消息快又可爱的猫娘呢?*
-**※对 Azure AI Studio等的支持待定。对 OneBot 以外的适配器支持未经过完全验证。**
+**对 OneBot 以外的适配器与非 GitHub Models API的支持未经过完全验证。**
 [Melobot 实现](https://github.com/LiteyukiStudio/marshoai-melo)
 
 ## 🐱 设定
@@ -87,7 +87,7 @@ _✨ 使用 Azure OpenAI 推理服务的聊天机器人插件 ✨_
 
 </details>
 
-## 🤖 获取 token
+## 🤖 获取 token(GitHub Models)
 
 - 新建一个[personal access token](https://github.com/settings/tokens/new),**不需要给予任何权限**。
 - 将新建的 token 复制,添加到`.env`文件中的`marshoai_token`配置项中。
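As the section above describes, the new token only needs to be written into NoneBot's `.env` file. A minimal sketch of that entry, using a placeholder value rather than a real token:

```dotenv
# .env (illustrative; the value below is a placeholder, not a real token)
# GitHub personal access token; no scopes are required
MARSHOAI_TOKEN=ghp_xxxxxxxxxxxxxxxxxxxx
```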
@@ -129,7 +129,7 @@ _✨ 使用 Azure OpenAI 推理服务的聊天机器人插件 ✨_
 
 | 配置项 | 必填 | 默认值 | 说明 |
 |:---------------------------------:|:--:|:---------------------------------------:|:---------------------------------------------------------------------------------------------:|
-| MARSHOAI_TOKEN | 是 | 无 | 调用 API 必需的访问 token |
+| MARSHOAI_TOKEN | 是? | 无 | 调用 API 所需的访问 token |
 | MARSHOAI_DEFAULT_NAME | 否 | `marsho` | 调用 Marsho 默认的命令前缀 |
 | MARSHOAI_ALIASES | 否 | `set{"小棉"}` | 调用 Marsho 的命令别名 |
 | MARSHOAI_DEFAULT_MODEL | 否 | `gpt-4o-mini` | Marsho 默认调用的模型 |
@@ -140,7 +140,7 @@ _✨ 使用 Azure OpenAI 推理服务的聊天机器人插件 ✨_
 | MARSHOAI_ENABLE_NICKNAME_TIP | 否 | `true` | 启用后用户未设置昵称时提示用户设置 |
 | MARSHOAI_ENABLE_PRAISES | 否 | `true` | 是否启用夸赞名单功能 |
 | MARSHOAI_ENABLE_TIME_PROMPT | 否 | `true` | 是否启用实时更新的日期与时间(精确到秒)与农历日期系统提示词 |
-| MARSHOAI_AZURE_ENDPOINT | 否 | `https://models.inference.ai.azure.com` | 调用 Azure OpenAI 服务的 API 终结点 |
+| MARSHOAI_AZURE_ENDPOINT | 否 | `https://models.inference.ai.azure.com` | OpenAI 标准格式 API 端点 |
 | MARSHOAI_TEMPERATURE | 否 | 无 | 进行推理时的温度参数 |
 | MARSHOAI_TOP_P | 否 | 无 | 进行推理时的核采样参数 |
 | MARSHOAI_MAX_TOKENS | 否 | 无 | 返回消息的最大 token 数 |
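The MARSHOAI_AZURE_ENDPOINT row above carries the commit's wording change: the setting is now described as a generic OpenAI-format API endpoint rather than an Azure-specific one. A hedged `.env` sketch of such an override follows; the URL, key, and model name are placeholders, and, as the README itself notes, non-GitHub-Models APIs are not fully verified:

```dotenv
# .env (placeholder values; support for non-GitHub-Models endpoints is not fully verified)
MARSHOAI_AZURE_ENDPOINT=https://api.example.com/v1
MARSHOAI_DEFAULT_MODEL=gpt-4o-mini
MARSHOAI_TOKEN=sk-placeholder
```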
@@ -155,6 +155,6 @@ _✨ 使用 Azure OpenAI 推理服务的聊天机器人插件 ✨_
 
 - [x] [Melobot](https://github.com/Meloland/melobot) 实现
 - [x] 对聊天发起者的认知(认出是谁在问 Marsho)(初步实现)
-- [ ] 自定义 API 接入点(不局限于Azure)
+- [ ] 自定义 API 接入点的适配(不局限于GitHub Models)
 - [ ] 上下文通过数据库持久化存储
 
@@ -12,7 +12,7 @@ from azure.ai.inference.models import (
     CompletionsFinishReason,
 )
 from azure.core.credentials import AzureKeyCredential
-from nonebot import on_command
+from nonebot import on_command, logger
 from nonebot.adapters import Message, Event
 from nonebot.params import CommandArg
 from nonebot.permission import SUPERUSER
@@ -56,7 +56,7 @@ context = MarshoContext()
 token = config.marshoai_token
 endpoint = config.marshoai_azure_endpoint
 client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(token))
-target_list = [] # 记录需保存历史记录的列表
+target_list = [] # 记录需保存历史上下文的列表
 
 
 @add_usermsg_cmd.handle()
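This hunk is where the configured token and endpoint are actually consumed: they feed an azure-ai-inference ChatCompletionsClient. Purely for orientation, here is a minimal, self-contained sketch of that setup outside the plugin; the endpoint, token, and model values are placeholders, and the plugin's surrounding handler code is not reproduced:

```python
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential

# Placeholder values; in the plugin these come from marshoai_azure_endpoint / marshoai_token.
endpoint = "https://models.inference.ai.azure.com"
token = "<your-access-token>"

client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(token))

# A single chat-completion request against the configured endpoint.
response = client.complete(
    model="gpt-4o-mini",
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        UserMessage(content="Hello!"),
    ],
)
print(response.choices[0].message.content)
```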
@@ -101,6 +101,7 @@ async def save_context(target: MsgTarget, arg: Message = CommandArg()):
 @load_context_cmd.handle()
 async def load_context(target: MsgTarget, arg: Message = CommandArg()):
     if msg := arg.extract_plain_text():
+        await get_backup_context(target.id, target.private) # 为了将当前会话添加到"已恢复过备份"的列表而添加,防止上下文被覆盖(好奇怪QwQ
         context.set_context(
             await load_context_from_json(msg, "contexts"), target.id, target.private
         )
@@ -109,6 +110,7 @@ async def load_context(target: MsgTarget, arg: Message = CommandArg()):
 
 @resetmem_cmd.handle()
 async def resetmem(target: MsgTarget):
+    if [target.id, target.private] not in target_list: target_list.append([target.id, target.private])
     context.reset(target.id, target.private)
     await resetmem_cmd.finish("上下文已重置")
 
@@ -158,8 +160,10 @@ async def marsho(target: MsgTarget, event: Event, text: Optional[UniMsg] = None)
         if nickname != "":
             nickname_prompt = f"\n*此消息的说话者:{user_nickname}*"
         else:
-            user_nickname = event.sender.nickname # 未设置昵称时获取用户名
-            nickname_prompt = f"\n*此消息的说话者:{user_nickname}"
+            nickname_prompt = ""
+            #用户名无法获取,暂时注释
+            #user_nickname = event.sender.nickname # 未设置昵称时获取用户名
+            #nickname_prompt = f"\n*此消息的说话者:{user_nickname}"
             if config.marshoai_enable_nickname_tip:
                 await UniMessage(
                     "*你未设置自己的昵称。推荐使用'nickname [昵称]'命令设置昵称来获得个性化(可能)回答。"
@@ -186,8 +190,8 @@ async def marsho(target: MsgTarget, event: Event, text: Optional[UniMsg] = None)
         backup_context = await get_backup_context(target.id, target.private)
         if backup_context:
             context.set_context(backup_context, target.id, target.private) # 加载历史记录
+            logger.info(f"已恢复会话 {target.id} 的上下文备份~")
         context_msg = context.build(target.id, target.private)
-        target_list.append([target.id, target.private])
         if not is_reasoning_model:
             context_msg = [get_prompt()] + context_msg
             # o1等推理模型不支持系统提示词, 故不添加
@@ -205,6 +209,8 @@ async def marsho(target: MsgTarget, event: Event, text: Optional[UniMsg] = None)
                 UserMessage(content=usermsg).as_dict(), target.id, target.private
             )
             context.append(choice.message.as_dict(), target.id, target.private)
+            if [target.id, target.private] not in target_list:
+                target_list.append([target.id, target.private])
         elif choice["finish_reason"] == CompletionsFinishReason.CONTENT_FILTERED:
             await UniMessage("*已被内容过滤器过滤。请调整聊天内容后重试。").send(
                 reply_to=True
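Together with the resetmem and load_context hunks earlier in this file, the change above moves the point at which a session enters target_list (the list the shutdown backup iterates over) from before the API call to after a successful reply, or an explicit reset. The sketch below is a simplified, self-contained model of that lifecycle, not the plugin's actual code; it assumes, as the surrounding code suggests but this diff does not show, that the restore branch runs only for sessions not yet in target_list:

```python
# Simplified model (illustrative only) of the backup gating this commit establishes.
target_list: list[str] = []    # sessions whose context is backed up on shutdown
backups: dict[str, list] = {}  # stand-in for the JSON backup files
contexts: dict[str, list] = {} # in-memory chat contexts


def on_chat(target_id: str, user_msg: str, reply: str) -> None:
    if target_id not in target_list:
        # First contact this run: pull the backup saved at the last shutdown.
        saved = backups.get(target_id)
        if saved:
            contexts[target_id] = list(saved)
    contexts.setdefault(target_id, []).extend([user_msg, reply])
    if target_id not in target_list:
        target_list.append(target_id)  # moved here: register only after a real exchange


def on_reset(target_id: str) -> None:
    # New in this commit: a reset also registers the session, so the next message
    # skips the restore branch and the emptied context overwrites the old backup.
    if target_id not in target_list:
        target_list.append(target_id)
    contexts[target_id] = []


def on_shutdown() -> None:
    # Rough analogue of auto_backup_context.
    for target_id in target_list:
        backups[target_id] = contexts.get(target_id, [])
```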
@@ -252,7 +258,7 @@ with contextlib.suppress(ImportError): # 优化先不做()
 
 
 @driver.on_shutdown
-async def save_context():
+async def auto_backup_context():
     for target_info in target_list:
         target_id, target_private = target_info
         contexts_data = context.build(target_id, target_private)
@@ -261,3 +267,4 @@ async def save_context():
         else:
             target_uid = "group_" + target_id
         await save_context_to_json(f"back_up_context_{target_uid}", contexts_data, "contexts/backup")
+        logger.info(f"已保存会话 {target_id} 的上下文备份,将在下次对话时恢复~")
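save_context_to_json itself is not part of this diff, so its storage format and location are unknown here. Purely as an illustration of what the shutdown backup call above could amount to, a hypothetical stand-in with the same call shape might look like this; the directory layout and file naming are assumptions, not the plugin's actual implementation:

```python
import json
from pathlib import Path


async def save_context_to_json(name: str, context: list, path: str) -> None:
    """Hypothetical stand-in; the real helper's behavior is not shown in this diff."""
    folder = Path("data/marshoai") / path  # assumed base directory
    folder.mkdir(parents=True, exist_ok=True)
    with open(folder / f"{name}.json", "w", encoding="utf-8") as f:
        json.dump(context, f, ensure_ascii=False, indent=4)
```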
@@ -5,7 +5,7 @@ from .constants import USAGE
 
 metadata = PluginMetadata(
     name="Marsho AI插件",
-    description="接入Azure服务的AI聊天插件",
+    description="接入Azure服务或其他API的AI猫娘聊天插件",
     usage=USAGE,
     type="application",
     config=ConfigModel,
@@ -14,8 +14,8 @@ from azure.ai.inference.models import SystemMessage
 from .config import config
 
 nickname_json = None # 记录昵称
-praises_json = None # 记录赞扬名单
-loaded_target_list = [] # 记录已恢复历史记录的列表
+praises_json = None # 记录夸赞名单
+loaded_target_list = [] # 记录已恢复备份的上下文的列表
 
 
 async def get_image_b64(url):
@@ -192,7 +192,7 @@ def suggest_solution(errinfo: str) -> str:
         "RateLimitReached": "模型达到调用速率限制。请稍等一段时间或联系Bot管理员。",
         "tokens_limit_reached": "请求token达到上限。请重置上下文。",
         "content_length_limit": "请求体过大。请重置上下文。",
-        "unauthorized": "Azure凭据无效。请联系Bot管理员。",
+        "unauthorized": "访问token无效。请联系Bot管理员。",
         "invalid type: parameter messages.content is of type array but should be of type string.": "聊天请求体包含此模型不支持的数据类型。请重置上下文。",
         "At most 1 image(s) may be provided in one request.": "此模型只能在上下文中包含1张图片。如果此前的聊天已经发送过图片,请重置上下文。",
     }
@@ -205,7 +205,7 @@ def suggest_solution(errinfo: str) -> str:
 
 
 async def get_backup_context(target_id: str, target_private: bool) -> list:
-    """获取历史记录"""
+    """获取历史上下文"""
     global loaded_target_list
     if target_private:
         target_uid = f"private_{target_id}"