From e408a48e425ca0d98fbf50ced4fade5b1feeb9bb Mon Sep 17 00:00:00 2001
From: Asankilp
Date: Tue, 1 Oct 2024 22:12:21 +0800
Subject: [PATCH] Separate the contexts of different sessions; improve the help text
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 __init__.py | 20 ++++++++++----------
 azure.py    | 48 +++++++++++++++++++++++-------------------------
 config.py   |  1 -
 models.py   | 37 +++++++++++++++++++++----------------
 util.py     |  4 +---
 5 files changed, 55 insertions(+), 55 deletions(-)

diff --git a/__init__.py b/__init__.py
index a7b4f01..94fc0a1 100644
--- a/__init__.py
+++ b/__init__.py
@@ -1,20 +1,20 @@
 from nonebot.plugin import PluginMetadata, inherit_supported_adapters, require
-require("nonebot_plugin_htmlrender")
 require("nonebot_plugin_alconna")
 from .azure import *
 from nonebot import get_driver
-#from .config import ConfigModel
 usage = """MarshoAI Alpha by Asankilp
 用法:
- marsho <聊天内容>
-与 Marsho 进行对话。当模型为gpt时,可以带上图片进行对话。
- changemodel <模型名>
-切换 AI 模型。仅超级用户可用。
- reset
-重置上下文。仅超级用户可用。
+    marsho <聊天内容> : 与 Marsho 进行对话。当模型为 GPT-4o(-mini) 等时,可以带上图片进行对话。
+    reset : 重置当前会话的上下文。
+超级用户命令:
+    changemodel <模型名> : 切换全局 AI 模型。
+    contexts : 返回当前会话的上下文列表。 ※当上下文包含图片时,不要使用此命令。
+    praises : 返回夸赞名单的提示词。
+    usermsg <消息> : 往当前会话添加用户消息(UserMessage)。
+    assistantmsg <消息> : 往当前会话添加助手消息(AssistantMessage)。
 注意事项:
-当 Marsho 回复消息为None或以content_filter开头的错误信息时,表示该消息被内容过滤器过滤,请调整你的聊天内容确保其合规。
-当回复以RateLimitReached开头的错误信息时,该 AI 模型的次数配额已用尽,请联系Bot管理员。
+ - 当 Marsho 回复消息为None或以content_filter开头的错误信息时,表示该消息被内容过滤器过滤,请调整你的聊天内容确保其合规。
+ - 当回复以RateLimitReached开头的错误信息时,该 AI 模型的次数配额已用尽,请联系Bot管理员。
 ※本AI的回答"按原样"提供,不提供任何担保。AI也会犯错,请仔细甄别回答的准确性。"""
 __author__ = "Asankilp"
 __plugin_meta__ = PluginMetadata(
diff --git a/azure.py b/azure.py
index 1690614..c3d54ad 100644
--- a/azure.py
+++ b/azure.py
@@ -1,10 +1,9 @@
-from nonebot.typing import T_State
 from nonebot import on_command
 from nonebot.adapters import Message
 from nonebot.params import CommandArg
 from nonebot.permission import SUPERUSER
 #from .acgnapis import *
-from nonebot_plugin_alconna import on_alconna
+from nonebot_plugin_alconna import on_alconna, MsgTarget
 from nonebot_plugin_alconna.uniseg import UniMessage, UniMsg
 from arclet.alconna import Alconna, Args, AllParam
 from .util import *
@@ -16,8 +15,8 @@ from .__init__ import __plugin_meta__
 from .config import config
 from .models import MarshoContext
 changemodel_cmd = on_command("changemodel",permission=SUPERUSER)
-resetmem_cmd = on_command("reset",permission=SUPERUSER)
-setprompt_cmd = on_command("prompt",permission=SUPERUSER)
+resetmem_cmd = on_command("reset")
+#setprompt_cmd = on_command("prompt",permission=SUPERUSER)
 praises_cmd = on_command("praises",permission=SUPERUSER)
 add_usermsg_cmd = on_command("usermsg",permission=SUPERUSER)
 add_assistantmsg_cmd = on_command("assistantmsg",permission=SUPERUSER)
@@ -30,20 +29,17 @@ marsho_cmd = on_alconna(
 )
 model_name = config.marshoai_default_model
 context = MarshoContext()
-context_limit = 50
-
-
 
 @add_usermsg_cmd.handle()
-async def add_usermsg(arg: Message = CommandArg()):
+async def add_usermsg(target: MsgTarget, arg: Message = CommandArg()):
     if msg := arg.extract_plain_text():
-        context.append(UserMessage(content=msg))
+        context.append(UserMessage(content=msg), target.id, target.private)
         await UniMessage("已添加用户消息").send()
 
 @add_assistantmsg_cmd.handle()
-async def add_assistantmsg(arg: Message = CommandArg()):
+async def add_assistantmsg(target: MsgTarget, arg: Message = CommandArg()):
     if msg := arg.extract_plain_text():
-        context.append(AssistantMessage(content=msg))
+        context.append(AssistantMessage(content=msg), target.id, target.private)
         await UniMessage("已添加助手消息").send()
 
 @praises_cmd.handle()
@@ -51,8 +47,8 @@ async def praises():
     await UniMessage(build_praises()).send()
 
 @contexts_cmd.handle()
-async def contexts():
-    await UniMessage(str(context.build()[1:])).send()
+async def contexts(target: MsgTarget):
+    await UniMessage(str(context.build(target.id, target.private)[1:])).send()
 
 # @setprompt_cmd.handle()  #用不了了
 # async def setprompt(arg: Message = CommandArg()):
@@ -67,9 +63,8 @@ async def contexts():
 
 
 @resetmem_cmd.handle()
-async def resetmem_cmd():
-    context.reset()
-    context.resetcount()
+async def resetmem(target: MsgTarget):
+    context.reset(target.id, target.private)
     await resetmem_cmd.finish("上下文已重置")
 
 @changemodel_cmd.handle()
@@ -80,6 +75,7 @@ async def changemodel(arg : Message = CommandArg()):
     await changemodel_cmd.finish("已切换")
 @marsho_cmd.handle()
 async def marsho(
+        target: MsgTarget,
         message: UniMsg,
         text = None
     ):
@@ -94,14 +90,11 @@ async def marsho(
         await UniMessage(
             __plugin_meta__.usage+"\n当前使用的模型:"+model_name).send()
         return
-    if context.count >= context_limit:
-        await UniMessage("上下文数量达到阈值。已自动重置上下文。").send()
-        context.reset()
-        context.resetcount()
   # await UniMessage(str(text)).send()
     try:
         is_support_image_model = model_name.lower() in config.marshoai_support_image_models
         usermsg = [] if is_support_image_model else ""
+        marsho_string_removed = False
         for i in message:
             if i.type == "image":
                 if is_support_image_model:
@@ -113,20 +106,25 @@
                 else:
                     await UniMessage("*此模型不支持图片处理。").send()
             elif i.type == "text":
+                if not marsho_string_removed:
+                    # 去掉最前面的"marsho "字符串
+                    clean_text = i.data["text"].removeprefix("marsho ")
+                    marsho_string_removed = True  # 标记文本已处理
+                else:
+                    clean_text = i.data["text"]
                 if is_support_image_model:
-                    usermsg.append(TextContentItem(text=i.data["text"]))
+                    usermsg.append(TextContentItem(text=clean_text))
                 else:
                     usermsg += str(i.data["text"])
         response = await client.complete(
-            messages=context.build()+[UserMessage(content=usermsg)],
+            messages=context.build(target.id, target.private)+[UserMessage(content=usermsg)],
             model=model_name
         )
         #await UniMessage(str(response)).send()
         choice = response.choices[0]
         if choice["finish_reason"] == CompletionsFinishReason.STOPPED:
-            context.append(UserMessage(content=usermsg))
-            context.append(choice.message)
-            context.addcount()
+            context.append(UserMessage(content=usermsg), target.id, target.private)
+            context.append(choice.message, target.id, target.private)
         elif choice["finish_reason"] == CompletionsFinishReason.CONTENT_FILTERED:
             await UniMessage("*已被内容过滤器过滤。*").send()
         #await UniMessage(str(choice)).send()
diff --git a/config.py b/config.py
index 59f6c16..b472ad9 100644
--- a/config.py
+++ b/config.py
@@ -1,4 +1,3 @@
-from pathlib import Path
 from pydantic import BaseModel
 from nonebot import get_plugin_config
diff --git a/models.py b/models.py
index 4ce37a8..07f02ac 100644
--- a/models.py
+++ b/models.py
@@ -1,35 +1,40 @@
 from .util import *
+
 class MarshoContext:
     """
     Marsho 的上下文类
     """
     def __init__(self):
-        self.contents = []
-        self.count = 0
+        self.contents = {
+            "private": {},
+            "non-private": {}
+        }
 
-    def append(self, content):
+    def _get_target_dict(self, is_private):
+        return self.contents["private"] if is_private else self.contents["non-private"]
+
+    def append(self, content, target_id, is_private):
         """
         往上下文中添加消息
-
-        Args:
-            content: 消息
         """
-        self.contents.append(content)
+        target_dict = self._get_target_dict(is_private)
+        if target_id not in target_dict:
+            target_dict[target_id] = []
+        target_dict[target_id].append(content)
 
-    def reset(self):
+    def reset(self, target_id, is_private):
         """
         重置上下文
         """
-        self.contents.clear()
+        target_dict = self._get_target_dict(is_private)
+        target_dict[target_id] = []
 
-    def addcount(self, num = 1):
-        self.count += num
-
-    def resetcount(self):
-        self.count = 0
-
-    def build(self):
+    def build(self, target_id, is_private):
         """
         构建返回的上下文,其中包括系统消息
         """
         spell = get_prompt()
-        return [spell] + self.contents
+        target_dict = self._get_target_dict(is_private)
+        if target_id not in target_dict:
+            target_dict[target_id] = []
+        return [spell] + target_dict[target_id]
\ No newline at end of file
diff --git a/util.py b/util.py
index c8e93b9..85260d5 100644
--- a/util.py
+++ b/util.py
@@ -1,10 +1,8 @@
 import base64
 import mimetypes
-import random
 import os
 import json
 import httpx
-from pathlib import Path
 from datetime import datetime
 from zhDateTime import DateTime
 from azure.ai.inference.models import SystemMessage
@@ -34,7 +32,7 @@ def get_praises():
     if not os.path.exists("praises.json"):
         init_data = {
             "like": [
-                {"name":"Asankilp","advantages":"赋予了Marsho猫娘人格,使用vim为Marsho写了许多代码,使Marsho更加可爱"}
+                {"name":"Asankilp","advantages":"赋予了Marsho猫娘人格,使用vim与vscode为Marsho写了许多代码,使Marsho更加可爱"}
             ]
         }
         with open(filename,"w",encoding="utf-8") as f:
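
The core of this change is the session-keyed store in models.py: messages are bucketed first by private/non-private and then by target id, so two groups (or a group and a direct chat) never share history, and reset only clears the session it was issued in. Below is a minimal standalone sketch of that bookkeeping, assuming nothing beyond the standard library: the class name SessionContext is illustrative, and the SystemMessage built by get_prompt() is stubbed with a plain placeholder string so the sketch runs without the azure-ai-inference or NoneBot dependencies.

    # Standalone sketch of the per-session context store introduced in models.py.
    # The real MarshoContext.build() prepends a SystemMessage from get_prompt();
    # here the system prompt is a plain placeholder string.
    class SessionContext:
        def __init__(self):
            # Two buckets, private and non-private chats, each mapping a
            # target id to that session's own message list.
            self.contents = {"private": {}, "non-private": {}}

        def _bucket(self, is_private):
            return self.contents["private" if is_private else "non-private"]

        def append(self, message, target_id, is_private):
            self._bucket(is_private).setdefault(target_id, []).append(message)

        def reset(self, target_id, is_private):
            # Resetting a session that has no history yet is a no-op.
            self._bucket(is_private)[target_id] = []

        def build(self, target_id, is_private):
            system_prompt = "system prompt placeholder"  # stands in for get_prompt()
            return [system_prompt] + self._bucket(is_private).setdefault(target_id, [])

    if __name__ == "__main__":
        ctx = SessionContext()
        ctx.append("hello from group A", "group_a", False)
        ctx.append("hello from group B", "group_b", False)
        ctx.append("hello from a DM", "user_1", True)

        # Each (private/non-private, target_id) pair sees only its own messages.
        assert ctx.build("group_a", False)[1:] == ["hello from group A"]
        assert ctx.build("group_b", False)[1:] == ["hello from group B"]
        assert ctx.build("user_1", True)[1:] == ["hello from a DM"]

        ctx.reset("group_a", False)
        assert ctx.build("group_a", False)[1:] == []
        print("per-session isolation holds")

The [1:] slice mirrors how the contexts command in azure.py prints a session's history while skipping the system message that build() prepends.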
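
A small Python note on the prefix handling in azure.py: str.lstrip() treats its argument as a set of characters rather than a literal prefix, so it is not suitable for dropping the leading "marsho " command token that the text branch removes (the comment 去掉最前面的"marsho "字符串 describes removing that exact leading string); str.removeprefix() (Python 3.9+) or an explicit startswith() check does that. A quick illustration:

    text = "marsho how are you"

    # lstrip() strips any leading run of the characters 'm','a','r','s','h','o',' ',
    # which also eats the start of the real message.
    print(text.lstrip("marsho "))        # -> "w are you"

    # removeprefix() drops only the exact leading substring, once.
    print(text.removeprefix("marsho "))  # -> "how are you"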