不同会话间的上下文分离,优化帮助文本

This commit is contained in:
Asankilp 2024-10-01 22:12:21 +08:00
parent 6601b236b7
commit e408a48e42
5 changed files with 55 additions and 55 deletions

View File

@@ -1,20 +1,20 @@
from nonebot.plugin import PluginMetadata, inherit_supported_adapters, require from nonebot.plugin import PluginMetadata, inherit_supported_adapters, require
require("nonebot_plugin_htmlrender")
require("nonebot_plugin_alconna") require("nonebot_plugin_alconna")
from .azure import * from .azure import *
from nonebot import get_driver from nonebot import get_driver
#from .config import ConfigModel
usage = """MarshoAI Alpha by Asankilp usage = """MarshoAI Alpha by Asankilp
用法 用法
marsho <聊天内容> marsho <聊天内容> : Marsho 进行对话当模型为 GPT-4o(-mini) 等时可以带上图片进行对话
Marsho 进行对话当模型为gpt时可以带上图片进行对话 reset : 重置当前会话的上下文
changemodel <模型名> 超级用户命令:
切换 AI 模型仅超级用户可用 changemodel <模型名> : 切换全局 AI 模型
reset contexts : 返回当前会话的上下文列表 当上下文包含图片时不要使用此命令
重置上下文仅超级用户可用 praises : 返回夸赞名单的提示词
usermsg <消息> : 往当前会话添加用户消息(UserMessage)
assistantmsg <消息> : 往当前会话添加助手消息(AssistantMessage)
注意事项 注意事项
Marsho 回复消息为None或以content_filter开头的错误信息时表示该消息被内容过滤器过滤请调整你的聊天内容确保其合规 - Marsho 回复消息为None或以content_filter开头的错误信息时表示该消息被内容过滤器过滤请调整你的聊天内容确保其合规
当回复以RateLimitReached开头的错误信息时 AI 模型的次数配额已用尽请联系Bot管理员 - 当回复以RateLimitReached开头的错误信息时 AI 模型的次数配额已用尽请联系Bot管理员
本AI的回答"按原样"提供不提供任何担保AI也会犯错请仔细甄别回答的准确性""" 本AI的回答"按原样"提供不提供任何担保AI也会犯错请仔细甄别回答的准确性"""
__author__ = "Asankilp" __author__ = "Asankilp"
__plugin_meta__ = PluginMetadata( __plugin_meta__ = PluginMetadata(

View File

@@ -1,10 +1,9 @@
from nonebot.typing import T_State
from nonebot import on_command from nonebot import on_command
from nonebot.adapters import Message from nonebot.adapters import Message
from nonebot.params import CommandArg from nonebot.params import CommandArg
from nonebot.permission import SUPERUSER from nonebot.permission import SUPERUSER
#from .acgnapis import * #from .acgnapis import *
from nonebot_plugin_alconna import on_alconna from nonebot_plugin_alconna import on_alconna, MsgTarget
from nonebot_plugin_alconna.uniseg import UniMessage, UniMsg from nonebot_plugin_alconna.uniseg import UniMessage, UniMsg
from arclet.alconna import Alconna, Args, AllParam from arclet.alconna import Alconna, Args, AllParam
from .util import * from .util import *
@@ -16,8 +15,8 @@ from .__init__ import __plugin_meta__
from .config import config from .config import config
from .models import MarshoContext from .models import MarshoContext
changemodel_cmd = on_command("changemodel",permission=SUPERUSER) changemodel_cmd = on_command("changemodel",permission=SUPERUSER)
resetmem_cmd = on_command("reset",permission=SUPERUSER) resetmem_cmd = on_command("reset")
setprompt_cmd = on_command("prompt",permission=SUPERUSER) #setprompt_cmd = on_command("prompt",permission=SUPERUSER)
praises_cmd = on_command("praises",permission=SUPERUSER) praises_cmd = on_command("praises",permission=SUPERUSER)
add_usermsg_cmd = on_command("usermsg",permission=SUPERUSER) add_usermsg_cmd = on_command("usermsg",permission=SUPERUSER)
add_assistantmsg_cmd = on_command("assistantmsg",permission=SUPERUSER) add_assistantmsg_cmd = on_command("assistantmsg",permission=SUPERUSER)
@@ -30,20 +29,17 @@ marsho_cmd = on_alconna(
) )
model_name = config.marshoai_default_model model_name = config.marshoai_default_model
context = MarshoContext() context = MarshoContext()
context_limit = 50
@add_usermsg_cmd.handle() @add_usermsg_cmd.handle()
async def add_usermsg(arg: Message = CommandArg()): async def add_usermsg(target: MsgTarget, arg: Message = CommandArg()):
if msg := arg.extract_plain_text(): if msg := arg.extract_plain_text():
context.append(UserMessage(content=msg)) context.append(UserMessage(content=msg), target.id, target.private)
await UniMessage("已添加用户消息").send() await UniMessage("已添加用户消息").send()
@add_assistantmsg_cmd.handle() @add_assistantmsg_cmd.handle()
async def add_assistantmsg(arg: Message = CommandArg()): async def add_assistantmsg(target: MsgTarget, arg: Message = CommandArg()):
if msg := arg.extract_plain_text(): if msg := arg.extract_plain_text():
context.append(AssistantMessage(content=msg)) context.append(AssistantMessage(content=msg), target.id, target.private)
await UniMessage("已添加助手消息").send() await UniMessage("已添加助手消息").send()
@praises_cmd.handle() @praises_cmd.handle()
@@ -51,8 +47,8 @@ async def praises():
await UniMessage(build_praises()).send() await UniMessage(build_praises()).send()
@contexts_cmd.handle() @contexts_cmd.handle()
async def contexts(): async def contexts(target: MsgTarget):
await UniMessage(str(context.build()[1:])).send() await UniMessage(str(context.build(target.id, target.private)[1:])).send()
# @setprompt_cmd.handle() #用不了了 # @setprompt_cmd.handle() #用不了了
# async def setprompt(arg: Message = CommandArg()): # async def setprompt(arg: Message = CommandArg()):
@@ -67,9 +63,8 @@ async def contexts():
@resetmem_cmd.handle() @resetmem_cmd.handle()
async def resetmem_cmd(): async def resetmem(target: MsgTarget):
context.reset() context.reset(target.id, target.private)
context.resetcount()
await resetmem_cmd.finish("上下文已重置") await resetmem_cmd.finish("上下文已重置")
@changemodel_cmd.handle() @changemodel_cmd.handle()
@@ -80,6 +75,7 @@ async def changemodel(arg : Message = CommandArg()):
await changemodel_cmd.finish("已切换") await changemodel_cmd.finish("已切换")
@marsho_cmd.handle() @marsho_cmd.handle()
async def marsho( async def marsho(
target: MsgTarget,
message: UniMsg, message: UniMsg,
text = None text = None
): ):
@@ -94,14 +90,11 @@ async def marsho(
await UniMessage( await UniMessage(
__plugin_meta__.usage+"\n当前使用的模型:"+model_name).send() __plugin_meta__.usage+"\n当前使用的模型:"+model_name).send()
return return
if context.count >= context_limit:
await UniMessage("上下文数量达到阈值。已自动重置上下文。").send()
context.reset()
context.resetcount()
# await UniMessage(str(text)).send() # await UniMessage(str(text)).send()
try: try:
is_support_image_model = model_name.lower() in config.marshoai_support_image_models is_support_image_model = model_name.lower() in config.marshoai_support_image_models
usermsg = [] if is_support_image_model else "" usermsg = [] if is_support_image_model else ""
marsho_string_removed = False
for i in message: for i in message:
if i.type == "image": if i.type == "image":
if is_support_image_model: if is_support_image_model:
@@ -113,20 +106,25 @@
else: else:
await UniMessage("*此模型不支持图片处理。").send() await UniMessage("*此模型不支持图片处理。").send()
elif i.type == "text": elif i.type == "text":
if not marsho_string_removed:
# 去掉最前面的"marsho "字符串
clean_text = i.data["text"].lstrip("marsho ")
marsho_string_removed = True # 标记文本已处理
else:
clean_text = i.data["text"]
if is_support_image_model: if is_support_image_model:
usermsg.append(TextContentItem(text=i.data["text"])) usermsg.append(TextContentItem(text=clean_text))
else: else:
usermsg += str(i.data["text"]) usermsg += str(i.data["text"])
response = await client.complete( response = await client.complete(
messages=context.build()+[UserMessage(content=usermsg)], messages=context.build(target.id, target.private)+[UserMessage(content=usermsg)],
model=model_name model=model_name
) )
#await UniMessage(str(response)).send() #await UniMessage(str(response)).send()
choice = response.choices[0] choice = response.choices[0]
if choice["finish_reason"] == CompletionsFinishReason.STOPPED: if choice["finish_reason"] == CompletionsFinishReason.STOPPED:
context.append(UserMessage(content=usermsg)) context.append(UserMessage(content=usermsg), target.id, target.private)
context.append(choice.message) context.append(choice.message, target.id, target.private)
context.addcount()
elif choice["finish_reason"] == CompletionsFinishReason.CONTENT_FILTERED: elif choice["finish_reason"] == CompletionsFinishReason.CONTENT_FILTERED:
await UniMessage("*已被内容过滤器过滤。*").send() await UniMessage("*已被内容过滤器过滤。*").send()
#await UniMessage(str(choice)).send() #await UniMessage(str(choice)).send()

View File

@@ -1,4 +1,3 @@
from pathlib import Path
from pydantic import BaseModel from pydantic import BaseModel
from nonebot import get_plugin_config from nonebot import get_plugin_config

View File

@@ -1,35 +1,40 @@
from .util import * from .util import *
class MarshoContext: class MarshoContext:
""" """
Marsho 的上下文类 Marsho 的上下文类
""" """
def __init__(self): def __init__(self):
self.contents = [] self.contents = {
self.count = 0 "private": {},
"non-private": {}
}
def append(self, content): def _get_target_dict(self, is_private):
return self.contents["private"] if is_private else self.contents["non-private"]
def append(self, content, target_id, is_private):
""" """
往上下文中添加消息 往上下文中添加消息
Args:
content: 消息
""" """
self.contents.append(content) target_dict = self._get_target_dict(is_private)
if target_id not in target_dict:
target_dict[target_id] = []
target_dict[target_id].append(content)
def reset(self): def reset(self, target_id, is_private):
""" """
重置上下文 重置上下文
""" """
self.contents.clear() target_dict = self._get_target_dict(is_private)
target_dict[target_id].clear()
def addcount(self, num = 1): def build(self, target_id, is_private):
self.count += num
def resetcount(self):
self.count = 0
def build(self):
""" """
构建返回的上下文其中包括系统消息 构建返回的上下文其中包括系统消息
""" """
spell = get_prompt() spell = get_prompt()
return [spell] + self.contents target_dict = self._get_target_dict(is_private)
if target_id not in target_dict:
target_dict[target_id] = []
return [spell] + target_dict[target_id]

View File

@@ -1,10 +1,8 @@
import base64 import base64
import mimetypes import mimetypes
import random
import os import os
import json import json
import httpx import httpx
from pathlib import Path
from datetime import datetime from datetime import datetime
from zhDateTime import DateTime from zhDateTime import DateTime
from azure.ai.inference.models import SystemMessage from azure.ai.inference.models import SystemMessage
@@ -34,7 +32,7 @@ def get_praises():
if not os.path.exists("praises.json"): if not os.path.exists("praises.json"):
init_data = { init_data = {
"like": [ "like": [
{"name":"Asankilp","advantages":"赋予了Marsho猫娘人格使用vim为Marsho写了许多代码使Marsho更加可爱"} {"name":"Asankilp","advantages":"赋予了Marsho猫娘人格使用vim与vscode为Marsho写了许多代码使Marsho更加可爱"}
] ]
} }
with open(filename,"w",encoding="utf-8") as f: with open(filename,"w",encoding="utf-8") as f: