Update config options: add request timeout and thinking-chain sending, for compatibility with the DeepSeek-R1 model

This commit is contained in:
Asankilp 2025-01-27 18:50:15 +08:00
parent 744c99273d
commit 13cbf87867
6 changed files with 28 additions and 6 deletions

View File

@@ -127,7 +127,7 @@ Add the options from the table below to the `.env` file of your nonebot2 project.
| MARSHOAI_MAX_TOKENS | `int` | `null` | Max token number |
| MARSHOAI_ADDITIONAL_IMAGE_MODELS | `list` | `[]` | External image-support model list, such as `hunyuan-vision` |
| MARSHOAI_NICKNAME_LIMIT | `int` | `16` | Limit for nickname length |
| MARSHOAI_FIX_TOOLCALLS | `bool` | `true` | Fix tool calls or not |
| MARSHOAI_TIMEOUT | `float` | `50` | AI request timeout (seconds) |
#### Feature Switches
@@ -145,3 +145,5 @@ Add the options from the table below to the `.env` file of your nonebot2 project.
| MARSHOAI_DISABLED_TOOLKITS | `list` | `[]` | List of disabled toolkits' name |
| MARSHOAI_ENABLE_RICHTEXT_PARSE | `bool` | `true` | Enable automatic rich-text parsing (including images and LaTeX equations) |
| MARSHOAI_SINGLE_LATEX_PARSE | `bool` | `false` | Render single-line equations or not |
| MARSHOAI_FIX_TOOLCALLS | `bool` | `true` | Fix tool calls or not |
| MARSHOAI_SEND_THINKING | `bool` | `true` | Send thinking chain or not |

View File

@@ -129,8 +129,7 @@ The GitHub Models API has many restrictions and is not recommended; it is recommended to change `MARSHOA
| MARSHOAI_MAX_TOKENS | `int` | `null` | Maximum number of generated tokens |
| MARSHOAI_ADDITIONAL_IMAGE_MODELS | `list` | `[]` | Additional image-capable model list, e.g. `hunyuan-vision` |
| MARSHOAI_NICKNAME_LIMIT | `int` | `16` | Nickname length limit |
| MARSHOAI_FIX_TOOLCALLS | `bool` | `true` | Whether to fix tool calls (must be turned off for some models, and for models deployed with vLLM) |
| MARSHOAI_TIMEOUT | `float` | `50` | AI request timeout (seconds) |
#### Feature Switches
| Option | Type | Default | Description |
@@ -148,6 +147,8 @@ The GitHub Models API has many restrictions and is not recommended; it is recommended to change `MARSHOA
| MARSHOAI_DISABLED_TOOLKITS | `list` | `[]` | List of disabled toolkit package names |
| MARSHOAI_ENABLE_RICHTEXT_PARSE | `bool` | `true` | Whether to automatically parse messages (if a message contains an image link, the image is sent; if it contains a LaTeX formula, a rendered formula image is sent) |
| MARSHOAI_SINGLE_LATEX_PARSE | `bool` | `false` | Whether to render single-line formulas (available when rich-text parsing is enabled) (if single lines get rendered too… let's just say it doesn't look good) |
| MARSHOAI_FIX_TOOLCALLS | `bool` | `true` | Whether to fix tool calls (must be turned off for some models, and for models deployed with vLLM) |
| MARSHOAI_SEND_THINKING | `bool` | `true` | Whether to send the thinking chain (not supported by some models) |
#### Development and Debugging Options

View File

@@ -42,12 +42,14 @@ class ConfigModel(BaseModel):
    marshoai_enable_plugins: bool = True
    marshoai_load_builtin_tools: bool = True
    marshoai_fix_toolcalls: bool = True
    marshoai_send_thinking: bool = True
    marshoai_toolset_dir: list = []
    marshoai_disabled_toolkits: list = []
    marshoai_azure_endpoint: str = "https://models.inference.ai.azure.com"
    marshoai_temperature: float | None = None
    marshoai_max_tokens: int | None = None
    marshoai_top_p: float | None = None
    marshoai_timeout: float | None = 50.0
    marshoai_nickname_limit: int = 16
    marshoai_additional_image_models: list = []
    marshoai_tencent_secretid: str | None = None
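
The two new fields above are filled from the bot's `.env` file (`MARSHOAI_TIMEOUT`, `MARSHOAI_SEND_THINKING`). Below is a minimal sketch of how such a pydantic model is typically loaded in a nonebot2 plugin; the `get_plugin_config` call is standard nonebot2 API but is not part of this diff, and the model here is trimmed to the two new fields.

```python
from nonebot import get_plugin_config
from pydantic import BaseModel


class ConfigModel(BaseModel):
    # Only the two fields added by this commit; the real model has many more.
    marshoai_timeout: float | None = 50.0  # MARSHOAI_TIMEOUT in .env, seconds
    marshoai_send_thinking: bool = True  # MARSHOAI_SEND_THINKING in .env


# Inside a loaded plugin, nonebot populates the fields from the bot's .env;
# calling this outside a running bot will raise.
config = get_plugin_config(ConfigModel)
```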

View File

@@ -42,6 +42,10 @@ marshoai_enable_plugins: true # Whether to enable plugin functionality.
marshoai_load_builtin_tools: true # Whether to load built-in tools.
marshoai_fix_toolcalls: true # Whether to fix tool calls.
marshoai_send_thinking: true # Whether to send the thinking chain.
marshoai_nickname_limit: 16 # Nickname length limit.
marshoai_toolset_dir: [] # Toolset paths.
@@ -60,6 +64,7 @@ marshoai_azure_endpoint: "https://models.inference.ai.azure.com" # OpenAI standard
marshoai_temperature: null # Adjusts generation diversity; the default is used when unset.
marshoai_max_tokens: null # Maximum number of generated tokens; the default is used when unset.
marshoai_top_p: null # Top-p sampling value; the default is used when unset.
marshoai_timeout: 50.0 # Request timeout (seconds).
marshoai_additional_image_models: [] # Additional image model list; empty by default.

View File

@@ -301,7 +301,15 @@ async def marsho(
    context.append(
        UserMessage(content=usermsg).as_dict(), target.id, target.private  # type: ignore
    )
    context.append(choice.message, target.id, target.private)
    choice_msg_dict = choice.message.to_dict()
    if "reasoning_content" in choice_msg_dict:
        if config.marshoai_send_thinking:
            await UniMessage(
                "思维链:\n" + choice_msg_dict["reasoning_content"]
            ).send()
        del choice_msg_dict["reasoning_content"]
    context.append(choice_msg_dict, target.id, target.private)
    if [target.id, target.private] not in target_list:
        target_list.append([target.id, target.private])
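
For context, DeepSeek-R1-style chat responses carry the model's reasoning in a separate `reasoning_content` field next to `content`; the hunk above forwards it to the user when `marshoai_send_thinking` is on, then strips it so only the ordinary message is stored in the conversation context. A minimal standalone sketch of that handling; the dict values are illustrative, and `print` stands in for the plugin's `UniMessage(...).send()`.

```python
# Illustrative assistant message in the shape the hunk above handles; only the
# "reasoning_content" key comes from this diff, the values are made up.
choice_msg_dict = {
    "role": "assistant",
    "content": "The final answer.",
    "reasoning_content": "The step-by-step thinking produced first.",
}

send_thinking = True  # stands in for config.marshoai_send_thinking

if "reasoning_content" in choice_msg_dict:
    thinking = choice_msg_dict.pop("reasoning_content")  # strip before storing
    if send_thinking:
        print("Thinking chain:\n" + thinking)  # the plugin sends this as a message

stored_message = choice_msg_dict  # only role/content go back into the context
```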
@@ -402,7 +410,10 @@ async def marsho(
        UserMessage(content=usermsg).as_dict(), target.id, target.private  # type: ignore
    )
    # context.append(tool_msg, target.id, target.private)
    context.append(choice.message, target.id, target.private)
    choice_msg_dict = choice.message.to_dict()
    if "reasoning_content" in choice_msg_dict:
        del choice_msg_dict["reasoning_content"]
    context.append(choice_msg_dict, target.id, target.private)
    # Send the message
    if config.marshoai_enable_richtext_parse:
@@ -434,7 +445,7 @@ with contextlib.suppress(ImportError):  # not doing the optimization for now ()
        user_nickname = nicknames.get(user_id, "")
        try:
            if config.marshoai_poke_suffix != "":
                response = await make_chat(
                response = await make_chat_openai(
                    client=client,
                    model_name=model_name,
                    msg=[

View File

@@ -124,6 +124,7 @@ async def make_chat_openai(
        temperature=config.marshoai_temperature,
        max_tokens=config.marshoai_max_tokens,
        top_p=config.marshoai_top_p,
        timeout=config.marshoai_timeout,
    )
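
For reference, a minimal sketch of forwarding a per-request timeout with the openai Python SDK, which is the kind of call `make_chat_openai` appears to wrap here; the client setup, model name, and message are assumptions, not part of this diff.

```python
from openai import AsyncOpenAI


async def chat_with_timeout(client: AsyncOpenAI, model_name: str) -> str | None:
    # The SDK accepts a per-request timeout in seconds; the plugin passes
    # config.marshoai_timeout, and 50.0 is just an illustrative value.
    response = await client.chat.completions.create(
        model=model_name,
        messages=[{"role": "user", "content": "hello"}],
        timeout=50.0,
    )
    return response.choices[0].message.content
```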