Merge pull request #22 from Twisuki/main

* new file:   nonebot_plugin_marshoai/tools/marshoai-meogirl/__init__.py
	new file:   nonebot_plugin_marshoai/tools/marshoai-meogirl/mg_Info.py
	new file:   nonebot_plugin_marshoai/tools/marshoai-meogirl/tools.json

* Integrate Moegirlpedia (萌娘百科) search

* Converted it to async; the conversion broke it completely (a sketch of the pattern follows this list)

* Further changes, but search was still completely unusable

* Fix complete
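The async conversion mentioned above boils down to replacing a blocking HTTP call with httpx's async client and awaiting it everywhere. A minimal sketch of the pattern; the pre-conversion blocking code is not part of this diff, so the requests version shown in the comment is an assumption:

    import asyncio
    import httpx

    # Hypothetical blocking version (assumed, not in this diff):
    #   import requests
    #   def fetch(url):
    #       return requests.get(url)

    # Async version, matching the style used in mg_Search.py below:
    async def fetch(url: str) -> httpx.Response:
        async with httpx.AsyncClient(timeout=None) as client:
            return await client.get(url)

    # Callers must now await the coroutine (or drive it with asyncio.run):
    print(asyncio.run(fetch("https://example.com")).status_code)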
Akarin~ 2024-12-11 18:43:20 +08:00 committed by GitHub
commit 6a4b0bbd0d
4 changed files with 149 additions and 0 deletions

nonebot_plugin_marshoai/tools/marshoai-meogirl/__init__.py

@@ -0,0 +1,10 @@
from . import mg_Info
from . import mg_Search


# meogirl
async def meogirl():
    return mg_Info.meogirl()


# Search
async def search(msg: str, num: int = 3):
    return str(await mg_Search.search(msg, num))
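To exercise these wrappers outside the bot you only need an event loop. A sketch, assuming the two coroutines above can be imported; the hyphen in the marshoai-meogirl directory name suggests MarshoTools loads the plugin dynamically, so the import path here is hypothetical:

    import asyncio

    from marshoai_meogirl import meogirl, search   # hypothetical import path

    async def demo():
        print(await meogirl())             # static description of the plugin
        print(await search("初音未来"))     # num defaults to 3 results

    asyncio.run(demo())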

nonebot_plugin_marshoai/tools/marshoai-meogirl/mg_Info.py

@@ -0,0 +1,4 @@
# Meogirl
def meogirl():
    return "Meogirl refers to \"萌娘百科\" (Moegirlpedia, https://zh.moegirl.org.cn/ , \"萌百\" for short), \"an encyclopedia where everything can be moe!\"; MarshoTools also ships a \"Meogirl\" plugin that can call the Moegirlpedia API"

nonebot_plugin_marshoai/tools/marshoai-meogirl/mg_Search.py

@@ -0,0 +1,103 @@
from nonebot.log import logger
import re
import httpx
import urllib.parse
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'
}


async def get_async_data(url):
    async with httpx.AsyncClient(timeout=None) as client:
        return await client.get(url, headers=headers)


async def search(msg: str, num: int):
    logger.info(f"Searching: \"{msg}\"")
    result = ""
    url = "https://mzh.moegirl.org.cn/index.php?search=" + urllib.parse.quote_plus(msg)
    response = await get_async_data(url)
    logger.success(f"Fetched {url}, status code: {response.status_code}")

    # Regular search results page
    if response.status_code == 200:
        """
        Moegirlpedia search page structure:
        div.searchresults
            p ...
            ul.mw-search-results        # absent when there are no results
                li                      # one search result
                    div.mw-search-result-heading > a    # title
                    div.mw-searchresult                 # snippet
                    div.mw-search-result-data
                li ...
                li ...
        """
        soup = BeautifulSoup(response.text, 'html.parser')
        # Check for ul.mw-search-results to tell whether there are any results
        if soup.find('ul', class_='mw-search-results'):
            ul_tag = soup.select('ul.mw-search-results')[0]
            li_tags = ul_tag.select('li')
            for li_tag in li_tags:
                div_heading = li_tag.find('div', class_='mw-search-result-heading')
                if div_heading:
                    a_tag = div_heading.select('a')[0]
                    result += a_tag['title'] + "\n"
                    logger.info(f"Found: \"{a_tag['title']}\"")
                div_result = li_tag.find('div', class_='searchresult')
                if div_result:
                    content = str(div_result).replace('<div class="searchresult">', '').replace('</div>', '')
                    content = content.replace('<span class="searchmatch">', '').replace('</span>', '')
                    result += content + "\n\n"
                num -= 1
                if num == 0:
                    break
            return result
        # No ul.mw-search-results means no results
        else:
            logger.info("No results")
            return "No results"

    # Redirected straight to an article
    elif response.status_code == 302:
        logger.info(f"\"{msg}\" was redirected to \"{response.headers.get('location')}\"")
        # Fetch the redirect target
        response = await get_async_data(response.headers.get('location'))
        soup = BeautifulSoup(response.text, 'html.parser')
        logger.success("Redirect resolved")
        num = 0
        """
        Moegirlpedia article page structure (redirect target):
        div#mw-content-text
            div.mw-parser-output        # article body
                ....
                p ?                     # possibly an empty leading p
                p                       # introduction
                ...
        """
        if soup.find('div', class_='mw-parser-output'):
            div = soup.find('div', class_='mw-parser-output')
            p_tags = div.select('p')
            for p_tag in p_tags:
                p = re.sub(r'<.*?>', '', str(p_tag))
                if p != '':
                    result += p
                    num += 1
                    if num >= 5:
                        break
            return result

    # Status code other than 200 or 302
    else:
        logger.error(f"Network error, status code: {response.status_code}")
        return f"Network error, status code: {response.status_code}"
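The 302 branch above only works because httpx does not follow redirects by default (unlike requests, which does), so the Location header stays visible to the caller. A small sketch to confirm that assumption in isolation:

    import asyncio
    import httpx

    async def main():
        async with httpx.AsyncClient() as client:   # follow_redirects defaults to False
            r = await client.get("https://mzh.moegirl.org.cn/index.php?search=初音未来")
        # An exact title match answers 302 with a Location header instead of a results page
        print(r.status_code, r.headers.get("location"))

    asyncio.run(main())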

nonebot_plugin_marshoai/tools/marshoai-meogirl/tools.json

@@ -0,0 +1,32 @@
[
    {
        "type": "function",
        "function": {
            "name": "marshoai-meogirl__meogirl",
            "description": "Introduces Meogirl"
        }
    },
    {
        "type": "function",
        "function": {
            "name": "marshoai-meogirl__search",
            "description": "Search Moegirlpedia (call this function only when the user explicitly asks to search Moegirlpedia)",
            "parameters": {
                "type": "object",
                "properties": {
                    "msg": {
                        "type": "string",
                        "description": "Search keyword"
                    },
                    "num": {
                        "type": "integer",
                        "description": "Number of results to show; defaults to 3 and may be omitted"
                    }
                },
                "required": [
                    "msg"
                ]
            }
        }
    }
]
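The JSON above is an OpenAI-style function-calling schema: the model picks a tool by name and hands back its arguments as a JSON string. A sketch of how a loader might dispatch such a call to the coroutines in __init__.py; the actual MarshoTools dispatch code is not part of this diff, so the registry and handler names here are illustrative:

    import json

    # Hypothetical tool registry; assumes meogirl and search from __init__.py are in scope
    TOOLS = {
        "marshoai-meogirl__meogirl": meogirl,
        "marshoai-meogirl__search": search,
    }

    async def handle_tool_call(name: str, arguments: str) -> str:
        kwargs = json.loads(arguments)    # e.g. '{"msg": "初音未来", "num": 3}'
        return str(await TOOLS[name](**kwargs))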