新增 Introduce 功能, 将其从 Search 分离 (#25)

* 修复了style和script标签无法去除的问题

* 添加了图片展示 & 统一了引号格式

* 添加了图片展示 & 统一了引号格式

* 统一引号格式并异步处理函数

* 新增了Introduce功能

* 新增了Introduce功能并将其从Search中分离
This commit is contained in:
Nya_Twisuki 2024-12-13 00:34:50 +08:00 committed by GitHub
parent b939a48b0b
commit 1a34e9b167
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 124 additions and 59 deletions

View File

@ -1,5 +1,7 @@
from . import mg_Info from . import mg_Info
from . import mg_Search from . import mg_Search
from . import mg_Introduce
# meogirl # meogirl
async def meogirl () : async def meogirl () :
@ -8,3 +10,7 @@ async def meogirl():
# Search # Search
async def search (msg : str, num : int = 3) : async def search (msg : str, num : int = 3) :
return str(await mg_Search.search(msg, num)) return str(await mg_Search.search(msg, num))
# Show
async def introduce (msg : str) :
return str(await mg_Introduce.introduce(msg))

View File

@ -0,0 +1,76 @@
from nonebot.log import logger
import re
import httpx
import urllib.parse
from bs4 import BeautifulSoup
# Shared HTTP request headers: a desktop-Chrome User-Agent so Moegirlpedia
# serves the normal article markup instead of blocking/altering the response
# for unknown clients.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
}
async def get_async_data (url) :
    """Asynchronously GET *url* with the module-level browser headers.

    A fresh ``httpx.AsyncClient`` (no timeout) is opened for each call and
    closed when the request finishes; the raw response object is returned.
    """
    async with httpx.AsyncClient(timeout = None) as session:
        response = await session.get(url, headers = headers)
        return response
async def introduce (msg : str) :
    """Look up *msg* on Moegirlpedia (mzh.moegirl.org.cn) and return a short
    plain-text introduction.

    Behavior by status code:
      200 -> return the title, an optional infobox image (markdown), and up to
             20 non-empty paragraphs with HTML tags / script / style / footnote
             markers stripped.
      404 -> resolve the keyword via ``mg_Search`` and retry once with the top
             search hit.
      301 -> treated as "not found" (redirect is not followed here).
      other -> an error string containing the status code.
    """
    logger.info(f"介绍 : \"{msg}\" ...")
    result = ""
    url = "https://mzh.moegirl.org.cn/" + urllib.parse.quote_plus(msg)
    response = await get_async_data(url)
    logger.success(f"连接\"{url}\"完成, 状态码 : {response.status_code}")
    soup = BeautifulSoup(response.text, "html.parser")
    # Normal article page
    if response.status_code == 200 :
        # Moegirlpedia page structure:
        #   div#mw-content-text
        #     div#404search          -> appears on the blank page
        #     div.mw-parser-output   -> normal article body
        #       div, p, table ...    -> the actual explanatory items
        result += msg + "\n"
        img = soup.find("img", class_="infobox-image")
        if img:
            # NOTE: use single quotes for the subscript — f"...{img["src"]}..."
            # is a SyntaxError on Python < 3.12 (PEP 701).
            result += f"![ {msg} ]( {img['src']} ) \n"
        div = soup.find("div", class_="mw-parser-output")
        if div:
            p_tags = div.find_all("p")
            num = 0
            for p_tag in p_tags:
                p = str(p_tag)
                # Drop embedded script/style blocks, then all remaining tags,
                # then footnote markers like "[1]".
                p = re.sub(r"<script.*?</script>|<style.*?</style>", "", p, flags=re.DOTALL)
                p = re.sub(r"<.*?>", "", p, flags=re.DOTALL)
                p = re.sub(r"\[.*?]", "", p, flags=re.DOTALL)
                if p != "":
                    result += str(p)
                    num += 1
                    if num >= 20:   # cap the summary at 20 paragraphs
                        break
        return result
    # Blank page: fall back to a search for the closest keyword
    elif response.status_code == 404 :
        logger.info(f"未找到\"{msg}\", 进行搜索")
        from . import mg_Search
        context = await mg_Search.search(msg, 1)
        # Guard: the search result may contain no newline-terminated line,
        # in which case .group() on None would raise AttributeError.
        match = re.search(r".*?\n", context, flags = re.DOTALL)
        if match is None:
            return f"未找到{msg}"
        keyword = match.group()[: -1]
        # Guard: if the search just echoes the same missing keyword, retrying
        # would recurse forever (introduce -> search -> introduce -> ...).
        if keyword == msg:
            return f"未找到{msg}"
        logger.success(f"搜索完成, 打开\"{keyword}\"")
        return await introduce(keyword)
    # Redirect reported as "not found" (we do not follow it here)
    elif response.status_code == 301 :
        return f"未找到{msg}"
    else :
        logger.error(f"网络错误, 状态码 : {response.status_code}")
        return f"网络错误, 状态码 : {response.status_code}"

View File

@ -1,6 +1,5 @@
from nonebot.log import logger from nonebot.log import logger
import re
import httpx import httpx
import urllib.parse import urllib.parse
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
@ -14,7 +13,7 @@ async def get_async_data (url):
return await client.get(url, headers = headers) return await client.get(url, headers = headers)
async def search (msg : str, num : int) : async def search (msg : str, num : int) :
logger.info(f"搜索 : \"{msg}\"") logger.info(f"搜索 : \"{msg}\" ...")
result = "" result = ""
url = "https://mzh.moegirl.org.cn/index.php?search=" + urllib.parse.quote_plus(msg) url = "https://mzh.moegirl.org.cn/index.php?search=" + urllib.parse.quote_plus(msg)
@ -69,44 +68,9 @@ async def search(msg : str, num : int):
elif response.status_code == 302 : elif response.status_code == 302 :
logger.info(f"\"{msg}\"已被重定向至\"{response.headers.get("location")}\"") logger.info(f"\"{msg}\"已被重定向至\"{response.headers.get("location")}\"")
# 读取重定向结果 # 读取重定向结果
response = await get_async_data(response.headers.get("location")) from . import mg_Introduce
soup = BeautifulSoup(response.text, "html.parser") return await mg_Introduce.introduce(msg)
logger.success("重定向成功")
num = 0
"""
萌娘百科重定向介绍页面结构
div#mw-content-text
div.mw-parser-output # 介绍页面
....
p ? # 可能存在的空p
p # 人物介绍
...
"""
result += msg + "\n"
img = soup.find("img", class_="infobox-image")
if img:
logger.info(f"照片{img["src"]}")
result += f"![ {msg} ]( {img["src"]} ) \n"
div = soup.find("div", class_="mw-parser-output")
if div:
p_tags = div.find_all("p")
for p_tag in p_tags:
p = str(p_tag)
p = re.sub(r"<script.*?</script>|<style.*?</style>", "", p, flags=re.DOTALL)
p = re.sub(r"<.*?>", "", p, flags = re.DOTALL)
p = re.sub(r"\[.*?]", "", p, flags = re.DOTALL)
if p != "":
result += str(p)
num += 1
if num >= 5:
break
return result
# 状态码非200或302
else : else :
logger.error(f"网络错误, 状态码 : {response.status_code}") logger.error(f"网络错误, 状态码 : {response.status_code}")
return f"网络错误, 状态码 : {response.status_code}" return f"网络错误, 状态码 : {response.status_code}"

View File

@ -10,7 +10,7 @@
"type": "function", "type": "function",
"function": { "function": {
"name": "marshoai-meogirl__search", "name": "marshoai-meogirl__search",
"description" : "在萌娘百科中搜索(仅用户指定在萌娘百科中搜索才调用此函数)", "description": "查找/搜索 某角色/事物 (使用萌娘百科)",
"parameters": { "parameters": {
"type": "object", "type": "object",
"properties": { "properties": {
@ -28,5 +28,24 @@
"msg" "msg"
] ]
} }
},
{
"type" : "function",
"function" : {
"name" : "marshoai-meogirl__introduce",
"description" : "介绍/展示 某角色/事物 (使用萌娘百科)",
"parameters" : {
"type" : "object",
"properties" : {
"msg" : {
"type": "string",
"description": "关键词"
}
}
},
"required": [
"msg"
]
}
} }
] ]