🧨 Add music preview feature; revise the point-calculation scheme

EillesWan 2024-07-23 03:21:03 +08:00
parent 4a460d6d41
commit 8188b85501
13 changed files with 1744 additions and 98 deletions

1
.gitignore vendored
View File

@ -21,6 +21,7 @@ src/resources/templates/latest-debug.html
src/plugins/trimo_plugin_msctconverter/config
src/plugins/trimo_plugin_msctconverter/temp
src/plugins/trimo_plugin_msctconverter/MusicPreview/assets/wav
# vuepress
.github

View File

@ -10,7 +10,7 @@ nonebot-adapter-onebot~=2.4.3
nonebot-plugin-alconna~=0.46.3
nonebot_plugin_apscheduler~=0.4.0
nonebot-adapter-satori~=0.11.5
numpy~=2.0.0
numpy<2.0.0
packaging~=23.1
psutil~=5.9.8
py-cpuinfo~=9.0.0
@ -31,4 +31,5 @@ python-dotenv~=1.0.1
nonebot_plugin_session
pypinyin
zhDateTime>=1.0.3
Musicreater>=2.2.0
Musicreater>=2.2.0
librosa==0.10.1
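The numpy requirement is tightened here from `numpy~=2.0.0` to `numpy<2.0.0`, most likely because the newly added `librosa==0.10.1` predates NumPy 2.x. A minimal post-install sanity check, sketched with the `packaging` dependency already listed above (not part of the commit):

```python
from importlib.metadata import version
from packaging.version import Version

# librosa 0.10.1 was released before NumPy 2.0, hence the numpy<2.0.0 pin.
assert Version(version("numpy")) < Version("2.0.0"), "expected NumPy < 2.0 for librosa 0.10.1"
assert Version(version("librosa")) == Version("0.10.1")
```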

View File

@ -41,7 +41,7 @@ def update_liteyuki() -> tuple[bool, str]:
except:
continue
else:
return False, "Nothing Changed"
return False, "未有更新内容"
else:
raise PermissionError("Update is not allowed.")
raise PermissionError("更新已被禁用")

View File

@ -1,11 +1,25 @@
## 版权声明
本插件由 汉钰律许可协议 授权开源,兼容并继承自 Apache 2.0 许可协议。
本插件引用 [MusicPreview](https://gitee.com/ElapsingDreams/MusicPreview) 项目代码作为库(Library)使用,并加以修改;\
此行为经过 Apache 2.0 许可协议 授权。
MusicPreview 著作声明:
@Author: Envision\
@Github: ElapsingDreams\
@Gitee: ElapsingDreams
本插件由 汉钰律许可协议 授权开源,兼容并继承自 [伶伦转换器](https://gitee.com/TriM-Organization/Linglun-Converter) 项目的 Apache 2.0 许可协议。
继承版权声明:
Copyright © 2024 金羿("Eilles Wan") & 诸葛亮与八卦阵("bgArray") with TriM Org.
版权所有 © 2024 金羿("Eilles Wan") & 诸葛亮与八卦阵("bgArray") with TriM Org.
项目版权声明:
版权所有 © 2024 金羿(Eilles) & 诸葛亮与八卦阵(bgArray) with TriM Org.
伶伦转换器(trimo_plugin_msctconverter)根据 第一版 汉钰律许可协议(“本协议”)授权。\
任何人皆可从以下地址获得本协议副本:[汉钰律许可协议 第一版](https://gitee.com/EillesWan/YulvLicenses/raw/master/%E6%B1%89%E9%92%B0%E5%BE%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE/%E6%B1%89%E9%92%B0%E5%BE%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.MD)。\

View File

@ -0,0 +1,9 @@
"""
@Author: Envision
@Github: ElapsingDreams
@Gitee: ElapsingDreams
@Email: None
@FileName: __init__.py
@DateTime: 2024/3/8 20:48
@SoftWare: PyCharm
"""

View File

@ -0,0 +1,383 @@
from typing import Dict, Tuple
MM_DISLINK_PITCHED_INSTRUMENT_TABLE: Dict[int, str] = {
0: "note.harp",
1: "note.harp",
2: "note.pling",
3: "note.harp",
4: "note.harp",
5: "note.harp",
6: "note.harp",
7: "note.harp",
8: "note.iron_xylophone", # 打击乐器无音域
9: "note.bell",
10: "note.iron_xylophone",
11: "note.iron_xylophone",
12: "note.iron_xylophone",
13: "note.iron_xylophone",
14: "note.chime",
15: "note.iron_xylophone",
16: "note.harp",
17: "note.harp",
18: "note.harp",
19: "note.harp",
20: "note.harp",
21: "note.harp",
22: "note.harp",
23: "note.harp",
24: "note.guitar",
25: "note.guitar",
26: "note.guitar",
27: "note.guitar",
28: "note.guitar",
29: "note.guitar",
30: "note.guitar",
31: "note.guitar",
32: "note.bass",
33: "note.bass",
34: "note.bass",
35: "note.bass",
36: "note.bass",
37: "note.bass",
38: "note.bass",
39: "note.bass",
40: "note.harp",
41: "note.flute",
42: "note.flute",
43: "note.flute",
44: "note.flute",
45: "note.harp",
46: "note.harp",
47: "note.harp",
48: "note.harp",
49: "note.harp",
50: "note.harp",
51: "note.harp",
52: "note.harp",
53: "note.harp",
54: "note.harp",
55: "note.harp",
56: "note.harp",
57: "note.harp",
58: "note.harp",
59: "note.harp",
60: "note.harp",
61: "note.harp",
62: "note.harp",
63: "note.harp",
64: "note.harp",
65: "note.harp",
66: "note.harp",
67: "note.harp",
68: "note.harp",
69: "note.harp",
70: "note.harp",
71: "note.harp",
72: "note.flute",
73: "note.flute",
74: "note.flute",
75: "note.flute",
76: "note.flute",
77: "note.flute",
78: "note.flute",
79: "note.flute",
80: "note.bit",
81: "note.bit",
82: "note.harp",
83: "note.harp",
84: "note.harp",
85: "note.harp",
86: "note.harp",
87: "note.harp",
88: "note.harp",
89: "note.harp",
90: "note.harp",
91: "note.harp",
92: "note.harp",
93: "note.harp",
94: "note.harp",
95: "note.harp",
96: "note.harp",
97: "note.harp",
98: "note.harp",
99: "note.harp",
100: "note.harp",
101: "note.harp",
102: "note.harp",
103: "note.harp",
104: "note.harp",
105: "note.banjo",
106: "note.harp",
107: "note.harp",
108: "note.harp",
109: "note.harp",
110: "note.harp",
111: "note.harp",
112: "note.cow_bell",
113: "note.harp",
114: "note.harp",
115: "note.bd",
116: "note.bd", # 打击乐器无音域
117: "note.bd",
118: "note.bd",
119: "note.harp", # 打击乐器无音域
120: "note.harp",
121: "note.harp",
122: "note.harp",
123: "note.harp",
124: "note.harp",
125: "note.harp", # 打击乐器无音域
126: "note.harp", # 打击乐器无音域
127: "note.harp", # 打击乐器无音域
}
"""“断联”乐音乐器对照表"""
# 105 'note.banjo'
# 32,33,34,35,36,37,38,39 'note.bass'
# 115,116,117,118'note.basedrum'
# 9'note.bell'
# 80,81'note.bit'
# 112'note.cow_bell'
# 72,73,74,75,76,77,78,79,41,42,43,44'note.flute'
# 24,25,26,27,28,29,30,31'note.guitar'
# 14'note.chime'
# 8,9,10,11,12,13,/*14,*/15'note.iron_xylophone'
# 2'note.pling'
# 'note.harp'
MM_DISLINK_PERCUSSION_INSTRUMENT_TABLE: Dict[int, str] = {
34: "note.bd",
35: "note.bd",
36: "note.snare",
37: "note.snare",
38: "note.bd",
39: "note.snare",
40: "note.bd",
41: "note.hat",
42: "note.bd",
43: "note.hat",
44: "note.bd",
45: "note.hat",
46: "note.bd",
47: "note.bd",
48: "note.bd",
49: "note.bd",
50: "note.bd",
51: "note.bd",
52: "note.bd",
53: "note.bd",
54: "note.bd",
55: "note.cow_bell",
56: "note.bd",
57: "note.bd",
58: "note.bd",
59: "note.bd",
60: "note.bd",
61: "note.bd",
62: "note.bd",
63: "note.bd",
64: "note.bd",
65: "note.bd",
66: "note.bd",
67: "note.bd",
68: "note.bd",
69: "note.bd",
70: "note.bd",
71: "note.bd",
72: "note.bd",
73: "note.bd",
74: "note.bd",
75: "note.bd",
76: "note.bd",
77: "note.bd",
78: "note.bd",
79: "note.bd",
80: "note.bd",
}
"""“断联”打击乐器对照表"""
# 55'note.cow_bell'
# 41,43,45'note.hat'
# 36,37,39'note.snare'
# 'note.bd'
MM_HARP_PITCHED_INSTRUMENT_TABLE: Dict[int, str] = {
0: "note.harp",
1: "note.harp",
2: "note.harp",
3: "note.harp",
4: "note.harp",
5: "note.harp",
6: "note.harp",
7: "note.harp",
8: "note.harp",
9: "note.harp",
10: "note.harp",
11: "note.harp",
12: "note.harp",
13: "note.harp",
14: "note.harp",
15: "note.harp",
16: "note.harp",
17: "note.harp",
18: "note.harp",
19: "note.harp",
20: "note.harp",
21: "note.harp",
22: "note.harp",
23: "note.harp",
24: "note.harp",
25: "note.harp",
26: "note.harp",
27: "note.harp",
28: "note.harp",
29: "note.harp",
30: "note.harp",
31: "note.harp",
32: "note.harp",
33: "note.harp",
34: "note.harp",
35: "note.harp",
36: "note.harp",
37: "note.harp",
38: "note.harp",
39: "note.harp",
40: "note.harp",
41: "note.harp",
42: "note.harp",
43: "note.harp",
44: "note.harp",
45: "note.harp",
46: "note.harp",
47: "note.harp",
48: "note.harp",
49: "note.harp",
50: "note.harp",
51: "note.harp",
52: "note.harp",
53: "note.harp",
54: "note.harp",
55: "note.harp",
56: "note.harp",
57: "note.harp",
58: "note.harp",
59: "note.harp",
60: "note.harp",
61: "note.harp",
62: "note.harp",
63: "note.harp",
64: "note.harp",
65: "note.harp",
66: "note.harp",
67: "note.harp",
68: "note.harp",
69: "note.harp",
70: "note.harp",
71: "note.harp",
72: "note.harp",
73: "note.harp",
74: "note.harp",
75: "note.harp",
76: "note.harp",
77: "note.harp",
78: "note.harp",
79: "note.harp",
80: "note.harp",
81: "note.harp",
82: "note.harp",
83: "note.harp",
84: "note.harp",
85: "note.harp",
86: "note.harp",
87: "note.harp",
88: "note.harp",
89: "note.harp",
90: "note.harp",
91: "note.harp",
92: "note.harp",
93: "note.harp",
94: "note.harp",
95: "note.harp",
96: "note.harp",
97: "note.harp",
98: "note.harp",
99: "note.harp",
100: "note.harp",
101: "note.harp",
102: "note.harp",
103: "note.harp",
104: "note.harp",
105: "note.harp",
106: "note.harp",
107: "note.harp",
108: "note.harp",
109: "note.harp",
110: "note.harp",
111: "note.harp",
112: "note.harp",
113: "note.harp",
114: "note.harp",
115: "note.harp",
116: "note.harp",
117: "note.harp",
118: "note.harp",
119: "note.harp",
120: "note.harp",
121: "note.harp",
122: "note.harp",
123: "note.harp",
124: "note.harp",
125: "note.harp",
126: "note.harp",
127: "note.harp",
}
"""“听个响纯harp”音乐乐器对照表"""
MM_HARP_PERCUSSION_INSTRUMENT_TABLE: Dict[int, str] = {
34: "note.harp",
35: "note.harp",
36: "note.harp",
37: "note.harp",
38: "note.harp",
39: "note.harp",
40: "note.harp",
41: "note.harp",
42: "note.harp",
43: "note.harp",
44: "note.harp",
45: "note.harp",
46: "note.harp",
47: "note.harp",
48: "note.harp",
49: "note.harp",
50: "note.harp",
51: "note.harp",
52: "note.harp",
53: "note.harp",
54: "note.harp",
55: "note.harp",
56: "note.harp",
57: "note.harp",
58: "note.harp",
59: "note.harp",
60: "note.harp",
61: "note.harp",
62: "note.harp",
63: "note.harp",
64: "note.harp",
65: "note.harp",
66: "note.harp",
67: "note.harp",
68: "note.harp",
69: "note.harp",
70: "note.harp",
71: "note.harp",
72: "note.harp",
73: "note.harp",
74: "note.harp",
75: "note.harp",
76: "note.harp",
77: "note.harp",
78: "note.harp",
79: "note.harp",
80: "note.harp",
}
"""“听个响纯harp”打击乐器对照表"""

View File

@ -0,0 +1,491 @@
"""
@Author: Envision
@Github: ElapsingDreams
@Gitee: ElapsingDreams
@Email: None
@FileName: main.py
@DateTime: 2024/3/8 18:41
@SoftWare: PyCharm
"""
import os
import pathlib
# import threading
import warnings
import Musicreater
# import mido
import numpy as np
# import sounddevice as sd
# import soundfile as sf
from Musicreater import MM_INSTRUMENT_DEVIATION_TABLE
from librosa import load as librosa_load
from librosa import resample as librosa_resample
from librosa.effects import pitch_shift as librosa_effects_pitch_shift
from librosa.effects import time_stretch as librosa_effects_time_stretch
# from MusicPreview.classes import MusicSequenceRepair
# from .constants import MM_DISLINK_PITCHED_INSTRUMENT_TABLE, MM_DISLINK_PERCUSSION_INSTRUMENT_TABLE, MM_HARP_PITCHED_INSTRUMENT_TABLE, MM_HARP_PERCUSSION_INSTRUMENT_TABLE
PATH = pathlib.Path(__file__)
# 我寻思着ASSETS直接内置咯
ASSETS_PATH = PATH.parent / "assets" / "wav"
"""已弃用"""
'''
INSTRUMENT_OFFSET_POS_TABLE: Dict[str, int] = {
"note.harp": 66, #
"note.pling": 66,
"note.guitar": 54, #
"note.iron_xylophone": 66, #
"note.bell": 90, #
"note.xylophone": 90, #
"note.chime": 90, #
"note.banjo": 66,
"note.flute": 78, #
"note.bass": 42, #
"note.snare": 0, # #
"note.didgeridoo": 42, #
"mob.zombie.wood": 0, # #
"note.bit": 66,
"note.hat": 0, # #
"note.bd": 0, # #
"note.basedrum": 0, # #
"firework.blast": 0, # #
"firework.twinkle": 0, # #
"fire.ignite": 0, # #
"note.cow_bell": 66,
}
"""不同乐器的音调偏离对照表"""
'''
class PreviewMusic:
"""
将Midi转为音频之参数
:param usr_input_path: str 用户输入midi文件路径
:param usr_output_path: str 用户输入音频文件输出路径
:param mode: bool 是否依照中文wiki定义pitch即 播放速度 新播放速度
:param out_sr: int 输出音频采样率即质量
"""
def __init__(
self,
musicsq: Musicreater.MusicSequence,
mode: int = 0,
gvm: int = 0,
out_sr: int = 44100,
overlay_channels: int = 1,
default_channel_num: int = 1,
):
# mode:
# 0-OriginLength
# 1-use_mc_player_define
# 2-matchMIDI-cut
# 3-matchMixing
# 4-matchMIDI-TSM
if (
overlay_channels not in [1, 2]
or default_channel_num not in [1, 2]
or mode not in [0, 1, 2, 3, 4]
):
raise ValueError("Illegal Value.")
self.music_seq = musicsq
self.in_path = None
self.out_path = None
self.mode = mode
self.out_sr = out_sr
self.gvm = gvm
self.assets_dict = {}
self.cache_dict = {}
self.oc = overlay_channels
self.dc = default_channel_num
self.dev_list = self.__init_midi__()
# self.dev_list = self.__init_midi__()
# 预读取
self.__int_read_assets()
# 预生成
self.__init_cache()
def __init_midi__(self):
# MusicSequence return: Tuple[Mapping[int, List[MineNote]], int, Dict[str, int], Dict[str, int]]
# List[List[ str[sound_ID] int[midi_note_pitch] int[mc_tick_pos注意是多少tick《位置》执行] ]]
"""ii = 1
for i in [i for j in Musicreater.MusicSequence.to_music_note_channels(
mido.MidiFile(
self.in_path,
clip=True,
),
)[0].values() for i in j]:
print(f"{i.sound_name}\t{i.note_pitch - 60 - MM_INSTRUMENT_DEVIATION_TABLE.get(i.sound_name, 6) if not i.percussive else None}\t{i.note_pitch - INSTRUMENT_OFFSET_POS_TABLE[i.sound_name] if not i.percussive else None}")
"""
return sorted(
(
(
i.sound_name,
(
i.note_pitch
- 60
- MM_INSTRUMENT_DEVIATION_TABLE.get(i.sound_name, 6)
if not i.percussive
else None
),
i.start_tick,
i.velocity / 127,
i.duration,
)
for i in sorted(
[i for j in self.music_seq.channels.values() for i in j],
key=lambda note: note.start_tick,
)
),
key=lambda x: x[2],
)
def __int_read_assets(self):
files = [os.path.join(ASSETS_PATH, file) for file in os.listdir(ASSETS_PATH)]
for file in files:
self.assets_dict[os.path.split(file)[1].rsplit(".wav", 1)[0]] = (
librosa_load(file, sr=None)
)
def __init_cache(self):
# print(self.dev_list)
for item in set(
[(ii[0], ii[1], ii[4]) for ii in self.dev_list]
): # 初始化音频数据 set( List[List[ str[sound_ID] int[midi_note_pitch] int[mc_tick_delay注意是多少tick《位置》执行] ]])
y_orig, sr_orig = self.assets_dict[item[0]]
if self.oc == 2 and len(y_orig.shape) == 1:
warnings.warn("Meaningless")
y_orig = np.array([y_orig, y_orig])
# print(y_orig)
elif self.oc == 1 and len(y_orig.shape) == 2:
y_orig = np.array(y_orig[self.dc])
if item[1]: # 适配打击乐
# n_step = item[1] - INSTRUMENT_OFFSET_POS_TABLE[item[0]]
# n_step = item[1]
# times = 2 ** (item[1] / 12)
raw_name = item[0] + "." + str(item[1])
if self.mode == 1:
# 变调, 时域压扩, 重采样 mc方法
self.cache_dict[raw_name] = librosa_resample(
librosa_effects_time_stretch(
librosa_effects_pitch_shift(
y_orig, sr=sr_orig, n_steps=item[1]
),
rate=2 ** (item[1] / 12),
),
orig_sr=sr_orig,
target_sr=self.out_sr,
fix=False,
)
elif self.mode == 0:
# 重采样, 变调
self.cache_dict[raw_name] = librosa_resample(
librosa_effects_pitch_shift(
y_orig, sr=sr_orig, n_steps=item[1]
),
orig_sr=sr_orig,
target_sr=self.out_sr,
fix=False,
)
elif self.mode == 4:
# 变调, 时域压扩, 重采样 MIDI-FFT
if self.oc == 2:
rate = item[2] / 20 / (len(y_orig[0]) / sr_orig)
rate = rate if rate != 0 else 1
self.cache_dict[raw_name] = librosa_resample(
librosa_effects_time_stretch(
librosa_effects_pitch_shift(
y_orig, sr=sr_orig, n_steps=item[1]
),
rate=rate,
),
orig_sr=sr_orig,
target_sr=self.out_sr,
fix=False,
)
else:
rate = item[2] / 20 / (len(y_orig) / sr_orig)
rate = rate if rate != 0 else 1
self.cache_dict[raw_name] = librosa_resample(
librosa_effects_time_stretch(
librosa_effects_pitch_shift(
y_orig, sr=sr_orig, n_steps=item[1]
),
rate=rate,
),
orig_sr=sr_orig,
target_sr=self.out_sr,
fix=False,
)
elif self.mode == 2:
# 变调, 时域压扩, 重采样 MIDI-cut
if self.oc == 2:
deal = librosa_effects_pitch_shift(
y_orig, sr=sr_orig, n_steps=item[1]
)[
...,
: (
int(item[2] / 20 * sr_orig)
if item[2] / 20 * sr_orig > len(y_orig[0])
else len(y_orig[0])
),
]
else:
deal = librosa_effects_pitch_shift(
y_orig, sr=sr_orig, n_steps=item[1]
)[
: (
int(item[2] / 20 * sr_orig)
if item[2] / 20 * sr_orig > len(y_orig)
else len(y_orig)
)
]
self.cache_dict[raw_name] = librosa_resample(
deal, orig_sr=sr_orig, target_sr=self.out_sr, fix=False
)
else:
raw_name = item[0]
# if self.mode == 1:
# 重采样, 不变调
self.cache_dict[raw_name] = librosa_resample(
y_orig, orig_sr=sr_orig, target_sr=self.out_sr, fix=False
)
"""
elif self.mode == 0:
# 重采样, 不变调, 衰弱
self.cache_dict[raw_name] = librosa_resample(
y_orig,
orig_sr=sr_orig,
target_sr=self.out_sr,
fix=False
)
"""
del self.assets_dict
def to_wav(self) -> np.ndarray:
# 这玩意,真的太离谱。。虽然早考虑到这个问题,但在眼皮子底下我都没想着去改()
# 真的 我盯着这玩意想了大半个小时
# 我 是 __ __
# 遍历一次devlist当前位置采样长度+对应音频采样长度 组成数组,找最大
# len(self.cache_dict[(self.dev_list[i-1][0] + "." + str(
# self.dev_list[i-1][1] - INSTRUMENT_OFFSET_POS_TABLE[self.dev_list[i-1][0]])) if self.dev_list[i-1][1] else
# self.dev_list[i-1][0]])
# max_duration = int(max([(i[2] * 0.05 * self.out_sr + len((self.cache_dict[i[0] + "." + str(i[1] - INSTRUMENT_OFFSET_POS_TABLE[i[0]])]) if i[1] else self.cache_dict[i[0]])) for i in self.dev_list]))
# wav_model = np.zeros(max_duration, dtype=np.float32)
# - INSTRUMENT_OFFSET_POS_TABLE[i[0]]
if self.oc == 1:
def overlay(seg_overlay: np.ndarray, pos_tick: int):
pos_ = int(out_sr * pos_tick * 0.05)
# print(pos_, seg_overlay.size, wav_model.size, wav_model[pos_:seg_overlay.size + pos_].size, seg_overlay.dtype)
wav_model[pos_ : seg_overlay.size + pos_] += seg_overlay
wav_model = np.zeros(
int(
max(
[
(
i[2] * 0.05 * self.out_sr
+ len(
(self.cache_dict[i[0] + "." + str(i[1])])
if i[1]
else self.cache_dict[i[0]]
)
)
for i in self.dev_list
]
)
),
dtype=np.float32,
)
elif self.oc == 2:
def overlay(seg_overlay: np.ndarray, pos_tick: int):
pos_ = int(out_sr * pos_tick * 0.05)
# print(pos_, seg_overlay.size, wav_model.size, wav_model[pos_:seg_overlay.size + pos_].size, seg_overlay.dtype)
wav_model[..., pos_ : len(seg_overlay[0]) + pos_] += seg_overlay
wav_model = np.zeros(
(
2,
int(
max(
[
(
i[2] * 0.05 * self.out_sr
+ len(
(self.cache_dict[i[0] + "." + str(i[1])][0])
if i[1]
else self.cache_dict[i[0]]
)
)
for i in self.dev_list
]
)
),
),
dtype=np.float32,
)
else:
raise ValueError("illegal overlay_mode")
out_sr = self.out_sr
i = 0
for item in self.dev_list:
if item[1]: # 适配打击乐
# n_step = item[1] - INSTRUMENT_OFFSET_POS_TABLE[item[0]]
raw_name = item[0] + "." + str(item[1])
# print(self.cache_dict[raw_name].shape, "\n")
overlay(self.cache_dict[raw_name] * item[3], item[2])
else:
raw_name = item[0]
# print(self.cache_dict[raw_name].shape, "\n")
overlay(self.cache_dict[raw_name] * item[3], item[2])
# print(self.dev_list[-1][1] ,self.dev_list[-1][0])
i += 1
# print(i, len(self.dev_list))
if self.gvm == 0:
# 归一化,抚摸耳朵 (bushi
max_val = np.max(np.abs(wav_model))
if not max_val == 0:
wav_model = wav_model / max_val
elif self.gvm == 1:
wav_model[wav_model > 1] = 1
wav_model[wav_model < -1] = -1
if self.oc == 2:
return wav_model.T
else:
return wav_model[:, np.newaxis]
# # 请使用本函数进行导出
# def to_wav_file(self, out_file_path):
# sf.write(
# out_file_path,
# self.to_wav(),
# samplerate=self.out_sr,
# format="wav",
# )
# def play(self):
# event = threading.Event()
# data, fs = self.to_wav(), self.out_sr
# if self.oc == 1:
# data = data[:, np.newaxis]
# self.current_frame = 0
# def callback(outdata, frames, time, status): # CALLBACK need
# if status:
# print(status)
# chunksize = min(len(data) - self.current_frame, frames)
# outdata[:chunksize] = data[self.current_frame:self.current_frame + chunksize]
# if chunksize < frames:
# outdata[chunksize:] = 0
# raise sd.CallbackStop()
# self.current_frame += chunksize
# stream = sd.OutputStream(
# samplerate=fs, device=None, channels=self.oc,
# callback=callback, finished_callback=event.set)
# with stream:
# event.wait() # Wait until playback is finished
# @staticmethod
# def _to_rel_mctick(messages):
# rel_messages = []
# now = 0
# for msg in messages:
# delta = msg[2] - now
# rel_messages.append((msg[0], msg[1], delta, msg[3], msg[4]))
# now = msg[2]
# return rel_messages
# def stream(self):
# event = threading.Event()
# self.end = int(self.out_sr * self.dev_list[-1][2] * 0.05)
# self.current_frame = 0
# self.pos = 0
# if self.oc == 1:
# def overlay(seg_overlay: np.ndarray, pos_tick: int):
# pos_ = int(self.out_sr * pos_tick * 0.05)
# # print(pos_, seg_overlay.size, wav_model.size, wav_model[pos_:seg_overlay.size + pos_].size, seg_overlay.dtype)
# wav_model[pos_:seg_overlay.size + pos_] += seg_overlay
# wav_model = np.zeros(int(max([(i[2] * 0.05 * self.out_sr +
# len((self.cache_dict[i[0] + "." + str(i[1])])
# if i[1] else self.cache_dict[i[0]])) for i in self.dev_list])),
# dtype=np.float32)
# elif self.oc == 2:
# def overlay(seg_overlay: np.ndarray, pos_tick: int):
# pos_ = int(self.out_sr * pos_tick * 0.05)
# # print(pos_, seg_overlay.size, wav_model.size, wav_model[pos_:seg_overlay.size + pos_].size, seg_overlay.dtype)
# wav_model[..., pos_:len(seg_overlay[0]) + pos_] += seg_overlay
# wav_model[wav_model > 1] = 1
# wav_model[wav_model < -1] = -1
# wav_model = np.zeros((2, int(max([(i[2] * 0.05 * self.out_sr +
# len((self.cache_dict[i[0] + "." + str(i[1])][0])
# if i[1] else self.cache_dict[i[0]])) for i in self.dev_list]))),
# dtype=np.float32)
# else:
# raise ValueError("illegal overlay_mode")
# i = 0
# def callback(outdata, frames, _, status): # CALLBACK need
# if status:
# print(status)
# chunksize = min(len(wav_model) - self.current_frame, frames)
# if self.pos < self.current_frame + chunksize and self.pos < self.end:
# outdata[:] = 0
# else:
# if self.oc == 1:
# outdata[:chunksize] = wav_model[:, np.newaxis][self.current_frame:self.current_frame + chunksize]
# else:
# outdata[:chunksize] = wav_model[self.current_frame:self.current_frame + chunksize]
# if chunksize < frames:
# outdata[chunksize:] = 0
# raise sd.CallbackStop()
# self.current_frame += chunksize
# stream = sd.OutputStream(
# samplerate=self.out_sr, device=None, channels=self.oc,
# callback=callback, finished_callback=event.set)
# with stream:
# for item in self.dev_list:
# self.pos = int(self.out_sr * item[2] * 0.05)
# if item[1]: # 适配打击乐
# # n_step = item[1] - INSTRUMENT_OFFSET_POS_TABLE[item[0]]
# raw_name = item[0] + "." + str(item[1])
# # print(self.cache_dict[raw_name].shape, "\n")
# overlay(self.cache_dict[raw_name] * item[3], item[2])
# else:
# raw_name = item[0]
# # print(self.cache_dict[raw_name].shape, "\n")
# overlay(self.cache_dict[raw_name] * item[3], item[2])
# # print(self.dev_list[-1][1] ,self.dev_list[-1][0])
# i += 1
# # print(i, len(self.dev_list))
# event.wait() # Wait until playback is finished
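A hedged usage sketch for this class, mirroring the call sites added in `mspvexec.py` later in this commit (the input path is a placeholder). Internally one Minecraft tick is treated as 50 ms, so a note at `start_tick` t lands at sample `int(out_sr * t * 0.05)` in the mix:

```python
import Musicreater
import soundfile

# Convert a MIDI file with Musicreater, then render a preview wav, as mspvexec.py does.
msct_obj = Musicreater.MidiConvert.from_midi_file(
    midi_file_path="demo.mid",  # placeholder path
    mismatch_error_ignorance=True,
)
preview = PreviewMusic(
    msct_obj, mode=1, gvm=1, out_sr=44100,
    overlay_channels=1, default_channel_num=1,
)
soundfile.write("demo_preview.wav", preview.to_wav(), samplerate=preview.out_sr, format="wav")
```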

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -2,6 +2,7 @@
from nonebot.plugin import PluginMetadata
from .msctexec import *
from .mspvexec import *
__author__ = "金羿Eilles"
__plugin_meta__ = PluginMetadata(

View File

@ -1,4 +1,3 @@
## 伶伦转换器 - 机器人版使用文档
命令为标题,后面是功能和子命令。
@ -11,46 +10,65 @@
**以下所有命令中,若字符串类型的参数需要且可以填入多个内容,则可用 `all` 代替参数中的全部数据;此情况下,也可以用和符`&`分割多个你想要填写的信息;同样的,若参数中需要包含空格,则须以英文双引号`"`扩起。**
### llmscvt | linglun_convert | 音乐转换 | midi转换 | 转换音乐 | linglun_music_convert
### llmscvt | linglun_convert | 音乐转换 | midi 转换 | 转换音乐 | linglun_music_convert
转换midi音乐到指定的格式支持批量格式批量文件。每次转换默认基础增加 0.5 点数,每多一种转换格式多增加 0.5 点数,`MSQ`格式不计入后续点数消耗。每日点数在凌晨四时整归零点数达到20则不可进行转换。
转换 midi 音乐到指定的格式,支持批量格式批量文件。每次转换基础随机消耗一次点数,每多一种转换格式多消耗一次附加点数,`MSQ`格式不计入后续附加点数消耗。每日点数在凌晨四时整重置。每人每天默认点数 25每次消耗点数随机于 [0.3,0.8]。**若短时间内已使用同样的参数运行过一次 音乐合成 命令,则不消耗基础点数**
- `-f | --file <字符串>` : 缓存中的midi文件名称需提前上传mid文件默认为`all`
- `-f | --file <字符串>` : 缓存中的 midi 文件名称,需提前上传 mid 文件;默认为`all`
- `-emr | --enable-mismatch-error` : 对音符的不匹配报错;默认为关
- `-emr | --enable-mismatch-error` : 对音符的不匹配报错;默认为关
- `-ps | --play-speed <小数>` : 播放速度;默认为`1.0`
- `-ps | --play-speed <小数>` : 播放速度;默认为`1.0`
- `-dftp | --default-tempo <整数>` : 默认的tempo默认为`500000`
- `-dftp | --default-tempo <整数>` : 默认的 tempo默认为`500000`
- `-ptc | --pitched-note-table <字符串>` : **不可多填** : 乐音乐器对照表需要提前上传json文件此处输入缓存中的json文件名称或者默认存有的三组对照表名称`touch`、`classic`、`dislink`;默认为`touch`
- `-ptc | --pitched-note-table <字符串>` : **不可多填** : 乐音乐器对照表,需要提前上传 json 文件,此处输入缓存中的 json 文件名称,或者默认存有的三组对照表名称:`touch`、`classic`、`dislink`;默认为`touch`
- `-pcs | --percussion-note-table <字符串>` : **不可多填** : 打击乐器对照表需要提前上传json文件此处输入缓存中的json文件名称或者默认存有的三组对照表名称`touch`、`classic`、`dislink`;默认为`touch`
- `-pcs | --percussion-note-table <字符串>` : **不可多填** : 打击乐器对照表,需要提前上传 json 文件,此处输入缓存中的 json 文件名称,或者默认存有的三组对照表名称:`touch`、`classic`、`dislink`;默认为`touch`
- `-e | --old-execute-format` : 是否使用旧版execute指令格式默认为关
- `-e | --old-execute-format` : 是否使用旧版 execute 指令格式;默认为关
- `-mv | --minimal-volume <小数>` : 最小播放音量;默认为`0.1`
- `-mv | --minimal-volume <小数>` : 最小播放音量;默认为`0.1`
- `-vpf | --volume-processing-function <字符串>` : 音量处理函数,支持两种音量函数:`natural`、`straight`;默认为`natural`
- `-vpf | --volume-processing-function <字符串>` : 音量处理函数,支持两种音量函数:`natural`、`straight`;默认为`natural`
- `-t | --type <字符串>` : 转换结果类型,支持的类型有:`addon-delay`、`addon-score`、 `mcstructure-dalay`、`mcstructure-score`、`bdx-delay`、`bdx-score`、`msq`;默认为`all`
- `-t | --type <字符串>` : 转换结果类型,支持的类型有:`addon-delay`、`addon-score`、 `mcstructure-dalay`、`mcstructure-score`、`bdx-delay`、`bdx-score`、`msq`;默认为`all`
- `-htp | --high-time-precision` : **仅当结果类型包含 `msq` 时生效** : 是否使用高精度时间存储MSQ文件默认为关
- `-htp | --high-time-precision` : **仅当结果类型包含 `msq` 时生效** : 是否使用高精度时间存储 MSQ 文件;默认为关
- `-pgb | --progress-bar <字符串> <字符串> <字符串>` : **仅当结果包含 `addon-*`、`bdx-*` 之一时生效、不可多填** : 进度条样式,参照[进度条自定义](https://gitee.com/TriM-Organization/Musicreater/blob/master/docs/%E5%BA%93%E7%9A%84%E7%94%9F%E6%88%90%E4%B8%8E%E5%8A%9F%E8%83%BD%E6%96%87%E6%A1%A3.md#%E8%BF%9B%E5%BA%A6%E6%9D%A1%E8%87%AA%E5%AE%9A%E4%B9%89),以空格拆分三个字符串;默认请查阅上述文档
- `-pgb | --progress-bar <字符串> <字符串> <字符串>` : **仅当结果包含 `addon-*`、`bdx-*` 之一时生效、不可多填** : 进度条样式,参照[进度条自定义](https://gitee.com/TriM-Organization/Musicreater/blob/master/docs/%E5%BA%93%E7%9A%84%E7%94%9F%E6%88%90%E4%B8%8E%E5%8A%9F%E8%83%BD%E6%96%87%E6%A1%A3.md#%E8%BF%9B%E5%BA%A6%E6%9D%A1%E8%87%AA%E5%AE%9A%E4%B9%89),以空格拆分三个字符串;默认请查阅上述文档
- `-s | --scoreboard-name <字符串>` : **仅当结果类型包含 `*-score` 之一时生效、不可多填** : 播放使用的计分板名称;默认为`mscplay`
- `-s | --scoreboard-name <字符串>` : **仅当结果类型包含 `*-score` 之一时生效、不可多填** : 播放使用的计分板名称;默认为`mscplay`
- `-dsa | --disable-scoreboard-autoreset` : **仅当结果类型包含 `*-score` 之一时生效** : 是否禁用计分板自动重置;默认为关
- `-dsa | --disable-scoreboard-autoreset` : **仅当结果类型包含 `*-score` 之一时生效** : 是否禁用计分板自动重置;默认为关
- `-p | --player-selector <字符串>` : **仅当结果类型包含 `*-delay` 之一时生效、不可多填** : 播放使用的玩家选择器;默认为`@a`
- `-p | --player-selector <字符串>` : **仅当结果类型包含 `*-delay` 之一时生效、不可多填** : 播放使用的玩家选择器;默认为`@a`
- `-h | --height-limit <整数>` : **仅当结果类型包含 `*-delay`、`bdx-*` 之一时生效** : 生成结构的最大高度限制;默认为`32`
- `-h | --height-limit <整数>` : **仅当结果类型包含 `*-delay`、`bdx-*` 之一时生效** : 生成结构的最大高度限制;默认为`32`
- `-a | --author <字符串>` : **仅当结果类型包含 `bdx-*` 之一时生效、不可多填** : 音乐文件的作者署名;默认为`Eilles`
- `-a | --author <字符串>` : **仅当结果类型包含 `bdx-*` 之一时生效、不可多填** : 音乐文件的作者署名;默认为`Eilles`
- `-fa | --forward-axis <字符串>` : **仅当结果类型包含 `*-repeater` 之一时生效、不可多填** : 生成结构的朝向;默认为`z+`**未来功能**
- `-fa | --forward-axis <字符串>` : **仅当结果类型包含 `*-repeater` 之一时生效、不可多填** : 生成结构的朝向;默认为`z+`**未来功能**
### mscprv | music_preview | 预览音乐效果 | 预览音乐 | 音乐预览 | 音乐合成 | midi 合成
生成 midi 音乐的《我的世界》播放预览效果。每次转换基础随机消耗一次点数,**若短时间内已使用同样的参数运行过一次 音乐转换 命令,则不消耗基础点数**,并随机消耗附加点数 [1.3, 2.9] 。该命令与上文中的 `音乐转换` 命令共享点数信息。
- `-n | --file-name` : 缓存中的 midi 文件名称,需提前上传 mid 文件;默认为`all`
- `-m | --mode` : 合成模式,支持以下内容。默认为 `0`
- 0 原始长度,不变化时长
- 1 拉伸至 mc 播放器定义(我的世界效果)
- 2 根据 midi 音符长度裁剪
- 3 混音预留
- 4 匹配 midi 音符长度(最佳效果)
- `-o | --output-file` : 是否输出文件,默认为`False`
- 以下命令同上 音乐转换 参数
- `-emr | --enable-mismatch-error`
- `-ps | --play-speed`
- `-dftp | --default-tempo`
- `-ptc | --pitched-note-table`
- `-pcs | --percussion-note-table`
- `-vpf | --volume-processing-function`
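A worked sketch of the shared point budget described above, assuming the documented figures: a daily pool of 25 points reset at 04:00, one base draw from [0.3, 0.8] per conversion plus one extra draw per additional output format (`MSQ` exempt), and one draw from [1.3, 2.9] per preview synthesis. The helper names below are illustrative only:

```python
import random

DAILY_POINTS = 25.0  # reset to this value at 04:00 every day

def conversion_cost(extra_formats: int) -> float:
    # one base draw plus one additional draw per extra output format
    return sum(random.uniform(0.3, 0.8) for _ in range(1 + extra_formats))

def preview_cost() -> float:
    return random.uniform(1.3, 2.9)

remaining = DAILY_POINTS - conversion_cost(extra_formats=2) - preview_cost()
print(f"points left today: {remaining:.2f}")
```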
### 查看缓存 | listCache | 查看文件缓存 | 查看缓存文件
@ -63,5 +81,3 @@
### 转换帮助 | 查看转换帮助 | 查看帮助 | cvt_help | convert_help | cvthlp
查看此帮助文档

Binary file not shown (image replaced: 1.8 MiB before, 564 KiB after).

View File

@ -2,16 +2,18 @@ import os
import sys
import time
import json
import random
# import uuid
import shutil
import requests
from io import StringIO
from pathlib import Path
from typing import Annotated, Any
# from nonebot import require
import requests
import zhDateTime
import Musicreater
@ -49,7 +51,8 @@ from src.utils.base.ly_typing import T_Bot, T_MessageEvent
from src.utils import event as event_utils
from src.utils.base.language import get_user_lang
from src.utils.base.config import get_config
# from src.utils.base.config import get_config
from src.utils.message.message import MarkdownMessage
from .execute_auto_translator import auto_translate # type: ignore
@ -95,7 +98,7 @@ else:
".json": 8192,
},
"maxPersonConvert": {
"music": 20,
"music": 25,
"structure": 20,
},
}
@ -146,15 +149,43 @@ save_filesaves()
enable_auto_exe_translate = {}
people_convert_times = {}
people_convert_point: dict[str, dict[str, dict[str, float | Any]]] = {}
def query_convert_points(
usr_id: str, item: str, decline: float = 0, store: Any = None
) -> tuple[Any, float]:
global people_convert_point
if usr_id in people_convert_point:
if item in people_convert_point[usr_id]:
if store:
people_convert_point[usr_id][item][item] = store
if people_convert_point[usr_id][item]["point"] >= decline:
people_convert_point[usr_id][item]["point"] -= decline
return (
people_convert_point[usr_id][item].get(item, None),
people_convert_point[usr_id][item]["point"],
)
else:
return False, people_convert_point[usr_id][item]["point"]
else:
people_convert_point[usr_id][item] = {
"point": configdict["maxPersonConvert"][item] - decline,
item: store,
}
return store, people_convert_point[usr_id][item]["point"]
people_convert_point[usr_id] = {
item: {"point": configdict["maxPersonConvert"][item] - decline, item: store}
}
return store, people_convert_point[usr_id][item]["point"]
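A hedged walkthrough of `query_convert_points`, assuming `configdict["maxPersonConvert"]["music"] == 25`; the user ID and cached payload below are placeholders:

```python
cached_payload = ("midi-convert-object", ("conversion", "params"))  # stand-in for (MidiConvert, args)

# The first call for a user creates the record and spends the decline.
stored, pts = query_convert_points("10001", "music", 0.5)                # -> (None, 24.5)
# Passing store=... caches a payload alongside the remaining points.
stored, pts = query_convert_points("10001", "music", 0, cached_payload)  # -> (cached_payload, 24.5)
# A later decline=0 call returns the cached payload without spending anything.
cached, pts = query_convert_points("10001", "music", 0)                  # -> (cached_payload, 24.5)
# If the requested decline exceeds the remaining points, the first element is False.
ok, pts = query_convert_points("10001", "music", 999)                    # -> (False, 24.5)
```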
# 每天1点更新
@scheduler.scheduled_job("cron", hour=4)
async def every_day_update():
# ulang = Language(get_default_lang_code(), "zh-WY")
global people_convert_times
people_convert_times = {}
global people_convert_point
people_convert_point = {}
nonebot.logger.success("已重置每日转换次数")
@ -193,6 +224,9 @@ async def _():
os.remove(database_dir / qqid / name)
except:
pass
if qqid in people_convert_point:
del people_convert_point[qqid]
filesaves[qqid]["totalSize"] -= filesaves[qqid][name]["size"]
nonebot.logger.info(
"\t删除{}".format(name),
@ -372,7 +406,10 @@ async def _(
event: GroupMessageEvent,
bot: T_Bot,
):
if (usr_id := str(event.user_id)) in filesaves.keys():
if (usr_id := str(event.user_id)) in people_convert_point:
del people_convert_point[usr_id]
if usr_id in filesaves.keys():
shutil.rmtree(database_dir / usr_id)
genText = (
"".join([i if i != "totalSize" else "" for i in filesaves[usr_id].keys()])
@ -537,19 +574,16 @@ async def _(
usr_id = str(event.user_id)
if usr_id not in people_convert_times.keys():
people_convert_times[usr_id] = 0
else:
if people_convert_times[usr_id] > configdict["maxPersonConvert"]["music"]:
await linglun_convert.finish(
UniMessage.text(
"你今天音乐转换点数超限: {}/{}".format(
people_convert_times[usr_id],
configdict["maxPersonConvert"]["music"],
)
),
at_sender=True,
)
if (qres := query_convert_points(usr_id, "music"))[0] is False:
await linglun_convert.finish(
UniMessage.text(
"转换点数不足,当前剩余:{}|{}".format(
qres[1],
configdict["maxPersonConvert"]["music"],
)
),
at_sender=True,
)
if usr_id not in filesaves.keys():
await linglun_convert.finish(
@ -681,6 +715,19 @@ async def _(
sys.stdout = buffer
sys.stderr = buffer
def go_chk_point() -> bool:
res, pnt = query_convert_points(
usr_id,
"music",
random.random() % 0.5 + 0.3,
)
if res is False:
buffer.write("中途退出,转换点不足:{}\n".format(pnt))
return False
else:
return True
# return res, pnt
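The base-cost draw in `go_chk_point` is `random.random() % 0.5 + 0.3`: `random.random()` already lies in [0, 1), and the `% 0.5` folds it into [0, 0.5), so each draw costs between 0.3 and 0.8 points, matching the range stated in the usage document. A quick standalone check of that claim:

```python
import random

draws = [random.random() % 0.5 + 0.3 for _ in range(100_000)]
assert 0.3 <= min(draws) and max(draws) < 0.8
```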
try:
progress_bar_style = (
@ -698,20 +745,78 @@ async def _(
):
if file_to_convert.endswith(".mid") or file_to_convert.endswith(".midi"):
nonebot.logger.info("载入转换文件:{}".format(file_to_convert))
all_files[file_to_convert] = {}
msct_obj = Musicreater.MidiConvert.from_midi_file(
midi_file_path=usr_data_path / file_to_convert,
mismatch_error_ignorance=not _args["enable-mismatch-error"],
play_speed=_args["play-speed"],
default_tempo=_args["default-tempo"],
pitched_note_table=pitched_notechart,
percussion_note_table=percussion_notechart,
old_exe_format=_args["old-execute-format"],
min_volume=_args["minimal-volume"],
vol_processing_func=volume_curve,
)
people_convert_times[usr_id] += 0.5
all_files[file_to_convert] = {}
if (
((msct_obj := query_convert_points(usr_id, "music", 0)[0]) is None)
or (
isinstance(msct_obj, tuple)
and (
isinstance(msct_obj[0], Musicreater.MidiConvert)
and msct_obj[1]
!= (
not _args["enable-mismatch-error"],
_args["play-speed"],
_args["default-tempo"],
pitched_notechart,
percussion_notechart,
volume_curve,
)
)
)
) and go_chk_point():
msct_obj = Musicreater.MidiConvert.from_midi_file(
midi_file_path=usr_data_path / file_to_convert,
mismatch_error_ignorance=not _args["enable-mismatch-error"],
play_speed=_args["play-speed"],
default_tempo=_args["default-tempo"],
pitched_note_table=pitched_notechart,
percussion_note_table=percussion_notechart,
old_exe_format=_args["old-execute-format"],
min_volume=_args["minimal-volume"],
vol_processing_func=volume_curve,
)
query_convert_points(
usr_id,
"music",
0,
(
msct_obj,
(
not _args["enable-mismatch-error"],
_args["play-speed"],
_args["default-tempo"],
pitched_notechart,
percussion_notechart,
volume_curve,
),
),
)
elif isinstance(msct_obj, tuple) and (
isinstance(msct_obj[0], Musicreater.MidiConvert)
and msct_obj[1]
== (
not _args["enable-mismatch-error"],
_args["play-speed"],
_args["default-tempo"],
pitched_notechart,
percussion_notechart,
volume_curve,
)
):
msct_obj = msct_obj[0]
msct_obj.redefine_execute_format(_args["old-execute-format"])
msct_obj.set_min_volume(_args["minimal-volume"])
# msct_obj.set_deviation()
else:
buffer.write(
"点数不足或出现错误:{}".format(
_args,
)
)
break
# people_convert_point[usr_id] += 0.5
if "msq" in all_cvt_types:
all_files[file_to_convert]["msq"] = {"MSQ版本": "2-MSQ@"}
@ -723,7 +828,7 @@ async def _(
)
)
if "addon-delay" in all_cvt_types:
if go_chk_point() and "addon-delay" in all_cvt_types:
all_files[file_to_convert]["addon-delay"] = dict(
zip(
["指令数量", "音乐刻长"],
@ -736,10 +841,10 @@ async def _(
),
)
)
people_convert_times[usr_id] += 0.5
# people_convert_point[usr_id] += 0.5
# all_cvt_types.remove("addon-delay")
if "addon-score" in all_cvt_types:
if go_chk_point() and "addon-score" in all_cvt_types:
all_files[file_to_convert]["addon-score"] = dict(
zip(
["指令数量", "音乐刻长"],
@ -752,10 +857,10 @@ async def _(
),
)
)
people_convert_times[usr_id] += 0.5
# people_convert_point[usr_id] += 0.5
# all_cvt_types.remove("addon-score")
if "mcstructure-dalay" in all_cvt_types:
if go_chk_point() and "mcstructure-dalay" in all_cvt_types:
all_files[file_to_convert]["mcstructure-dalay"] = dict(
zip(
["结构尺寸", "音乐刻长"],
@ -767,10 +872,10 @@ async def _(
),
)
)
people_convert_times[usr_id] += 0.5
# people_convert_point[usr_id] += 0.5
# all_cvt_types.remove("mcstructure-dalay")
if "mcstructure-score" in all_cvt_types:
if go_chk_point() and "mcstructure-score" in all_cvt_types:
all_files[file_to_convert]["mcstructure-score"] = dict(
zip(
["结构尺寸", "音乐刻长", "指令数量"],
@ -783,10 +888,10 @@ async def _(
),
)
)
people_convert_times[usr_id] += 0.5
# people_convert_point[usr_id] += 0.5
# all_cvt_types.remove("mcstructure-score")
if "bdx-delay" in all_cvt_types:
if go_chk_point() and "bdx-delay" in all_cvt_types:
all_files[file_to_convert]["bdx-delay"] = dict(
zip(
[
@ -805,10 +910,10 @@ async def _(
),
)
)
people_convert_times[usr_id] += 0.5
# people_convert_point[usr_id] += 0.5
# all_cvt_types.remove("bdx-delay")
if "bdx-score" in all_cvt_types:
if go_chk_point() and "bdx-score" in all_cvt_types:
all_files[file_to_convert]["bdx-score"] = dict(
zip(
[
@ -828,7 +933,7 @@ async def _(
),
)
)
people_convert_times[usr_id] += 0.5
# people_convert_point[usr_id] += 0.5
# all_cvt_types.remove("bdx-score")
elif file_to_convert != "totalSize":
nonebot.logger.warning(
@ -836,19 +941,6 @@ async def _(
)
buffer.write("文件 {} 已跳过\n".format(file_to_convert))
if people_convert_times[usr_id] > configdict["maxPersonConvert"]["music"]:
buffer.write("中途退出:转换点不足\n")
await linglun_convert.send(
UniMessage.text(
"今日音乐转换点数超限: {}/{}".format(
people_convert_times[usr_id],
configdict["maxPersonConvert"]["music"],
)
),
at_sender=True,
)
break
if not all_files:
nonebot.logger.warning(
"无可供转换的文件",
@ -922,8 +1014,9 @@ async def _(
await linglun_convert.finish(
UniMessage.text(
"转换结束,当前所用转换点数: {}/{}".format(
people_convert_times[usr_id], configdict["maxPersonConvert"]["music"]
"转换结束,当前剩余转换点数: {}|{}".format(
query_convert_points(usr_id, "music", 0, None)[1],
configdict["maxPersonConvert"]["music"],
)
),
at_sender=True,
@ -943,14 +1036,20 @@ reset_point = on_alconna(
default=0,
args=Args["value", float | int, 0],
),
Option(
"-i|--item",
default="music",
args=Args["item", str, "music"],
),
),
aliases={
"设置转换点数",
"set_convert_point",
"reset_cvt_pnt",
"setcp",
"set_convert_point",
"重设转换点数",
"增加转换点数",
"add_convert_point",
"increase_cvt_pnt",
"addcp",
"icrcp",
"icr_convert_point",
"重设并增加转换点数",
},
permission=SUPERUSER,
rule=nonebot.rule.to_me(),
@ -976,12 +1075,25 @@ async def _(
cd_value = (
result.options["value"].args["value"] if result.options["value"].args else 0
)
people_convert_times[to_change] = cd_value
v_item = (
result.options["item"].args["item"] if result.options["item"].args else "music"
)
if v_item not in configdict["maxPersonConvert"]:
await linglun_convert.finish(
UniMessage.text(
"错误!没有名为 {} 的项目。".format(v_item),
),
at_sender=True,
)
await linglun_convert.finish(
UniMessage.text(
"修改成功!当前 {} 剩余点数: {}/{}".format(
to_change, cd_value, configdict["maxPersonConvert"]["music"]
"重置转换状况并修改点数成功!当前{}{}点数为:{}|{}".format(
to_change,
v_item,
query_convert_points(to_change, v_item, -cd_value, None)[1],
configdict["maxPersonConvert"][v_item],
)
),
# at_sender=True,

View File

@ -0,0 +1,417 @@
import os
import sys
import json
import shutil
import random
from io import StringIO
from pathlib import Path
# import nonebot.rule
import nonebot
import soundfile
import Musicreater
import Musicreater.plugin
from .MusicPreview.main import PreviewMusic
from nonebot.adapters.onebot.v11.event import (
GroupMessageEvent,
PrivateMessageEvent,
GroupUploadNoticeEvent,
)
from nonebot_plugin_alconna import (
Alconna,
AlconnaQuery,
Args,
Image,
Option,
Query,
Text,
UniMessage,
on_alconna,
Voice,
Arparma,
Args,
store_true,
)
from src.utils.base.ly_typing import T_Bot, T_MessageEvent
from src.utils.message.message import MarkdownMessage
from .msctexec import (
people_convert_point,
query_convert_points,
filesaves,
configdict,
database_dir,
temporary_dir,
)
mspv_sync = on_alconna(
Alconna(
"音乐合成",
Option("-n|--file-name", default="all", args=Args["file-name", str, "all"]),
Option("-m|--mode", default=0, args=Args["mode", int, 0]),
Option("-o|--output-file", default=False, action=store_true),
Option("-emr|--enable-mismatch-error", default=False, action=store_true),
Option("-ps|--play-speed", default=1.0, args=Args["play-speed", float, 1.0]),
Option(
"-dftp|--default-tempo",
default=Musicreater.mido.midifiles.midifiles.DEFAULT_TEMPO,
args=Args[
"default-tempo", int, Musicreater.mido.midifiles.midifiles.DEFAULT_TEMPO
],
),
Option(
"-ptc|--pitched-note-table",
default="touch",
args=Args["pitched-note-table", str, "touch"],
),
Option(
"-pcs|--percussion-note-table",
default="touch",
args=Args["percussion-note-table", str, "touch"],
),
Option(
"-vpf|--volume-processing-function",
default="natural",
args=Args["volume-processing-function", str, "natural"],
),
),
aliases={
"midi合成",
"音乐预览",
"mscprv",
"music_preview",
"预览音乐效果",
"预览音乐",
},
# rule=nonebot.rule.to_me(),
# use_cmd_start=True,
# block=True,
# priority=13,
)
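A hedged, offline sketch of how this matcher parses its options, using Alconna's standalone `parse` outside of NoneBot; the option access pattern mirrors the handler below, and the file name and values are placeholders:

```python
from nonebot_plugin_alconna import Alconna, Args, Option

alc = Alconna(
    "音乐合成",
    Option("-n|--file-name", default="all", args=Args["file-name", str, "all"]),
    Option("-m|--mode", default=0, args=Args["mode", int, 0]),
)
res = alc.parse("音乐合成 -n demo.mid -m 4")
print(res.matched)                    # True
print(res.options["file-name"].args)  # {'file-name': 'demo.mid'}
print(res.options["mode"].args)       # {'mode': 4}
```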
@mspv_sync.handle()
async def _(
result: Arparma,
event: GroupMessageEvent | PrivateMessageEvent,
bot: T_Bot,
):
# print("E:\\Work2024\\test-midi\\" + name.result)
nonebot.logger.info(result.options)
usr_id = str(event.user_id)
if (qres := query_convert_points(usr_id, "music"))[0] is False:
await mspv_sync.finish(
UniMessage.text(
"转换点数不足,当前剩余:{}|{}".format(
qres[1],
configdict["maxPersonConvert"]["music"],
)
),
at_sender=True,
)
if usr_id not in filesaves.keys():
await mspv_sync.finish(
UniMessage.text("服务器内未存入你的任何文件请先使用上传midi文件吧")
)
_args: dict = {
"file-name": "all",
"output-file": False,
"mode": 0,
"enable-mismatch-error": False,
"play-speed": 1.0,
"default-tempo": 500000,
"pitched-note-table": "touch",
"percussion-note-table": "touch",
"volume-processing-function": "natural",
}
for arg in _args.keys():
_args[arg] = (
(
result.options[arg].args[arg]
if arg in result.options[arg].args.keys()
else result.options[arg].args
)
if (_vlu := result.options[arg].value) is None
else _vlu
)
# await musicreater_convert.finish(
# UniMessage.text(json.dumps(_args, indent=4, sort_keys=True, ensure_ascii=False))
# )
nonebot.logger.info(_args)
if _args["mode"] not in [0, 1, 2, 3, 4]:
await mspv_sync.finish(
UniMessage.text("模式 {} 不存在,请详阅文档。".format(_args["mode"]))
)
usr_data_path = database_dir / usr_id
(usr_temp_path := temporary_dir / usr_id).mkdir(exist_ok=True)
if (_ppnt := _args["pitched-note-table"].lower()) in [
"touch",
"classic",
"dislink",
]:
pitched_notechart = (
Musicreater.MM_DISLINK_PITCHED_INSTRUMENT_TABLE
if _ppnt == "dislink"
else (
Musicreater.MM_CLASSIC_PITCHED_INSTRUMENT_TABLE
if _ppnt == "classic"
else Musicreater.MM_TOUCH_PITCHED_INSTRUMENT_TABLE
)
)
elif (_ppnt := (usr_data_path / _args["pitched-note-table"])).exists():
pitched_notechart = Musicreater.MM_TOUCH_PITCHED_INSTRUMENT_TABLE.copy()
pitched_notechart.update(json.load(_ppnt.open("r")))
else:
await mspv_sync.finish(
UniMessage.text("乐器对照表 {} 不存在".format(_args["pitched-note-table"]))
)
return
if (_ppnt := _args["percussion-note-table"].lower()) in [
"touch",
"classic",
"dislink",
]:
percussion_notechart = (
Musicreater.MM_DISLINK_PERCUSSION_INSTRUMENT_TABLE
if _ppnt == "dislink"
else (
Musicreater.MM_CLASSIC_PERCUSSION_INSTRUMENT_TABLE
if _ppnt == "classic"
else Musicreater.MM_TOUCH_PERCUSSION_INSTRUMENT_TABLE
)
)
elif (_ppnt := (usr_data_path / _args["percussion-note-table"])).exists():
percussion_notechart = Musicreater.MM_TOUCH_PERCUSSION_INSTRUMENT_TABLE.copy()
percussion_notechart.update(json.load(_ppnt.open("r")))
else:
await mspv_sync.finish(
UniMessage.text(
"乐器对照表 {} 不存在".format(_args["percussion-note-table"])
)
)
return
if (_ppnt := _args["volume-processing-function"].lower()) in [
"natural",
"straight",
]:
volume_curve = (
Musicreater.straight_line
if _ppnt == "straight"
else Musicreater.natural_curve
)
else:
await mspv_sync.finish(
UniMessage.text(
"音量处理曲线 {} 不存在".format(_args["volume-processing-function"])
)
)
return
# 重定向标准输出
buffer = StringIO()
sys.stdout = buffer
sys.stderr = buffer
def go_chk_point():
res, pnt = query_convert_points(
usr_id,
"music",
random.random() % 1.6 + 1.3,
)
if res is False:
buffer.write("中途退出,转换点不足:{}\n".format(pnt))
return False
else:
return True
try:
all_files = []
for file_to_convert in (
filesaves[usr_id].keys()
if _args["file-name"].lower() == "all"
else _args["file-name"].split("&")
):
if file_to_convert.endswith(".mid") or file_to_convert.endswith(".midi"):
nonebot.logger.info("载入待合成文件:{}".format(file_to_convert))
# print("1")
# await mspv_sync.finish("处理中")
if (
((msct_obj := query_convert_points(usr_id, "music", 0)[0]) is None)
or (
isinstance(msct_obj, tuple)
and (
isinstance(msct_obj[0], Musicreater.MidiConvert)
and msct_obj[1]
!= (
not _args["enable-mismatch-error"],
_args["play-speed"],
_args["default-tempo"],
pitched_notechart,
percussion_notechart,
volume_curve,
)
)
)
) and go_chk_point():
msct_obj = Musicreater.MidiConvert.from_midi_file(
midi_file_path=usr_data_path / file_to_convert,
mismatch_error_ignorance=not _args["enable-mismatch-error"],
play_speed=_args["play-speed"],
default_tempo=_args["default-tempo"],
pitched_note_table=pitched_notechart,
percussion_note_table=percussion_notechart,
vol_processing_func=volume_curve,
)
query_convert_points(
usr_id,
"music",
0,
(
msct_obj,
(
not _args["enable-mismatch-error"],
_args["play-speed"],
_args["default-tempo"],
pitched_notechart,
percussion_notechart,
volume_curve,
),
),
)
elif isinstance(msct_obj, tuple) and (
isinstance(msct_obj[0], Musicreater.MidiConvert)
and msct_obj[1]
== (
not _args["enable-mismatch-error"],
_args["play-speed"],
_args["default-tempo"],
pitched_notechart,
percussion_notechart,
volume_curve,
)
):
nonebot.logger.info("载入已有缓存。")
msct_obj = msct_obj[0]
else:
buffer.write(
"点数不足或出现错误:\n{}".format(
_args,
)
)
break
all_files.append(file_to_convert)
music_temp = PreviewMusic(
msct_obj,
mode=1,
gvm=1,
default_channel_num=1,
overlay_channels=1,
out_sr=44100,
)
soundfile.write(
fp := (
usr_temp_path
/ "[MP0.2.0]{}-M{}.wav".format(
msct_obj.music_name, _args["mode"]
)
),
music_temp.to_wav(),
samplerate=music_temp.out_sr,
format="wav",
)
await mspv_sync.send(UniMessage.text("曲目 {}".format(file_to_convert)))
fp.open("ab").write(b"DM-MPvR0.2.0")
await mspv_sync.send(
UniMessage.voice(
path=fp,
name="[MP0.2.0]{}-M{}.wav".format(
msct_obj.music_name, _args["mode"]
),
)
)
elif file_to_convert != "totalSize":
nonebot.logger.warning(
"文件类型错误:{}".format(file_to_convert),
)
buffer.write("文件 {} 已跳过\n".format(file_to_convert))
if not all_files:
nonebot.logger.warning(
"无可供转换的文件",
)
await mspv_sync.finish(
UniMessage("我服了老弟,这机器人也不能给路易十六理发啊。")
)
except Exception as e:
nonebot.logger.error("合成存在错误:{}".format(e))
buffer.write("[ERROR] {}\n".format(e))
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
if _args["output-file"]:
Musicreater.plugin.archive.compress_zipfile(
usr_temp_path,
fp := str(temporary_dir / (fn := "mpr-wav-{}.zip".format(usr_id))),
)
if isinstance(event, GroupMessageEvent) or isinstance(
event, GroupUploadNoticeEvent
):
await bot.call_api(
"upload_group_file", group_id=event.group_id, name=fn, file=fp
)
else:
await bot.call_api(
"upload_private_file", user_id=event.user_id, name=fn, file=fp
)
os.remove(fp)
await MarkdownMessage.send_md(
"##{}\n\n```\n{}\n```".format(
MarkdownMessage.escape("日志信息:"),
buffer.getvalue().replace("\\", "/"),
),
bot,
event=event,
)
# nonebot.logger.info(buffer.getvalue())
await mspv_sync.send(UniMessage.text("成功转换:{}".format("".join(all_files))))
shutil.rmtree(usr_temp_path)
await mspv_sync.finish(
UniMessage.text(
"转换结束,当前剩余转换点数: {}|{}".format(
query_convert_points(usr_id, "music", 0, None)[1],
configdict["maxPersonConvert"]["music"],
)
),
at_sender=True,
)