diff --git a/commands/natural_language.py b/commands/natural_language.py
index cb4a4800..24ff7cfc 100644
--- a/commands/natural_language.py
+++ b/commands/natural_language.py
@@ -17,8 +17,8 @@ __registry__ = cr = CommandRegistry(init_func=_init)
 
 @cr.register('process')
 @cr.restrict(full_command_only=True)
-def process(sentence, ctx_msg, internal=False):
-    sentence = sentence.strip()
+def process(args_text, ctx_msg, internal=False):
+    sentence = args_text.strip()
     potential_commands = parse_potential_commands(sentence)
     potential_commands = sorted(filter(lambda x: x[0] > 60, potential_commands), key=lambda x: x[0], reverse=True)
     if len(potential_commands) > 0:
@@ -26,7 +26,13 @@ def process(sentence, ctx_msg, internal=False):
         ctx_msg['parsed_data'] = most_possible_cmd[3]
         cmdhub.call(most_possible_cmd[1], most_possible_cmd[2], ctx_msg)
     else:
-        core.echo('我暂时不理解你在说什么哦~', ctx_msg, internal)
+        if ctx_msg.get('from_voice'):
+            core.echo('暂时无法理解你的意思,下面将发送图灵机器人的回复……', ctx_msg, internal)
+            core.tuling123(sentence, ctx_msg, internal)
+        else:
+            core.echo('暂时无法理解你的意思。\n'
+                      '由于自然语言识别还非常不完善,建议使用命令来精确控制我。\n'
+                      '如需帮助请发送「使用帮助」。', ctx_msg, internal)
 
 
 def _load_processors():
diff --git a/nl_processor.py b/nl_processor.py
index d20073bf..72a26daa 100644
--- a/nl_processor.py
+++ b/nl_processor.py
@@ -26,7 +26,9 @@ def parse_potential_commands(sentence):
         for regex in processor[0]:
             for word, flag in segmentation:
                 if re.match(regex, word):
-                    potential_commands.append(processor[1](sentence, segmentation))
+                    result = processor[1](sentence, segmentation)
+                    if result:
+                        potential_commands.append(result)
                     processed = True
                     # A word matched, skip the rest of words
                     break
@@ -34,5 +36,7 @@ def parse_potential_commands(sentence):
                 # Current processor has processed, skip the rest of keywords
                 break
     for func in _processors_without_keyword:
-        potential_commands.append(func(sentence, segmentation))
+        result = func(sentence, segmentation)
+        if result:
+            potential_commands.append(result)
     return potential_commands
diff --git a/nl_processors/translate.py b/nl_processors/translate.py
index b3888ded..d6203729 100644
--- a/nl_processors/translate.py
+++ b/nl_processors/translate.py
@@ -3,14 +3,14 @@ import re
 from nl_processor import as_processor
 
 _query_lang_matcher = [
-    re.compile('[把将]?[ ,.,。]?(.*?)[ ,.,。]?(?:这[个]?(?:词[组]?|句(?:子|话)?|短语))翻译[成为到](\w+?[文语])(?![ ::,,.。])'),
-    re.compile('(\w+?)[ ,.,。]?(?:这[个]?(?:词[组]?|句(?:子|话)?|短语))?[的用](\w+?[文语])')
+    re.compile('[把将]?[\s,.,。]?(.*?)[\s,.,。]?(?:这[个]?(?:词[组]?|句(?:子|话)?|短语))?翻译[成为到](\w+?[文语])(?![\s::,,.。])'),
+    re.compile('(\w+?)[\s,.,。]?(?:这[个]?(?:词[组]?|句(?:子|话)?|短语))?[的用](\w+?[文语])')
 ]
 
 _lang_query_matcher = [
-    re.compile('[把将]?(?:(?:这[个]?|[下后][面]?)(?:词[组]?|句(?:子|话)?|短语))翻译[成为到](\w+?[文语])[ ::,,.。](.*)'),
+    re.compile('.*[把将]?(?:(?:这[个]?|[下后][面]?)(?:词[组]?|句(?:子|话)?|短语))?翻译[成为到]\s*(\w+?[文语])[\s::,,](.*)'),
     re.compile('[用]?(\w+[文语])\w+?(?:说|讲|表达|表示)(.*)(?:这[个]?(?:词[组]?|句(?:子|话)?|短语))'),
-    re.compile('[用]?(\w+[文语])\w+?(?:说|讲|表达|表示)(.*)')
+    re.compile('[用]?(\w+[文语])\w+?(?:说|讲|表达|表示)(.*)'),
 ]
 
 
@@ -27,5 +27,6 @@ def _processor(sentence, segmentation):
             lang, query = m.group(2), m.group(1)
             break
     if lang and query:
+        print('翻译: 目标语言:', lang, ', 待翻译文本:', query)
         return 90, 'translate.translate_to', ' '.join((lang.strip(), query.strip(' ,,'))), None
     return None