use std::mem;
use crate::is_cjk;
use self::Separator::*;
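
/// Builds the `Tokenizer` used to split a text into `Token`s.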
pub trait TokenizerBuilder {
    fn build<'a>(&self, text: &'a str) -> Box<dyn Iterator<Item = Token<'a>> + 'a>;
}
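
/// The default `TokenizerBuilder`, producing the whitespace and
/// punctuation based `Tokenizer` below.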
pub struct DefaultBuilder;

impl DefaultBuilder {
    pub fn new() -> DefaultBuilder {
        DefaultBuilder
    }
}
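
/// A word extracted from a text, along with the index of the word
/// in the text and the index of its first character.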
#[derive(Debug, PartialEq, Eq)]
pub struct Token<'a> {
    pub word: &'a str,
    pub word_index: usize,
    pub char_index: usize,
}

impl TokenizerBuilder for DefaultBuilder {
    fn build<'a>(&self, text: &'a str) -> Box<dyn Iterator<Item = Token<'a>> + 'a> {
        Box::new(Tokenizer::new(text))
    }
}
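
/// An iterator over the `Token`s of a text.
///
/// `word_index` and `char_index` record how far the iterator has
/// advanced in words and in characters, while `inner` is the text
/// that remains to be tokenized.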
pub struct Tokenizer<'a> {
    word_index: usize,
    char_index: usize,
    inner: &'a str,
}

impl<'a> Tokenizer<'a> {
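    /// Creates a `Tokenizer` that skips the leading separators of
    /// `string`, so iteration starts on the first word.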
    pub fn new(string: &str) -> Tokenizer {
        let mut char_advance = 0;
        let mut index_advance = 0;
        for (n, (i, c)) in string.char_indices().enumerate() {
            char_advance = n;
            index_advance = i;
            if detect_separator(c).is_none() { break }
        }

        Tokenizer {
            word_index: 0,
            char_index: char_advance,
            inner: &string[index_advance..],
        }
    }
}
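
/// The kind of separator sitting between two words.
///
/// As `to_usize` below shows, a `Long` separator (punctuation) puts
/// eight times more `word_index` distance between two words than a
/// `Short` one (a space or a quote).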
#[derive(Debug, Clone, Copy)]
enum Separator {
    Short,
    Long,
}

impl Separator {
    fn add(self, add: Separator) -> Separator {
        match (self, add) {
            (_, Long) => Long,
            (Short, Short) => Short,
            (Long, Short) => Long,
        }
    }

    fn to_usize(self) -> usize {
        match self {
            Short => 1,
            Long => 8,
        }
    }
}
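
/// Returns the kind of separator `c` is, or `None` when `c`
/// belongs to a word.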
fn detect_separator(c: char) -> Option<Separator> {
    match c {
        '.' | ';' | ',' | '!' | '?' | '-' | '(' | ')' => Some(Long),
        ' ' | '\'' | '"' => Some(Short),
        _ => None,
    }
}

impl<'a> Iterator for Tokenizer<'a> {
    type Item = Token<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        let mut start_word = None;
        let mut distance = None;
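
        // walk the remaining text, cutting a token as soon as a
        // separator or a CJK character ends the word in progress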
        for (i, c) in self.inner.char_indices() {
            match detect_separator(c) {
                Some(sep) => {
                    if let Some(start_word) = start_word {
                        let (prefix, tail) = self.inner.split_at(i);
                        let (spaces, word) = prefix.split_at(start_word);

                        self.inner = tail;
                        self.char_index += spaces.chars().count();
                        self.word_index += distance.map(Separator::to_usize).unwrap_or(0);

                        let token = Token {
                            word,
                            word_index: self.word_index,
                            char_index: self.char_index,
                        };

                        self.char_index += word.chars().count();
                        return Some(token)
                    }

                    distance = Some(distance.map_or(sep, |s| s.add(sep)));
                },
                None => {
                    // a Chinese, Japanese or Korean character is a word
                    // on its own, see <http://unicode-table.com>
                    if is_cjk(c) {
                        match start_word {
                            Some(start_word) => {
                                let (prefix, tail) = self.inner.split_at(i);
                                let (spaces, word) = prefix.split_at(start_word);

                                self.inner = tail;
                                self.char_index += spaces.chars().count();
                                self.word_index += distance.map(Separator::to_usize).unwrap_or(0);

                                let token = Token {
                                    word,
                                    word_index: self.word_index,
                                    char_index: self.char_index,
                                };

                                self.word_index += 1;
                                self.char_index += word.chars().count();

                                return Some(token)
                            },
                            None => {
                                let (prefix, tail) = self.inner.split_at(i + c.len_utf8());
                                let (spaces, word) = prefix.split_at(i);

                                self.inner = tail;
                                self.char_index += spaces.chars().count();
                                self.word_index += distance.map(Separator::to_usize).unwrap_or(0);

                                let token = Token {
                                    word,
                                    word_index: self.word_index,
                                    char_index: self.char_index,
                                };

                                if tail.chars().next().and_then(detect_separator).is_none() {
                                    self.word_index += 1;
                                }
                                self.char_index += 1;

                                return Some(token)
                            },
                        }
                    }

                    if start_word.is_none() { start_word = Some(i) }
                },
            }
        }
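
        // the text ends without a trailing separator:
        // emit the word in progress, if any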
        if let Some(start_word) = start_word {
            let prefix = mem::replace(&mut self.inner, "");
            let (spaces, word) = prefix.split_at(start_word);

            let token = Token {
                word,
                word_index: self.word_index + distance.map(Separator::to_usize).unwrap_or(0),
                char_index: self.char_index + spaces.chars().count(),
            };

            return Some(token)
        }

        None
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn easy() {
        let mut tokenizer = Tokenizer::new("salut");

        assert_eq!(tokenizer.next(), Some(Token { word: "salut", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), None);

        let mut tokenizer = Tokenizer::new("yo ");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), None);
    }

    #[test]
    fn hard() {
        let mut tokenizer = Tokenizer::new(" .? yo lolo. aïe (ouch)");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 4 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lolo", word_index: 1, char_index: 7 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "aïe", word_index: 9, char_index: 13 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "ouch", word_index: 17, char_index: 18 }));
        assert_eq!(tokenizer.next(), None);

        let mut tokenizer = Tokenizer::new("yo ! lolo ? wtf - lol . aïe ,");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lolo", word_index: 8, char_index: 5 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "wtf", word_index: 16, char_index: 12 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lol", word_index: 24, char_index: 18 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "aïe", word_index: 32, char_index: 24 }));
        assert_eq!(tokenizer.next(), None);
    }

    #[test]
    fn hard_long_chars() {
        let mut tokenizer = Tokenizer::new(" .? yo 😂. aïe");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 4 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "😂", word_index: 1, char_index: 7 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "aïe", word_index: 9, char_index: 10 }));
        assert_eq!(tokenizer.next(), None);

        let mut tokenizer = Tokenizer::new("yo ! lolo ? 😱 - lol . 😣 ,");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lolo", word_index: 8, char_index: 5 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "😱", word_index: 16, char_index: 12 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lol", word_index: 24, char_index: 16 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "😣", word_index: 32, char_index: 22 }));
        assert_eq!(tokenizer.next(), None);
    }

    #[test]
    fn hard_kanjis() {
        let mut tokenizer = Tokenizer::new("\u{2ec4}lolilol\u{2ec7}");

        assert_eq!(tokenizer.next(), Some(Token { word: "\u{2ec4}", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lolilol", word_index: 1, char_index: 1 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "\u{2ec7}", word_index: 2, char_index: 8 }));
        assert_eq!(tokenizer.next(), None);

        let mut tokenizer = Tokenizer::new("\u{2ec4}\u{2ed3}\u{2ef2} lolilol - hello \u{2ec7}");

        assert_eq!(tokenizer.next(), Some(Token { word: "\u{2ec4}", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "\u{2ed3}", word_index: 1, char_index: 1 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "\u{2ef2}", word_index: 2, char_index: 2 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lolilol", word_index: 3, char_index: 4 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "hello", word_index: 11, char_index: 14 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "\u{2ec7}", word_index: 12, char_index: 23 }));
        assert_eq!(tokenizer.next(), None);
    }
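
    // Not part of the original suite: a minimal extra check that a
    // lone Long separator between two words advances `word_index`
    // by eight, as `Separator::to_usize` promises.
    #[test]
    fn long_separator_distance() {
        let mut tokenizer = Tokenizer::new("yo.lolo");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lolo", word_index: 8, char_index: 3 }));
        assert_eq!(tokenizer.next(), None);
    }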
}