#![cfg_attr(feature = "nightly", feature(test))]
pub mod automaton;
pub mod database;
pub mod data;
pub mod rank;
pub mod tokenizer;
mod attribute;
mod word_area;
mod common_words;
pub use rocksdb;
pub use self::tokenizer::Tokenizer;
pub use self::common_words::CommonWords;
pub use self::attribute::{Attribute, AttributeError};
pub use self::word_area::{WordArea, WordAreaError};

/// Represents an internally generated unique document identifier.
///
/// It is used to tell the database which document you want to deserialize.
/// Helpful for custom ranking.
#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub struct DocumentId(u64);

/// This structure represents the position of a word
/// in a document and its attributes.
///
/// This is stored in the map, generated at index time,
/// extracted and interpreted at search time.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(C)]
pub struct DocIndex {
    /// The document identifier where the word was found.
    pub document_id: DocumentId,

    /// The attribute in the document where the word was found
    /// along with the index in it.
    pub attribute: Attribute,

    /// The position in bytes where the word was found
    /// along with the length of it.
    ///
    /// It gives the original word area in the indexed text
    /// without needing to run the tokenizer again.
    pub word_area: WordArea,
}

/// This structure represents a matching word with information
/// about the location of the word in the document.
///
/// The order of the fields is important because it defines
/// the way these structures are ordered relative to each other.
///
/// The word itself is not important.
// TODO do data oriented programming ? very arrays ?
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Match {
    /// The word index in the query sentence.
    /// Same as the `attribute_index` but for the query words.
    ///
    /// Used to retrieve the automaton that matches this word.
    pub query_index: u32,

    /// The distance between the word and the query word
    /// (i.e. the Levenshtein distance).
    pub distance: u8,

    /// The attribute in the document where the word was found
    /// along with the index in it.
    pub attribute: Attribute,

    /// Whether the matched word is an exact match or a prefix.
    pub is_exact: bool,

    /// The position in bytes where the word was found
    /// along with the length of it.
    ///
    /// It gives the original word area in the indexed text
    /// without needing to run the tokenizer again.
    pub word_area: WordArea,
}
impl Match {
    /// Constructs a `Match` with every field set to its lowest value.
    pub fn zero() -> Self {
        Match {
            query_index: 0,
            distance: 0,
            attribute: Attribute::new_faillible(0, 0),
            is_exact: false,
            word_area: WordArea::new_faillible(0, 0),
        }
    }

    /// Constructs a `Match` with every field set to its highest value.
    pub fn max() -> Self {
        Match {
            query_index: u32::max_value(),
            distance: u8::max_value(),
            attribute: Attribute::max_value(),
            is_exact: true,
            word_area: WordArea::max_value(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::mem;

    #[test]
    fn docindex_mem_size() {
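        // `DocumentId` wraps a `u64` (8 bytes); with `#[repr(C)]`, `Attribute`
        // and `WordArea` are expected to fill the remaining 8 bytes.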
        assert_eq!(mem::size_of::<DocIndex>(), 16);
    }
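
    // The derived `Ord` compares `Match` fields in declaration order, so
    // `query_index` dominates: a match for an earlier query word sorts before
    // one for a later word, whatever the remaining fields contain, and
    // `Match::zero()`/`Match::max()` bound every possible match.
    #[test]
    fn match_ordering_follows_field_order() {
        let early = Match { query_index: 0, ..Match::max() };
        let late = Match { query_index: 1, ..Match::zero() };

        assert!(early < late);
        assert!(Match::zero() <= early && late <= Match::max());
    }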
}