use hashbrown::HashMap;
use std::convert::TryFrom;
use std::ops::Range;
use std::rc::Rc;
use std::time::{Duration, Instant};
use std::{cmp, mem};

use fst::{IntoStreamer, Streamer};
use log::debug;
use sdset::SetBuf;
use slice_group_by::{GroupBy, GroupByMut};

use crate::database::MainT;
use crate::automaton::{Automaton, AutomatonGroup, AutomatonProducer, QueryEnhancer};
use crate::distinct_map::{BufferedDistinctMap, DistinctMap};
use crate::levenshtein::prefix_damerau_levenshtein;
use crate::raw_document::{raw_documents_from, RawDocument};
use crate::{criterion::Criteria, Document, DocumentId, Highlight, TmpMatch, AttrCount};
use crate::{reordered_attrs::ReorderedAttrs, store, MResult};

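/// Builds and runs a search query against an index: it collects the ranking
/// criteria, the optional filter, the distinct rule, the fetch timeout and the
/// set of searchable attributes before delegating to `raw_query` or
/// `raw_query_with_distinct`.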
pub struct QueryBuilder<'c, 'f, 'd> {
    criteria: Criteria<'c>,
    searchable_attrs: Option<ReorderedAttrs>,
    filter: Option<Box<dyn Fn(DocumentId) -> bool + 'f>>,
    distinct: Option<(Box<dyn Fn(DocumentId) -> Option<u64> + 'd>, usize)>,
    timeout: Option<Duration>,
    main_store: store::Main,
    postings_lists_store: store::PostingsLists,
    documents_fields_counts_store: store::DocumentsFieldsCounts,
    synonyms_store: store::Synonyms,
}

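/// Rewrites the word indices of matches coming from multi-word synonym
/// replacements so that padded matches keep a coherent ordering, then returns
/// the matches as a sorted set.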
fn multiword_rewrite_matches(
    mut matches: Vec<(DocumentId, TmpMatch)>,
    query_enhancer: &QueryEnhancer,
) -> SetBuf<(DocumentId, TmpMatch)> {
    if true {
        let before_sort = Instant::now();
        matches.sort_unstable();
        let matches = SetBuf::new_unchecked(matches);
        debug!("sorting dirty matches took {:.02?}", before_sort.elapsed());
        return matches;
    }

    let mut padded_matches = Vec::with_capacity(matches.len());

    // we sort the matches by word index to make them rewritable
    matches.sort_unstable_by_key(|(id, match_)| (*id, match_.attribute, match_.word_index));

    // for each attribute of each document
    for same_document_attribute in matches.linear_group_by_key(|(id, m)| (*id, m.attribute)) {
        // padding will only be applied
        // to word indices in the same attribute
        let mut padding = 0;
        let mut iter = same_document_attribute.linear_group_by_key(|(_, m)| m.word_index);

        // for each match at the same position
        // in this document attribute
        while let Some(same_word_index) = iter.next() {
            // find the biggest padding
            let mut biggest = 0;
            for (id, match_) in same_word_index {
                let mut replacement = query_enhancer.replacement(match_.query_index);
                let replacement_len = replacement.len();
                let nexts = iter.remainder().linear_group_by_key(|(_, m)| m.word_index);

                if let Some(query_index) = replacement.next() {
                    let word_index = match_.word_index + padding as u16;
                    let match_ = TmpMatch {
                        query_index,
                        word_index,
                        ..*match_
                    };
                    padded_matches.push((*id, match_));
                }

                let mut found = false;

                // look ahead and if there already is a match
                // corresponding to this padding word, abort the padding
                'padding: for (x, next_group) in nexts.enumerate() {
                    for (i, query_index) in replacement.clone().enumerate().skip(x) {
                        let word_index = match_.word_index + padding as u16 + (i + 1) as u16;
                        let padmatch = TmpMatch {
                            query_index,
                            word_index,
                            ..*match_
                        };

                        for (_, nmatch_) in next_group {
                            let mut rep = query_enhancer.replacement(nmatch_.query_index);
                            let query_index = rep.next().unwrap();
                            if query_index == padmatch.query_index {
                                if !found {
                                    // if we find a corresponding padding for the
                                    // first time we must push preceding paddings
                                    for (i, query_index) in replacement.clone().enumerate().take(i)
                                    {
                                        let word_index =
                                            match_.word_index + padding as u16 + (i + 1) as u16;
                                        let match_ = TmpMatch {
                                            query_index,
                                            word_index,
                                            ..*match_
                                        };
                                        padded_matches.push((*id, match_));
                                        biggest = biggest.max(i + 1);
                                    }
                                }

                                padded_matches.push((*id, padmatch));
                                found = true;
                                continue 'padding;
                            }
                        }
                    }

                    // if we do not find a corresponding padding in the
                    // next groups, stop here and pad what was found
                    break;
                }

                if !found {
                    // if no padding was found in the following matches
                    // we must insert the entire padding
                    for (i, query_index) in replacement.enumerate() {
                        let word_index = match_.word_index + padding as u16 + (i + 1) as u16;
                        let match_ = TmpMatch {
                            query_index,
                            word_index,
                            ..*match_
                        };
                        padded_matches.push((*id, match_));
                    }

                    biggest = biggest.max(replacement_len - 1);
                }
            }

            padding += biggest;
        }
    }

    for document_matches in padded_matches.linear_group_by_key_mut(|(id, _)| *id) {
        document_matches.sort_unstable();
    }

    // With this check we can see that the loop above takes something
    // like 43% of the search time even when no rewrite is needed.
    // assert_eq!(before_matches, padded_matches);

    SetBuf::new_unchecked(padded_matches)
}

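/// Walks every automaton group, streams the words FST against each DFA and
/// turns the matching postings lists into `RawDocument`s, together with the
/// highlight ranges covered by each match.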
fn fetch_raw_documents(
    reader: &heed::RoTxn<MainT>,
    automatons_groups: &[AutomatonGroup],
    query_enhancer: &QueryEnhancer,
    searchables: Option<&ReorderedAttrs>,
    main_store: store::Main,
    postings_lists_store: store::PostingsLists,
) -> MResult<Vec<RawDocument>> {
    let mut matches = Vec::new();
    let mut highlights = Vec::new();

    let before_automatons_groups_loop = Instant::now();
    for group in automatons_groups {
        let AutomatonGroup { is_phrase_query, automatons } = group;
        let phrase_query_len = automatons.len();

        let mut tmp_matches = Vec::new();
        for (id, automaton) in automatons.into_iter().enumerate() {
            let Automaton { index, is_exact, query_len, query, .. } = automaton;
            let dfa = automaton.dfa();

            let words = match main_store.words_fst(reader)? {
                Some(words) => words,
                None => return Ok(Vec::new()),
            };

            let mut stream = words.search(&dfa).into_stream();
            while let Some(input) = stream.next() {
                let distance = dfa.eval(input).to_u8();
                let is_exact = *is_exact && distance == 0 && input.len() == *query_len;

                let covered_area = if *query_len > input.len() {
                    input.len()
                } else {
                    prefix_damerau_levenshtein(query.as_bytes(), input).1
                };

                let doc_indexes = match postings_lists_store.postings_list(reader, input)? {
                    Some(doc_indexes) => doc_indexes,
                    None => continue,
                };

                tmp_matches.reserve(doc_indexes.len());

                for di in doc_indexes.as_ref() {
                    let attribute = searchables.map_or(Some(di.attribute), |r| r.get(di.attribute));
                    if let Some(attribute) = attribute {
                        let match_ = TmpMatch {
                            query_index: *index as u32,
                            distance,
                            attribute,
                            word_index: di.word_index,
                            is_exact,
                        };

                        let covered_area = u16::try_from(covered_area).unwrap_or(u16::max_value());
                        let covered_area = cmp::min(covered_area, di.char_length);

                        let highlight = Highlight {
                            attribute: di.attribute,
                            char_index: di.char_index,
                            char_length: covered_area,
                        };

                        tmp_matches.push((di.document_id, id, match_, highlight));
                    }
                }
            }
        }

        if *is_phrase_query {
            tmp_matches.sort_unstable_by_key(|(id, _, m, _)| (*id, m.attribute, m.word_index));
            for group in tmp_matches.linear_group_by_key(|(id, _, m, _)| (*id, m.attribute)) {
                for window in group.windows(2) {
                    let (ida, ia, ma, ha) = window[0];
                    let (idb, ib, mb, hb) = window[1];

                    debug_assert_eq!(ida, idb);

                    // if the matches must follow each other and actually do
                    if ia + 1 == ib && ma.word_index + 1 == mb.word_index {
                        // TODO we must make it work for phrase queries longer than 2
                        // if the second match is the last phrase query word
                        if ib + 1 == phrase_query_len {
                            // insert first match
                            matches.push((ida, ma));
                            highlights.push((ida, ha));

                            // insert second match
                            matches.push((idb, mb));
                            highlights.push((idb, hb));
                        }
                    }
                }
            }
        } else {
            let before_rerewrite = Instant::now();
            for (id, _, match_, highlight) in tmp_matches {
                matches.push((id, match_));
                highlights.push((id, highlight));
            }
            debug!("rerewrite took {:.02?}", before_rerewrite.elapsed());
        }
    }
    debug!("automatons_groups_loop took {:.02?}", before_automatons_groups_loop.elapsed());

    {
        let mut cloned = matches.clone();
        let before_sort_test = Instant::now();
        cloned.sort_unstable_by_key(|(id, m)| (*id, m.query_index, m.distance));
        debug!("sorting test took {:.02?}", before_sort_test.elapsed());
    }

    let before_multiword_rewrite_matches = Instant::now();
    debug!("number of matches before rewrite {}", matches.len());
    debug!("{:?}", query_enhancer);
    let matches = multiword_rewrite_matches(matches, &query_enhancer);
    debug!("number of matches after rewrite {}", matches.len());
    debug!("multiword_rewrite_matches took {:.02?}", before_multiword_rewrite_matches.elapsed());

    let before_highlight_sorting = Instant::now();
    let highlights = {
        highlights.sort_unstable_by_key(|(id, _)| *id);
        SetBuf::new_unchecked(highlights)
    };
    debug!("highlight_sorting {:.02?}", before_highlight_sorting.elapsed());

    let before_raw_documents = Instant::now();
    let raw_documents = raw_documents_from(matches, highlights);
    debug!("raw_documents took {:.02?}", before_raw_documents.elapsed());
    debug!("documents to worry about: {}", raw_documents.len());

    Ok(raw_documents)
}

impl<'c, 'f, 'd> QueryBuilder<'c, 'f, 'd> {
    pub fn new(
        main: store::Main,
        postings_lists: store::PostingsLists,
        documents_fields_counts: store::DocumentsFieldsCounts,
        synonyms: store::Synonyms,
    ) -> QueryBuilder<'c, 'f, 'd> {
        QueryBuilder::with_criteria(
            main,
            postings_lists,
            documents_fields_counts,
            synonyms,
            Criteria::default(),
        )
    }

    pub fn with_criteria(
        main: store::Main,
        postings_lists: store::PostingsLists,
        documents_fields_counts: store::DocumentsFieldsCounts,
        synonyms: store::Synonyms,
        criteria: Criteria<'c>,
    ) -> QueryBuilder<'c, 'f, 'd> {
        QueryBuilder {
            criteria,
            searchable_attrs: None,
            filter: None,
            distinct: None,
            timeout: None,
            main_store: main,
            postings_lists_store: postings_lists,
            documents_fields_counts_store: documents_fields_counts,
            synonyms_store: synonyms,
        }
    }

    pub fn with_filter<F>(&mut self, function: F)
    where
        F: Fn(DocumentId) -> bool + 'f,
    {
        self.filter = Some(Box::new(function))
    }

    pub fn with_fetch_timeout(&mut self, timeout: Duration) {
        self.timeout = Some(timeout)
    }

    pub fn with_distinct<F, K>(&mut self, function: F, size: usize)
    where
        F: Fn(DocumentId) -> Option<u64> + 'd,
    {
        self.distinct = Some((Box::new(function), size))
    }

    pub fn add_searchable_attribute(&mut self, attribute: u16) {
        let reorders = self
            .searchable_attrs
            .get_or_insert_with(ReorderedAttrs::new);
        reorders.insert_attribute(attribute);
    }

    pub fn query(
        self,
        reader: &heed::RoTxn<MainT>,
        query: &str,
        range: Range<usize>,
    ) -> MResult<Vec<Document>> {
        match self.distinct {
            Some((distinct, distinct_size)) => raw_query_with_distinct(
                reader,
                query,
                range,
                self.filter,
                distinct,
                distinct_size,
                self.timeout,
                self.criteria,
                self.searchable_attrs,
                self.main_store,
                self.postings_lists_store,
                self.documents_fields_counts_store,
                self.synonyms_store,
            ),
            None => raw_query(
                reader,
                query,
                range,
                self.filter,
                self.timeout,
                self.criteria,
                self.searchable_attrs,
                self.main_store,
                self.postings_lists_store,
                self.documents_fields_counts_store,
                self.synonyms_store,
            ),
        }
    }
}

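/// Runs a search without a distinct rule. When a filter is provided the work
/// is delegated to `raw_query_with_distinct` with a no-op distinct function.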
fn raw_query<'c, FI>(
    reader: &heed::RoTxn<MainT>,
    query: &str,
    range: Range<usize>,
    filter: Option<FI>,
    timeout: Option<Duration>,
    criteria: Criteria<'c>,
    searchable_attrs: Option<ReorderedAttrs>,
    main_store: store::Main,
    postings_lists_store: store::PostingsLists,
    documents_fields_counts_store: store::DocumentsFieldsCounts,
    synonyms_store: store::Synonyms,
) -> MResult<Vec<Document>>
where
    FI: Fn(DocumentId) -> bool,
{
    // We delegate the filter work to the distinct query builder,
    // specifying a distinct rule that has no effect.
    if filter.is_some() {
        let distinct = |_| None;
        let distinct_size = 1;
        return raw_query_with_distinct(
            reader,
            query,
            range,
            filter,
            distinct,
            distinct_size,
            timeout,
            criteria,
            searchable_attrs,
            main_store,
            postings_lists_store,
            documents_fields_counts_store,
            synonyms_store,
        );
    }

    let start_processing = Instant::now();
    let mut raw_documents_processed = Vec::with_capacity(range.len());

    let (automaton_producer, query_enhancer) = AutomatonProducer::new(
        reader,
        query,
        main_store,
        postings_lists_store,
        synonyms_store,
    )?;

    let automaton_producer = automaton_producer.into_iter();
    let mut automatons = Vec::new();

    // aggregate the automaton groups, group by group, over time
    for auts in automaton_producer {
        automatons.push(auts);

        for (i, group) in automatons.iter().enumerate() {
            debug!("group {} automatons {:?}", i, group.automatons);
        }

        let before_fetch_raw_documents = Instant::now();
        // we must retrieve the documents associated
        // with the current automatons
        let mut raw_documents = fetch_raw_documents(
            reader,
            &automatons,
            &query_enhancer,
            searchable_attrs.as_ref(),
            main_store,
            postings_lists_store,
        )?;
        debug!("fetch_raw_documents took {:.02?}", before_fetch_raw_documents.elapsed());

        // stop processing when time is running out
        if let Some(timeout) = timeout {
            if !raw_documents_processed.is_empty() && start_processing.elapsed() > timeout {
                break;
            }
        }

        let mut groups = vec![raw_documents.as_mut_slice()];

        'criteria: for criterion in criteria.as_ref() {
            let tmp_groups = mem::replace(&mut groups, Vec::new());
            let mut documents_seen = 0;

            for group in tmp_groups {
                // if this group does not overlap with the requested range,
                // push it without sorting or splitting it
                if documents_seen + group.len() < range.start {
                    documents_seen += group.len();
                    groups.push(group);
                    continue;
                }

                // we must pull the fields counts of these documents
                // TODO it would be great to have a "dependency" thing for each criterion
                // and make it so that we can be lazy on pulling/computing some data.
                if criterion.name() == "Exact" {
                    for document in group.iter_mut() {
                        let mut fields_counts = Vec::new();
                        for result in documents_fields_counts_store.document_fields_counts(reader, document.id)? {
                            let (attr, count) = result?;
                            fields_counts.push(AttrCount { attr: attr.0, count });
                        }
                        document.fields_counts = Some(SetBuf::new(fields_counts).unwrap());
                    }
                }

                group.sort_unstable_by(|a, b| criterion.evaluate(a, b));

                for group in group.binary_group_by_mut(|a, b| criterion.eq(a, b)) {
                    debug!("criterion {} produced a group of size {}", criterion.name(), group.len());

                    documents_seen += group.len();
                    groups.push(group);

                    // we have sorted enough documents if the last sorted document is after
                    // the end of the requested range; we can continue to the next criterion
                    if documents_seen >= range.end {
                        continue 'criteria;
                    }
                }
            }
        }

        // once we classified the documents related to the current
        // automatons we save that as the next valid result
        let iter = raw_documents
            .into_iter()
            .skip(range.start)
            .take(range.len());
        raw_documents_processed.clear();
        raw_documents_processed.extend(iter);

        // stop processing when time is running out
        if let Some(timeout) = timeout {
            if start_processing.elapsed() > timeout {
                break;
            }
        }
    }

    // make real documents now that we know
    // those must be returned
    let documents = raw_documents_processed
        .into_iter()
        .map(Document::from_raw)
        .collect();

    Ok(documents)
}

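/// Runs a search while deduplicating documents with the provided distinct
/// function, applying the optional filter and honouring the fetch timeout.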
fn raw_query_with_distinct<'c, FI, FD>(
    reader: &heed::RoTxn<MainT>,
    query: &str,
    range: Range<usize>,
    filter: Option<FI>,
    distinct: FD,
    distinct_size: usize,
    timeout: Option<Duration>,
    criteria: Criteria<'c>,
    searchable_attrs: Option<ReorderedAttrs>,
    main_store: store::Main,
    postings_lists_store: store::PostingsLists,
    documents_fields_counts_store: store::DocumentsFieldsCounts,
    synonyms_store: store::Synonyms,
) -> MResult<Vec<Document>>
where
    FI: Fn(DocumentId) -> bool,
    FD: Fn(DocumentId) -> Option<u64>,
{
    let start_processing = Instant::now();
    let mut raw_documents_processed = Vec::new();

    let (automaton_producer, query_enhancer) = AutomatonProducer::new(
        reader,
        query,
        main_store,
        postings_lists_store,
        synonyms_store,
    )?;

    let automaton_producer = automaton_producer.into_iter();
    let mut automatons = Vec::new();

    // aggregate the automaton groups, group by group, over time
    for auts in automaton_producer {
        automatons.push(auts);

        // we must retrieve the documents associated
        // with the current automatons
        let mut raw_documents = fetch_raw_documents(
            reader,
            &automatons,
            &query_enhancer,
            searchable_attrs.as_ref(),
            main_store,
            postings_lists_store,
        )?;

        // stop processing when time is running out
        if let Some(timeout) = timeout {
            if !raw_documents_processed.is_empty() && start_processing.elapsed() > timeout {
                break;
            }
        }

        let mut groups = vec![raw_documents.as_mut_slice()];
        let mut key_cache = HashMap::new();

        let mut filter_map = HashMap::new();
        // these two variables inform on the current distinct map and
        // on the raw offset of the start of the group where the
        // range.start bound is located according to the distinct function
        let mut distinct_map = DistinctMap::new(distinct_size);
        let mut distinct_raw_offset = 0;

        'criteria: for criterion in criteria.as_ref() {
            let tmp_groups = mem::replace(&mut groups, Vec::new());
            let mut buf_distinct = BufferedDistinctMap::new(&mut distinct_map);
            let mut documents_seen = 0;

            for group in tmp_groups {
                // if this group does not overlap with the requested range,
                // push it without sorting or splitting it
                if documents_seen + group.len() < distinct_raw_offset {
                    documents_seen += group.len();
                    groups.push(group);
                    continue;
                }

                group.sort_unstable_by(|a, b| criterion.evaluate(a, b));

                for group in group.binary_group_by_mut(|a, b| criterion.eq(a, b)) {
                    // we must compute the real distinct length of this sub-group
                    for document in group.iter() {
                        let filter_accepted = match &filter {
                            Some(filter) => {
                                let entry = filter_map.entry(document.id);
                                *entry.or_insert_with(|| (filter)(document.id))
                            }
                            None => true,
                        };

                        if filter_accepted {
                            let entry = key_cache.entry(document.id);
                            let key = entry.or_insert_with(|| (distinct)(document.id).map(Rc::new));

                            match key.clone() {
                                Some(key) => buf_distinct.register(key),
                                None => buf_distinct.register_without_key(),
                            };
                        }

                        // the requested range end is reached: stop computing distinct
                        if buf_distinct.len() >= range.end {
                            break;
                        }
                    }

                    documents_seen += group.len();
                    groups.push(group);

                    // if this sub-group does not overlap with the requested range
                    // we must update the distinct map and its start index
                    if buf_distinct.len() < range.start {
                        buf_distinct.transfert_to_internal();
                        distinct_raw_offset = documents_seen;
                    }

                    // we have sorted enough documents if the last sorted document is after
                    // the end of the requested range; we can continue to the next criterion
                    if buf_distinct.len() >= range.end {
                        continue 'criteria;
                    }
                }
            }
        }

        // once we classified the documents related to the current
        // automatons we save that as the next valid result
        let mut seen = BufferedDistinctMap::new(&mut distinct_map);
        raw_documents_processed.clear();

        for document in raw_documents.into_iter().skip(distinct_raw_offset) {
            let filter_accepted = match &filter {
                Some(_) => filter_map.remove(&document.id).unwrap(),
                None => true,
            };

            if filter_accepted {
                let key = key_cache.remove(&document.id).unwrap();
                let distinct_accepted = match key {
                    Some(key) => seen.register(key),
                    None => seen.register_without_key(),
                };

                if distinct_accepted && seen.len() > range.start {
                    raw_documents_processed.push(document);
                    if raw_documents_processed.len() == range.len() {
                        break;
                    }
                }
            }
        }

        // stop processing when time is running out
        if let Some(timeout) = timeout {
            if start_processing.elapsed() > timeout {
                break;
            }
        }
    }

    // make real documents now that we know
    // those must be returned
    let documents = raw_documents_processed
        .into_iter()
        .map(Document::from_raw)
        .collect();

    Ok(documents)
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::collections::{BTreeSet, HashMap};
    use std::iter::FromIterator;

    use fst::{IntoStreamer, Set};
    use meilisearch_schema::SchemaAttr;
    use sdset::SetBuf;
    use tempfile::TempDir;

    use crate::automaton::normalize_str;
    use crate::database::Database;
    use crate::store::Index;
    use crate::DocIndex;

    fn set_from_stream<'f, I, S>(stream: I) -> Set
    where
        I: for<'a> fst::IntoStreamer<'a, Into = S, Item = &'a [u8]>,
        S: 'f + for<'a> fst::Streamer<'a, Item = &'a [u8]>,
    {
        let mut builder = fst::SetBuilder::memory();
        builder.extend_stream(stream).unwrap();
        builder.into_inner().and_then(Set::from_bytes).unwrap()
    }

    fn insert_key(set: &Set, key: &[u8]) -> Set {
        let unique_key = {
            let mut builder = fst::SetBuilder::memory();
            builder.insert(key).unwrap();
            builder.into_inner().and_then(Set::from_bytes).unwrap()
        };

        let union_ = set.op().add(unique_key.into_stream()).r#union();

        set_from_stream(union_)
    }

    fn sdset_into_fstset(set: &sdset::Set<&str>) -> Set {
        let mut builder = fst::SetBuilder::memory();
        let set = SetBuf::from_dirty(set.into_iter().map(|s| normalize_str(s)).collect());
        builder.extend_iter(set.into_iter()).unwrap();
        builder.into_inner().and_then(Set::from_bytes).unwrap()
    }

    const fn doc_index(document_id: u64, word_index: u16) -> DocIndex {
        DocIndex {
            document_id: DocumentId(document_id),
            attribute: 0,
            word_index,
            char_index: 0,
            char_length: 0,
        }
    }

    const fn doc_char_index(document_id: u64, word_index: u16, char_index: u16) -> DocIndex {
        DocIndex {
            document_id: DocumentId(document_id),
            attribute: 0,
            word_index,
            char_index,
            char_length: 0,
        }
    }

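    /// Test fixture wrapping a temporary `Database` and its default `Index`,
    /// built from `(word, doc indexes)` pairs.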
    pub struct TempDatabase {
        database: Database,
        index: Index,
        _tempdir: TempDir,
    }

    impl TempDatabase {
        pub fn query_builder(&self) -> QueryBuilder {
            self.index.query_builder()
        }

        pub fn add_synonym(&mut self, word: &str, new: SetBuf<&str>) {
            let db = &self.database;
            let mut writer = db.main_write_txn().unwrap();

            let word = word.to_lowercase();

            let alternatives = match self
                .index
                .synonyms
                .synonyms(&writer, word.as_bytes())
                .unwrap()
            {
                Some(alternatives) => alternatives,
                None => fst::Set::default(),
            };

            let new = sdset_into_fstset(&new);
            let new_alternatives =
                set_from_stream(alternatives.op().add(new.into_stream()).r#union());
            self.index
                .synonyms
                .put_synonyms(&mut writer, word.as_bytes(), &new_alternatives)
                .unwrap();

            let synonyms = match self.index.main.synonyms_fst(&writer).unwrap() {
                Some(synonyms) => synonyms,
                None => fst::Set::default(),
            };

            let synonyms_fst = insert_key(&synonyms, word.as_bytes());
            self.index
                .main
                .put_synonyms_fst(&mut writer, &synonyms_fst)
                .unwrap();

            writer.commit().unwrap();
        }
    }

    impl<'a> FromIterator<(&'a str, &'a [DocIndex])> for TempDatabase {
        fn from_iter<I: IntoIterator<Item = (&'a str, &'a [DocIndex])>>(iter: I) -> Self {
            let tempdir = TempDir::new().unwrap();
            let database = Database::open_or_create(&tempdir).unwrap();
            let index = database.create_index("default").unwrap();

            let db = &database;
            let mut writer = db.main_write_txn().unwrap();

            let mut words_fst = BTreeSet::new();
            let mut postings_lists = HashMap::new();
            let mut fields_counts = HashMap::<_, u16>::new();

            for (word, indexes) in iter {
                let word = word.to_lowercase().into_bytes();
                words_fst.insert(word.clone());
                postings_lists
                    .entry(word)
                    .or_insert_with(Vec::new)
                    .extend_from_slice(indexes);
                for idx in indexes {
                    fields_counts.insert((idx.document_id, idx.attribute, idx.word_index), 1);
                }
            }

            let words_fst = Set::from_iter(words_fst).unwrap();

            index.main.put_words_fst(&mut writer, &words_fst).unwrap();

            for (word, postings_list) in postings_lists {
                let postings_list = SetBuf::from_dirty(postings_list);
                index
                    .postings_lists
                    .put_postings_list(&mut writer, &word, &postings_list)
                    .unwrap();
            }

            for ((docid, attr, _), count) in fields_counts {
                let prev = index
                    .documents_fields_counts
                    .document_field_count(&mut writer, docid, SchemaAttr(attr))
                    .unwrap();

                let prev = prev.unwrap_or(0);

                index
                    .documents_fields_counts
                    .put_document_field_count(&mut writer, docid, SchemaAttr(attr), prev + count)
                    .unwrap();
            }

            writer.commit().unwrap();

            TempDatabase {
                database,
                index,
                _tempdir: tempdir,
            }
        }
    }

    #[test]
    fn simple() {
        let store = TempDatabase::from_iter(vec![
            ("iphone", &[doc_char_index(0, 0, 0)][..]),
            ("from", &[doc_char_index(0, 1, 1)][..]),
            ("apple", &[doc_char_index(0, 2, 2)][..]),
        ]);

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "iphone from apple", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. }));
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, .. }));
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

    #[test]
    fn simple_synonyms() {
        let mut store = TempDatabase::from_iter(vec![("hello", &[doc_index(0, 0)][..])]);

        store.add_synonym("bonjour", SetBuf::from_dirty(vec!["hello"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "hello", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "bonjour", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

    #[test]
    fn prefix_synonyms() {
        let mut store = TempDatabase::from_iter(vec![("hello", &[doc_index(0, 0)][..])]);

        store.add_synonym("bonjour", SetBuf::from_dirty(vec!["hello"]));
        store.add_synonym("salut", SetBuf::from_dirty(vec!["hello"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "sal", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "bonj", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "sal blabla", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "bonj blabla", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), None);
    }

    #[test]
    fn levenshtein_synonyms() {
        let mut store = TempDatabase::from_iter(vec![("hello", &[doc_index(0, 0)][..])]);

        store.add_synonym("salutation", SetBuf::from_dirty(vec!["hello"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "salutution", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "saluttion", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

    #[test]
    fn harder_synonyms() {
        let mut store = TempDatabase::from_iter(vec![
            ("hello", &[doc_index(0, 0)][..]),
            ("bonjour", &[doc_index(1, 3)]),
            ("salut", &[doc_index(2, 5)]),
        ]);

        store.add_synonym("hello", SetBuf::from_dirty(vec!["bonjour", "salut"]));
        store.add_synonym("bonjour", SetBuf::from_dirty(vec!["hello", "salut"]));
        store.add_synonym("salut", SetBuf::from_dirty(vec!["hello", "bonjour"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "hello", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 3, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(2), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 5, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "bonjour", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 3, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(2), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 5, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "salut", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 3, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(2), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 5, .. }));
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

    #[test]
    /// Unique word has multi-word synonyms
    fn unique_to_multiword_synonyms() {
        let mut store = TempDatabase::from_iter(vec![
            ("new", &[doc_char_index(0, 0, 0)][..]),
            ("york", &[doc_char_index(0, 1, 1)][..]),
            ("city", &[doc_char_index(0, 2, 2)][..]),
            ("subway", &[doc_char_index(0, 3, 3)][..]),
            ("NY", &[doc_char_index(1, 0, 0)][..]),
            ("subway", &[doc_char_index(1, 1, 1)][..]),
        ]);

        store.add_synonym(
            "NY",
            SetBuf::from_dirty(vec!["NYC", "new york", "new york city"]),
        );
        store.add_synonym(
            "NYC",
            SetBuf::from_dirty(vec!["NY", "new york", "new york city"]),
        );

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "NY subway", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // NY ± new
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // NY ± york
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // NY ± city
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // subway
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // subway
            assert_matches!(iter.next(), None); // position rewritten ^
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "NYC subway", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // NYC ± new
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // NYC ± york
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // NYC ± city
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // subway
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // subway
            assert_matches!(iter.next(), None); // position rewritten ^
        });
        assert_matches!(iter.next(), None);
    }

    #[test]
    fn unique_to_multiword_synonyms_words_proximity() {
        let mut store = TempDatabase::from_iter(vec![
            ("new", &[doc_char_index(0, 0, 0)][..]),
            ("york", &[doc_char_index(0, 1, 1)][..]),
            ("city", &[doc_char_index(0, 2, 2)][..]),
            ("subway", &[doc_char_index(0, 3, 3)][..]),
            ("york", &[doc_char_index(1, 0, 0)][..]),
            ("new", &[doc_char_index(1, 1, 1)][..]),
            ("subway", &[doc_char_index(1, 2, 2)][..]),
            ("NY", &[doc_char_index(2, 0, 0)][..]),
            ("subway", &[doc_char_index(2, 1, 1)][..]),
        ]);

        store.add_synonym("NY", SetBuf::from_dirty(vec!["york new"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "NY", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(2), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. })); // NY ± york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, .. })); // NY ± new
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. })); // york = NY
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, .. })); // new = NY
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 1, .. })); // york = NY
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 0, .. })); // new = NY
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "new york", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, .. })); // york
            assert_matches!(matches.next(), None); // position rewritten ^
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 1, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 0, .. })); // new
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

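    // "new york" is declared below as a synonym of "NY"; the assertions check that the
    // word indices of the words following the rewritten span (here "subway") stay
    // consistent whichever form of the synonym the query uses.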
    #[test]
    fn unique_to_multiword_synonyms_cumulative_word_index() {
        let mut store = TempDatabase::from_iter(vec![
            ("NY", &[doc_char_index(0, 0, 0)][..]),
            ("subway", &[doc_char_index(0, 1, 1)][..]),
            ("new", &[doc_char_index(1, 0, 0)][..]),
            ("york", &[doc_char_index(1, 1, 1)][..]),
            ("subway", &[doc_char_index(1, 2, 2)][..]),
        ]);

        store.add_synonym("new york", SetBuf::from_dirty(vec!["NY"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "NY subway", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // NY
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // subway
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 2, is_exact: true, .. })); // subway
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "new york subway", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new = NY
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york = NY
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // subway
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // subway
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

    #[test]
    /// Unique word has multi-word synonyms
    fn harder_unique_to_multiword_synonyms_one() {
        let mut store = TempDatabase::from_iter(vec![
            ("new", &[doc_char_index(0, 0, 0)][..]),
            ("york", &[doc_char_index(0, 1, 1)][..]),
            ("city", &[doc_char_index(0, 2, 2)][..]),
            ("yellow", &[doc_char_index(0, 3, 3)][..]),
            ("subway", &[doc_char_index(0, 4, 4)][..]),
            ("broken", &[doc_char_index(0, 5, 5)][..]),
            ("NY", &[doc_char_index(1, 0, 0)][..]),
            ("blue", &[doc_char_index(1, 1, 1)][..]),
            ("subway", &[doc_char_index(1, 2, 2)][..]),
        ]);

        store.add_synonym(
            "NY",
            SetBuf::from_dirty(vec!["NYC", "new york", "new york city"]),
        );
        store.add_synonym(
            "NYC",
            SetBuf::from_dirty(vec!["NY", "new york", "new york city"]),
        );

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "NY subway", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // city = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 4, is_exact: true, .. })); // subway
            assert_matches!(iter.next(), None); // position rewritten ^
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 4, is_exact: true, .. })); // subway
            assert_matches!(iter.next(), None); // position rewritten ^
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "NYC subway", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // NYC
            // because one-word to one-word ^^^^
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 4, is_exact: true, .. })); // subway
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 4, is_exact: true, .. })); // subway
            assert_matches!(iter.next(), None); // position rewritten ^
        });
        assert_matches!(iter.next(), None);
    }

    #[test]
    /// Unique word has multi-word synonyms
    fn even_harder_unique_to_multiword_synonyms() {
        let mut store = TempDatabase::from_iter(vec![
            ("new", &[doc_char_index(0, 0, 0)][..]),
            ("york", &[doc_char_index(0, 1, 1)][..]),
            ("city", &[doc_char_index(0, 2, 2)][..]),
            ("yellow", &[doc_char_index(0, 3, 3)][..]),
            ("underground", &[doc_char_index(0, 4, 4)][..]),
            ("train", &[doc_char_index(0, 5, 5)][..]),
            ("broken", &[doc_char_index(0, 6, 6)][..]),
            ("NY", &[doc_char_index(1, 0, 0)][..]),
            ("blue", &[doc_char_index(1, 1, 1)][..]),
            ("subway", &[doc_char_index(1, 2, 2)][..]),
        ]);

        store.add_synonym(
            "NY",
            SetBuf::from_dirty(vec!["NYC", "new york", "new york city"]),
        );
        store.add_synonym(
            "NYC",
            SetBuf::from_dirty(vec!["NY", "new york", "new york city"]),
        );
        store.add_synonym("subway", SetBuf::from_dirty(vec!["underground train"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "NY subway broken", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 4, is_exact: false, .. })); // underground = subway
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 4, word_index: 5, is_exact: false, .. })); // train = subway
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 5, word_index: 6, is_exact: true, .. })); // broken
            assert_matches!(iter.next(), None); // position rewritten ^
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // city = NY
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 4, is_exact: true, .. })); // underground = subway
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 4, word_index: 5, is_exact: true, .. })); // train = subway
            assert_matches!(iter.next(), None); // position rewritten ^
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "NYC subway", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // city = NYC
            // because one-word to one-word ^^^^
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 4, is_exact: true, .. })); // underground = subway
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 4, word_index: 5, is_exact: true, .. })); // train = subway
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city = NYC
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 4, is_exact: false, .. })); // underground = subway
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 4, word_index: 5, is_exact: false, .. })); // train = subway
            assert_matches!(iter.next(), None); // position rewritten ^
        });
        assert_matches!(iter.next(), None);
    }

    #[test]
    /// Multi-word has multi-word synonyms
    fn multiword_to_multiword_synonyms() {
        let mut store = TempDatabase::from_iter(vec![
            ("NY", &[doc_char_index(0, 0, 0)][..]),
            ("subway", &[doc_char_index(0, 1, 1)][..]),
            ("NYC", &[doc_char_index(1, 0, 0)][..]),
            ("blue", &[doc_char_index(1, 1, 1)][..]),
            ("subway", &[doc_char_index(1, 2, 2)][..]),
            ("broken", &[doc_char_index(1, 3, 3)][..]),
            ("new", &[doc_char_index(2, 0, 0)][..]),
            ("york", &[doc_char_index(2, 1, 1)][..]),
            ("underground", &[doc_char_index(2, 2, 2)][..]),
            ("train", &[doc_char_index(2, 3, 3)][..]),
            ("broken", &[doc_char_index(2, 4, 4)][..]),
        ]);

        store.add_synonym(
            "new york",
            SetBuf::from_dirty(vec!["NYC", "NY", "new york city"]),
        );
        store.add_synonym(
            "new york city",
            SetBuf::from_dirty(vec!["NYC", "NY", "new york"]),
        );
        store.add_synonym("underground train", SetBuf::from_dirty(vec!["subway"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder
            .query(&reader, "new york underground train broken", 0..20)
            .unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(2), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // city
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // underground
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 4, word_index: 4, is_exact: true, .. })); // train
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 5, word_index: 5, is_exact: true, .. })); // broken
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // NYC = new
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // NYC = york
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // NYC = city
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 4, is_exact: true, .. })); // subway = underground
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 4, word_index: 5, is_exact: true, .. })); // subway = train
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 5, word_index: 6, is_exact: true, .. })); // broken
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // NY = new
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // NY = york
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // NY = city
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // subway = underground
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 4, word_index: 4, is_exact: true, .. })); // subway = train
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder
            .query(&reader, "new york city underground train broken", 0..20)
            .unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(2), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // underground
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 4, word_index: 4, is_exact: true, .. })); // train
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 5, word_index: 5, is_exact: true, .. })); // broken
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // NYC = new
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // NYC = new
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // NYC = york
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // NYC = york
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // NYC = city
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 4, is_exact: true, .. })); // subway = underground
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 4, word_index: 5, is_exact: true, .. })); // subway = train
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 5, word_index: 6, is_exact: true, .. })); // broken
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // NY = new
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // NY = new
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // NY = york
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // NY = york
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // NY = city
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // subway = underground
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 4, word_index: 4, is_exact: true, .. })); // subway = train
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

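    // "new york" and "new york city" are declared below as synonyms of each other; the
    // assertions check that these overlapping expansions keep the word indices of the
    // surrounding words coherent.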
    #[test]
    fn intercrossed_multiword_synonyms() {
        let mut store = TempDatabase::from_iter(vec![
            ("new", &[doc_index(0, 0)][..]),
            ("york", &[doc_index(0, 1)][..]),
            ("big", &[doc_index(0, 2)][..]),
            ("city", &[doc_index(0, 3)][..]),
        ]);

        store.add_synonym("new york", SetBuf::from_dirty(vec!["new york city"]));
        store.add_synonym("new york city", SetBuf::from_dirty(vec!["new york"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "new york big ", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: false, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new

            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york

            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // city
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 4, is_exact: false, .. })); // city

            assert_matches!(matches.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // big
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);

        let mut store = TempDatabase::from_iter(vec![
            ("NY", &[doc_index(0, 0)][..]),
            ("city", &[doc_index(0, 1)][..]),
            ("subway", &[doc_index(0, 2)][..]),
            ("NY", &[doc_index(1, 0)][..]),
            ("subway", &[doc_index(1, 1)][..]),
            ("NY", &[doc_index(2, 0)][..]),
            ("york", &[doc_index(2, 1)][..]),
            ("city", &[doc_index(2, 2)][..]),
            ("subway", &[doc_index(2, 3)][..]),
        ]);

        store.add_synonym("NY", SetBuf::from_dirty(vec!["new york city story"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "NY subway ", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // city
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // story
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 4, word_index: 4, is_exact: true, .. })); // subway
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // city
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 4, word_index: 3, is_exact: true, .. })); // subway
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(2), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: false, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: false, .. })); // city
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // city
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 4, word_index: 3, is_exact: true, .. })); // subway
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

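    // Both a multi-word to one-word synonym ("new york city" -> "NYC") and a one-word
    // to multi-word synonym ("subway" -> "underground train") are declared below; the
    // word indices of every following query word must keep accumulating correctly.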
    #[test]
    fn cumulative_word_indices() {
        let mut store = TempDatabase::from_iter(vec![
            ("NYC", &[doc_index(0, 0)][..]),
            ("long", &[doc_index(0, 1)][..]),
            ("subway", &[doc_index(0, 2)][..]),
            ("cool", &[doc_index(0, 3)][..]),
        ]);

        store.add_synonym("new york city", SetBuf::from_dirty(vec!["NYC"]));
        store.add_synonym("subway", SetBuf::from_dirty(vec!["underground train"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder
            .query(&reader, "new york city long subway cool ", 0..20)
            .unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut matches = matches.into_iter();
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 0, word_index: 0, is_exact: true, .. })); // new = NYC
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 1, word_index: 1, is_exact: true, .. })); // york = NYC
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 2, word_index: 2, is_exact: true, .. })); // city = NYC
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 3, word_index: 3, is_exact: true, .. })); // long
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 4, word_index: 4, is_exact: true, .. })); // subway = underground
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 5, word_index: 5, is_exact: true, .. })); // subway = train
            assert_matches!(matches.next(), Some(TmpMatch { query_index: 6, word_index: 6, is_exact: true, .. })); // cool
            assert_matches!(matches.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

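    // "téléphone" has "iphone" as a synonym below, and both the accented and the
    // deunicoded forms are indexed; accented, unaccented and slightly misspelled
    // queries must all reach the same documents.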
    #[test]
    fn deunicoded_synonyms() {
        let mut store = TempDatabase::from_iter(vec![
            ("telephone", &[doc_index(0, 0)][..]), // meilisearch indexes the unidecoded
            ("téléphone", &[doc_index(0, 0)][..]), // and the original words on the same DocIndex
            ("iphone", &[doc_index(1, 0)][..]),
        ]);

        store.add_synonym("téléphone", SetBuf::from_dirty(vec!["iphone"]));

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "telephone", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, .. }));
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, .. }));
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, .. }));
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "téléphone", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, .. }));
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, .. }));
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, .. }));
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), None);

        let builder = store.query_builder();
        let results = builder.query(&reader, "télephone", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, .. }));
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, distance: 1, word_index: 0, is_exact: false, .. })); // iphone
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, distance: 1, word_index: 0, is_exact: false, .. })); // téléphone
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

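    // The query "i phone case" below must also match the concatenation "iphone"
    // indexed in the document.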
    #[test]
    fn simple_concatenation() {
        let store = TempDatabase::from_iter(vec![
            ("iphone", &[doc_index(0, 0)][..]),
            ("case", &[doc_index(0, 1)][..]),
        ]);

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "i phone case", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, distance: 0, .. })); // iphone
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 1, distance: 0, .. })); // iphone
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 1, word_index: 0, distance: 1, .. })); // phone
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 2, word_index: 2, distance: 0, .. })); // case
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

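    // The concatenated query "searchengine" below must be split and match the
    // consecutive words "search engine" of a document.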
    #[test]
    fn simple_phrase_query_splitting() {
        let store = TempDatabase::from_iter(vec![
            ("search", &[doc_index(0, 0)][..]),
            ("engine", &[doc_index(0, 1)][..]),
            ("search", &[doc_index(1, 0)][..]),
            ("slow", &[doc_index(1, 1)][..]),
            ("engine", &[doc_index(1, 2)][..]),
        ]);

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "searchengine", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 0, distance: 0, .. })); // search
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 1, distance: 0, .. })); // engine
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), None);
    }

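    // Same splitting as above, but the consecutive "search engine" pair only appears
    // deeper in the documents, after other occurrences of "search" and "slow".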
    #[test]
    fn harder_phrase_query_splitting() {
        let store = TempDatabase::from_iter(vec![
            ("search", &[doc_index(0, 0)][..]),
            ("search", &[doc_index(0, 1)][..]),
            ("engine", &[doc_index(0, 2)][..]),
            ("search", &[doc_index(1, 0)][..]),
            ("slow", &[doc_index(1, 1)][..]),
            ("search", &[doc_index(1, 2)][..]),
            ("engine", &[doc_index(1, 3)][..]),
            ("search", &[doc_index(1, 0)][..]),
            ("search", &[doc_index(1, 1)][..]),
            ("slow", &[doc_index(1, 2)][..]),
            ("engine", &[doc_index(1, 3)][..]),
        ]);

        let db = &store.database;
        let reader = db.main_read_txn().unwrap();

        let builder = store.query_builder();
        let results = builder.query(&reader, "searchengine", 0..20).unwrap();
        let mut iter = results.into_iter();

        assert_matches!(iter.next(), Some(Document { id: DocumentId(0), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 1, distance: 0, .. })); // search
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 2, distance: 0, .. })); // engine
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), Some(Document { id: DocumentId(1), matches, .. }) => {
            let mut iter = matches.into_iter();
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 2, distance: 0, .. })); // search
            assert_matches!(iter.next(), Some(TmpMatch { query_index: 0, word_index: 3, distance: 0, .. })); // engine
            assert_matches!(iter.next(), None);
        });
        assert_matches!(iter.next(), None);
    }
}