ManyTheFish 2023-09-12 10:04:47 +02:00
parent 1fb7782777
commit 260633b662
8 changed files with 34 additions and 27 deletions

View File

@@ -223,7 +223,9 @@ impl IndexMap {
         enable_mdb_writemap: bool,
         map_size_growth: usize,
     ) {
-        let Some(index) = self.available.remove(uuid) else { return; };
+        let Some(index) = self.available.remove(uuid) else {
+            return;
+        };
         self.close(*uuid, index, enable_mdb_writemap, map_size_growth);
     }
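Every hunk in this commit is the same mechanical change: reflowing Rust `let ... else` statements (stabilized in Rust 1.65; formatted by rustfmt since Rust 1.72) so that long ones break across lines and short ones collapse onto one. As a minimal standalone sketch of the construct itself, with invented names and no code from this repository:

```rust
// Hypothetical example: `let ... else` binds on a successful pattern
// match and requires the `else` block to diverge
// (return, continue, break, panic!, ...).
fn first_even(values: &[i32]) -> Option<i32> {
    let Some(&n) = values.iter().find(|v| *v % 2 == 0) else {
        return None;
    };
    // `n` is in scope here because the else branch cannot fall through.
    Some(n * 10)
}

fn main() {
    assert_eq!(first_even(&[1, 3, 4]), Some(40));
    assert_eq!(first_even(&[1, 3]), None);
}
```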

View File

@@ -147,9 +147,7 @@ impl Key {
     fn parse_expiration_date(
         string: Option<String>,
     ) -> std::result::Result<Option<OffsetDateTime>, ParseOffsetDateTimeError> {
-        let Some(string) = string else {
-            return Ok(None)
-        };
+        let Some(string) = string else { return Ok(None) };
        let datetime = if let Ok(datetime) = OffsetDateTime::parse(&string, &Rfc3339) {
             datetime
         } else if let Ok(primitive_datetime) = PrimitiveDateTime::parse(

View File

@@ -125,7 +125,12 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
             continue;
         }
-        let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(ctx, logger, &ranking_rule_universes[cur_ranking_rule_index])? else {
+        let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(
+            ctx,
+            logger,
+            &ranking_rule_universes[cur_ranking_rule_index],
+        )?
+        else {
             back!();
             continue;
         };
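This hunk shows the long form of the rule: when the whole `let ... else` statement no longer fits on one line, the initializer's arguments break one per line and `else` moves to its own line at the `let`'s indentation; conversely (previous hunk), a statement that fits is collapsed onto a single line. A hedged sketch with made-up names:

```rust
// Made-up example illustrating the long form: the initializer is broken
// across lines, and `else` sits on its own line under the `let`.
fn demo(buckets: &[Vec<u32>], index: usize) -> Option<u32> {
    let Some(first) = buckets
        .get(index)
        .and_then(|bucket| bucket.first().copied())
    else {
        return None;
    };
    Some(first)
}

fn main() {
    assert_eq!(demo(&[vec![3, 4]], 0), Some(3));
    assert_eq!(demo(&[], 5), None);
}
```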

View File

@@ -193,9 +193,10 @@ impl<'ctx, G: RankingRuleGraphTrait> RankingRule<'ctx, QueryGraph> for GraphBase
             .all_costs
             .get(state.graph.query_graph.root_node)
             .iter()
-            .find(|c| **c >= state.cur_cost) else {
-            self.state = None;
-            return Ok(None);
+            .find(|c| **c >= state.cur_cost)
+        else {
+            self.state = None;
+            return Ok(None);
         };
         state.cur_cost = cost + 1;

View File

@@ -80,7 +80,9 @@ impl MatchingWords {
             let word = self.word_interner.get(*word);
             // if the word is a prefix we match using starts_with.
             if located_words.is_prefix && token.lemma().starts_with(word) {
-                let Some((char_index, c)) = word.char_indices().take(located_words.original_char_count).last() else {
+                let Some((char_index, c)) =
+                    word.char_indices().take(located_words.original_char_count).last()
+                else {
                     continue;
                 };
                 let prefix_length = char_index + c.len_utf8();

View File

@@ -176,9 +176,7 @@ impl QueryTermSubset {
     pub fn use_prefix_db(&self, ctx: &SearchContext) -> Option<Word> {
         let original = ctx.term_interner.get(self.original);
-        let Some(use_prefix_db) = original.zero_typo.use_prefix_db else {
-            return None
-        };
+        let Some(use_prefix_db) = original.zero_typo.use_prefix_db else { return None };
         let word = match &self.zero_typo_subset {
             NTypoTermSubset::All => Some(use_prefix_db),
             NTypoTermSubset::Subset { words, phrases: _ } => {
@@ -264,13 +262,15 @@ impl QueryTermSubset {
         match &self.one_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo
+                else {
                     panic!()
                 };
                 result.extend(one_typo.iter().copied().map(Word::Derived))
             }
             NTypoTermSubset::Subset { words, phrases: _ } => {
-                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo
+                else {
                     panic!()
                 };
                 result.extend(one_typo.intersection(words).copied().map(Word::Derived));
@@ -280,15 +280,11 @@ impl QueryTermSubset {
         match &self.two_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else {
-                    panic!()
-                };
+                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else { panic!() };
                 result.extend(two_typos.iter().copied().map(Word::Derived));
             }
             NTypoTermSubset::Subset { words, phrases: _ } => {
-                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else {
-                    panic!()
-                };
+                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else { panic!() };
                 result.extend(two_typos.intersection(words).copied().map(Word::Derived));
             }
             NTypoTermSubset::Nothing => {}
@@ -312,13 +308,15 @@ impl QueryTermSubset {
         match &self.one_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo
+                else {
                     panic!();
                 };
                 result.extend(split_words.iter().copied());
             }
             NTypoTermSubset::Subset { phrases, .. } => {
-                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo
+                else {
                     panic!();
                 };
                 if let Some(split_words) = split_words {

View File

@@ -18,7 +18,7 @@ pub fn build_edges(
         return Ok(vec![(
            (right_ngram_length - 1) as u32,
            conditions_interner.insert(ProximityCondition::Term { term: right_term.clone() }),
-        )])
+        )]);
     };
     if left_term.positions.end() + 1 != *right_term.positions.start() {
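The only change in this hunk is the trailing semicolon. Inside a `let ... else` block, `return expr` and `return expr;` both diverge, so behavior is unchanged; the semicolon is purely a formatting normalization. A minimal sketch (hypothetical `head` function, not from this repository):

```rust
// Hypothetical example: the `else` block diverges via `return`,
// with or without the trailing semicolon.
fn head(xs: &[u8]) -> Result<u8, String> {
    let Some(&first) = xs.first() else {
        return Err(String::from("empty")); // `;` is a style choice only
    };
    Ok(first)
}

fn main() {
    assert_eq!(head(&[7, 8]), Ok(7));
    assert!(head(&[]).is_err());
}
```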

View File

@@ -2045,10 +2045,11 @@ mod tests {
             "branch_id_number": 0
         }]};
-        let Err(Error::UserError(UserError::MultiplePrimaryKeyCandidatesFound {
-            candidates
-        })) =
-            index.add_documents(doc_multiple_ids) else { panic!("Expected Error::UserError(MultiplePrimaryKeyCandidatesFound)") };
+        let Err(Error::UserError(UserError::MultiplePrimaryKeyCandidatesFound { candidates })) =
+            index.add_documents(doc_multiple_ids)
+        else {
+            panic!("Expected Error::UserError(MultiplePrimaryKeyCandidatesFound)")
+        };
         assert_eq!(candidates, vec![S("id"), S("project_id"), S("public_uid"),]);
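The same construct doubles as a test assertion above: destructure the expected error variant, `panic!` otherwise, and keep `candidates` bound for the follow-up `assert_eq!`. A standalone sketch of that pattern, with an invented error type:

```rust
// Invented error type for illustration; the pattern is the point:
// match the expected variant with `let ... else`, panic otherwise,
// and keep using the bound fields afterwards.
#[derive(Debug)]
enum Error {
    DuplicateKeys { candidates: Vec<String> },
}

fn add(keys: &[&str]) -> Result<(), Error> {
    if keys.len() > 1 {
        return Err(Error::DuplicateKeys {
            candidates: keys.iter().map(|k| k.to_string()).collect(),
        });
    }
    Ok(())
}

fn main() {
    let Err(Error::DuplicateKeys { candidates }) = add(&["id", "uid"]) else {
        panic!("expected Error::DuplicateKeys")
    };
    assert_eq!(candidates, vec!["id".to_string(), "uid".to_string()]);
}
```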