diff --git a/Cargo.lock b/Cargo.lock
index ff4981d11..b6fb0cf70 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -463,7 +463,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
 
 [[package]]
 name = "benchmarks"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "anyhow",
  "bytes",
@@ -1209,7 +1209,7 @@ dependencies = [
 
 [[package]]
 name = "dump"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "anyhow",
  "big_s",
@@ -1428,7 +1428,7 @@ dependencies = [
 
 [[package]]
 name = "file-store"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "faux",
  "tempfile",
@@ -1450,7 +1450,7 @@ dependencies = [
 
 [[package]]
 name = "filter-parser"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "insta",
  "nom",
@@ -1476,7 +1476,7 @@ dependencies = [
 
 [[package]]
 name = "flatten-serde-json"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "criterion",
  "serde_json",
@@ -1959,7 +1959,7 @@ dependencies = [
 
 [[package]]
 name = "index-scheduler"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "anyhow",
  "big_s",
@@ -2113,7 +2113,7 @@ dependencies = [
 
 [[package]]
 name = "json-depth-checker"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "criterion",
  "serde_json",
@@ -2539,7 +2539,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
 
 [[package]]
 name = "meili-snap"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "insta",
  "md5",
@@ -2548,7 +2548,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "actix-cors",
  "actix-http",
@@ -2636,7 +2636,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-auth"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "base64 0.21.0",
  "enum-iterator",
@@ -2655,7 +2655,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-types"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "actix-web",
  "anyhow",
@@ -2709,7 +2709,7 @@ dependencies = [
 
 [[package]]
 name = "milli"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "big_s",
  "bimap",
@@ -3064,7 +3064,7 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
 
 [[package]]
 name = "permissive-json-pointer"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "big_s",
  "serde_json",
diff --git a/Cargo.toml b/Cargo.toml
index b69831b9c..df2c22504 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -17,7 +17,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.2.0"
+version = "1.2.1"
 authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
 description = "Meilisearch HTTP server"
 homepage = "https://meilisearch.com"
diff --git a/index-scheduler/src/index_mapper/index_map.rs b/index-scheduler/src/index_mapper/index_map.rs
index 9bed4fe5d..a24213558 100644
--- a/index-scheduler/src/index_mapper/index_map.rs
+++ b/index-scheduler/src/index_mapper/index_map.rs
@@ -223,7 +223,9 @@ impl IndexMap {
         enable_mdb_writemap: bool,
         map_size_growth: usize,
     ) {
-        let Some(index) = self.available.remove(uuid) else { return; };
+        let Some(index) = self.available.remove(uuid) else {
+            return;
+        };
         self.close(*uuid, index, enable_mdb_writemap, map_size_growth);
     }
 
diff --git a/meilisearch-types/src/keys.rs b/meilisearch-types/src/keys.rs
index b2389b238..4c1765b6f 100644
--- a/meilisearch-types/src/keys.rs
+++ b/meilisearch-types/src/keys.rs
@@ -147,9 +147,7 @@ impl Key {
 fn parse_expiration_date(
     string: Option<String>,
 ) -> std::result::Result<Option<OffsetDateTime>, ParseOffsetDateTimeError> {
-    let Some(string) = string else {
-        return Ok(None)
-    };
+    let Some(string) = string else { return Ok(None) };
     let datetime = if let Ok(datetime) = OffsetDateTime::parse(&string, &Rfc3339) {
         datetime
     } else if let Ok(primitive_datetime) = PrimitiveDateTime::parse(
diff --git a/milli/src/search/new/bucket_sort.rs b/milli/src/search/new/bucket_sort.rs
index 5144a0a28..1883c48f7 100644
--- a/milli/src/search/new/bucket_sort.rs
+++ b/milli/src/search/new/bucket_sort.rs
@@ -125,7 +125,12 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
             continue;
         }
 
-        let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(ctx, logger, &ranking_rule_universes[cur_ranking_rule_index])? else {
+        let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(
+            ctx,
+            logger,
+            &ranking_rule_universes[cur_ranking_rule_index],
+        )?
+        else {
             back!();
             continue;
         };
diff --git a/milli/src/search/new/graph_based_ranking_rule.rs b/milli/src/search/new/graph_based_ranking_rule.rs
index dd25ddd4a..8f82f6210 100644
--- a/milli/src/search/new/graph_based_ranking_rule.rs
+++ b/milli/src/search/new/graph_based_ranking_rule.rs
@@ -193,9 +193,10 @@ impl<'ctx, G: RankingRuleGraphTrait> RankingRule<'ctx, QueryGraph> for GraphBase
             .all_costs
             .get(state.graph.query_graph.root_node)
             .iter()
-            .find(|c| **c >= state.cur_cost) else {
-            self.state = None;
-            return Ok(None);
+            .find(|c| **c >= state.cur_cost)
+        else {
+            self.state = None;
+            return Ok(None);
         };
         state.cur_cost = cost + 1;
 
diff --git a/milli/src/search/new/matches/matching_words.rs b/milli/src/search/new/matches/matching_words.rs
index 0ba8b198e..9e9449abf 100644
--- a/milli/src/search/new/matches/matching_words.rs
+++ b/milli/src/search/new/matches/matching_words.rs
@@ -80,7 +80,9 @@ impl MatchingWords {
                 let word = self.word_interner.get(*word);
                 // if the word is a prefix we match using starts_with.
                 if located_words.is_prefix && token.lemma().starts_with(word) {
-                    let Some((char_index, c)) = word.char_indices().take(located_words.original_char_count).last() else {
+                    let Some((char_index, c)) =
+                        word.char_indices().take(located_words.original_char_count).last()
+                    else {
                         continue;
                     };
                     let prefix_length = char_index + c.len_utf8();
diff --git a/milli/src/search/new/query_term/mod.rs b/milli/src/search/new/query_term/mod.rs
index fb749a797..2ecd8b077 100644
--- a/milli/src/search/new/query_term/mod.rs
+++ b/milli/src/search/new/query_term/mod.rs
@@ -176,9 +176,7 @@ impl QueryTermSubset {
     pub fn use_prefix_db(&self, ctx: &SearchContext) -> Option<Word> {
         let original = ctx.term_interner.get(self.original);
 
-        let Some(use_prefix_db) = original.zero_typo.use_prefix_db else {
-            return None
-        };
+        let Some(use_prefix_db) = original.zero_typo.use_prefix_db else { return None };
         let word = match &self.zero_typo_subset {
             NTypoTermSubset::All => Some(use_prefix_db),
             NTypoTermSubset::Subset { words, phrases: _ } => {
@@ -264,13 +262,15 @@ impl QueryTermSubset {
 
         match &self.one_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo
+                else {
                     panic!()
                 };
                 result.extend(one_typo.iter().copied().map(Word::Derived))
             }
             NTypoTermSubset::Subset { words, phrases: _ } => {
-                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo
+                else {
                     panic!()
                 };
                 result.extend(one_typo.intersection(words).copied().map(Word::Derived));
@@ -280,15 +280,11 @@ impl QueryTermSubset {
 
         match &self.two_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else {
-                    panic!()
-                };
+                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else { panic!() };
                 result.extend(two_typos.iter().copied().map(Word::Derived));
             }
             NTypoTermSubset::Subset { words, phrases: _ } => {
-                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else {
-                    panic!()
-                };
+                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else { panic!() };
                 result.extend(two_typos.intersection(words).copied().map(Word::Derived));
             }
             NTypoTermSubset::Nothing => {}
@@ -312,13 +308,15 @@ impl QueryTermSubset {
 
         match &self.one_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo
+                else {
                     panic!();
                 };
                 result.extend(split_words.iter().copied());
             }
             NTypoTermSubset::Subset { phrases, .. } => {
-                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo
+                else {
                     panic!();
                 };
                 if let Some(split_words) = split_words {
diff --git a/milli/src/search/new/ranking_rule_graph/proximity/build.rs b/milli/src/search/new/ranking_rule_graph/proximity/build.rs
index 660d59b3e..55e171429 100644
--- a/milli/src/search/new/ranking_rule_graph/proximity/build.rs
+++ b/milli/src/search/new/ranking_rule_graph/proximity/build.rs
@@ -18,7 +18,7 @@ pub fn build_edges(
         return Ok(vec![(
             (right_ngram_length - 1) as u32,
             conditions_interner.insert(ProximityCondition::Term { term: right_term.clone() }),
-        )])
+        )]);
     };
 
     if left_term.positions.end() + 1 != *right_term.positions.start() {
diff --git a/milli/src/update/index_documents/mod.rs b/milli/src/update/index_documents/mod.rs
index bbfa1d00c..e91d7999e 100644
--- a/milli/src/update/index_documents/mod.rs
+++ b/milli/src/update/index_documents/mod.rs
@@ -2045,10 +2045,11 @@ mod tests {
             "branch_id_number": 0
         }]};
 
-        let Err(Error::UserError(UserError::MultiplePrimaryKeyCandidatesFound {
-            candidates
-        })) =
-            index.add_documents(doc_multiple_ids) else { panic!("Expected Error::UserError(MultiplePrimaryKeyCandidatesFound)") };
+        let Err(Error::UserError(UserError::MultiplePrimaryKeyCandidatesFound { candidates })) =
+            index.add_documents(doc_multiple_ids)
+        else {
+            panic!("Expected Error::UserError(MultiplePrimaryKeyCandidatesFound)")
+        };
 
         assert_eq!(candidates, vec![S("id"), S("project_id"), S("public_uid"),]);