From edcb4c60ba0bc416152bdfd931598bfa0df87467 Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Thu, 12 Sep 2024 09:44:37 +0300 Subject: [PATCH 001/111] Change Matcher so that phrases are counted as one instead of word by word --- milli/src/search/new/matches/mod.rs | 45 +++++++++++------------------ 1 file changed, 17 insertions(+), 28 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 4688b8f32..6ddb81c6a 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -132,37 +132,21 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { mut partial: PartialMatch<'a>, token_position: usize, word_position: usize, + first_word_char_start: &usize, words_positions: &mut impl Iterator)>, matches: &mut Vec, ) -> bool { - let mut potential_matches = vec![(token_position, word_position, partial.char_len())]; - - for (token_position, word_position, word) in words_positions { + for (_, _, word) in words_positions { partial = match partial.match_token(word) { // token matches the partial match, but the match is not full, // we temporarily save the current token then we try to match the next one. - Some(MatchType::Partial(partial)) => { - potential_matches.push((token_position, word_position, partial.char_len())); - partial - } + Some(MatchType::Partial(partial)) => partial, // partial match is now full, we keep this matches and we advance positions - Some(MatchType::Full { char_len, ids }) => { - let ids: Vec<_> = ids.clone().collect(); - // save previously matched tokens as matches. - let iter = potential_matches.into_iter().map( - |(token_position, word_position, match_len)| Match { - match_len, - ids: ids.clone(), - word_position, - token_position, - }, - ); - matches.extend(iter); - + Some(MatchType::Full { ids, .. }) => { // save the token that closes the partial match as a match. 
matches.push(Match { - match_len: char_len, - ids, + match_len: word.char_end - first_word_char_start, + ids: ids.clone().collect(), word_position, token_position, }); @@ -221,6 +205,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { partial, token_position, word_position, + &word.char_start, &mut wp, &mut matches, ) { @@ -472,15 +457,17 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { .enumerate() .find(|(i, _)| *i == m.match_len) .map_or(token.byte_end, |(_, (i, _))| i + token.byte_start); + formatted.push(self.highlight_prefix); formatted.push(&self.text[token.byte_start..highlight_byte_index]); formatted.push(self.highlight_suffix); + // if it's a prefix highlight, we put the end of the word after the highlight marker. if highlight_byte_index < token.byte_end { formatted.push(&self.text[highlight_byte_index..token.byte_end]); } - byte_index = token.byte_end; + byte_index = token.byte_start + m.match_len; } } @@ -821,22 +808,24 @@ mod tests { fn format_highlight_crop_phrase_query() { //! testing: https://github.com/meilisearch/meilisearch/issues/3975 let temp_index = TempIndex::new(); + + let text = "The groundbreaking invention had the power to split the world between those who embraced progress and those who resisted change!"; temp_index .add_documents(documents!([ - { "id": 1, "text": "The groundbreaking invention had the power to split the world between those who embraced progress and those who resisted change!" } + { "id": 1, "text": text } ])) .unwrap(); + let rtxn = temp_index.read_txn().unwrap(); let format_options = FormatOptions { highlight: true, crop: Some(10) }; - let text = "The groundbreaking invention had the power to split the world between those who embraced progress and those who resisted change!"; let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "\"the world\""); let mut matcher = builder.build(text, None); // should return 10 words with a marker at the start as well the end, and the highlighted matches. 
insta::assert_snapshot!( matcher.format(format_options), - @"…had the power to split the world between those who…" + @"…had the power to split the world between those who…" ); let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "those \"and those\""); @@ -844,7 +833,7 @@ mod tests { // should highlight "those" and the phrase "and those". insta::assert_snapshot!( matcher.format(format_options), - @"…world between those who embraced progress and those who resisted…" + @"…world between those who embraced progress and those who resisted…" ); } @@ -900,7 +889,7 @@ mod tests { let mut matcher = builder.build(text, None); insta::assert_snapshot!( matcher.format(format_options), - @"_the_ _do_ _or_ die can't be he do and or isn'_t_ _he_" + @"_the_ _do or_ die can't be he do and or isn'_t he_" ); } } From e7af499314f24e51f1bff27ff231ceb898aa27a1 Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Thu, 12 Sep 2024 16:58:13 +0300 Subject: [PATCH 002/111] Improve changes to Matcher --- milli/src/search/new/matches/mod.rs | 136 +++++++++++++++++++++------- 1 file changed, 104 insertions(+), 32 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 6ddb81c6a..26dd6f6e8 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -93,15 +93,28 @@ impl FormatOptions { } } +#[derive(Clone, Debug)] +pub enum MatchPosition { + Word { + // position of the word in the whole text. + word_position: usize, + // position of the token in the whole text. + token_position: usize, + }, + Phrase { + // position of the first and last word in the phrase in the whole text. + word_positions: (usize, usize), + // position of the first and last token in the phrase in the whole text. + token_positions: (usize, usize), + }, +} + #[derive(Clone, Debug)] pub struct Match { match_len: usize, // ids of the query words that matches. 
ids: Vec, - // position of the word in the whole text. - word_position: usize, - // position of the token in the whole text. - token_position: usize, + position: MatchPosition, } #[derive(Serialize, Debug, Clone, PartialEq, Eq)] @@ -130,13 +143,13 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { /// compute_partial_match peek into next words to validate if the match is complete. fn compute_partial_match<'a>( mut partial: PartialMatch<'a>, - token_position: usize, - word_position: usize, + first_token_position: usize, + first_word_position: usize, first_word_char_start: &usize, words_positions: &mut impl Iterator)>, matches: &mut Vec, ) -> bool { - for (_, _, word) in words_positions { + for (token_position, word_position, word) in words_positions { partial = match partial.match_token(word) { // token matches the partial match, but the match is not full, // we temporarily save the current token then we try to match the next one. @@ -145,10 +158,12 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { Some(MatchType::Full { ids, .. }) => { // save the token that closes the partial match as a match. matches.push(Match { - match_len: word.char_end - first_word_char_start, + match_len: word.char_end - *first_word_char_start, ids: ids.clone().collect(), - word_position, - token_position, + position: MatchPosition::Phrase { + word_positions: (first_word_position, word_position), + token_positions: (first_token_position, token_position), + }, }); // the match is complete, we return true. 
@@ -191,8 +206,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { matches.push(Match { match_len: char_len, ids, - word_position, - token_position, + position: MatchPosition::Word { word_position, token_position }, }); break; } @@ -228,13 +242,47 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { Some((tokens, matches)) => matches .iter() .map(|m| MatchBounds { - start: tokens[m.token_position].byte_start, + start: tokens[match m.position { + MatchPosition::Word { token_position, .. } => token_position, + MatchPosition::Phrase { + token_positions: (first_token_position, _), + .. + } => first_token_position, + }] + .byte_start, length: m.match_len, }) .collect(), } } + // @TODO: This should be improved, looks nasty + fn get_match_pos(&self, m: &Match, is_first: bool, is_word: bool) -> usize { + match m.position { + MatchPosition::Word { word_position, token_position } => { + if is_word { + word_position + } else { + token_position + } + } + MatchPosition::Phrase { word_positions: (wpf, wpl), token_positions: (tpf, tpl) } => { + if is_word { + if is_first { + return wpf; + } else { + return wpl; + } + } + if is_first { + tpf + } else { + tpl + } + } + } + } + /// Returns the bounds in byte index of the crop window. fn crop_bounds( &self, @@ -243,10 +291,14 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { crop_size: usize, ) -> (usize, usize) { // if there is no match, we start from the beginning of the string by default. 
- let first_match_word_position = matches.first().map(|m| m.word_position).unwrap_or(0); - let first_match_token_position = matches.first().map(|m| m.token_position).unwrap_or(0); - let last_match_word_position = matches.last().map(|m| m.word_position).unwrap_or(0); - let last_match_token_position = matches.last().map(|m| m.token_position).unwrap_or(0); + let first_match_word_position = + matches.first().map(|m| self.get_match_pos(m, true, true)).unwrap_or(0); + let first_match_token_position = + matches.first().map(|m| self.get_match_pos(m, true, false)).unwrap_or(0); + let last_match_word_position = + matches.last().map(|m| self.get_match_pos(m, false, true)).unwrap_or(0); + let last_match_token_position = + matches.last().map(|m| self.get_match_pos(m, false, false)).unwrap_or(0); // matches needs to be counted in the crop len. let mut remaining_words = crop_size + first_match_word_position - last_match_word_position; @@ -350,7 +402,9 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { } // compute distance between matches - distance_score -= (next_match.word_position - m.word_position).min(7) as i16; + distance_score -= (self.get_match_pos(next_match, true, true) + - self.get_match_pos(m, true, true)) + .min(7) as i16; } ids.extend(m.ids.iter()); @@ -378,7 +432,12 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // if next match would make interval gross more than crop_size, // we compare the current interval with the best one, // then we increase `interval_first` until next match can be added. 
- if next_match.word_position - matches[interval_first].word_position >= crop_size { + let next_match_word_position = self.get_match_pos(next_match, true, true); + + if next_match_word_position + - self.get_match_pos(&matches[interval_first], false, true) + >= crop_size + { let interval_score = self.match_interval_score(&matches[interval_first..=interval_last]); @@ -389,10 +448,15 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { } // advance start of the interval while interval is longer than crop_size. - while next_match.word_position - matches[interval_first].word_position - >= crop_size - { + loop { interval_first += 1; + + if next_match_word_position + - self.get_match_pos(&matches[interval_first], false, true) + < crop_size + { + break; + } } } interval_last = index; @@ -441,33 +505,41 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { if format_options.highlight { // insert highlight markers around matches. for m in matches { - let token = &tokens[m.token_position]; + let (current_byte_start, current_byte_end) = match m.position { + MatchPosition::Word { token_position, .. } => { + let token = &tokens[token_position]; + (&token.byte_start, &token.byte_end) + } + MatchPosition::Phrase { token_positions: (ftp, ltp), .. } => { + (&tokens[ftp].byte_start, &tokens[ltp].byte_end) + } + }; // skip matches out of the crop window. - if token.byte_start < byte_start || token.byte_end > byte_end { + if *current_byte_start < byte_start || *current_byte_end > byte_end { continue; } - if byte_index < token.byte_start { - formatted.push(&self.text[byte_index..token.byte_start]); + if byte_index < *current_byte_start { + formatted.push(&self.text[byte_index..*current_byte_start]); } - let highlight_byte_index = self.text[token.byte_start..] + let highlight_byte_index = self.text[*current_byte_start..] 
.char_indices() .enumerate() .find(|(i, _)| *i == m.match_len) - .map_or(token.byte_end, |(_, (i, _))| i + token.byte_start); + .map_or(*current_byte_end, |(_, (i, _))| i + *current_byte_start); formatted.push(self.highlight_prefix); - formatted.push(&self.text[token.byte_start..highlight_byte_index]); + formatted.push(&self.text[*current_byte_start..highlight_byte_index]); formatted.push(self.highlight_suffix); // if it's a prefix highlight, we put the end of the word after the highlight marker. - if highlight_byte_index < token.byte_end { - formatted.push(&self.text[highlight_byte_index..token.byte_end]); + if highlight_byte_index < *current_byte_end { + formatted.push(&self.text[highlight_byte_index..*current_byte_end]); } - byte_index = token.byte_start + m.match_len; + byte_index = *current_byte_end; } } From cc6a2aec06ebd6cb7332afb0478affe3e63185af Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Fri, 13 Sep 2024 13:31:07 +0300 Subject: [PATCH 003/111] Improve changes to Matcher --- milli/src/search/new/matches/mod.rs | 78 +++++++++++++++-------------- 1 file changed, 41 insertions(+), 37 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 26dd6f6e8..a84b25923 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -93,6 +93,16 @@ impl FormatOptions { } } +enum FL { + First, + Last, +} + +enum WT { + Word, + Token, +} + #[derive(Clone, Debug)] pub enum MatchPosition { Word { @@ -256,28 +266,22 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { } } - // @TODO: This should be improved, looks nasty - fn get_match_pos(&self, m: &Match, is_first: bool, is_word: bool) -> usize { + fn get_match_pos(&self, m: &Match, wt: WT, fl: FL) -> usize { match m.position { - MatchPosition::Word { word_position, token_position } => { - if is_word { - word_position - } else { - token_position - } - } - MatchPosition::Phrase { word_positions: 
(wpf, wpl), token_positions: (tpf, tpl) } => { - if is_word { - if is_first { - return wpf; - } else { - return wpl; - } - } - if is_first { - tpf - } else { - tpl + MatchPosition::Word { word_position, token_position } => match wt { + WT::Word => word_position, + WT::Token => token_position, + }, + MatchPosition::Phrase { word_positions: (fwp, lwp), token_positions: (ftp, ltp) } => { + match wt { + WT::Word => match fl { + FL::First => fwp, + FL::Last => lwp, + }, + WT::Token => match fl { + FL::First => ftp, + FL::Last => ltp, + }, } } } @@ -292,13 +296,13 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { ) -> (usize, usize) { // if there is no match, we start from the beginning of the string by default. let first_match_word_position = - matches.first().map(|m| self.get_match_pos(m, true, true)).unwrap_or(0); + matches.first().map(|m| self.get_match_pos(m, WT::Word, FL::First)).unwrap_or(0); let first_match_token_position = - matches.first().map(|m| self.get_match_pos(m, true, false)).unwrap_or(0); + matches.first().map(|m| self.get_match_pos(m, WT::Token, FL::First)).unwrap_or(0); let last_match_word_position = - matches.last().map(|m| self.get_match_pos(m, false, true)).unwrap_or(0); + matches.last().map(|m| self.get_match_pos(m, WT::Word, FL::Last)).unwrap_or(0); let last_match_token_position = - matches.last().map(|m| self.get_match_pos(m, false, false)).unwrap_or(0); + matches.last().map(|m| self.get_match_pos(m, WT::Token, FL::Last)).unwrap_or(0); // matches needs to be counted in the crop len. 
let mut remaining_words = crop_size + first_match_word_position - last_match_word_position; @@ -401,10 +405,12 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { order_score += 1; } + let next_match_first_word_pos = self.get_match_pos(next_match, WT::Word, FL::First); + let current_match_first_word_pos = self.get_match_pos(m, WT::Word, FL::First); + // compute distance between matches - distance_score -= (self.get_match_pos(next_match, true, true) - - self.get_match_pos(m, true, true)) - .min(7) as i16; + distance_score -= + (next_match_first_word_pos - current_match_first_word_pos).min(7) as i16; } ids.extend(m.ids.iter()); @@ -432,12 +438,11 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // if next match would make interval gross more than crop_size, // we compare the current interval with the best one, // then we increase `interval_first` until next match can be added. - let next_match_word_position = self.get_match_pos(next_match, true, true); + let next_match_word_pos = self.get_match_pos(next_match, WT::Word, FL::First); + let mut interval_first_match_word_pos = + self.get_match_pos(&matches[interval_first], WT::Word, FL::Last); - if next_match_word_position - - self.get_match_pos(&matches[interval_first], false, true) - >= crop_size - { + if next_match_word_pos - interval_first_match_word_pos >= crop_size { let interval_score = self.match_interval_score(&matches[interval_first..=interval_last]); @@ -450,11 +455,10 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // advance start of the interval while interval is longer than crop_size. loop { interval_first += 1; + interval_first_match_word_pos = + self.get_match_pos(&matches[interval_first], WT::Word, FL::Last); - if next_match_word_position - - self.get_match_pos(&matches[interval_first], false, true) - < crop_size - { + if next_match_word_pos - interval_first_match_word_pos < crop_size { break; } } From 65e3d61a955dd9b0f4b877d17a0b2b0dc087816c Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Fri, 13 Sep 2024 13:35:58 +0300 Subject: [PATCH 004/111] Make use of helper function in one more place --- milli/src/search/new/matches/mod.rs | 35 ++++++++++++----------------- 1 file changed, 14 insertions(+), 21 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index a84b25923..5a4f0b914 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -245,27 +245,6 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { self } - /// Returns boundaries of the words that match the query. - pub fn matches(&mut self) -> Vec { - match &self.matches { - None => self.compute_matches().matches(), - Some((tokens, matches)) => matches - .iter() - .map(|m| MatchBounds { - start: tokens[match m.position { - MatchPosition::Word { token_position, .. } => token_position, - MatchPosition::Phrase { - token_positions: (first_token_position, _), - .. - } => first_token_position, - }] - .byte_start, - length: m.match_len, - }) - .collect(), - } - } - fn get_match_pos(&self, m: &Match, wt: WT, fl: FL) -> usize { match m.position { MatchPosition::Word { word_position, token_position } => match wt { @@ -287,6 +266,20 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { } } + /// Returns boundaries of the words that match the query. + pub fn matches(&mut self) -> Vec { + match &self.matches { + None => self.compute_matches().matches(), + Some((tokens, matches)) => matches + .iter() + .map(|m| MatchBounds { + start: tokens[self.get_match_pos(m, WT::Token, FL::First)].byte_start, + length: m.match_len, + }) + .collect(), + } + } + /// Returns the bounds in byte index of the crop window. fn crop_bounds( &self, From cab63abc845d87350ab36c07d3999b58eebd0eaa Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Fri, 13 Sep 2024 14:35:28 +0300 Subject: [PATCH 005/111] Improve MatchesPosition enum with an impl --- milli/src/search/new/matches/mod.rs | 81 ++++++++++++++--------------- 1 file changed, 40 insertions(+), 41 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 5a4f0b914..ce878a1eb 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -93,16 +93,6 @@ impl FormatOptions { } } -enum FL { - First, - Last, -} - -enum WT { - Word, - Token, -} - #[derive(Clone, Debug)] pub enum MatchPosition { Word { @@ -127,6 +117,36 @@ pub struct Match { position: MatchPosition, } +impl MatchPosition { + fn get_first_word(m: &Match) -> usize { + match m.position { + MatchPosition::Word { word_position, .. } => word_position, + MatchPosition::Phrase { word_positions: (fwp, _), .. } => fwp, + } + } + + fn get_last_word(m: &Match) -> usize { + match m.position { + MatchPosition::Word { word_position, .. } => word_position, + MatchPosition::Phrase { word_positions: (_, lwp), .. } => lwp, + } + } + + fn get_first_token(m: &Match) -> usize { + match m.position { + MatchPosition::Word { token_position, .. } => token_position, + MatchPosition::Phrase { token_positions: (ftp, _), .. } => ftp, + } + } + + fn get_last_token(m: &Match) -> usize { + match m.position { + MatchPosition::Word { token_position, .. } => token_position, + MatchPosition::Phrase { token_positions: (_, ltp), .. 
} => ltp, + } + } +} + #[derive(Serialize, Debug, Clone, PartialEq, Eq)] pub struct MatchBounds { pub start: usize, @@ -245,27 +265,6 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { self } - fn get_match_pos(&self, m: &Match, wt: WT, fl: FL) -> usize { - match m.position { - MatchPosition::Word { word_position, token_position } => match wt { - WT::Word => word_position, - WT::Token => token_position, - }, - MatchPosition::Phrase { word_positions: (fwp, lwp), token_positions: (ftp, ltp) } => { - match wt { - WT::Word => match fl { - FL::First => fwp, - FL::Last => lwp, - }, - WT::Token => match fl { - FL::First => ftp, - FL::Last => ltp, - }, - } - } - } - } - /// Returns boundaries of the words that match the query. pub fn matches(&mut self) -> Vec { match &self.matches { @@ -273,7 +272,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { Some((tokens, matches)) => matches .iter() .map(|m| MatchBounds { - start: tokens[self.get_match_pos(m, WT::Token, FL::First)].byte_start, + start: tokens[MatchPosition::get_first_token(m)].byte_start, length: m.match_len, }) .collect(), @@ -289,13 +288,13 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { ) -> (usize, usize) { // if there is no match, we start from the beginning of the string by default. 
let first_match_word_position = - matches.first().map(|m| self.get_match_pos(m, WT::Word, FL::First)).unwrap_or(0); + matches.first().map(|m| MatchPosition::get_first_word(m)).unwrap_or(0); let first_match_token_position = - matches.first().map(|m| self.get_match_pos(m, WT::Token, FL::First)).unwrap_or(0); + matches.first().map(|m| MatchPosition::get_first_token(m)).unwrap_or(0); let last_match_word_position = - matches.last().map(|m| self.get_match_pos(m, WT::Word, FL::Last)).unwrap_or(0); + matches.last().map(|m| MatchPosition::get_last_word(m)).unwrap_or(0); let last_match_token_position = - matches.last().map(|m| self.get_match_pos(m, WT::Token, FL::Last)).unwrap_or(0); + matches.last().map(|m| MatchPosition::get_last_token(m)).unwrap_or(0); // matches needs to be counted in the crop len. let mut remaining_words = crop_size + first_match_word_position - last_match_word_position; @@ -398,8 +397,8 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { order_score += 1; } - let next_match_first_word_pos = self.get_match_pos(next_match, WT::Word, FL::First); - let current_match_first_word_pos = self.get_match_pos(m, WT::Word, FL::First); + let next_match_first_word_pos = MatchPosition::get_first_word(next_match); + let current_match_first_word_pos = MatchPosition::get_first_word(m); // compute distance between matches distance_score -= @@ -431,9 +430,9 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // if next match would make interval gross more than crop_size, // we compare the current interval with the best one, // then we increase `interval_first` until next match can be added. 
- let next_match_word_pos = self.get_match_pos(next_match, WT::Word, FL::First); + let next_match_word_pos = MatchPosition::get_first_word(next_match); let mut interval_first_match_word_pos = - self.get_match_pos(&matches[interval_first], WT::Word, FL::Last); + MatchPosition::get_last_word(&matches[interval_first]); if next_match_word_pos - interval_first_match_word_pos >= crop_size { let interval_score = @@ -449,7 +448,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { loop { interval_first += 1; interval_first_match_word_pos = - self.get_match_pos(&matches[interval_first], WT::Word, FL::Last); + MatchPosition::get_last_word(&matches[interval_first]); if next_match_word_pos - interval_first_match_word_pos < crop_size { break; From a2a16bf846066f422a5e6bd9bcb0009a894dcad0 Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Fri, 13 Sep 2024 21:20:06 +0300 Subject: [PATCH 006/111] Move MatchPosition impl to Match, adjust counting score for phrases --- milli/src/search/new/matches/mod.rs | 66 +++++++++++++++++++---------- 1 file changed, 43 insertions(+), 23 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index ce878a1eb..e63920145 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -117,30 +117,30 @@ pub struct Match { position: MatchPosition, } -impl MatchPosition { - fn get_first_word(m: &Match) -> usize { - match m.position { +impl Match { + fn get_first_word_pos(&self) -> usize { + match self.position { MatchPosition::Word { word_position, .. } => word_position, MatchPosition::Phrase { word_positions: (fwp, _), .. } => fwp, } } - fn get_last_word(m: &Match) -> usize { - match m.position { + fn get_last_word_pos(&self) -> usize { + match self.position { MatchPosition::Word { word_position, .. } => word_position, MatchPosition::Phrase { word_positions: (_, lwp), .. 
} => lwp, } } - fn get_first_token(m: &Match) -> usize { - match m.position { + fn get_first_token_pos(&self) -> usize { + match self.position { MatchPosition::Word { token_position, .. } => token_position, MatchPosition::Phrase { token_positions: (ftp, _), .. } => ftp, } } - fn get_last_token(m: &Match) -> usize { - match m.position { + fn get_last_token_pos(&self) -> usize { + match self.position { MatchPosition::Word { token_position, .. } => token_position, MatchPosition::Phrase { token_positions: (_, ltp), .. } => ltp, } @@ -272,7 +272,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { Some((tokens, matches)) => matches .iter() .map(|m| MatchBounds { - start: tokens[MatchPosition::get_first_token(m)].byte_start, + start: tokens[m.get_first_token_pos()].byte_start, length: m.match_len, }) .collect(), @@ -288,13 +288,11 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { ) -> (usize, usize) { // if there is no match, we start from the beginning of the string by default. let first_match_word_position = - matches.first().map(|m| MatchPosition::get_first_word(m)).unwrap_or(0); + matches.first().map(|m| m.get_first_word_pos()).unwrap_or(0); let first_match_token_position = - matches.first().map(|m| MatchPosition::get_first_token(m)).unwrap_or(0); - let last_match_word_position = - matches.last().map(|m| MatchPosition::get_last_word(m)).unwrap_or(0); - let last_match_token_position = - matches.last().map(|m| MatchPosition::get_last_token(m)).unwrap_or(0); + matches.first().map(|m| m.get_first_token_pos()).unwrap_or(0); + let last_match_word_position = matches.last().map(|m| m.get_last_word_pos()).unwrap_or(0); + let last_match_token_position = matches.last().map(|m| m.get_last_token_pos()).unwrap_or(0); // matches needs to be counted in the crop len. 
let mut remaining_words = crop_size + first_match_word_position - last_match_word_position; @@ -389,6 +387,16 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { let mut order_score = 0; let mut distance_score = 0; + // Count score for phrases + let tally_phrase_scores = + |fwp: &usize, lwp: &usize, order_score: &mut i16, distance_score: &mut i16| { + let words_in_phrase_minus_one = (lwp - fwp) as i16; + // will always be ordered, so +1 for each space between words + *order_score += words_in_phrase_minus_one; + // distance will always be 1, so -1 for each space between words + *distance_score -= words_in_phrase_minus_one; + }; + let mut iter = matches.iter().peekable(); while let Some(m) = iter.next() { if let Some(next_match) = iter.peek() { @@ -397,12 +405,24 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { order_score += 1; } - let next_match_first_word_pos = MatchPosition::get_first_word(next_match); - let current_match_first_word_pos = MatchPosition::get_first_word(m); + let m_last_word_pos = match m.position { + MatchPosition::Word { word_position, .. } => word_position, + MatchPosition::Phrase { word_positions: (fwp, lwp), .. } => { + tally_phrase_scores(&fwp, &lwp, &mut order_score, &mut distance_score); + lwp + } + }; + + let next_match_first_word_pos = match next_match.position { + MatchPosition::Word { word_position, .. } => word_position, + MatchPosition::Phrase { word_positions: (fwp, _), .. } => fwp, + }; // compute distance between matches - distance_score -= - (next_match_first_word_pos - current_match_first_word_pos).min(7) as i16; + distance_score -= (next_match_first_word_pos - m_last_word_pos).min(7) as i16; + } else if let MatchPosition::Phrase { word_positions: (fwp, lwp), .. 
} = m.position { + // in case last match is a phrase, count score for its words + tally_phrase_scores(&fwp, &lwp, &mut order_score, &mut distance_score); } ids.extend(m.ids.iter()); @@ -430,9 +450,9 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // if next match would make interval gross more than crop_size, // we compare the current interval with the best one, // then we increase `interval_first` until next match can be added. - let next_match_word_pos = MatchPosition::get_first_word(next_match); + let next_match_word_pos = next_match.get_last_word_pos(); let mut interval_first_match_word_pos = - MatchPosition::get_last_word(&matches[interval_first]); + matches[interval_first].get_first_word_pos(); if next_match_word_pos - interval_first_match_word_pos >= crop_size { let interval_score = @@ -448,7 +468,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { loop { interval_first += 1; interval_first_match_word_pos = - MatchPosition::get_last_word(&matches[interval_first]); + matches[interval_first].get_first_word_pos(); if next_match_word_pos - interval_first_match_word_pos < crop_size { break; From 51085206ccab6e8e0098c4cf8b2a3e67e06558a4 Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Sat, 14 Sep 2024 10:14:07 +0300 Subject: [PATCH 007/111] Misc adjustments --- milli/src/search/new/matches/mod.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index e63920145..414509cd3 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -387,7 +387,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { let mut order_score = 0; let mut distance_score = 0; - // Count score for phrases + // count score for phrases let tally_phrase_scores = |fwp: &usize, lwp: &usize, order_score: &mut i16, distance_score: &mut i16| { let words_in_phrase_minus_one = (lwp - fwp) as i16; @@ -450,11 +450,11 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // if next match would make interval gross more than crop_size, // we compare the current interval with the best one, // then we increase `interval_first` until next match can be added. - let next_match_word_pos = next_match.get_last_word_pos(); - let mut interval_first_match_word_pos = + let next_match_last_word_pos = next_match.get_last_word_pos(); + let mut interval_first_match_first_word_pos = matches[interval_first].get_first_word_pos(); - if next_match_word_pos - interval_first_match_word_pos >= crop_size { + if next_match_last_word_pos - interval_first_match_first_word_pos >= crop_size { let interval_score = self.match_interval_score(&matches[interval_first..=interval_last]); @@ -467,10 +467,12 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // advance start of the interval while interval is longer than crop_size. 
loop { interval_first += 1; - interval_first_match_word_pos = + interval_first_match_first_word_pos = matches[interval_first].get_first_word_pos(); - if next_match_word_pos - interval_first_match_word_pos < crop_size { + if next_match_last_word_pos - interval_first_match_first_word_pos + < crop_size + { break; } } From dcb61f8b3ad2972ee59ab7880c9010bde8abf211 Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Mon, 9 Sep 2024 15:27:47 +0300 Subject: [PATCH 008/111] Return error for primary keys with a length greater than 512 bytes --- milli/src/documents/primary_key.rs | 2 ++ milli/src/error.rs | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/milli/src/documents/primary_key.rs b/milli/src/documents/primary_key.rs index 64131af40..123232c44 100644 --- a/milli/src/documents/primary_key.rs +++ b/milli/src/documents/primary_key.rs @@ -151,6 +151,7 @@ fn starts_with(selector: &str, key: &str) -> bool { fn validate_document_id(document_id: &str) -> Option<&str> { if !document_id.is_empty() + && document_id.len() <= 512 && document_id.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '-' | '_')) { Some(document_id) @@ -166,6 +167,7 @@ pub fn validate_document_id_value(document_id: Value) -> StdResult Ok(s.to_string()), None => Err(UserError::InvalidDocumentId { document_id: Value::String(string) }), }, + // a `u64` or `i64` cannot be more than 512 bytes once converted to a string Value::Number(number) if !number.is_f64() => Ok(number.to_string()), content => Err(UserError::InvalidDocumentId { document_id: content }), } diff --git a/milli/src/error.rs b/milli/src/error.rs index f0e92a9ab..ee3a4ec43 100644 --- a/milli/src/error.rs +++ b/milli/src/error.rs @@ -106,7 +106,8 @@ pub enum UserError { #[error( "Document identifier `{}` is invalid. 
\ A document identifier can be of type integer or string, \ -only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_).", .document_id.to_string() +only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_), \ +and can not be more than 512 bytes.", .document_id.to_string() )] InvalidDocumentId { document_id: Value }, #[error("Invalid facet distribution, {}", format_invalid_filter_distribution(.invalid_facets_name, .valid_facets_name))] From 993408d3ba65cbcea9920caeab8b421160a931ac Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Sun, 15 Sep 2024 16:15:09 +0300 Subject: [PATCH 009/111] Change closure to fn --- milli/src/search/new/matches/mod.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 414509cd3..df110aff9 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -388,14 +388,18 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { let mut distance_score = 0; // count score for phrases - let tally_phrase_scores = - |fwp: &usize, lwp: &usize, order_score: &mut i16, distance_score: &mut i16| { - let words_in_phrase_minus_one = (lwp - fwp) as i16; - // will always be ordered, so +1 for each space between words - *order_score += words_in_phrase_minus_one; - // distance will always be 1, so -1 for each space between words - *distance_score -= words_in_phrase_minus_one; - }; + fn tally_phrase_scores( + fwp: &usize, + lwp: &usize, + order_score: &mut i16, + distance_score: &mut i16, + ) { + let words_in_phrase_minus_one = (lwp - fwp) as i16; + // will always be ordered, so +1 for each space between words + *order_score += words_in_phrase_minus_one; + // distance will always be 1, so -1 for each space between words + *distance_score -= words_in_phrase_minus_one; + } let mut iter = matches.iter().peekable(); 
while let Some(m) = iter.next() { From 51bc7b3173e458c37301c70b9638a468121d8a3d Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Mon, 16 Sep 2024 22:22:24 +0300 Subject: [PATCH 010/111] Update tests --- index-scheduler/src/error.rs | 2 +- meilisearch-types/src/error.rs | 2 +- meilisearch-types/src/index_uid.rs | 2 +- meilisearch/src/routes/tasks.rs | 4 ++-- meilisearch/tests/documents/add_documents.rs | 4 ++-- meilisearch/tests/documents/update_documents.rs | 4 ++-- meilisearch/tests/index/create_index.rs | 2 +- meilisearch/tests/index/errors.rs | 8 ++++---- meilisearch/tests/index/get_index.rs | 2 +- meilisearch/tests/search/multi.rs | 4 ++-- meilisearch/tests/settings/get_settings.rs | 2 +- meilisearch/tests/similar/errors.rs | 4 ++-- meilisearch/tests/tasks/errors.rs | 6 +++--- 13 files changed, 23 insertions(+), 23 deletions(-) diff --git a/index-scheduler/src/error.rs b/index-scheduler/src/error.rs index 223b84762..3bd378fd6 100644 --- a/index-scheduler/src/error.rs +++ b/index-scheduler/src/error.rs @@ -101,7 +101,7 @@ pub enum Error { )] InvalidTaskCanceledBy { canceled_by: String }, #[error( - "{index_uid} is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_)." + "{index_uid} is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes." )] InvalidIndexUid { index_uid: String }, #[error("Task `{0}` not found.")] diff --git a/meilisearch-types/src/error.rs b/meilisearch-types/src/error.rs index 0099cada5..c082f82f3 100644 --- a/meilisearch-types/src/error.rs +++ b/meilisearch-types/src/error.rs @@ -534,7 +534,7 @@ impl fmt::Display for deserr_codes::InvalidSimilarId { f, "the value of `id` is invalid. 
\ A document identifier can be of type integer or string, \ - only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_)." + only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_), and can not be more than 512 bytes." ) } } diff --git a/meilisearch-types/src/index_uid.rs b/meilisearch-types/src/index_uid.rs index 341ab02cb..583aeef92 100644 --- a/meilisearch-types/src/index_uid.rs +++ b/meilisearch-types/src/index_uid.rs @@ -81,7 +81,7 @@ impl fmt::Display for IndexUidFormatError { f, "`{}` is not a valid index uid. Index uid can be an \ integer or a string containing only alphanumeric \ - characters, hyphens (-) and underscores (_).", + characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", self.invalid_uid, ) } diff --git a/meilisearch/src/routes/tasks.rs b/meilisearch/src/routes/tasks.rs index 3dc6520af..02f009ff7 100644 --- a/meilisearch/src/routes/tasks.rs +++ b/meilisearch/src/routes/tasks.rs @@ -616,7 +616,7 @@ mod tests { let err = deserr_query_params::(params).unwrap_err(); snapshot!(meili_snap::json_string!(err), @r###" { - "message": "Invalid value in parameter `indexUids[1]`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "Invalid value in parameter `indexUids[1]`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" @@ -628,7 +628,7 @@ mod tests { let err = deserr_query_params::(params).unwrap_err(); snapshot!(meili_snap::json_string!(err), @r###" { - "message": "Invalid value in parameter `indexUids`: `hé` is not a valid index uid. 
Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "Invalid value in parameter `indexUids`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" diff --git a/meilisearch/tests/documents/add_documents.rs b/meilisearch/tests/documents/add_documents.rs index 819b2ddc2..c37b3a5e3 100644 --- a/meilisearch/tests/documents/add_documents.rs +++ b/meilisearch/tests/documents/add_documents.rs @@ -1023,7 +1023,7 @@ async fn error_document_add_create_index_bad_uid() { snapshot!(json_string!(response), @r###" { - "message": "`883 fj!` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "`883 fj!` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" @@ -1280,7 +1280,7 @@ async fn error_add_documents_bad_document_id() { "indexedDocuments": 0 }, "error": { - "message": "Document identifier `\"foo & bar\"` is invalid. A document identifier can be of type integer or string, only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_).", + "message": "Document identifier `\"foo & bar\"` is invalid. 
A document identifier can be of type integer or string, only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_document_id", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_document_id" diff --git a/meilisearch/tests/documents/update_documents.rs b/meilisearch/tests/documents/update_documents.rs index a5d466513..195dca914 100644 --- a/meilisearch/tests/documents/update_documents.rs +++ b/meilisearch/tests/documents/update_documents.rs @@ -11,7 +11,7 @@ async fn error_document_update_create_index_bad_uid() { let (response, code) = index.update_documents(json!([{"id": 1}]), None).await; let expected_response = json!({ - "message": "`883 fj!` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "`883 fj!` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" @@ -173,7 +173,7 @@ async fn error_update_documents_bad_document_id() { assert_eq!( response["error"]["message"], json!( - r#"Document identifier `"foo & bar"` is invalid. A document identifier can be of type integer or string, only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_)."# + r#"Document identifier `"foo & bar"` is invalid. 
A document identifier can be of type integer or string, only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_), and can not be more than 512 bytes."# ) ); assert_eq!(response["error"]["code"], json!("invalid_document_id")); diff --git a/meilisearch/tests/index/create_index.rs b/meilisearch/tests/index/create_index.rs index b51ccab51..8e3ff4760 100644 --- a/meilisearch/tests/index/create_index.rs +++ b/meilisearch/tests/index/create_index.rs @@ -192,7 +192,7 @@ async fn error_create_with_invalid_index_uid() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "Invalid value at `.uid`: `test test#!` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "Invalid value at `.uid`: `test test#!` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" diff --git a/meilisearch/tests/index/errors.rs b/meilisearch/tests/index/errors.rs index 9c677ee12..3bab83955 100644 --- a/meilisearch/tests/index/errors.rs +++ b/meilisearch/tests/index/errors.rs @@ -75,7 +75,7 @@ async fn create_index_bad_uid() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "Invalid value at `.uid`: `the best doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "Invalid value at `.uid`: `the best doggo` is not a valid index uid. 
Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" @@ -136,7 +136,7 @@ async fn get_index_bad_uid() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "`the good doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "`the good doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" @@ -232,7 +232,7 @@ async fn update_index_bad_uid() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "`the good doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "`the good doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" @@ -247,7 +247,7 @@ async fn delete_index_bad_uid() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "`the good doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "`the good doggo` is not a valid index uid. 
Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" diff --git a/meilisearch/tests/index/get_index.rs b/meilisearch/tests/index/get_index.rs index 5a184c8ce..ce08251be 100644 --- a/meilisearch/tests/index/get_index.rs +++ b/meilisearch/tests/index/get_index.rs @@ -186,7 +186,7 @@ async fn get_invalid_index_uid() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "`this is not a valid index name` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "`this is not a valid index name` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" diff --git a/meilisearch/tests/search/multi.rs b/meilisearch/tests/search/multi.rs index 08ad0b18c..4bebb71fe 100644 --- a/meilisearch/tests/search/multi.rs +++ b/meilisearch/tests/search/multi.rs @@ -412,7 +412,7 @@ async fn simple_search_illegal_index_uid() { snapshot!(code, @"400 Bad Request"); insta::assert_json_snapshot!(response, @r###" { - "message": "Invalid value at `.queries[0].indexUid`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "Invalid value at `.queries[0].indexUid`: `hé` is not a valid index uid. 
Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" @@ -437,7 +437,7 @@ async fn federation_search_illegal_index_uid() { snapshot!(code, @"400 Bad Request"); insta::assert_json_snapshot!(response, @r###" { - "message": "Invalid value at `.queries[0].indexUid`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "Invalid value at `.queries[0].indexUid`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" diff --git a/meilisearch/tests/settings/get_settings.rs b/meilisearch/tests/settings/get_settings.rs index e99a9fa65..6de0db0b3 100644 --- a/meilisearch/tests/settings/get_settings.rs +++ b/meilisearch/tests/settings/get_settings.rs @@ -330,7 +330,7 @@ async fn error_update_setting_unexisting_index_invalid_uid() { meili_snap::snapshot!(code, @"400 Bad Request"); meili_snap::snapshot!(meili_snap::json_string!(response), @r###" { - "message": "`test##! ` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "`test##! ` is not a valid index uid. 
Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" diff --git a/meilisearch/tests/similar/errors.rs b/meilisearch/tests/similar/errors.rs index d0be6562f..d42045d95 100644 --- a/meilisearch/tests/similar/errors.rs +++ b/meilisearch/tests/similar/errors.rs @@ -79,7 +79,7 @@ async fn similar_bad_id() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "Invalid value at `.id`: the value of `id` is invalid. A document identifier can be of type integer or string, only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_).", + "message": "Invalid value at `.id`: the value of `id` is invalid. A document identifier can be of type integer or string, only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_similar_id", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_similar_id" @@ -172,7 +172,7 @@ async fn similar_invalid_id() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "Invalid value at `.id`: the value of `id` is invalid. A document identifier can be of type integer or string, only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_).", + "message": "Invalid value at `.id`: the value of `id` is invalid. 
A document identifier can be of type integer or string, only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_similar_id", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_similar_id" diff --git a/meilisearch/tests/tasks/errors.rs b/meilisearch/tests/tasks/errors.rs index c404a2329..42ec42997 100644 --- a/meilisearch/tests/tasks/errors.rs +++ b/meilisearch/tests/tasks/errors.rs @@ -173,7 +173,7 @@ async fn task_bad_index_uids() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "Invalid value in parameter `indexUids`: `the good doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "Invalid value in parameter `indexUids`: `the good doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" @@ -184,7 +184,7 @@ async fn task_bad_index_uids() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "Invalid value in parameter `indexUids`: `the good doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "Invalid value in parameter `indexUids`: `the good doggo` is not a valid index uid. 
Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" @@ -195,7 +195,7 @@ async fn task_bad_index_uids() { snapshot!(code, @"400 Bad Request"); snapshot!(json_string!(response), @r###" { - "message": "Invalid value in parameter `indexUids`: `the good doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).", + "message": "Invalid value in parameter `indexUids`: `the good doggo` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", "code": "invalid_index_uid", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#invalid_index_uid" From 4a922a176f8af0df4971436d0212bf21dafe5f72 Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Mon, 16 Sep 2024 23:53:34 +0300 Subject: [PATCH 011/111] Add test for > 512 byte ID --- meilisearch/tests/index/create_index.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/meilisearch/tests/index/create_index.rs b/meilisearch/tests/index/create_index.rs index 8e3ff4760..e99244722 100644 --- a/meilisearch/tests/index/create_index.rs +++ b/meilisearch/tests/index/create_index.rs @@ -125,11 +125,11 @@ async fn create_index_with_primary_key() { #[actix_rt::test] async fn create_index_with_invalid_primary_key() { - let document = json!([ { "id": 2, "title": "Pride and Prejudice" } ]); + let documents = json!([ { "id": 2, "title": "Pride and Prejudice" } ]); let server = Server::new().await; let index = server.index("movies"); - let (_response, code) = index.add_documents(document, Some("title")).await; + let (_response, code) = index.add_documents(documents, Some("title")).await; assert_eq!(code, 202); index.wait_task(0).await; @@ -137,6 +137,17 @@ async fn create_index_with_invalid_primary_key() { let (response, code) = index.get().await; assert_eq!(code, 200); assert_eq!(response["primaryKey"], json!(null)); + + let documents = json!([ { "id": "e".repeat(513) } ]); + + let (_response, code) = index.add_documents(documents, Some("id")).await; + assert_eq!(code, 202); + + index.wait_task(1).await; + + let (response, code) = index.get().await; + assert_eq!(code, 200); + assert_eq!(response["primaryKey"], json!(null)); } #[actix_rt::test] From ec815fa3682b9d0f09426eac27e0a5a6270831a1 Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Mon, 16 Sep 2024 23:59:48 +0300 Subject: [PATCH 012/111] Format --- meilisearch-types/src/error.rs | 3 ++- meilisearch-types/src/index_uid.rs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/meilisearch-types/src/error.rs b/meilisearch-types/src/error.rs index c082f82f3..ec00718c3 100644 --- a/meilisearch-types/src/error.rs +++ b/meilisearch-types/src/error.rs @@ -534,7 +534,8 @@ impl fmt::Display for deserr_codes::InvalidSimilarId { f, "the value of `id` is invalid. \ A document identifier can be of type integer or string, \ - only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_), and can not be more than 512 bytes." + only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_), \ + and can not be more than 512 bytes." ) } } diff --git a/meilisearch-types/src/index_uid.rs b/meilisearch-types/src/index_uid.rs index 583aeef92..01f629828 100644 --- a/meilisearch-types/src/index_uid.rs +++ b/meilisearch-types/src/index_uid.rs @@ -81,7 +81,8 @@ impl fmt::Display for IndexUidFormatError { f, "`{}` is not a valid index uid. Index uid can be an \ integer or a string containing only alphanumeric \ - characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.", + characters, hyphens (-) and underscores (_), \ + and can not be more than 512 bytes.", self.invalid_uid, ) } From e098cc832070b313faff1b1abb2449e9a7b256ab Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Tue, 17 Sep 2024 00:16:15 +0300 Subject: [PATCH 013/111] Make comparison simpler, add IndexUid error details similarly --- dump/src/reader/v4/meta.rs | 3 ++- dump/src/reader/v5/meta.rs | 3 ++- milli/src/documents/primary_key.rs | 10 +++++----- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/dump/src/reader/v4/meta.rs b/dump/src/reader/v4/meta.rs index cec05f57c..2daea68a4 100644 --- a/dump/src/reader/v4/meta.rs +++ b/dump/src/reader/v4/meta.rs @@ -74,7 +74,8 @@ impl Display for IndexUidFormatError { f, "invalid index uid `{}`, the uid must be an integer \ or a string containing only alphanumeric characters \ - a-z A-Z 0-9, hyphens - and underscores _.", + a-z A-Z 0-9, hyphens - and underscores _, \ + and can not be more than 400 bytes.", self.invalid_uid, ) } diff --git a/dump/src/reader/v5/meta.rs b/dump/src/reader/v5/meta.rs index cec05f57c..2daea68a4 100644 --- a/dump/src/reader/v5/meta.rs +++ b/dump/src/reader/v5/meta.rs @@ -74,7 +74,8 @@ impl Display for IndexUidFormatError { f, "invalid index uid `{}`, the uid must be an integer \ or a string containing only alphanumeric characters \ - a-z A-Z 0-9, hyphens - and underscores _.", + a-z A-Z 0-9, hyphens - and underscores _, \ + and can not be more than 400 bytes.", self.invalid_uid, ) } diff --git a/milli/src/documents/primary_key.rs b/milli/src/documents/primary_key.rs index 123232c44..9ac5ace91 100644 --- a/milli/src/documents/primary_key.rs +++ b/milli/src/documents/primary_key.rs @@ -150,13 +150,13 @@ fn starts_with(selector: &str, key: &str) -> bool { // FIXME: move to a DocumentId struct fn validate_document_id(document_id: &str) -> Option<&str> { - if !document_id.is_empty() - && document_id.len() <= 512 - && document_id.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '-' | '_')) + if document_id.is_empty() + || document_id.len() > 512 + || !document_id.chars().all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_') 
{ - Some(document_id) - } else { None + } else { + Some(document_id) } } From f7337affd6342ae495d99312862b300e7af461e0 Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Tue, 17 Sep 2024 17:31:09 +0300 Subject: [PATCH 014/111] Adjust tests to changes --- meilisearch/tests/search/locales.rs | 44 ++++++++++++++--------------- milli/src/search/new/matches/mod.rs | 2 +- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/meilisearch/tests/search/locales.rs b/meilisearch/tests/search/locales.rs index dbc4fcc30..b9e70c5b1 100644 --- a/meilisearch/tests/search/locales.rs +++ b/meilisearch/tests/search/locales.rs @@ -400,9 +400,9 @@ async fn force_locales() { ] }, "_formatted": { - "name_zh": "巨人", + "name_zh": "进击的巨人", "author_zh": "諫山創", - "description_zh": "巨人是日本的漫画系列,由諫山 創作画。", + "description_zh": "进击的巨人是日本的漫画系列,由諫山 創作画。", "id": "853", "_vectors": { "manual": [ @@ -447,9 +447,9 @@ async fn force_locales() { ] }, "_formatted": { - "name_zh": "巨人", + "name_zh": "进击的巨人", "author_zh": "諫山創", - "description_zh": "巨人是日本的漫画系列,由諫山 創作画。", + "description_zh": "进击的巨人是日本的漫画系列,由諫山 創作画。", "id": "853", "_vectors": { "manual": [ @@ -524,9 +524,9 @@ async fn force_locales_with_pattern() { ] }, "_formatted": { - "name_zh": "巨人", + "name_zh": "进击的巨人", "author_zh": "諫山創", - "description_zh": "巨人是日本的漫画系列,由諫山 創作画。", + "description_zh": "进击的巨人是日本的漫画系列,由諫山 創作画。", "id": "853", "_vectors": { "manual": [ @@ -571,9 +571,9 @@ async fn force_locales_with_pattern() { ] }, "_formatted": { - "name_zh": "巨人", + "name_zh": "进击的巨人", "author_zh": "諫山創", - "description_zh": "巨人是日本的漫画系列,由諫山 創作画。", + "description_zh": "进击的巨人是日本的漫画系列,由諫山 創作画。", "id": "853", "_vectors": { "manual": [ @@ -689,8 +689,8 @@ async fn force_locales_with_pattern_nested() { "author": "諫山 創" }, "document_zh": { - "name": "巨人", - "description": "巨人是日本的漫画系列,由諫山 創作画。", + "name": "进击的巨人", + "description": "进击的巨人是日本的漫画系列,由諫山 創作画。", "author": "諫山創" }, "id": "852", @@ -788,9 +788,9 @@ async 
fn force_different_locales_with_pattern() { ] }, "_formatted": { - "name_zh": "巨人", + "name_zh": "进击的巨人", "author_zh": "諫山創", - "description_zh": "巨人是日本的漫画系列,由諫山 創作画。", + "description_zh": "进击的巨人是日本的漫画系列,由諫山 創作画。", "id": "853", "_vectors": { "manual": [ @@ -889,9 +889,9 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() { ] }, "_formatted": { - "name_zh": "巨人", + "name_zh": "进击的巨人", "author_zh": "諫山創", - "description_zh": "巨人是日本的漫画系列,由諫山 創作画。", + "description_zh": "进击的巨人是日本的漫画系列,由諫山 創作画。", "id": "853", "_vectors": { "manual": [ @@ -965,9 +965,9 @@ async fn auto_infer_locales_at_search() { ] }, "_formatted": { - "name_zh": "巨人", + "name_zh": "进击的巨人", "author_zh": "諫山創", - "description_zh": "巨人是日本的漫画系列,由諫山 創作画。", + "description_zh": "进击的巨人是日本的漫画系列,由諫山 創作画。", "id": "853", "_vectors": { "manual": [ @@ -1011,9 +1011,9 @@ async fn auto_infer_locales_at_search() { ] }, "_formatted": { - "name_zh": "巨人", + "name_zh": "进击的巨人", "author_zh": "諫山創", - "description_zh": "巨人是日本的漫画系列,由諫山 創作画。", + "description_zh": "进击的巨人是日本的漫画系列,由諫山 創作画。", "id": "853", "_vectors": { "manual": [ @@ -1057,9 +1057,9 @@ async fn auto_infer_locales_at_search() { ] }, "_formatted": { - "name_zh": "巨人", + "name_zh": "进击的巨人", "author_zh": "諫山創", - "description_zh": "巨人是日本的漫画系列,由諫山 創作画。", + "description_zh": "进击的巨人是日本的漫画系列,由諫山 創作画。", "id": "853", "_vectors": { "manual": [ @@ -1177,8 +1177,8 @@ async fn force_different_locales_with_pattern_nested() { "author": "諫山 創" }, "document_zh": { - "name": "巨人", - "description": "巨人是日本的漫画系列,由諫山 創作画。", + "name": "进击的巨人", + "description": "进击的巨人是日本的漫画系列,由諫山 創作画。", "author": "諫山創" }, "id": "852", diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index df110aff9..09d3db575 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -919,7 +919,7 @@ mod tests { // should return 10 words with a marker at the start as well the end, and the highlighted matches. 
insta::assert_snapshot!( matcher.format(format_options), - @"…had the power to split the world between those who…" + @"…the power to split the world between those who embraced…" ); let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "those \"and those\""); From 83113998f99bb6d59bb9e94e9ef3e527f4c93f62 Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Wed, 18 Sep 2024 10:35:23 +0300 Subject: [PATCH 015/111] Add more test assertions --- milli/src/search/new/matches/mod.rs | 36 +++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 09d3db575..8a84f91bd 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -929,6 +929,42 @@ mod tests { matcher.format(format_options), @"…world between those who embraced progress and those who resisted…" ); + + let builder = MatcherBuilder::new_test( + &rtxn, + &temp_index, + "\"The groundbreaking invention had the power to split the world\"", + ); + let mut matcher = builder.build(text, None); + // should highlight "those" and the phrase "and those". + insta::assert_snapshot!( + matcher.format(format_options), + @"The groundbreaking invention had the power to split the world…" + ); + + let builder = MatcherBuilder::new_test( + &rtxn, + &temp_index, + "\"The groundbreaking invention had the power to split the world between\"", + ); + let mut matcher = builder.build(text, None); + // should highlight "those" and the phrase "and those". + insta::assert_snapshot!( + matcher.format(format_options), + @"The groundbreaking invention had the power to split the world …" + ); + + let builder = MatcherBuilder::new_test( + &rtxn, + &temp_index, + "\"The groundbreaking invention\" \"embraced progress and those who resisted change\"", + ); + let mut matcher = builder.build(text, None); + // should highlight "those" and the phrase "and those". 
+ insta::assert_snapshot!( + matcher.format(format_options), + @"…between those who embraced progress and those who resisted change…" + ); } #[test] From 0ffeea5a5209f1e206720e3cf63d7fe627b8cee0 Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Thu, 19 Sep 2024 09:06:40 +0300 Subject: [PATCH 016/111] Remove wrong comments --- milli/src/search/new/matches/mod.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 8a84f91bd..26115c39b 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -936,7 +936,6 @@ mod tests { "\"The groundbreaking invention had the power to split the world\"", ); let mut matcher = builder.build(text, None); - // should highlight "those" and the phrase "and those". insta::assert_snapshot!( matcher.format(format_options), @"The groundbreaking invention had the power to split the world…" @@ -948,7 +947,6 @@ mod tests { "\"The groundbreaking invention had the power to split the world between\"", ); let mut matcher = builder.build(text, None); - // should highlight "those" and the phrase "and those". insta::assert_snapshot!( matcher.format(format_options), @"The groundbreaking invention had the power to split the world …" @@ -960,7 +958,6 @@ mod tests { "\"The groundbreaking invention\" \"embraced progress and those who resisted change\"", ); let mut matcher = builder.build(text, None); - // should highlight "those" and the phrase "and those". 
insta::assert_snapshot!( matcher.format(format_options), @"…between those who embraced progress and those who resisted change…" From afa3ae0cbd9c7223d4068dd438d043a43d0d4fae Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 19 Sep 2024 17:42:52 +0200 Subject: [PATCH 017/111] WIP --- milli/src/update/index_documents/mod.rs | 17 ++----- .../src/update/index_documents/typed_chunk.rs | 16 ++---- milli/src/vector/mod.rs | 51 +++++++++++-------- 3 files changed, 38 insertions(+), 46 deletions(-) diff --git a/milli/src/update/index_documents/mod.rs b/milli/src/update/index_documents/mod.rs index 326dd842d..b03ab259a 100644 --- a/milli/src/update/index_documents/mod.rs +++ b/milli/src/update/index_documents/mod.rs @@ -689,9 +689,8 @@ where key: None, }, )?; - let first_id = crate::vector::arroy_db_range_for_embedder(index).next().unwrap(); let reader = - ArroyWrapper::new(self.index.vector_arroy, first_id, action.was_quantized); + ArroyWrapper::new(self.index.vector_arroy, index, action.was_quantized); let dim = reader.dimensions(self.wtxn)?; dimension.insert(name.to_string(), dim); } @@ -713,17 +712,11 @@ where let is_quantizing = embedder_config.map_or(false, |action| action.is_being_quantized); pool.install(|| { - for k in crate::vector::arroy_db_range_for_embedder(embedder_index) { - let mut writer = ArroyWrapper::new(vector_arroy, k, was_quantized); - if is_quantizing { - writer.quantize(wtxn, k, dimension)?; - } - if writer.need_build(wtxn, dimension)? { - writer.build(wtxn, &mut rng, dimension)?; - } else if writer.is_empty(wtxn, dimension)? 
{ - break; - } + let mut writer = ArroyWrapper::new(vector_arroy, embedder_index, was_quantized); + if is_quantizing { + writer.quantize(wtxn, dimension)?; } + writer.build(wtxn, &mut rng, dimension)?; Result::Ok(()) }) .map_err(InternalError::from)??; diff --git a/milli/src/update/index_documents/typed_chunk.rs b/milli/src/update/index_documents/typed_chunk.rs index 97a4bf712..e340137e2 100644 --- a/milli/src/update/index_documents/typed_chunk.rs +++ b/milli/src/update/index_documents/typed_chunk.rs @@ -673,22 +673,14 @@ pub(crate) fn write_typed_chunk_into_index( .get(&embedder_name) .map_or(false, |conf| conf.2); // FIXME: allow customizing distance - let writers: Vec<_> = crate::vector::arroy_db_range_for_embedder(embedder_index) - .map(|k| ArroyWrapper::new(index.vector_arroy, k, binary_quantized)) - .collect(); + let writer = ArroyWrapper::new(index.vector_arroy, embedder_index, binary_quantized); // remove vectors for docids we want them removed let merger = remove_vectors_builder.build(); let mut iter = merger.into_stream_merger_iter()?; while let Some((key, _)) = iter.next()? { let docid = key.try_into().map(DocumentId::from_be_bytes).unwrap(); - - for writer in &writers { - // Uses invariant: vectors are packed in the first writers. - if !writer.del_item(wtxn, expected_dimension, docid)? 
{ - break; - } - } + writer.del_item(wtxn, expected_dimension, docid)?; } // add generated embeddings @@ -716,9 +708,7 @@ pub(crate) fn write_typed_chunk_into_index( embeddings.embedding_count(), ))); } - for (embedding, writer) in embeddings.iter().zip(&writers) { - writer.add_item(wtxn, expected_dimension, docid, embedding)?; - } + writer.add_items(wtxn, expected_dimension, docid, embeddings)?; } // perform the manual diff diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index d52e68bbe..644826dcd 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -32,60 +32,69 @@ pub const REQUEST_PARALLELISM: usize = 40; pub struct ArroyWrapper { quantized: bool, - index: u16, + index: u8, database: arroy::Database, } impl ArroyWrapper { - pub fn new(database: arroy::Database, index: u16, quantized: bool) -> Self { + pub fn new(database: arroy::Database, index: u8, quantized: bool) -> Self { Self { database, index, quantized } } - pub fn index(&self) -> u16 { + pub fn index(&self) -> u8 { self.index } pub fn dimensions(&self, rtxn: &RoTxn) -> Result { + let first_id = arroy_db_range_for_embedder(self.index).next().unwrap(); if self.quantized { - Ok(arroy::Reader::open(rtxn, self.index, self.quantized_db())?.dimensions()) + Ok(arroy::Reader::open(rtxn, first_id, self.quantized_db())?.dimensions()) } else { - Ok(arroy::Reader::open(rtxn, self.index, self.angular_db())?.dimensions()) + Ok(arroy::Reader::open(rtxn, first_id, self.angular_db())?.dimensions()) } } - pub fn quantize( - &mut self, - wtxn: &mut RwTxn, - index: u16, - dimension: usize, - ) -> Result<(), arroy::Error> { + pub fn quantize(&mut self, wtxn: &mut RwTxn, dimension: usize) -> Result<(), arroy::Error> { if !self.quantized { - let writer = arroy::Writer::new(self.angular_db(), index, dimension); - writer.prepare_changing_distance::(wtxn)?; + for index in arroy_db_range_for_embedder(self.index) { + let writer = arroy::Writer::new(self.angular_db(), index, dimension); + 
writer.prepare_changing_distance::(wtxn)?; + } self.quantized = true; } Ok(()) } + // TODO: We can stop early when we find an empty DB pub fn need_build(&self, rtxn: &RoTxn, dimension: usize) -> Result { - if self.quantized { - arroy::Writer::new(self.quantized_db(), self.index, dimension).need_build(rtxn) - } else { - arroy::Writer::new(self.angular_db(), self.index, dimension).need_build(rtxn) + for index in arroy_db_range_for_embedder(self.index) { + let need_build = if self.quantized { + arroy::Writer::new(self.quantized_db(), index, dimension).need_build(rtxn) + } else { + arroy::Writer::new(self.angular_db(), index, dimension).need_build(rtxn) + }; + if need_build? { + return Ok(true); + } } + Ok(false) } + /// TODO: We should early exit when it doesn't need to be built pub fn build( &self, wtxn: &mut RwTxn, rng: &mut R, dimension: usize, ) -> Result<(), arroy::Error> { - if self.quantized { - arroy::Writer::new(self.quantized_db(), self.index, dimension).build(wtxn, rng, None) - } else { - arroy::Writer::new(self.angular_db(), self.index, dimension).build(wtxn, rng, None) + for index in arroy_db_range_for_embedder(self.index) { + if self.quantized { + arroy::Writer::new(self.quantized_db(), index, dimension).build(wtxn, rng, None)? + } else { + arroy::Writer::new(self.angular_db(), index, dimension).build(wtxn, rng, None)? 
+ } } + Ok(()) } pub fn add_item( From 6ba4baecbf47e39339c22c67b60a5d0953f53fc5 Mon Sep 17 00:00:00 2001 From: Tamo Date: Mon, 23 Sep 2024 15:15:26 +0200 Subject: [PATCH 018/111] first ugly step --- milli/src/search/similar.rs | 26 +- .../src/update/index_documents/typed_chunk.rs | 45 +--- milli/src/vector/mod.rs | 232 ++++++++++++++---- 3 files changed, 203 insertions(+), 100 deletions(-) diff --git a/milli/src/search/similar.rs b/milli/src/search/similar.rs index 0cb8d723d..e408c94b1 100644 --- a/milli/src/search/similar.rs +++ b/milli/src/search/similar.rs @@ -4,7 +4,7 @@ use ordered_float::OrderedFloat; use roaring::RoaringBitmap; use crate::score_details::{self, ScoreDetails}; -use crate::vector::Embedder; +use crate::vector::{ArroyWrapper, Embedder}; use crate::{filtered_universe, DocumentId, Filter, Index, Result, SearchResult}; pub struct Similar<'a> { @@ -71,23 +71,13 @@ impl<'a> Similar<'a> { .get(self.rtxn, &self.embedder_name)? .ok_or_else(|| crate::UserError::InvalidEmbedder(self.embedder_name.to_owned()))?; - let mut results = Vec::new(); - - for reader in self.index.arroy_readers(self.rtxn, embedder_index, self.quantized) { - let nns_by_item = reader?.nns_by_item( - self.rtxn, - self.id, - self.limit + self.offset + 1, - Some(&universe), - )?; - if let Some(mut nns_by_item) = nns_by_item { - results.append(&mut nns_by_item); - } else { - break; - } - } - - results.sort_unstable_by_key(|(_, distance)| OrderedFloat(*distance)); + let reader = ArroyWrapper::new(self.index.vector_arroy, embedder_index, self.quantized); + let results = reader.nns_by_item( + self.rtxn, + self.id, + self.limit + self.offset + 1, + Some(&universe), + )?; let mut documents_ids = Vec::with_capacity(self.limit); let mut document_scores = Vec::with_capacity(self.limit); diff --git a/milli/src/update/index_documents/typed_chunk.rs b/milli/src/update/index_documents/typed_chunk.rs index e340137e2..e118420d8 100644 --- a/milli/src/update/index_documents/typed_chunk.rs +++ 
b/milli/src/update/index_documents/typed_chunk.rs @@ -680,7 +680,7 @@ pub(crate) fn write_typed_chunk_into_index( let mut iter = merger.into_stream_merger_iter()?; while let Some((key, _)) = iter.next()? { let docid = key.try_into().map(DocumentId::from_be_bytes).unwrap(); - writer.del_item(wtxn, expected_dimension, docid)?; + writer.del_item_raw(wtxn, expected_dimension, docid)?; } // add generated embeddings @@ -708,7 +708,7 @@ pub(crate) fn write_typed_chunk_into_index( embeddings.embedding_count(), ))); } - writer.add_items(wtxn, expected_dimension, docid, embeddings)?; + writer.add_items(wtxn, docid, &embeddings)?; } // perform the manual diff @@ -723,51 +723,14 @@ pub(crate) fn write_typed_chunk_into_index( if let Some(value) = vector_deladd_obkv.get(DelAdd::Deletion) { let vector: Vec = pod_collect_to_vec(value); - let mut deleted_index = None; - for (index, writer) in writers.iter().enumerate() { - let Some(candidate) = writer.item_vector(wtxn, docid)? else { - // uses invariant: vectors are packed in the first writers. - break; - }; - if candidate == vector { - writer.del_item(wtxn, expected_dimension, docid)?; - deleted_index = Some(index); - } - } - - // 🥲 enforce invariant: vectors are packed in the first writers. - if let Some(deleted_index) = deleted_index { - let mut last_index_with_a_vector = None; - for (index, writer) in writers.iter().enumerate().skip(deleted_index) { - let Some(candidate) = writer.item_vector(wtxn, docid)? 
else { - break; - }; - last_index_with_a_vector = Some((index, candidate)); - } - if let Some((last_index, vector)) = last_index_with_a_vector { - // unwrap: computed the index from the list of writers - let writer = writers.get(last_index).unwrap(); - writer.del_item(wtxn, expected_dimension, docid)?; - writers.get(deleted_index).unwrap().add_item( - wtxn, - expected_dimension, - docid, - &vector, - )?; - } - } + writer.del_item(wtxn, docid, &vector)?; } if let Some(value) = vector_deladd_obkv.get(DelAdd::Addition) { let vector = pod_collect_to_vec(value); // overflow was detected during vector extraction. - for writer in &writers { - if !writer.contains_item(wtxn, expected_dimension, docid)? { - writer.add_item(wtxn, expected_dimension, docid, &vector)?; - break; - } - } + writer.add_item(wtxn, docid, &vector)?; } } diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index 644826dcd..54765cfef 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -97,49 +97,165 @@ impl ArroyWrapper { Ok(()) } + pub fn add_items( + &self, + wtxn: &mut RwTxn, + item_id: arroy::ItemId, + embeddings: &Embeddings, + ) -> Result<(), arroy::Error> { + let dimension = embeddings.dimension(); + for (index, vector) in arroy_db_range_for_embedder(self.index).zip(embeddings.iter()) { + if self.quantized { + arroy::Writer::new(self.quantized_db(), index, dimension) + .add_item(wtxn, item_id, vector)? + } else { + arroy::Writer::new(self.angular_db(), index, dimension) + .add_item(wtxn, item_id, vector)? 
+ } + } + Ok(()) + } + pub fn add_item( &self, wtxn: &mut RwTxn, - dimension: usize, item_id: arroy::ItemId, vector: &[f32], ) -> Result<(), arroy::Error> { - if self.quantized { - arroy::Writer::new(self.quantized_db(), self.index, dimension) - .add_item(wtxn, item_id, vector) - } else { - arroy::Writer::new(self.angular_db(), self.index, dimension) - .add_item(wtxn, item_id, vector) + let dimension = vector.len(); + + for index in arroy_db_range_for_embedder(self.index) { + if self.quantized { + let writer = arroy::Writer::new(self.quantized_db(), index, dimension); + if !writer.contains_item(wtxn, item_id)? { + writer.add_item(wtxn, item_id, &vector)?; + break; + } + } else { + arroy::Writer::new(self.angular_db(), index, dimension) + .add_item(wtxn, item_id, vector)? + } } + + Ok(()) } - pub fn del_item( + pub fn del_item_raw( &self, wtxn: &mut RwTxn, dimension: usize, item_id: arroy::ItemId, ) -> Result { - if self.quantized { - arroy::Writer::new(self.quantized_db(), self.index, dimension).del_item(wtxn, item_id) - } else { - arroy::Writer::new(self.angular_db(), self.index, dimension).del_item(wtxn, item_id) + for index in arroy_db_range_for_embedder(self.index) { + if self.quantized { + let writer = arroy::Writer::new(self.quantized_db(), index, dimension); + if writer.del_item(wtxn, item_id)? { + return Ok(true); + } + } else { + let writer = arroy::Writer::new(self.angular_db(), index, dimension); + if writer.del_item(wtxn, item_id)? { + return Ok(true); + } + } } + + Ok(false) + } + + pub fn del_item( + &self, + wtxn: &mut RwTxn, + itemid: arroy::ItemId, + vector: &[f32], + ) -> Result { + let dimension = vector.len(); + let mut deleted_index = None; + + for index in arroy_db_range_for_embedder(self.index) { + if self.quantized { + let writer = arroy::Writer::new(self.quantized_db(), index, dimension); + let Some(candidate) = writer.item_vector(wtxn, itemid)? else { + // uses invariant: vectors are packed in the first writers. 
+ break; + }; + if candidate == vector { + writer.del_item(wtxn, itemid)?; + deleted_index = Some(index); + } + } else { + let writer = arroy::Writer::new(self.angular_db(), index, dimension); + let Some(candidate) = writer.item_vector(wtxn, itemid)? else { + // uses invariant: vectors are packed in the first writers. + break; + }; + if candidate == vector { + writer.del_item(wtxn, itemid)?; + deleted_index = Some(index); + } + } + } + + // 🥲 enforce invariant: vectors are packed in the first writers. + if let Some(deleted_index) = deleted_index { + let mut last_index_with_a_vector = None; + for index in arroy_db_range_for_embedder(self.index).skip(deleted_index as usize) { + if self.quantized { + let writer = arroy::Writer::new(self.quantized_db(), index, dimension); + let Some(candidate) = writer.item_vector(wtxn, itemid)? else { + break; + }; + last_index_with_a_vector = Some((index, candidate)); + } else { + let writer = arroy::Writer::new(self.angular_db(), index, dimension); + let Some(candidate) = writer.item_vector(wtxn, itemid)? 
else { + break; + }; + last_index_with_a_vector = Some((index, candidate)); + } + } + if let Some((last_index, vector)) = last_index_with_a_vector { + if self.quantized { + // unwrap: computed the index from the list of writers + let writer = arroy::Writer::new(self.quantized_db(), last_index, dimension); + writer.del_item(wtxn, itemid)?; + let writer = arroy::Writer::new(self.quantized_db(), deleted_index, dimension); + writer.add_item(wtxn, itemid, &vector)?; + } else { + // unwrap: computed the index from the list of writers + let writer = arroy::Writer::new(self.angular_db(), last_index, dimension); + writer.del_item(wtxn, itemid)?; + let writer = arroy::Writer::new(self.angular_db(), deleted_index, dimension); + writer.add_item(wtxn, itemid, &vector)?; + } + } + } + Ok(deleted_index.is_some()) } pub fn clear(&self, wtxn: &mut RwTxn, dimension: usize) -> Result<(), arroy::Error> { - if self.quantized { - arroy::Writer::new(self.quantized_db(), self.index, dimension).clear(wtxn) - } else { - arroy::Writer::new(self.angular_db(), self.index, dimension).clear(wtxn) + for index in arroy_db_range_for_embedder(self.index) { + if self.quantized { + arroy::Writer::new(self.quantized_db(), index, dimension).clear(wtxn)?; + } else { + arroy::Writer::new(self.angular_db(), index, dimension).clear(wtxn)?; + } } + Ok(()) } pub fn is_empty(&self, rtxn: &RoTxn, dimension: usize) -> Result { - if self.quantized { - arroy::Writer::new(self.quantized_db(), self.index, dimension).is_empty(rtxn) - } else { - arroy::Writer::new(self.angular_db(), self.index, dimension).is_empty(rtxn) + for index in arroy_db_range_for_embedder(self.index) { + let empty = if self.quantized { + arroy::Writer::new(self.quantized_db(), index, dimension).is_empty(rtxn)? + } else { + arroy::Writer::new(self.angular_db(), index, dimension).is_empty(rtxn)? 
+ }; + if !empty { + return Ok(false); + } } + Ok(true) } pub fn contains_item( @@ -148,11 +264,18 @@ impl ArroyWrapper { dimension: usize, item: arroy::ItemId, ) -> Result { - if self.quantized { - arroy::Writer::new(self.quantized_db(), self.index, dimension).contains_item(rtxn, item) - } else { - arroy::Writer::new(self.angular_db(), self.index, dimension).contains_item(rtxn, item) + for index in arroy_db_range_for_embedder(self.index) { + let contains = if self.quantized { + arroy::Writer::new(self.quantized_db(), index, dimension) + .contains_item(rtxn, item)? + } else { + arroy::Writer::new(self.angular_db(), index, dimension).contains_item(rtxn, item)? + }; + if contains { + return Ok(contains); + } } + Ok(false) } pub fn nns_by_item( @@ -161,14 +284,26 @@ impl ArroyWrapper { item: ItemId, limit: usize, filter: Option<&RoaringBitmap>, - ) -> Result>, arroy::Error> { - if self.quantized { - arroy::Reader::open(rtxn, self.index, self.quantized_db())? - .nns_by_item(rtxn, item, limit, None, None, filter) - } else { - arroy::Reader::open(rtxn, self.index, self.angular_db())? - .nns_by_item(rtxn, item, limit, None, None, filter) + ) -> Result, arroy::Error> { + let mut results = Vec::new(); + + for index in arroy_db_range_for_embedder(self.index) { + let ret = if self.quantized { + arroy::Reader::open(rtxn, index, self.quantized_db())? + .nns_by_item(rtxn, item, limit, None, None, filter)? + } else { + arroy::Reader::open(rtxn, index, self.angular_db())? + .nns_by_item(rtxn, item, limit, None, None, filter)? + }; + if let Some(mut ret) = ret { + results.append(&mut ret); + } else { + break; + } } + results.sort_unstable_by_key(|(_, distance)| OrderedFloat(*distance)); + + Ok(results) } pub fn nns_by_vector( @@ -178,21 +313,36 @@ impl ArroyWrapper { limit: usize, filter: Option<&RoaringBitmap>, ) -> Result, arroy::Error> { - if self.quantized { - arroy::Reader::open(txn, self.index, self.quantized_db())? 
- .nns_by_vector(txn, item, limit, None, None, filter) - } else { - arroy::Reader::open(txn, self.index, self.angular_db())? - .nns_by_vector(txn, item, limit, None, None, filter) + let mut results = Vec::new(); + + for index in arroy_db_range_for_embedder(self.index) { + let mut ret = if self.quantized { + arroy::Reader::open(txn, index, self.quantized_db())? + .nns_by_vector(txn, item, limit, None, None, filter)? + } else { + arroy::Reader::open(txn, index, self.angular_db())? + .nns_by_vector(txn, item, limit, None, None, filter)? + }; + results.append(&mut ret); } + + results.sort_unstable_by_key(|(_, distance)| OrderedFloat(*distance)); + + Ok(results) } pub fn item_vector(&self, rtxn: &RoTxn, docid: u32) -> Result>, arroy::Error> { - if self.quantized { - arroy::Reader::open(rtxn, self.index, self.quantized_db())?.item_vector(rtxn, docid) - } else { - arroy::Reader::open(rtxn, self.index, self.angular_db())?.item_vector(rtxn, docid) + for index in arroy_db_range_for_embedder(self.index) { + let ret = if self.quantized { + arroy::Reader::open(rtxn, index, self.quantized_db())?.item_vector(rtxn, docid)? + } else { + arroy::Reader::open(rtxn, index, self.angular_db())?.item_vector(rtxn, docid)? 
+ }; + if ret.is_some() { + return Ok(ret); + } } + Ok(None) } fn angular_db(&self) -> arroy::Database { From 1e4d4e69c4cebee8f09d905c5cc8130b08214f04 Mon Sep 17 00:00:00 2001 From: Tamo Date: Mon, 23 Sep 2024 18:56:15 +0200 Subject: [PATCH 019/111] finish the arroywrapper --- milli/src/index.rs | 29 +-- milli/src/search/new/vector_sort.rs | 12 +- milli/src/search/similar.rs | 1 - milli/src/update/index_documents/transform.rs | 63 ++---- milli/src/vector/mod.rs | 211 +++++++++++------- 5 files changed, 155 insertions(+), 161 deletions(-) diff --git a/milli/src/index.rs b/milli/src/index.rs index c47896df7..5b7a9c58c 100644 --- a/milli/src/index.rs +++ b/milli/src/index.rs @@ -1610,24 +1610,6 @@ impl Index { .unwrap_or_default()) } - pub fn arroy_readers<'a>( - &'a self, - rtxn: &'a RoTxn<'a>, - embedder_id: u8, - quantized: bool, - ) -> impl Iterator> + 'a { - crate::vector::arroy_db_range_for_embedder(embedder_id).map_while(move |k| { - let reader = ArroyWrapper::new(self.vector_arroy, k, quantized); - // Here we don't care about the dimensions, but we want to know if we can read - // in the database or if its metadata are missing because there is no document with that many vectors. 
- match reader.dimensions(rtxn) { - Ok(_) => Some(Ok(reader)), - Err(arroy::Error::MissingMetadata(_)) => None, - Err(e) => Some(Err(e.into())), - } - }) - } - pub(crate) fn put_search_cutoff(&self, wtxn: &mut RwTxn<'_>, cutoff: u64) -> heed::Result<()> { self.main.remap_types::().put(wtxn, main_key::SEARCH_CUTOFF, &cutoff) } @@ -1649,14 +1631,9 @@ impl Index { let embedding_configs = self.embedding_configs(rtxn)?; for config in embedding_configs { let embedder_id = self.embedder_category_id.get(rtxn, &config.name)?.unwrap(); - let embeddings = self - .arroy_readers(rtxn, embedder_id, config.config.quantized()) - .map_while(|reader| { - reader - .and_then(|r| r.item_vector(rtxn, docid).map_err(|e| e.into())) - .transpose() - }) - .collect::>>()?; + let reader = + ArroyWrapper::new(self.vector_arroy, embedder_id, config.config.quantized()); + let embeddings = reader.item_vectors(rtxn, docid)?; res.insert(config.name.to_owned(), embeddings); } Ok(res) diff --git a/milli/src/search/new/vector_sort.rs b/milli/src/search/new/vector_sort.rs index de1dacbe7..90377c09c 100644 --- a/milli/src/search/new/vector_sort.rs +++ b/milli/src/search/new/vector_sort.rs @@ -1,11 +1,10 @@ use std::iter::FromIterator; -use ordered_float::OrderedFloat; use roaring::RoaringBitmap; use super::ranking_rules::{RankingRule, RankingRuleOutput, RankingRuleQueryTrait}; use crate::score_details::{self, ScoreDetails}; -use crate::vector::{DistributionShift, Embedder}; +use crate::vector::{ArroyWrapper, DistributionShift, Embedder}; use crate::{DocumentId, Result, SearchContext, SearchLogger}; pub struct VectorSort { @@ -53,14 +52,9 @@ impl VectorSort { vector_candidates: &RoaringBitmap, ) -> Result<()> { let target = &self.target; - let mut results = Vec::new(); - for reader in ctx.index.arroy_readers(ctx.txn, self.embedder_index, self.quantized) { - let nns_by_vector = - reader?.nns_by_vector(ctx.txn, target, self.limit, Some(vector_candidates))?; - results.extend(nns_by_vector.into_iter()); - } 
- results.sort_unstable_by_key(|(_, distance)| OrderedFloat(*distance)); + let reader = ArroyWrapper::new(ctx.index.vector_arroy, self.embedder_index, self.quantized); + let results = reader.nns_by_vector(ctx.txn, target, self.limit, Some(vector_candidates))?; self.cached_sorted_docids = results.into_iter(); Ok(()) diff --git a/milli/src/search/similar.rs b/milli/src/search/similar.rs index e408c94b1..5547d800e 100644 --- a/milli/src/search/similar.rs +++ b/milli/src/search/similar.rs @@ -1,6 +1,5 @@ use std::sync::Arc; -use ordered_float::OrderedFloat; use roaring::RoaringBitmap; use crate::score_details::{self, ScoreDetails}; diff --git a/milli/src/update/index_documents/transform.rs b/milli/src/update/index_documents/transform.rs index bb2cfe56c..763f30d0f 100644 --- a/milli/src/update/index_documents/transform.rs +++ b/milli/src/update/index_documents/transform.rs @@ -990,27 +990,24 @@ impl<'a, 'i> Transform<'a, 'i> { None }; - let readers: Result, &RoaringBitmap)>> = settings_diff + let readers: BTreeMap<&str, (ArroyWrapper, &RoaringBitmap)> = settings_diff .embedding_config_updates .iter() .filter_map(|(name, action)| { if let Some(WriteBackToDocuments { embedder_id, user_provided }) = action.write_back() { - let readers: Result> = self - .index - .arroy_readers(wtxn, *embedder_id, action.was_quantized) - .collect(); - match readers { - Ok(readers) => Some(Ok((name.as_str(), (readers, user_provided)))), - Err(error) => Some(Err(error)), - } + let reader = ArroyWrapper::new( + self.index.vector_arroy, + *embedder_id, + action.was_quantized, + ); + Some((name.as_str(), (reader, user_provided))) } else { None } }) .collect(); - let readers = readers?; let old_vectors_fid = settings_diff .old @@ -1048,34 +1045,24 @@ impl<'a, 'i> Transform<'a, 'i> { arroy::Error, > = readers .iter() - .filter_map(|(name, (readers, user_provided))| { + .filter_map(|(name, (reader, user_provided))| { if !user_provided.contains(docid) { return None; } - let mut vectors = Vec::new(); 
- for reader in readers { - let Some(vector) = reader.item_vector(wtxn, docid).transpose() else { - break; - }; - - match vector { - Ok(vector) => vectors.push(vector), - Err(error) => return Some(Err(error)), - } + match reader.item_vectors(wtxn, docid) { + Ok(vectors) if vectors.is_empty() => None, + Ok(vectors) => Some(Ok(( + name.to_string(), + serde_json::to_value(ExplicitVectors { + embeddings: Some( + VectorOrArrayOfVectors::from_array_of_vectors(vectors), + ), + regenerate: false, + }) + .unwrap(), + ))), + Err(e) => Some(Err(e)), } - if vectors.is_empty() { - return None; - } - Some(Ok(( - name.to_string(), - serde_json::to_value(ExplicitVectors { - embeddings: Some(VectorOrArrayOfVectors::from_array_of_vectors( - vectors, - )), - regenerate: false, - }) - .unwrap(), - ))) }) .collect(); @@ -1104,11 +1091,9 @@ impl<'a, 'i> Transform<'a, 'i> { } // delete all vectors from the embedders that need removal - for (_, (readers, _)) in readers { - for reader in readers { - let dimensions = reader.dimensions(wtxn)?; - reader.clear(wtxn, dimensions)?; - } + for (_, (reader, _)) in readers { + let dimensions = reader.dimensions(wtxn)?; + reader.clear(wtxn, dimensions)?; } let grenad_params = GrenadParameters { diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index 54765cfef..b5b6cd953 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -45,6 +45,20 @@ impl ArroyWrapper { self.index } + fn readers<'a, D: arroy::Distance>( + &'a self, + rtxn: &'a RoTxn<'a>, + db: arroy::Database, + ) -> impl Iterator, arroy::Error>> + 'a { + arroy_db_range_for_embedder(self.index).map_while(move |index| { + match arroy::Reader::open(rtxn, index, db) { + Ok(reader) => Some(Ok(reader)), + Err(arroy::Error::MissingMetadata(_)) => None, + Err(e) => Some(Err(e)), + } + }) + } + pub fn dimensions(&self, rtxn: &RoTxn) -> Result { let first_id = arroy_db_range_for_embedder(self.index).next().unwrap(); if self.quantized { @@ -97,6 +111,7 @@ impl ArroyWrapper { 
Ok(()) } + /// Overwrite all the embeddings associated to the index and item id. pub fn add_items( &self, wtxn: &mut RwTxn, @@ -116,30 +131,41 @@ impl ArroyWrapper { Ok(()) } + /// Add one document int for this index where we can find an empty spot. pub fn add_item( &self, wtxn: &mut RwTxn, item_id: arroy::ItemId, vector: &[f32], + ) -> Result<(), arroy::Error> { + if self.quantized { + self._add_item(wtxn, self.quantized_db(), item_id, vector) + } else { + self._add_item(wtxn, self.angular_db(), item_id, vector) + } + } + + fn _add_item( + &self, + wtxn: &mut RwTxn, + db: arroy::Database, + item_id: arroy::ItemId, + vector: &[f32], ) -> Result<(), arroy::Error> { let dimension = vector.len(); for index in arroy_db_range_for_embedder(self.index) { - if self.quantized { - let writer = arroy::Writer::new(self.quantized_db(), index, dimension); - if !writer.contains_item(wtxn, item_id)? { - writer.add_item(wtxn, item_id, &vector)?; - break; - } - } else { - arroy::Writer::new(self.angular_db(), index, dimension) - .add_item(wtxn, item_id, vector)? + let writer = arroy::Writer::new(db, index, dimension); + if !writer.contains_item(wtxn, item_id)? { + writer.add_item(wtxn, item_id, vector)?; + break; } } - Ok(()) } + /// Delete an item from the index. It **does not** take care of fixing the hole + /// made after deleting the item. pub fn del_item_raw( &self, wtxn: &mut RwTxn, @@ -163,36 +189,39 @@ impl ArroyWrapper { Ok(false) } + /// Delete one item. 
pub fn del_item( &self, wtxn: &mut RwTxn, - itemid: arroy::ItemId, + item_id: arroy::ItemId, + vector: &[f32], + ) -> Result { + if self.quantized { + self._del_item(wtxn, self.quantized_db(), item_id, vector) + } else { + self._del_item(wtxn, self.angular_db(), item_id, vector) + } + } + + fn _del_item( + &self, + wtxn: &mut RwTxn, + db: arroy::Database, + item_id: arroy::ItemId, vector: &[f32], ) -> Result { let dimension = vector.len(); let mut deleted_index = None; for index in arroy_db_range_for_embedder(self.index) { - if self.quantized { - let writer = arroy::Writer::new(self.quantized_db(), index, dimension); - let Some(candidate) = writer.item_vector(wtxn, itemid)? else { - // uses invariant: vectors are packed in the first writers. - break; - }; - if candidate == vector { - writer.del_item(wtxn, itemid)?; - deleted_index = Some(index); - } - } else { - let writer = arroy::Writer::new(self.angular_db(), index, dimension); - let Some(candidate) = writer.item_vector(wtxn, itemid)? else { - // uses invariant: vectors are packed in the first writers. - break; - }; - if candidate == vector { - writer.del_item(wtxn, itemid)?; - deleted_index = Some(index); - } + let writer = arroy::Writer::new(db, index, dimension); + let Some(candidate) = writer.item_vector(wtxn, item_id)? else { + // uses invariant: vectors are packed in the first writers. + break; + }; + if candidate == vector { + writer.del_item(wtxn, item_id)?; + deleted_index = Some(index); } } @@ -200,34 +229,18 @@ impl ArroyWrapper { if let Some(deleted_index) = deleted_index { let mut last_index_with_a_vector = None; for index in arroy_db_range_for_embedder(self.index).skip(deleted_index as usize) { - if self.quantized { - let writer = arroy::Writer::new(self.quantized_db(), index, dimension); - let Some(candidate) = writer.item_vector(wtxn, itemid)? 
else { - break; - }; - last_index_with_a_vector = Some((index, candidate)); - } else { - let writer = arroy::Writer::new(self.angular_db(), index, dimension); - let Some(candidate) = writer.item_vector(wtxn, itemid)? else { - break; - }; - last_index_with_a_vector = Some((index, candidate)); - } + let writer = arroy::Writer::new(db, index, dimension); + let Some(candidate) = writer.item_vector(wtxn, item_id)? else { + break; + }; + last_index_with_a_vector = Some((index, candidate)); } if let Some((last_index, vector)) = last_index_with_a_vector { - if self.quantized { - // unwrap: computed the index from the list of writers - let writer = arroy::Writer::new(self.quantized_db(), last_index, dimension); - writer.del_item(wtxn, itemid)?; - let writer = arroy::Writer::new(self.quantized_db(), deleted_index, dimension); - writer.add_item(wtxn, itemid, &vector)?; - } else { - // unwrap: computed the index from the list of writers - let writer = arroy::Writer::new(self.angular_db(), last_index, dimension); - writer.del_item(wtxn, itemid)?; - let writer = arroy::Writer::new(self.angular_db(), deleted_index, dimension); - writer.add_item(wtxn, itemid, &vector)?; - } + // unwrap: computed the index from the list of writers + let writer = arroy::Writer::new(db, last_index, dimension); + writer.del_item(wtxn, item_id)?; + let writer = arroy::Writer::new(db, deleted_index, dimension); + writer.add_item(wtxn, item_id, &vector)?; } } Ok(deleted_index.is_some()) @@ -284,17 +297,26 @@ impl ArroyWrapper { item: ItemId, limit: usize, filter: Option<&RoaringBitmap>, + ) -> Result, arroy::Error> { + if self.quantized { + self._nns_by_item(rtxn, self.quantized_db(), item, limit, filter) + } else { + self._nns_by_item(rtxn, self.angular_db(), item, limit, filter) + } + } + + fn _nns_by_item( + &self, + rtxn: &RoTxn, + db: arroy::Database, + item: ItemId, + limit: usize, + filter: Option<&RoaringBitmap>, ) -> Result, arroy::Error> { let mut results = Vec::new(); - for index in 
arroy_db_range_for_embedder(self.index) { - let ret = if self.quantized { - arroy::Reader::open(rtxn, index, self.quantized_db())? - .nns_by_item(rtxn, item, limit, None, None, filter)? - } else { - arroy::Reader::open(rtxn, index, self.angular_db())? - .nns_by_item(rtxn, item, limit, None, None, filter)? - }; + for reader in self.readers(rtxn, db) { + let ret = reader?.nns_by_item(rtxn, item, limit, None, None, filter)?; if let Some(mut ret) = ret { results.append(&mut ret); } else { @@ -302,27 +324,35 @@ impl ArroyWrapper { } } results.sort_unstable_by_key(|(_, distance)| OrderedFloat(*distance)); - Ok(results) } pub fn nns_by_vector( &self, - txn: &RoTxn, - item: &[f32], + rtxn: &RoTxn, + vector: &[f32], + limit: usize, + filter: Option<&RoaringBitmap>, + ) -> Result, arroy::Error> { + if self.quantized { + self._nns_by_vector(rtxn, self.quantized_db(), vector, limit, filter) + } else { + self._nns_by_vector(rtxn, self.angular_db(), vector, limit, filter) + } + } + + fn _nns_by_vector( + &self, + rtxn: &RoTxn, + db: arroy::Database, + vector: &[f32], limit: usize, filter: Option<&RoaringBitmap>, ) -> Result, arroy::Error> { let mut results = Vec::new(); - for index in arroy_db_range_for_embedder(self.index) { - let mut ret = if self.quantized { - arroy::Reader::open(txn, index, self.quantized_db())? - .nns_by_vector(txn, item, limit, None, None, filter)? - } else { - arroy::Reader::open(txn, index, self.angular_db())? - .nns_by_vector(txn, item, limit, None, None, filter)? - }; + for reader in self.readers(rtxn, db) { + let mut ret = reader?.nns_by_vector(rtxn, vector, limit, None, None, filter)?; results.append(&mut ret); } @@ -331,18 +361,27 @@ impl ArroyWrapper { Ok(results) } - pub fn item_vector(&self, rtxn: &RoTxn, docid: u32) -> Result>, arroy::Error> { - for index in arroy_db_range_for_embedder(self.index) { - let ret = if self.quantized { - arroy::Reader::open(rtxn, index, self.quantized_db())?.item_vector(rtxn, docid)? 
- } else { - arroy::Reader::open(rtxn, index, self.angular_db())?.item_vector(rtxn, docid)? - }; - if ret.is_some() { - return Ok(ret); + pub fn item_vectors(&self, rtxn: &RoTxn, item_id: u32) -> Result>, arroy::Error> { + let mut vectors = Vec::new(); + + if self.quantized { + for reader in self.readers(rtxn, self.quantized_db()) { + if let Some(vec) = reader?.item_vector(rtxn, item_id)? { + vectors.push(vec); + } else { + break; + } + } + } else { + for reader in self.readers(rtxn, self.angular_db()) { + if let Some(vec) = reader?.item_vector(rtxn, item_id)? { + vectors.push(vec); + } else { + break; + } } } - Ok(None) + Ok(vectors) } fn angular_db(&self) -> arroy::Database { From 0704fb71e97ce20fbe3ed5f5af6ad53da3a3d67f Mon Sep 17 00:00:00 2001 From: Louis Dureuil Date: Tue, 24 Sep 2024 09:44:29 +0200 Subject: [PATCH 020/111] Fix bench by adding embedder --- .../search/embeddings-movies-subset-hf.json | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/workloads/search/embeddings-movies-subset-hf.json b/workloads/search/embeddings-movies-subset-hf.json index aeeecac59..36f45cfb9 100644 --- a/workloads/search/embeddings-movies-subset-hf.json +++ b/workloads/search/embeddings-movies-subset-hf.json @@ -77,7 +77,8 @@ "q": "puppy cute comforting movie", "limit": 100, "hybrid": { - "semanticRatio": 0.1 + "semanticRatio": 0.1, + "embedder": "default" } } }, @@ -91,7 +92,8 @@ "q": "puppy cute comforting movie", "limit": 100, "hybrid": { - "semanticRatio": 0.5 + "semanticRatio": 0.5, + "embedder": "default" } } }, @@ -105,7 +107,8 @@ "q": "puppy cute comforting movie", "limit": 100, "hybrid": { - "semanticRatio": 0.9 + "semanticRatio": 0.9, + "embedder": "default" } } }, @@ -119,7 +122,8 @@ "q": "puppy cute comforting movie", "limit": 100, "hybrid": { - "semanticRatio": 1.0 + "semanticRatio": 1.0, + "embedder": "default" } } }, @@ -133,7 +137,8 @@ "q": "shrek", "limit": 100, "hybrid": { - "semanticRatio": 1.0 + "semanticRatio": 1.0, + 
"embedder": "default" } } }, @@ -147,7 +152,8 @@ "q": "shrek", "limit": 100, "hybrid": { - "semanticRatio": 0.5 + "semanticRatio": 0.5, + "embedder": "default" } } }, @@ -161,7 +167,8 @@ "q": "shrek", "limit": 100, "hybrid": { - "semanticRatio": 0.1 + "semanticRatio": 0.1, + "embedder": "default" } } }, From 86da0e83fe9043ff84d27ec7eb98e0ccd312b98e Mon Sep 17 00:00:00 2001 From: Louis Dureuil Date: Tue, 24 Sep 2024 10:02:53 +0200 Subject: [PATCH 021/111] Upgrade "batch failed" log to ERROR level --- index-scheduler/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/index-scheduler/src/lib.rs b/index-scheduler/src/lib.rs index fe8244f9b..e0e2bfb75 100644 --- a/index-scheduler/src/lib.rs +++ b/index-scheduler/src/lib.rs @@ -1263,7 +1263,7 @@ impl IndexScheduler { #[cfg(test)] self.maybe_fail(tests::FailureLocation::UpdatingTaskAfterProcessBatchFailure)?; - tracing::info!("Batch failed {}", error); + tracing::error!("Batch failed {}", error); self.update_task(&mut wtxn, &task) .map_err(|e| Error::TaskDatabaseUpdate(Box::new(e)))?; From 79d8a7a51a13fc089c3ebe58721302c856191d8d Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 24 Sep 2024 10:36:28 +0200 Subject: [PATCH 022/111] rename the embedder index for clarity --- milli/src/vector/mod.rs | 42 ++++++++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index b5b6cd953..2da8ecd57 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -32,17 +32,21 @@ pub const REQUEST_PARALLELISM: usize = 40; pub struct ArroyWrapper { quantized: bool, - index: u8, + embedder_index: u8, database: arroy::Database, } impl ArroyWrapper { - pub fn new(database: arroy::Database, index: u8, quantized: bool) -> Self { - Self { database, index, quantized } + pub fn new( + database: arroy::Database, + embedder_index: u8, + quantized: bool, + ) -> Self { + Self { database, embedder_index, quantized } } pub fn 
index(&self) -> u8 { - self.index + self.embedder_index } fn readers<'a, D: arroy::Distance>( @@ -50,7 +54,7 @@ impl ArroyWrapper { rtxn: &'a RoTxn<'a>, db: arroy::Database, ) -> impl Iterator, arroy::Error>> + 'a { - arroy_db_range_for_embedder(self.index).map_while(move |index| { + arroy_db_range_for_embedder(self.embedder_index).map_while(move |index| { match arroy::Reader::open(rtxn, index, db) { Ok(reader) => Some(Ok(reader)), Err(arroy::Error::MissingMetadata(_)) => None, @@ -60,7 +64,7 @@ impl ArroyWrapper { } pub fn dimensions(&self, rtxn: &RoTxn) -> Result { - let first_id = arroy_db_range_for_embedder(self.index).next().unwrap(); + let first_id = arroy_db_range_for_embedder(self.embedder_index).next().unwrap(); if self.quantized { Ok(arroy::Reader::open(rtxn, first_id, self.quantized_db())?.dimensions()) } else { @@ -70,7 +74,7 @@ impl ArroyWrapper { pub fn quantize(&mut self, wtxn: &mut RwTxn, dimension: usize) -> Result<(), arroy::Error> { if !self.quantized { - for index in arroy_db_range_for_embedder(self.index) { + for index in arroy_db_range_for_embedder(self.embedder_index) { let writer = arroy::Writer::new(self.angular_db(), index, dimension); writer.prepare_changing_distance::(wtxn)?; } @@ -81,7 +85,7 @@ impl ArroyWrapper { // TODO: We can stop early when we find an empty DB pub fn need_build(&self, rtxn: &RoTxn, dimension: usize) -> Result { - for index in arroy_db_range_for_embedder(self.index) { + for index in arroy_db_range_for_embedder(self.embedder_index) { let need_build = if self.quantized { arroy::Writer::new(self.quantized_db(), index, dimension).need_build(rtxn) } else { @@ -101,7 +105,7 @@ impl ArroyWrapper { rng: &mut R, dimension: usize, ) -> Result<(), arroy::Error> { - for index in arroy_db_range_for_embedder(self.index) { + for index in arroy_db_range_for_embedder(self.embedder_index) { if self.quantized { arroy::Writer::new(self.quantized_db(), index, dimension).build(wtxn, rng, None)? 
} else { @@ -119,7 +123,9 @@ impl ArroyWrapper { embeddings: &Embeddings, ) -> Result<(), arroy::Error> { let dimension = embeddings.dimension(); - for (index, vector) in arroy_db_range_for_embedder(self.index).zip(embeddings.iter()) { + for (index, vector) in + arroy_db_range_for_embedder(self.embedder_index).zip(embeddings.iter()) + { if self.quantized { arroy::Writer::new(self.quantized_db(), index, dimension) .add_item(wtxn, item_id, vector)? @@ -154,7 +160,7 @@ impl ArroyWrapper { ) -> Result<(), arroy::Error> { let dimension = vector.len(); - for index in arroy_db_range_for_embedder(self.index) { + for index in arroy_db_range_for_embedder(self.embedder_index) { let writer = arroy::Writer::new(db, index, dimension); if !writer.contains_item(wtxn, item_id)? { writer.add_item(wtxn, item_id, vector)?; @@ -172,7 +178,7 @@ impl ArroyWrapper { dimension: usize, item_id: arroy::ItemId, ) -> Result { - for index in arroy_db_range_for_embedder(self.index) { + for index in arroy_db_range_for_embedder(self.embedder_index) { if self.quantized { let writer = arroy::Writer::new(self.quantized_db(), index, dimension); if writer.del_item(wtxn, item_id)? { @@ -213,7 +219,7 @@ impl ArroyWrapper { let dimension = vector.len(); let mut deleted_index = None; - for index in arroy_db_range_for_embedder(self.index) { + for index in arroy_db_range_for_embedder(self.embedder_index) { let writer = arroy::Writer::new(db, index, dimension); let Some(candidate) = writer.item_vector(wtxn, item_id)? else { // uses invariant: vectors are packed in the first writers. @@ -228,7 +234,9 @@ impl ArroyWrapper { // 🥲 enforce invariant: vectors are packed in the first writers. 
if let Some(deleted_index) = deleted_index { let mut last_index_with_a_vector = None; - for index in arroy_db_range_for_embedder(self.index).skip(deleted_index as usize) { + for index in + arroy_db_range_for_embedder(self.embedder_index).skip(deleted_index as usize) + { let writer = arroy::Writer::new(db, index, dimension); let Some(candidate) = writer.item_vector(wtxn, item_id)? else { break; @@ -247,7 +255,7 @@ impl ArroyWrapper { } pub fn clear(&self, wtxn: &mut RwTxn, dimension: usize) -> Result<(), arroy::Error> { - for index in arroy_db_range_for_embedder(self.index) { + for index in arroy_db_range_for_embedder(self.embedder_index) { if self.quantized { arroy::Writer::new(self.quantized_db(), index, dimension).clear(wtxn)?; } else { @@ -258,7 +266,7 @@ impl ArroyWrapper { } pub fn is_empty(&self, rtxn: &RoTxn, dimension: usize) -> Result { - for index in arroy_db_range_for_embedder(self.index) { + for index in arroy_db_range_for_embedder(self.embedder_index) { let empty = if self.quantized { arroy::Writer::new(self.quantized_db(), index, dimension).is_empty(rtxn)? } else { @@ -277,7 +285,7 @@ impl ArroyWrapper { dimension: usize, item: arroy::ItemId, ) -> Result { - for index in arroy_db_range_for_embedder(self.index) { + for index in arroy_db_range_for_embedder(self.embedder_index) { let contains = if self.quantized { arroy::Writer::new(self.quantized_db(), index, dimension) .contains_item(rtxn, item)? 
From f2d187ba3e779c0644ad0e1dbf3174dea2614d35 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 24 Sep 2024 10:39:40 +0200 Subject: [PATCH 023/111] rename the index method to embedder_index --- milli/src/vector/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index 2da8ecd57..ca607c892 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -45,7 +45,7 @@ impl ArroyWrapper { Self { database, embedder_index, quantized } } - pub fn index(&self) -> u8 { + pub fn embedder_index(&self) -> u8 { self.embedder_index } From fd8447c5214b62b724f18ec5de9b92fa34537462 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 24 Sep 2024 10:52:05 +0200 Subject: [PATCH 024/111] fix the del items thing --- milli/src/update/index_documents/typed_chunk.rs | 2 +- milli/src/vector/mod.rs | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/milli/src/update/index_documents/typed_chunk.rs b/milli/src/update/index_documents/typed_chunk.rs index e118420d8..20e70b2a6 100644 --- a/milli/src/update/index_documents/typed_chunk.rs +++ b/milli/src/update/index_documents/typed_chunk.rs @@ -680,7 +680,7 @@ pub(crate) fn write_typed_chunk_into_index( let mut iter = merger.into_stream_merger_iter()?; while let Some((key, _)) = iter.next()? { let docid = key.try_into().map(DocumentId::from_be_bytes).unwrap(); - writer.del_item_raw(wtxn, expected_dimension, docid)?; + writer.del_items(wtxn, expected_dimension, docid)?; } // add generated embeddings diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index ca607c892..4b322ddf4 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -170,29 +170,28 @@ impl ArroyWrapper { Ok(()) } - /// Delete an item from the index. It **does not** take care of fixing the hole - /// made after deleting the item. 
- pub fn del_item_raw( + /// Delete all embeddings from a specific `item_id` + pub fn del_items( &self, wtxn: &mut RwTxn, dimension: usize, item_id: arroy::ItemId, - ) -> Result { + ) -> Result<(), arroy::Error> { for index in arroy_db_range_for_embedder(self.embedder_index) { if self.quantized { let writer = arroy::Writer::new(self.quantized_db(), index, dimension); - if writer.del_item(wtxn, item_id)? { - return Ok(true); + if !writer.del_item(wtxn, item_id)? { + break; } } else { let writer = arroy::Writer::new(self.angular_db(), index, dimension); - if writer.del_item(wtxn, item_id)? { - return Ok(true); + if !writer.del_item(wtxn, item_id)? { + break; } } } - Ok(false) + Ok(()) } /// Delete one item. From b8a74e04647af60a396539b6ba3b47d19771cc49 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 24 Sep 2024 10:59:15 +0200 Subject: [PATCH 025/111] fix comments --- milli/src/vector/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index 4b322ddf4..8341ab923 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -115,7 +115,10 @@ impl ArroyWrapper { Ok(()) } - /// Overwrite all the embeddings associated to the index and item id. + /// Overwrite all the embeddings associated with the index and item ID. + /// /!\ It won't remove embeddings after the last passed embedding, which can leave stale embeddings. + /// You should call `del_items` on the `item_id` before calling this method. + /// /!\ Cannot insert more than u8::MAX embeddings; after inserting u8::MAX embeddings, all the remaining ones will be silently ignored. 
pub fn add_items( &self, wtxn: &mut RwTxn, @@ -243,7 +246,6 @@ impl ArroyWrapper { last_index_with_a_vector = Some((index, candidate)); } if let Some((last_index, vector)) = last_index_with_a_vector { - // unwrap: computed the index from the list of writers let writer = arroy::Writer::new(db, last_index, dimension); writer.del_item(wtxn, item_id)?; let writer = arroy::Writer::new(db, deleted_index, dimension); From 645a55317af91f37d68d26527568032016bf5393 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 24 Sep 2024 14:54:24 +0200 Subject: [PATCH 026/111] merge the build and quantize method --- milli/src/update/index_documents/mod.rs | 5 +-- milli/src/vector/mod.rs | 43 ++++++++++++++----------- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/milli/src/update/index_documents/mod.rs b/milli/src/update/index_documents/mod.rs index b03ab259a..e164a0817 100644 --- a/milli/src/update/index_documents/mod.rs +++ b/milli/src/update/index_documents/mod.rs @@ -713,10 +713,7 @@ where pool.install(|| { let mut writer = ArroyWrapper::new(vector_arroy, embedder_index, was_quantized); - if is_quantizing { - writer.quantize(wtxn, dimension)?; - } - writer.build(wtxn, &mut rng, dimension)?; + writer.build_and_quantize(wtxn, &mut rng, dimension, is_quantizing)?; Result::Ok(()) }) .map_err(InternalError::from)??; diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index 8341ab923..a33f76559 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -98,18 +98,37 @@ impl ArroyWrapper { Ok(false) } - /// TODO: We should early exit when it doesn't need to be built - pub fn build( - &self, + pub fn build_and_quantize( + &mut self, wtxn: &mut RwTxn, rng: &mut R, dimension: usize, + quantizing: bool, ) -> Result<(), arroy::Error> { for index in arroy_db_range_for_embedder(self.embedder_index) { if self.quantized { - arroy::Writer::new(self.quantized_db(), index, dimension).build(wtxn, rng, None)? 
+ let writer = arroy::Writer::new(self.quantized_db(), index, dimension); + if writer.need_build(wtxn)? { + writer.build(wtxn, rng, None)? + } else if writer.is_empty(wtxn)? { + break; + } } else { - arroy::Writer::new(self.angular_db(), index, dimension).build(wtxn, rng, None)? + let writer = arroy::Writer::new(self.angular_db(), index, dimension); + // If we are quantizing the databases, we can't know from meilisearch + // if the db was empty but still contained the wrong metadata, thus we need + // to quantize everything and can't stop early. Since this operation can + // only happens once in the life of an embedder, it's not very performances + // sensitive. + if quantizing && !self.quantized { + let writer = + writer.prepare_changing_distance::(wtxn)?; + writer.build(wtxn, rng, None)? + } else if writer.need_build(wtxn)? { + writer.build(wtxn, rng, None)? + } else if writer.is_empty(wtxn)? { + break; + } } } Ok(()) @@ -266,20 +285,6 @@ impl ArroyWrapper { Ok(()) } - pub fn is_empty(&self, rtxn: &RoTxn, dimension: usize) -> Result { - for index in arroy_db_range_for_embedder(self.embedder_index) { - let empty = if self.quantized { - arroy::Writer::new(self.quantized_db(), index, dimension).is_empty(rtxn)? - } else { - arroy::Writer::new(self.angular_db(), index, dimension).is_empty(rtxn)? 
- }; - if !empty { - return Ok(false); - } - } - Ok(true) - } - pub fn contains_item( &self, rtxn: &RoTxn, From 8b4e2c7b1798e58a71dfb0538dbc980155b688cc Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 24 Sep 2024 15:00:25 +0200 Subject: [PATCH 027/111] Remove now unused method --- milli/src/vector/mod.rs | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index a33f76559..39655e72a 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -72,32 +72,6 @@ impl ArroyWrapper { } } - pub fn quantize(&mut self, wtxn: &mut RwTxn, dimension: usize) -> Result<(), arroy::Error> { - if !self.quantized { - for index in arroy_db_range_for_embedder(self.embedder_index) { - let writer = arroy::Writer::new(self.angular_db(), index, dimension); - writer.prepare_changing_distance::(wtxn)?; - } - self.quantized = true; - } - Ok(()) - } - - // TODO: We can stop early when we find an empty DB - pub fn need_build(&self, rtxn: &RoTxn, dimension: usize) -> Result { - for index in arroy_db_range_for_embedder(self.embedder_index) { - let need_build = if self.quantized { - arroy::Writer::new(self.quantized_db(), index, dimension).need_build(rtxn) - } else { - arroy::Writer::new(self.angular_db(), index, dimension).need_build(rtxn) - }; - if need_build? 
{ - return Ok(true); - } - } - Ok(false) - } - pub fn build_and_quantize( &mut self, wtxn: &mut RwTxn, From 7f048b9732a048624bbe4beacb2e93f59c6d510d Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 24 Sep 2024 15:02:38 +0200 Subject: [PATCH 028/111] early exit in the clear and contains --- milli/src/vector/mod.rs | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index 39655e72a..d5b80db83 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -251,9 +251,17 @@ impl ArroyWrapper { pub fn clear(&self, wtxn: &mut RwTxn, dimension: usize) -> Result<(), arroy::Error> { for index in arroy_db_range_for_embedder(self.embedder_index) { if self.quantized { - arroy::Writer::new(self.quantized_db(), index, dimension).clear(wtxn)?; + let writer = arroy::Writer::new(self.quantized_db(), index, dimension); + if writer.is_empty(wtxn)? { + break; + } + writer.clear(wtxn)?; } else { - arroy::Writer::new(self.angular_db(), index, dimension).clear(wtxn)?; + let writer = arroy::Writer::new(self.angular_db(), index, dimension); + if writer.is_empty(wtxn)? { + break; + } + writer.clear(wtxn)?; } } Ok(()) @@ -267,10 +275,17 @@ impl ArroyWrapper { ) -> Result { for index in arroy_db_range_for_embedder(self.embedder_index) { let contains = if self.quantized { - arroy::Writer::new(self.quantized_db(), index, dimension) - .contains_item(rtxn, item)? + let writer = arroy::Writer::new(self.quantized_db(), index, dimension); + if writer.is_empty(rtxn)? { + break; + } + writer.contains_item(rtxn, item)? } else { - arroy::Writer::new(self.angular_db(), index, dimension).contains_item(rtxn, item)? + let writer = arroy::Writer::new(self.angular_db(), index, dimension); + if writer.is_empty(rtxn)? { + break; + } + writer.contains_item(rtxn, item)? 
}; if contains { return Ok(contains); From b31e9bea26c098750dece8fb38eb2f57d6c254b5 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 24 Sep 2024 16:33:17 +0200 Subject: [PATCH 029/111] while retrieving the readers on an arroywrapper, stops at the first empty reader --- milli/src/vector/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index d5b80db83..b6d6510af 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -56,7 +56,11 @@ impl ArroyWrapper { ) -> impl Iterator, arroy::Error>> + 'a { arroy_db_range_for_embedder(self.embedder_index).map_while(move |index| { match arroy::Reader::open(rtxn, index, db) { - Ok(reader) => Some(Ok(reader)), + Ok(reader) => match reader.is_empty(rtxn) { + Ok(false) => Some(Ok(reader)), + Ok(true) => None, + Err(e) => Some(Err(e)), + }, Err(arroy::Error::MissingMetadata(_)) => None, Err(e) => Some(Err(e)), } From e9580fe61946477d83b9222ad4c00058a9868824 Mon Sep 17 00:00:00 2001 From: ManyTheFish Date: Wed, 25 Sep 2024 11:03:17 +0200 Subject: [PATCH 030/111] Add turkish normalization --- meilisearch-types/Cargo.toml | 5 ++++- meilisearch/Cargo.toml | 1 + milli/Cargo.toml | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/meilisearch-types/Cargo.toml b/meilisearch-types/Cargo.toml index cb4937e57..0dae024f2 100644 --- a/meilisearch-types/Cargo.toml +++ b/meilisearch-types/Cargo.toml @@ -66,5 +66,8 @@ khmer = ["milli/khmer"] vietnamese = ["milli/vietnamese"] # force swedish character recomposition swedish-recomposition = ["milli/swedish-recomposition"] -# force german character recomposition +# allow german tokenization german = ["milli/german"] +# allow turkish normalization +turkish = ["milli/turkish"] + diff --git a/meilisearch/Cargo.toml b/meilisearch/Cargo.toml index 2a16e1017..c193c89d4 100644 --- a/meilisearch/Cargo.toml +++ b/meilisearch/Cargo.toml @@ -154,6 +154,7 @@ khmer = ["meilisearch-types/khmer"] vietnamese = 
["meilisearch-types/vietnamese"] swedish-recomposition = ["meilisearch-types/swedish-recomposition"] german = ["meilisearch-types/german"] +turkish = ["meilisearch-types/turkish"] [package.metadata.mini-dashboard] assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.14/build.zip" diff --git a/milli/Cargo.toml b/milli/Cargo.toml index 5fc2d65c8..70d09ce4e 100644 --- a/milli/Cargo.toml +++ b/milli/Cargo.toml @@ -108,6 +108,7 @@ all-tokenizations = [ "charabia/vietnamese", "charabia/swedish-recomposition", "charabia/german-segmentation", + "charabia/turkish", ] # Use POSIX semaphores instead of SysV semaphores in LMDB @@ -146,5 +147,8 @@ german = ["charabia/german-segmentation"] # force swedish character recomposition swedish-recomposition = ["charabia/swedish-recomposition"] +# allow turkish specialized tokenization +turkish = ["charabia/turkish"] + # allow CUDA support, see cuda = ["candle-core/cuda"] From dc2cb58cf1ce3fa33f791d095f095c429a6ad9c0 Mon Sep 17 00:00:00 2001 From: ManyTheFish Date: Wed, 25 Sep 2024 11:12:30 +0200 Subject: [PATCH 031/111] use charabia default for all-tokenization --- milli/Cargo.toml | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/milli/Cargo.toml b/milli/Cargo.toml index 70d09ce4e..3c4a44639 100644 --- a/milli/Cargo.toml +++ b/milli/Cargo.toml @@ -98,17 +98,7 @@ rand = { version = "0.8.5", features = ["small_rng"] } [features] all-tokenizations = [ - "charabia/chinese", - "charabia/hebrew", - "charabia/japanese", - "charabia/thai", - "charabia/korean", - "charabia/greek", - "charabia/khmer", - "charabia/vietnamese", - "charabia/swedish-recomposition", - "charabia/german-segmentation", - "charabia/turkish", + "charabia/default", ] # Use POSIX semaphores instead of SysV semaphores in LMDB From 78a4b7949df6c1f5ee6e95c80b8966ddf5aca957 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 26 Sep 2024 15:04:03 +0200 Subject: [PATCH 032/111] 
=?UTF-8?q?update=20rhai=20to=20a=20version=20that?= =?UTF-8?q?=20shouldn=E2=80=99t=20panic?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Cargo.lock | 8 +++----- milli/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bcca35173..3237d4e16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4581,9 +4581,8 @@ dependencies = [ [[package]] name = "rhai" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61797318be89b1a268a018a92a7657096d83f3ecb31418b9e9c16dcbb043b702" +version = "1.20.0" +source = "git+https://github.com/rhaiscript/rhai?rev=ef3df63121d27aacd838f366f2b83fd65f20a1e4#ef3df63121d27aacd838f366f2b83fd65f20a1e4" dependencies = [ "ahash 0.8.11", "bitflags 2.6.0", @@ -4600,8 +4599,7 @@ dependencies = [ [[package]] name = "rhai_codegen" version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b" +source = "git+https://github.com/rhaiscript/rhai?rev=ef3df63121d27aacd838f366f2b83fd65f20a1e4#ef3df63121d27aacd838f366f2b83fd65f20a1e4" dependencies = [ "proc-macro2", "quote", diff --git a/milli/Cargo.toml b/milli/Cargo.toml index 5fc2d65c8..b22d2164f 100644 --- a/milli/Cargo.toml +++ b/milli/Cargo.toml @@ -79,7 +79,7 @@ hf-hub = { git = "https://github.com/dureuill/hf-hub.git", branch = "rust_tls", ] } tiktoken-rs = "0.5.9" liquid = "0.26.6" -rhai = { version = "1.19.0", features = ["serde", "no_module", "no_custom_syntax", "no_time", "sync"] } +rhai = { git = "https://github.com/rhaiscript/rhai", rev = "ef3df63121d27aacd838f366f2b83fd65f20a1e4", features = ["serde", "no_module", "no_custom_syntax", "no_time", "sync"] } arroy = { git = "https://github.com/meilisearch/arroy/", rev = "2386594dfb009ce08821a925ccc89fb8e30bf73d" } rand = "0.8.5" tracing = "0.1.40" From d20a39b9599f7962b2a316e45cc126f90a3d8eed Mon Sep 17 
00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Fri, 27 Sep 2024 15:44:30 +0300 Subject: [PATCH 033/111] Refactor find_best_match_interval --- milli/src/search/new/matches/mod.rs | 154 +++++++++++++++++++--------- 1 file changed, 106 insertions(+), 48 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 26115c39b..bbd39e682 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -442,36 +442,48 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { /// Returns the matches interval where the score computed by match_interval_score is the best. fn find_best_match_interval<'a>(&self, matches: &'a [Match], crop_size: usize) -> &'a [Match] { + let matches_len = matches.len(); + // we compute the matches interval if we have at least 2 matches. - if matches.len() > 1 { + if matches_len > 1 { + // current interval positions. + let mut interval_first = 0; // positions of the first and the last match of the best matches interval in `matches`. let mut best_interval = (0, 0); let mut best_interval_score = self.match_interval_score(&matches[0..=0]); - // current interval positions. - let mut interval_first = 0; - let mut interval_last = 0; - for (index, next_match) in matches.iter().enumerate().skip(1) { + + let mut index = 1; + while index < matches_len - 1 { + let next_match = &matches[index]; + // if next match would make interval gross more than crop_size, // we compare the current interval with the best one, // then we increase `interval_first` until next match can be added. 
let next_match_last_word_pos = next_match.get_last_word_pos(); - let mut interval_first_match_first_word_pos = + let interval_first_match_first_word_pos = matches[interval_first].get_first_word_pos(); + // if the next match would mean that we pass the crop size window, + // we take the last valid match, that didn't pass this boundry, which is `index` - 1, + // and calculate a score for it, and check if it's better than our best so far if next_match_last_word_pos - interval_first_match_first_word_pos >= crop_size { - let interval_score = - self.match_interval_score(&matches[interval_first..=interval_last]); + // skip for 1, because it would result in the same as our very first interval score + if index != 1 { + let interval_last = index - 1; + let interval_score = + self.match_interval_score(&matches[interval_first..=interval_last]); - // keep interval if it's the best - if interval_score > best_interval_score { - best_interval = (interval_first, interval_last); - best_interval_score = interval_score; + // keep interval if it's the best + if interval_score > best_interval_score { + best_interval = (interval_first, interval_last); + best_interval_score = interval_score; + } } // advance start of the interval while interval is longer than crop_size. loop { interval_first += 1; - interval_first_match_first_word_pos = + let interval_first_match_first_word_pos = matches[interval_first].get_first_word_pos(); if next_match_last_word_pos - interval_first_match_first_word_pos @@ -481,10 +493,12 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { } } } - interval_last = index; + + index += 1; } // compute the last interval score and compare it to the best one. 
+ let interval_last = matches_len - 1; let interval_score = self.match_interval_score(&matches[interval_first..=interval_last]); if interval_score > best_interval_score { @@ -914,32 +928,32 @@ mod tests { let format_options = FormatOptions { highlight: true, crop: Some(10) }; - let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "\"the world\""); - let mut matcher = builder.build(text, None); - // should return 10 words with a marker at the start as well the end, and the highlighted matches. - insta::assert_snapshot!( - matcher.format(format_options), - @"…the power to split the world between those who embraced…" - ); + // let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "\"the world\""); + // let mut matcher = builder.build(text, None); + // // should return 10 words with a marker at the start as well the end, and the highlighted matches. + // insta::assert_snapshot!( + // matcher.format(format_options), + // @"…the power to split the world between those who embraced…" + // ); - let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "those \"and those\""); - let mut matcher = builder.build(text, None); - // should highlight "those" and the phrase "and those". - insta::assert_snapshot!( - matcher.format(format_options), - @"…world between those who embraced progress and those who resisted…" - ); + // let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "\"power to\" \"and those\""); + // let mut matcher = builder.build(text, None); + // // should highlight "those" and the phrase "and those". 
+ // insta::assert_snapshot!( + // matcher.format(format_options), + // @"…groundbreaking invention had the power to split the world between…" + // ); - let builder = MatcherBuilder::new_test( - &rtxn, - &temp_index, - "\"The groundbreaking invention had the power to split the world\"", - ); - let mut matcher = builder.build(text, None); - insta::assert_snapshot!( - matcher.format(format_options), - @"The groundbreaking invention had the power to split the world…" - ); + // let builder = MatcherBuilder::new_test( + // &rtxn, + // &temp_index, + // "\"The groundbreaking invention had the power to split the world\"", + // ); + // let mut matcher = builder.build(text, None); + // insta::assert_snapshot!( + // matcher.format(format_options), + // @"The groundbreaking invention had the power to split the world…" + // ); let builder = MatcherBuilder::new_test( &rtxn, @@ -952,16 +966,60 @@ mod tests { @"The groundbreaking invention had the power to split the world …" ); - let builder = MatcherBuilder::new_test( - &rtxn, - &temp_index, - "\"The groundbreaking invention\" \"embraced progress and those who resisted change\"", - ); - let mut matcher = builder.build(text, None); - insta::assert_snapshot!( - matcher.format(format_options), - @"…between those who embraced progress and those who resisted change…" - ); + // let builder = MatcherBuilder::new_test( + // &rtxn, + // &temp_index, + // "\"The groundbreaking invention\" \"embraced progress and those who resisted change!\"", + // ); + // let mut matcher = builder.build(text, None); + // insta::assert_snapshot!( + // matcher.format(format_options), + // @"…between those who embraced progress and those who resisted change…" + // ); + + // let builder = MatcherBuilder::new_test( + // &rtxn, + // &temp_index, + // "\"The groundbreaking invention\" \"split the world between those\"", + // ); + // let mut matcher = builder.build(text, None); + // insta::assert_snapshot!( + // matcher.format(format_options), + // @"…the power 
to split the world between those who embraced…" + // ); + + // let builder = MatcherBuilder::new_test( + // &rtxn, + // &temp_index, + // "\"groundbreaking invention\" \"split the world between\"", + // ); + // let mut matcher = builder.build(text, None); + // insta::assert_snapshot!( + // matcher.format(format_options), + // @"…groundbreaking invention had the power to split the world between…" + // ); + + // let builder = MatcherBuilder::new_test( + // &rtxn, + // &temp_index, + // "\"groundbreaking invention\" \"had the power to split the world between those\"", + // ); + // let mut matcher = builder.build(text, None); + // insta::assert_snapshot!( + // matcher.format(format_options), + // @"…invention had the power to split the world between those…" + // ); + + // let builder = MatcherBuilder::new_test( + // &rtxn, + // &temp_index, + // "\"The groundbreaking invention\" \"had the power to split the world between those\"", + // ); + // let mut matcher = builder.build(text, None); + // insta::assert_snapshot!( + // matcher.format(format_options), + // @"…invention had the power to split the world between those…" + // ); } #[test] From 5539a1904a76a34ea1c292b521249478b41c1bfb Mon Sep 17 00:00:00 2001 From: Timon Jurschitsch Date: Sat, 28 Sep 2024 11:05:52 +0200 Subject: [PATCH 034/111] test: improve performance of create_index.rs --- meilisearch/tests/index/create_index.rs | 30 ++++++++++++------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/meilisearch/tests/index/create_index.rs b/meilisearch/tests/index/create_index.rs index b51ccab51..16a6e0678 100644 --- a/meilisearch/tests/index/create_index.rs +++ b/meilisearch/tests/index/create_index.rs @@ -17,7 +17,7 @@ async fn create_index_no_primary_key() { assert_eq!(response["status"], "enqueued"); - let response = index.wait_task(0).await; + let response = index.wait_task(response.uid()).await; assert_eq!(response["status"], "succeeded"); assert_eq!(response["type"], "indexCreation"); @@ 
-34,7 +34,7 @@ async fn create_index_with_gzip_encoded_request() { assert_eq!(response["status"], "enqueued"); - let response = index.wait_task(0).await; + let response = index.wait_task(response.uid()).await; assert_eq!(response["status"], "succeeded"); assert_eq!(response["type"], "indexCreation"); @@ -82,7 +82,7 @@ async fn create_index_with_zlib_encoded_request() { assert_eq!(response["status"], "enqueued"); - let response = index.wait_task(0).await; + let response = index.wait_task(response.uid()).await; assert_eq!(response["status"], "succeeded"); assert_eq!(response["type"], "indexCreation"); @@ -99,7 +99,7 @@ async fn create_index_with_brotli_encoded_request() { assert_eq!(response["status"], "enqueued"); - let response = index.wait_task(0).await; + let response = index.wait_task(response.uid()).await; assert_eq!(response["status"], "succeeded"); assert_eq!(response["type"], "indexCreation"); @@ -116,7 +116,7 @@ async fn create_index_with_primary_key() { assert_eq!(response["status"], "enqueued"); - let response = index.wait_task(0).await; + let response = index.wait_task(response.uid()).await; assert_eq!(response["status"], "succeeded"); assert_eq!(response["type"], "indexCreation"); @@ -129,10 +129,10 @@ async fn create_index_with_invalid_primary_key() { let server = Server::new().await; let index = server.index("movies"); - let (_response, code) = index.add_documents(document, Some("title")).await; + let (response, code) = index.add_documents(document, Some("title")).await; assert_eq!(code, 202); - index.wait_task(0).await; + index.wait_task(response.uid()).await; let (response, code) = index.get().await; assert_eq!(code, 200); @@ -147,13 +147,13 @@ async fn test_create_multiple_indexes() { let index3 = server.index("test3"); let index4 = server.index("test4"); - index1.create(None).await; - index2.create(None).await; - index3.create(None).await; + let (task1, _) = index1.create(None).await; + let (task2, _) = index2.create(None).await; + let (task3, _) 
= index3.create(None).await; - index1.wait_task(0).await; - index1.wait_task(1).await; - index1.wait_task(2).await; + index1.wait_task(task1.uid()).await.succeeded(); + index2.wait_task(task2.uid()).await.succeeded(); + index3.wait_task(task3.uid()).await.succeeded(); assert_eq!(index1.get().await.1, 200); assert_eq!(index2.get().await.1, 200); @@ -169,9 +169,9 @@ async fn error_create_existing_index() { assert_eq!(code, 202); - index.create(Some("primary")).await; + let (task, _) = index.create(Some("primary")).await; - let response = index.wait_task(1).await; + let response = index.wait_task(task.uid()).await; let expected_response = json!({ "message": "Index `test` already exists.", From 84b4219a4fa153c83a31f00fc45b7545d66cf0a6 Mon Sep 17 00:00:00 2001 From: Timon Jurschitsch Date: Sun, 29 Sep 2024 10:16:31 +0200 Subject: [PATCH 035/111] test: improve delete_index.rs --- meilisearch/tests/index/delete_index.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/meilisearch/tests/index/delete_index.rs b/meilisearch/tests/index/delete_index.rs index e404a6003..8e238dad3 100644 --- a/meilisearch/tests/index/delete_index.rs +++ b/meilisearch/tests/index/delete_index.rs @@ -5,19 +5,19 @@ use crate::json; async fn create_and_delete_index() { let server = Server::new().await; let index = server.index("test"); - let (_response, code) = index.create(None).await; + let (response, code) = index.create(None).await; assert_eq!(code, 202); - index.wait_task(0).await; + index.wait_task(response.uid()).await.succeeded(); assert_eq!(index.get().await.1, 200); - let (_response, code) = index.delete().await; + let (response, code) = index.delete().await; assert_eq!(code, 202); - index.wait_task(1).await; + index.wait_task(response.uid()).await.succeeded(); assert_eq!(index.get().await.1, 404); } @@ -26,7 +26,7 @@ async fn create_and_delete_index() { async fn error_delete_unexisting_index() { let server = Server::new().await; let index = 
server.index("test"); - let (_, code) = index.delete().await; + let (task, code) = index.delete().await; assert_eq!(code, 202); @@ -37,7 +37,7 @@ async fn error_delete_unexisting_index() { "link": "https://docs.meilisearch.com/errors#index_not_found" }); - let response = index.wait_task(0).await; + let response = index.wait_task(task.uid()).await; assert_eq!(response["status"], "failed"); assert_eq!(response["error"], expected_response); } From eabc14c26858d9f0bda89e6fa38f0aa4b0244be8 Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Mon, 30 Sep 2024 21:24:41 +0300 Subject: [PATCH 036/111] Refactor, handle more cases for phrases --- .../src/search/new/matches/matching_words.rs | 2 +- milli/src/search/new/matches/mod.rs | 497 ++++++++++-------- 2 files changed, 291 insertions(+), 208 deletions(-) diff --git a/milli/src/search/new/matches/matching_words.rs b/milli/src/search/new/matches/matching_words.rs index 4ad5c37ec..4deaff6a0 100644 --- a/milli/src/search/new/matches/matching_words.rs +++ b/milli/src/search/new/matches/matching_words.rs @@ -181,7 +181,7 @@ impl<'a> PartialMatch<'a> { // return a new Partial match allowing the highlighter to continue. if is_matching && matching_words.len() > 1 { matching_words.remove(0); - Some(MatchType::Partial(PartialMatch { matching_words, ids, char_len })) + Some(MatchType::Partial(Self { matching_words, ids, char_len })) // if there is no remaining word to match in the phrase and the current token is matching, // return a Full match. 
} else if is_matching { diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index bbd39e682..624287f5f 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -1,6 +1,6 @@ use std::borrow::Cow; -use charabia::{Language, SeparatorKind, Token, Tokenizer}; +use charabia::{Language, SeparatorKind, Token, TokenKind, Tokenizer}; pub use matching_words::MatchingWords; use matching_words::{MatchType, PartialMatch, WordId}; use serde::Serialize; @@ -145,6 +145,13 @@ impl Match { MatchPosition::Phrase { token_positions: (_, ltp), .. } => ltp, } } + + fn get_word_count(&self) -> usize { + match self.position { + MatchPosition::Word { .. } => 1, + MatchPosition::Phrase { word_positions: (fwp, lwp), .. } => lwp - fwp + 1, + } + } } #[derive(Serialize, Debug, Clone, PartialEq, Eq)] @@ -153,6 +160,27 @@ pub struct MatchBounds { pub length: usize, } +enum SimpleTokenKind { + Separator(SeparatorKind), + NotSeparator, +} + +impl SimpleTokenKind { + fn get(token: &&Token<'_>) -> Self { + match token.kind { + TokenKind::Separator(separaor_kind) => Self::Separator(separaor_kind), + _ => Self::NotSeparator, + } + } + + fn is_not_separator(&self) -> bool { + match self { + SimpleTokenKind::NotSeparator => true, + SimpleTokenKind::Separator(_) => false, + } + } +} + /// Structure used to analyze a string, compute words that match, /// and format the source string, returning a highlighted and cropped sub-string. pub struct Matcher<'t, 'tokenizer, 'b, 'lang> { @@ -287,95 +315,130 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { crop_size: usize, ) -> (usize, usize) { // if there is no match, we start from the beginning of the string by default. 
- let first_match_word_position = + let first_match_first_word_position = matches.first().map(|m| m.get_first_word_pos()).unwrap_or(0); - let first_match_token_position = + let first_match_first_token_position = matches.first().map(|m| m.get_first_token_pos()).unwrap_or(0); - let last_match_word_position = matches.last().map(|m| m.get_last_word_pos()).unwrap_or(0); - let last_match_token_position = matches.last().map(|m| m.get_last_token_pos()).unwrap_or(0); + let last_match_last_word_position = + matches.last().map(|m| m.get_last_word_pos()).unwrap_or(0); + let last_match_last_token_position = + matches.last().map(|m| m.get_last_token_pos()).unwrap_or(0); - // matches needs to be counted in the crop len. - let mut remaining_words = crop_size + first_match_word_position - last_match_word_position; + let matches_window_len = + last_match_last_word_position - first_match_first_word_position + 1; - // create the initial state of the crop window: 2 iterators starting from the matches positions, - // a reverse iterator starting from the first match token position and going towards the beginning of the text, - let mut before_tokens = tokens[..first_match_token_position].iter().rev().peekable(); - // an iterator starting from the last match token position and going towards the end of the text. - let mut after_tokens = tokens[last_match_token_position..].iter().peekable(); + if crop_size >= matches_window_len { + // matches needs to be counted in the crop len. 
+ let mut remaining_words = crop_size - matches_window_len; - // grows the crop window peeking in both directions - // until the window contains the good number of words: - while remaining_words > 0 { - let before_token = before_tokens.peek().map(|t| t.separator_kind()); - let after_token = after_tokens.peek().map(|t| t.separator_kind()); + // create the initial state of the crop window: 2 iterators starting from the matches positions, + // a reverse iterator starting from the first match token position and going towards the beginning of the text, + let mut before_tokens = + tokens[..first_match_first_token_position].iter().rev().peekable(); + // an iterator starting from the last match token position and going towards the end of the text. + let mut after_tokens = tokens[last_match_last_token_position + 1..].iter().peekable(); - match (before_token, after_token) { - // we can expand both sides. - (Some(before_token), Some(after_token)) => { - match (before_token, after_token) { - // if they are both separators and are the same kind then advance both, - // or expand in the soft separator separator side. - (Some(before_token_kind), Some(after_token_kind)) => { - if before_token_kind == after_token_kind { - before_tokens.next(); + // grows the crop window peeking in both directions + // until the window contains the good number of words: + while remaining_words > 0 { + let before_token_kind = before_tokens.peek().map(SimpleTokenKind::get); + let after_token_kind = after_tokens.peek().map(SimpleTokenKind::get); - // this avoid having an ending separator before crop marker. - if remaining_words > 1 { + match (before_token_kind, after_token_kind) { + // we can expand both sides. + (Some(before_token_kind), Some(after_token_kind)) => { + match (before_token_kind, after_token_kind) { + // if they are both separators and are the same kind then advance both, + // or expand in the soft separator separator side. 
+ ( + SimpleTokenKind::Separator(before_token_separator_kind), + SimpleTokenKind::Separator(after_token_separator_kind), + ) => { + if before_token_separator_kind == after_token_separator_kind { + before_tokens.next(); + + // this avoid having an ending separator before crop marker. + if remaining_words > 1 { + after_tokens.next(); + } + } else if let SeparatorKind::Hard = before_token_separator_kind { after_tokens.next(); + } else { + before_tokens.next(); } - } else if before_token_kind == SeparatorKind::Hard { - after_tokens.next(); - } else { - before_tokens.next(); } - } - // if one of the tokens is a word, we expend in the side of the word. - // left is a word, advance left. - (None, Some(_)) => { - before_tokens.next(); - remaining_words -= 1; - } - // right is a word, advance right. - (Some(_), None) => { - after_tokens.next(); - remaining_words -= 1; - } - // both are words, advance left then right if remaining_word > 0. - (None, None) => { - before_tokens.next(); - remaining_words -= 1; - - if remaining_words > 0 { + // if one of the tokens is a word, we expend in the side of the word. + // left is a word, advance left. + (SimpleTokenKind::NotSeparator, SimpleTokenKind::Separator(_)) => { + before_tokens.next(); + remaining_words -= 1; + } + // right is a word, advance right. + (SimpleTokenKind::Separator(_), SimpleTokenKind::NotSeparator) => { after_tokens.next(); remaining_words -= 1; } + // both are words, advance left then right if remaining_word > 0. + (SimpleTokenKind::NotSeparator, SimpleTokenKind::NotSeparator) => { + before_tokens.next(); + remaining_words -= 1; + + if remaining_words > 0 { + after_tokens.next(); + remaining_words -= 1; + } + } } } - } - // the end of the text is reached, advance left. - (Some(before_token), None) => { - before_tokens.next(); - if before_token.is_none() { - remaining_words -= 1; + // the end of the text is reached, advance left. 
+ (Some(before_token_kind), None) => { + before_tokens.next(); + if let SimpleTokenKind::NotSeparator = before_token_kind { + remaining_words -= 1; + } } - } - // the start of the text is reached, advance right. - (None, Some(after_token)) => { - after_tokens.next(); - if after_token.is_none() { - remaining_words -= 1; + // the start of the text is reached, advance right. + (None, Some(after_token_kind)) => { + after_tokens.next(); + if let SimpleTokenKind::NotSeparator = after_token_kind { + remaining_words -= 1; + } } + // no more token to add. + (None, None) => break, } - // no more token to add. - (None, None) => break, } + + // finally, keep the byte index of each bound of the crop window. + let crop_byte_start = before_tokens.next().map_or(0, |t| t.byte_end); + let crop_byte_end = after_tokens.next().map_or(self.text.len(), |t| t.byte_start); + + (crop_byte_start, crop_byte_end) + } else { + // there's one match? and it's longer than the crop window, so we have to advance inward + let mut remaining_extra_words = matches_window_len - crop_size; + let mut tokens_from_end = + tokens[..=last_match_last_token_position].iter().rev().peekable(); + + while remaining_extra_words > 0 { + let token_from_end_kind = + tokens_from_end.peek().map(SimpleTokenKind::get).expect("TODO"); + if token_from_end_kind.is_not_separator() { + remaining_extra_words -= 1; + } + + tokens_from_end.next(); + } + + let crop_byte_start = if first_match_first_token_position > 0 { + &tokens[first_match_first_token_position - 1].byte_end + } else { + &0 + }; + let crop_byte_end = tokens_from_end.next().map(|t| t.byte_start).expect("TODO"); + + (*crop_byte_start, crop_byte_end) } - - // finally, keep the byte index of each bound of the crop window. 
- let crop_byte_start = before_tokens.next().map_or(0, |t| t.byte_end); - let crop_byte_end = after_tokens.next().map_or(self.text.len(), |t| t.byte_start); - - (crop_byte_start, crop_byte_end) } /// Compute the score of a match interval: @@ -416,11 +479,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { lwp } }; - - let next_match_first_word_pos = match next_match.position { - MatchPosition::Word { word_position, .. } => word_position, - MatchPosition::Phrase { word_positions: (fwp, _), .. } => fwp, - }; + let next_match_first_word_pos = next_match.get_first_word_pos(); // compute distance between matches distance_score -= (next_match_first_word_pos - m_last_word_pos).min(7) as i16; @@ -443,72 +502,96 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { /// Returns the matches interval where the score computed by match_interval_score is the best. fn find_best_match_interval<'a>(&self, matches: &'a [Match], crop_size: usize) -> &'a [Match] { let matches_len = matches.len(); + if matches_len <= 1 { + return matches; + } + + // positions of the first and the last match of the best matches interval in `matches`. + struct BestInterval { + interval: (usize, usize), + score: (i16, i16, i16), + } + + fn save_best_interval( + best_interval: &mut Option, + interval_first: usize, + interval_last: usize, + interval_score: (i16, i16, i16), + ) { + if let Some(best_interval) = best_interval { + if interval_score > best_interval.score { + best_interval.interval = (interval_first, interval_last); + best_interval.score = interval_score; + } + } else { + *best_interval = Some(BestInterval { + interval: (interval_first, interval_last), + score: interval_score, + }); + } + } + + let mut best_interval: Option = None; // we compute the matches interval if we have at least 2 matches. - if matches_len > 1 { - // current interval positions. - let mut interval_first = 0; - // positions of the first and the last match of the best matches interval in `matches`. 
- let mut best_interval = (0, 0); - let mut best_interval_score = self.match_interval_score(&matches[0..=0]); + // current interval positions. + let mut interval_first = 0; + let mut interval_first_match_first_word_pos = matches[interval_first].get_first_word_pos(); - let mut index = 1; - while index < matches_len - 1 { - let next_match = &matches[index]; + for (index, next_match) in matches.iter().enumerate() { + // if next match would make interval gross more than crop_size, + // we compare the current interval with the best one, + // then we increase `interval_first` until next match can be added. + let next_match_last_word_pos = next_match.get_last_word_pos(); - // if next match would make interval gross more than crop_size, - // we compare the current interval with the best one, - // then we increase `interval_first` until next match can be added. - let next_match_last_word_pos = next_match.get_last_word_pos(); - let interval_first_match_first_word_pos = - matches[interval_first].get_first_word_pos(); + // if the next match would mean that we pass the crop size window, + // we take the last valid match, that didn't pass this boundry, which is `index` - 1, + // and calculate a score for it, and check if it's better than our best so far + if next_match_last_word_pos - interval_first_match_first_word_pos >= crop_size { + // if index is 0 there is no last viable match + if index != 0 { + let interval_last = index - 1; + let interval_score = + self.match_interval_score(&matches[interval_first..=interval_last]); - // if the next match would mean that we pass the crop size window, - // we take the last valid match, that didn't pass this boundry, which is `index` - 1, - // and calculate a score for it, and check if it's better than our best so far - if next_match_last_word_pos - interval_first_match_first_word_pos >= crop_size { - // skip for 1, because it would result in the same as our very first interval score - if index != 1 { - let interval_last = index - 1; - 
let interval_score = - self.match_interval_score(&matches[interval_first..=interval_last]); - - // keep interval if it's the best - if interval_score > best_interval_score { - best_interval = (interval_first, interval_last); - best_interval_score = interval_score; - } - } - - // advance start of the interval while interval is longer than crop_size. - loop { - interval_first += 1; - let interval_first_match_first_word_pos = - matches[interval_first].get_first_word_pos(); - - if next_match_last_word_pos - interval_first_match_first_word_pos - < crop_size - { - break; - } - } + // keep interval if it's the best + save_best_interval( + &mut best_interval, + interval_first, + interval_last, + interval_score, + ); } - index += 1; - } + // advance start of the interval while interval is longer than crop_size. + loop { + interval_first += 1; + interval_first_match_first_word_pos = + matches[interval_first].get_first_word_pos(); - // compute the last interval score and compare it to the best one. - let interval_last = matches_len - 1; + if interval_first_match_first_word_pos > next_match_last_word_pos + || next_match_last_word_pos - interval_first_match_first_word_pos + < crop_size + { + break; + } + } + } + } + + // compute the last interval score and compare it to the best one. 
+ let interval_last = matches_len - 1; + // if it's the last match with itself, we need to make sure it's + // not a phrase longer than the crop window + if interval_first != interval_last || matches[interval_first].get_word_count() < crop_size { let interval_score = self.match_interval_score(&matches[interval_first..=interval_last]); - if interval_score > best_interval_score { - best_interval = (interval_first, interval_last); - } - - &matches[best_interval.0..=best_interval.1] - } else { - matches + save_best_interval(&mut best_interval, interval_first, interval_last, interval_score); } + + // if none of the matches fit the criteria above, default to the first one + let best_interval = best_interval.map_or((0, 0), |v| v.interval); + &matches[best_interval.0..=best_interval.1] } // Returns the formatted version of the original text. @@ -928,98 +1011,98 @@ mod tests { let format_options = FormatOptions { highlight: true, crop: Some(10) }; - // let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "\"the world\""); - // let mut matcher = builder.build(text, None); - // // should return 10 words with a marker at the start as well the end, and the highlighted matches. - // insta::assert_snapshot!( - // matcher.format(format_options), - // @"…the power to split the world between those who embraced…" - // ); + let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "\"the world\""); + let mut matcher = builder.build(text, None); + // should return 10 words with a marker at the start as well the end, and the highlighted matches. + insta::assert_snapshot!( + matcher.format(format_options), + @"…the power to split the world between those who embraced…" + ); - // let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "\"power to\" \"and those\""); - // let mut matcher = builder.build(text, None); - // // should highlight "those" and the phrase "and those". 
- // insta::assert_snapshot!( - // matcher.format(format_options), - // @"…groundbreaking invention had the power to split the world between…" - // ); - - // let builder = MatcherBuilder::new_test( - // &rtxn, - // &temp_index, - // "\"The groundbreaking invention had the power to split the world\"", - // ); - // let mut matcher = builder.build(text, None); - // insta::assert_snapshot!( - // matcher.format(format_options), - // @"The groundbreaking invention had the power to split the world…" - // ); + let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "\"power to\" \"and those\""); + let mut matcher = builder.build(text, None); + // should highlight "those" and the phrase "and those". + insta::assert_snapshot!( + matcher.format(format_options), + @"…groundbreaking invention had the power to split the world between…" + ); let builder = MatcherBuilder::new_test( &rtxn, &temp_index, - "\"The groundbreaking invention had the power to split the world between\"", + "\"The groundbreaking invention had the power to split the world\"", ); let mut matcher = builder.build(text, None); insta::assert_snapshot!( matcher.format(format_options), - @"The groundbreaking invention had the power to split the world …" + @"The groundbreaking invention had the power to split the world…" ); - // let builder = MatcherBuilder::new_test( - // &rtxn, - // &temp_index, - // "\"The groundbreaking invention\" \"embraced progress and those who resisted change!\"", - // ); - // let mut matcher = builder.build(text, None); - // insta::assert_snapshot!( - // matcher.format(format_options), - // @"…between those who embraced progress and those who resisted change…" - // ); + let builder = MatcherBuilder::new_test( + &rtxn, + &temp_index, + "\"The groundbreaking invention had the power to split the world between those\"", + ); + let mut matcher = builder.build(text, None); + insta::assert_snapshot!( + matcher.format(format_options), + @"The groundbreaking invention had the power to split the 
world…" + ); - // let builder = MatcherBuilder::new_test( - // &rtxn, - // &temp_index, - // "\"The groundbreaking invention\" \"split the world between those\"", - // ); - // let mut matcher = builder.build(text, None); - // insta::assert_snapshot!( - // matcher.format(format_options), - // @"…the power to split the world between those who embraced…" - // ); + let builder = MatcherBuilder::new_test( + &rtxn, + &temp_index, + "\"The groundbreaking invention\" \"embraced progress and those who resisted change!\"", + ); + let mut matcher = builder.build(text, None); + insta::assert_snapshot!( + matcher.format(format_options), + @"…between those who embraced progress and those who resisted change…" + ); - // let builder = MatcherBuilder::new_test( - // &rtxn, - // &temp_index, - // "\"groundbreaking invention\" \"split the world between\"", - // ); - // let mut matcher = builder.build(text, None); - // insta::assert_snapshot!( - // matcher.format(format_options), - // @"…groundbreaking invention had the power to split the world between…" - // ); + let builder = MatcherBuilder::new_test( + &rtxn, + &temp_index, + "\"The groundbreaking invention\" \"split the world between those\"", + ); + let mut matcher = builder.build(text, None); + insta::assert_snapshot!( + matcher.format(format_options), + @"…the power to split the world between those who embraced…" + ); - // let builder = MatcherBuilder::new_test( - // &rtxn, - // &temp_index, - // "\"groundbreaking invention\" \"had the power to split the world between those\"", - // ); - // let mut matcher = builder.build(text, None); - // insta::assert_snapshot!( - // matcher.format(format_options), - // @"…invention had the power to split the world between those…" - // ); + let builder = MatcherBuilder::new_test( + &rtxn, + &temp_index, + "\"groundbreaking invention\" \"split the world between\"", + ); + let mut matcher = builder.build(text, None); + insta::assert_snapshot!( + matcher.format(format_options), + 
@"…groundbreaking invention had the power to split the world between…" + ); - // let builder = MatcherBuilder::new_test( - // &rtxn, - // &temp_index, - // "\"The groundbreaking invention\" \"had the power to split the world between those\"", - // ); - // let mut matcher = builder.build(text, None); - // insta::assert_snapshot!( - // matcher.format(format_options), - // @"…invention had the power to split the world between those…" - // ); + let builder = MatcherBuilder::new_test( + &rtxn, + &temp_index, + "\"groundbreaking invention\" \"had the power to split the world between those\"", + ); + let mut matcher = builder.build(text, None); + insta::assert_snapshot!( + matcher.format(format_options), + @"…invention had the power to split the world between those…" + ); + + let builder = MatcherBuilder::new_test( + &rtxn, + &temp_index, + "\"The groundbreaking invention\" \"had the power to split the world between those\"", + ); + let mut matcher = builder.build(text, None); + insta::assert_snapshot!( + matcher.format(format_options), + @"…invention had the power to split the world between those…" + ); } #[test] From 6d16230f17eb000407adb21dc2f3e9fa49767cc8 Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Tue, 1 Oct 2024 17:19:15 +0300 Subject: [PATCH 037/111] Refactor --- milli/src/search/new/matches/mod.rs | 327 ++++++++++++++-------------- 1 file changed, 158 insertions(+), 169 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 624287f5f..804b59553 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -181,6 +181,149 @@ impl SimpleTokenKind { } } +#[derive(PartialEq, PartialOrd)] +struct MatchIntervalScore(i16, i16, i16); + +impl MatchIntervalScore { + /// Compute the score of a match interval: + /// 1) count unique matches + /// 2) calculate distance between matches + /// 3) count ordered matches + fn new(matches: &[Match]) -> Self { + let mut ids: Vec = Vec::with_capacity(matches.len()); + let mut order_score = 0; + let mut distance_score = 0; + + // count score for phrases + fn tally_phrase_scores( + fwp: &usize, + lwp: &usize, + order_score: &mut i16, + distance_score: &mut i16, + ) { + let words_in_phrase_minus_one = (lwp - fwp) as i16; + // will always be ordered, so +1 for each space between words + *order_score += words_in_phrase_minus_one; + // distance will always be 1, so -1 for each space between words + *distance_score -= words_in_phrase_minus_one; + } + + let mut iter = matches.iter().peekable(); + while let Some(m) = iter.next() { + if let Some(next_match) = iter.peek() { + // if matches are ordered + if next_match.ids.iter().min() > m.ids.iter().min() { + order_score += 1; + } + + let m_last_word_pos = match m.position { + MatchPosition::Word { word_position, .. } => word_position, + MatchPosition::Phrase { word_positions: (fwp, lwp), .. 
} => { + tally_phrase_scores(&fwp, &lwp, &mut order_score, &mut distance_score); + lwp + } + }; + let next_match_first_word_pos = next_match.get_first_word_pos(); + + // compute distance between matches + distance_score -= (next_match_first_word_pos - m_last_word_pos).min(7) as i16; + } else if let MatchPosition::Phrase { word_positions: (fwp, lwp), .. } = m.position { + // in case last match is a phrase, count score for its words + tally_phrase_scores(&fwp, &lwp, &mut order_score, &mut distance_score); + } + + ids.extend(m.ids.iter()); + } + + ids.sort_unstable(); + ids.dedup(); + let uniq_score = ids.len() as i16; + + // rank by unique match count, then by distance between matches, then by ordered match count. + Self(uniq_score, distance_score, order_score) + } +} + +struct MatchIntervalWithScore { + interval: (usize, usize), + score: MatchIntervalScore, +} + +impl MatchIntervalWithScore { + /// Returns the matches interval where the score computed by match_interval_score is the best. + fn find_best_match_interval(matches: &[Match], crop_size: usize) -> &[Match] { + let matches_len = matches.len(); + if matches_len <= 1 { + return matches; + } + + // positions of the first and the last match of the best matches interval in `matches`. + let mut best_interval: Option = None; + let mut save_best_interval = |interval_first, interval_last, interval_score| { + let is_interval_score_better = + &best_interval.as_ref().map_or(true, |Self { score, .. }| interval_score > *score); + if *is_interval_score_better { + best_interval = + Some(Self { interval: (interval_first, interval_last), score: interval_score }); + } + }; + + // we compute the matches interval if we have at least 2 matches. + // current interval positions. 
+ let mut interval_first = 0; + let mut interval_first_match_first_word_pos = matches[interval_first].get_first_word_pos(); + + for (index, next_match) in matches.iter().enumerate() { + // if next match would make interval gross more than crop_size, + // we compare the current interval with the best one, + // then we increase `interval_first` until next match can be added. + let next_match_last_word_pos = next_match.get_last_word_pos(); + + // if the next match would mean that we pass the crop size window, + // we take the last valid match, that didn't pass this boundry, which is `index` - 1, + // and calculate a score for it, and check if it's better than our best so far + if next_match_last_word_pos - interval_first_match_first_word_pos >= crop_size { + // if index is 0 there is no last viable match + if index != 0 { + let interval_last = index - 1; + let interval_score = + MatchIntervalScore::new(&matches[interval_first..=interval_last]); + + // keep interval if it's the best + save_best_interval(interval_first, interval_last, interval_score); + } + + // advance start of the interval while interval is longer than crop_size. + loop { + interval_first += 1; + interval_first_match_first_word_pos = + matches[interval_first].get_first_word_pos(); + + if interval_first_match_first_word_pos > next_match_last_word_pos + || next_match_last_word_pos - interval_first_match_first_word_pos + < crop_size + { + break; + } + } + } + } + + // compute the last interval score and compare it to the best one. 
+ let interval_last = matches_len - 1; + // if it's the last match with itself, we need to make sure it's + // not a phrase longer than the crop window + if interval_first != interval_last || matches[interval_first].get_word_count() < crop_size { + let interval_score = MatchIntervalScore::new(&matches[interval_first..=interval_last]); + save_best_interval(interval_first, interval_last, interval_score); + } + + // if none of the matches fit the criteria above, default to the first one + let best_interval = best_interval.map_or((0, 0), |v| v.interval); + &matches[best_interval.0..=best_interval.1] + } +} + /// Structure used to analyze a string, compute words that match, /// and format the source string, returning a highlighted and cropped sub-string. pub struct Matcher<'t, 'tokenizer, 'b, 'lang> { @@ -415,14 +558,16 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { (crop_byte_start, crop_byte_end) } else { - // there's one match? and it's longer than the crop window, so we have to advance inward + // there's one match and it's longer than the crop window, so we have to advance inward let mut remaining_extra_words = matches_window_len - crop_size; let mut tokens_from_end = tokens[..=last_match_last_token_position].iter().rev().peekable(); while remaining_extra_words > 0 { - let token_from_end_kind = - tokens_from_end.peek().map(SimpleTokenKind::get).expect("TODO"); + let token_from_end_kind = tokens_from_end + .peek() + .map(SimpleTokenKind::get) + .expect("Expected iterator to not reach end"); if token_from_end_kind.is_not_separator() { remaining_extra_words -= 1; } @@ -435,165 +580,15 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { } else { &0 }; - let crop_byte_end = tokens_from_end.next().map(|t| t.byte_start).expect("TODO"); + let crop_byte_end = tokens_from_end + .next() + .map(|t| t.byte_start) + .expect("Expected iterator to not reach end"); (*crop_byte_start, crop_byte_end) } } - /// Compute the score of a match interval: - /// 1) count 
unique matches - /// 2) calculate distance between matches - /// 3) count ordered matches - fn match_interval_score(&self, matches: &[Match]) -> (i16, i16, i16) { - let mut ids: Vec = Vec::with_capacity(matches.len()); - let mut order_score = 0; - let mut distance_score = 0; - - // count score for phrases - fn tally_phrase_scores( - fwp: &usize, - lwp: &usize, - order_score: &mut i16, - distance_score: &mut i16, - ) { - let words_in_phrase_minus_one = (lwp - fwp) as i16; - // will always be ordered, so +1 for each space between words - *order_score += words_in_phrase_minus_one; - // distance will always be 1, so -1 for each space between words - *distance_score -= words_in_phrase_minus_one; - } - - let mut iter = matches.iter().peekable(); - while let Some(m) = iter.next() { - if let Some(next_match) = iter.peek() { - // if matches are ordered - if next_match.ids.iter().min() > m.ids.iter().min() { - order_score += 1; - } - - let m_last_word_pos = match m.position { - MatchPosition::Word { word_position, .. } => word_position, - MatchPosition::Phrase { word_positions: (fwp, lwp), .. } => { - tally_phrase_scores(&fwp, &lwp, &mut order_score, &mut distance_score); - lwp - } - }; - let next_match_first_word_pos = next_match.get_first_word_pos(); - - // compute distance between matches - distance_score -= (next_match_first_word_pos - m_last_word_pos).min(7) as i16; - } else if let MatchPosition::Phrase { word_positions: (fwp, lwp), .. } = m.position { - // in case last match is a phrase, count score for its words - tally_phrase_scores(&fwp, &lwp, &mut order_score, &mut distance_score); - } - - ids.extend(m.ids.iter()); - } - - ids.sort_unstable(); - ids.dedup(); - let uniq_score = ids.len() as i16; - - // rank by unique match count, then by distance between matches, then by ordered match count. - (uniq_score, distance_score, order_score) - } - - /// Returns the matches interval where the score computed by match_interval_score is the best. 
- fn find_best_match_interval<'a>(&self, matches: &'a [Match], crop_size: usize) -> &'a [Match] { - let matches_len = matches.len(); - if matches_len <= 1 { - return matches; - } - - // positions of the first and the last match of the best matches interval in `matches`. - struct BestInterval { - interval: (usize, usize), - score: (i16, i16, i16), - } - - fn save_best_interval( - best_interval: &mut Option, - interval_first: usize, - interval_last: usize, - interval_score: (i16, i16, i16), - ) { - if let Some(best_interval) = best_interval { - if interval_score > best_interval.score { - best_interval.interval = (interval_first, interval_last); - best_interval.score = interval_score; - } - } else { - *best_interval = Some(BestInterval { - interval: (interval_first, interval_last), - score: interval_score, - }); - } - } - - let mut best_interval: Option = None; - - // we compute the matches interval if we have at least 2 matches. - // current interval positions. - let mut interval_first = 0; - let mut interval_first_match_first_word_pos = matches[interval_first].get_first_word_pos(); - - for (index, next_match) in matches.iter().enumerate() { - // if next match would make interval gross more than crop_size, - // we compare the current interval with the best one, - // then we increase `interval_first` until next match can be added. 
- let next_match_last_word_pos = next_match.get_last_word_pos(); - - // if the next match would mean that we pass the crop size window, - // we take the last valid match, that didn't pass this boundry, which is `index` - 1, - // and calculate a score for it, and check if it's better than our best so far - if next_match_last_word_pos - interval_first_match_first_word_pos >= crop_size { - // if index is 0 there is no last viable match - if index != 0 { - let interval_last = index - 1; - let interval_score = - self.match_interval_score(&matches[interval_first..=interval_last]); - - // keep interval if it's the best - save_best_interval( - &mut best_interval, - interval_first, - interval_last, - interval_score, - ); - } - - // advance start of the interval while interval is longer than crop_size. - loop { - interval_first += 1; - interval_first_match_first_word_pos = - matches[interval_first].get_first_word_pos(); - - if interval_first_match_first_word_pos > next_match_last_word_pos - || next_match_last_word_pos - interval_first_match_first_word_pos - < crop_size - { - break; - } - } - } - } - - // compute the last interval score and compare it to the best one. - let interval_last = matches_len - 1; - // if it's the last match with itself, we need to make sure it's - // not a phrase longer than the crop window - if interval_first != interval_last || matches[interval_first].get_word_count() < crop_size { - let interval_score = - self.match_interval_score(&matches[interval_first..=interval_last]); - save_best_interval(&mut best_interval, interval_first, interval_last, interval_score); - } - - // if none of the matches fit the criteria above, default to the first one - let best_interval = best_interval.map_or((0, 0), |v| v.interval); - &matches[best_interval.0..=best_interval.1] - } - // Returns the formatted version of the original text. 
pub fn format(&mut self, format_options: FormatOptions) -> Cow<'t, str> { if !format_options.highlight && format_options.crop.is_none() { @@ -606,7 +601,9 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // crop around the best interval. let (byte_start, byte_end) = match format_options.crop { Some(crop_size) if crop_size > 0 => { - let matches = self.find_best_match_interval(matches, crop_size); + let matches = MatchIntervalWithScore::find_best_match_interval( + matches, crop_size, + ); self.crop_bounds(tokens, matches, crop_size) } _ => (0, self.text.len()), @@ -1046,6 +1043,7 @@ mod tests { let mut matcher = builder.build(text, None); insta::assert_snapshot!( matcher.format(format_options), + // @TODO: Should probably highlight it all, even if it didn't fit the whole phrase @"The groundbreaking invention had the power to split the world…" ); @@ -1057,6 +1055,7 @@ mod tests { let mut matcher = builder.build(text, None); insta::assert_snapshot!( matcher.format(format_options), + // @TODO: Should probably include end of string in this case? @"…between those who embraced progress and those who resisted change…" ); @@ -1090,17 +1089,7 @@ mod tests { let mut matcher = builder.build(text, None); insta::assert_snapshot!( matcher.format(format_options), - @"…invention had the power to split the world between those…" - ); - - let builder = MatcherBuilder::new_test( - &rtxn, - &temp_index, - "\"The groundbreaking invention\" \"had the power to split the world between those\"", - ); - let mut matcher = builder.build(text, None); - insta::assert_snapshot!( - matcher.format(format_options), + // @TODO: "invention" should be highlighted as well @"…invention had the power to split the world between those…" ); } From d9e4db9983e7017bb13a89f7e28def43069e1a58 Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Tue, 1 Oct 2024 17:50:59 +0300 Subject: [PATCH 038/111] Refactor --- milli/src/search/new/matches/mod.rs | 38 ++++++++++++----------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 804b59553..1552de8aa 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -166,19 +166,12 @@ enum SimpleTokenKind { } impl SimpleTokenKind { - fn get(token: &&Token<'_>) -> Self { + fn new(token: &&Token<'_>) -> Self { match token.kind { TokenKind::Separator(separaor_kind) => Self::Separator(separaor_kind), _ => Self::NotSeparator, } } - - fn is_not_separator(&self) -> bool { - match self { - SimpleTokenKind::NotSeparator => true, - SimpleTokenKind::Separator(_) => false, - } - } } #[derive(PartialEq, PartialOrd)] @@ -259,9 +252,12 @@ impl MatchIntervalWithScore { // positions of the first and the last match of the best matches interval in `matches`. let mut best_interval: Option = None; - let mut save_best_interval = |interval_first, interval_last, interval_score| { + + let mut save_best_interval = |interval_first, interval_last| { + let interval_score = MatchIntervalScore::new(&matches[interval_first..=interval_last]); let is_interval_score_better = &best_interval.as_ref().map_or(true, |Self { score, .. 
}| interval_score > *score); + if *is_interval_score_better { best_interval = Some(Self { interval: (interval_first, interval_last), score: interval_score }); @@ -286,11 +282,8 @@ impl MatchIntervalWithScore { // if index is 0 there is no last viable match if index != 0 { let interval_last = index - 1; - let interval_score = - MatchIntervalScore::new(&matches[interval_first..=interval_last]); - // keep interval if it's the best - save_best_interval(interval_first, interval_last, interval_score); + save_best_interval(interval_first, interval_last); } // advance start of the interval while interval is longer than crop_size. @@ -314,8 +307,7 @@ impl MatchIntervalWithScore { // if it's the last match with itself, we need to make sure it's // not a phrase longer than the crop window if interval_first != interval_last || matches[interval_first].get_word_count() < crop_size { - let interval_score = MatchIntervalScore::new(&matches[interval_first..=interval_last]); - save_best_interval(interval_first, interval_last, interval_score); + save_best_interval(interval_first, interval_last); } // if none of the matches fit the criteria above, default to the first one @@ -359,6 +351,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { Some(MatchType::Full { ids, .. }) => { // save the token that closes the partial match as a match. matches.push(Match { + // @TODO: Shouldn't this be +1? 
match_len: word.char_end - *first_word_char_start, ids: ids.clone().collect(), position: MatchPosition::Phrase { @@ -484,8 +477,8 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // grows the crop window peeking in both directions // until the window contains the good number of words: while remaining_words > 0 { - let before_token_kind = before_tokens.peek().map(SimpleTokenKind::get); - let after_token_kind = after_tokens.peek().map(SimpleTokenKind::get); + let before_token_kind = before_tokens.peek().map(SimpleTokenKind::new); + let after_token_kind = after_tokens.peek().map(SimpleTokenKind::new); match (before_token_kind, after_token_kind) { // we can expand both sides. @@ -504,7 +497,8 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { if remaining_words > 1 { after_tokens.next(); } - } else if let SeparatorKind::Hard = before_token_separator_kind { + } else if matches!(before_token_separator_kind, SeparatorKind::Hard) + { after_tokens.next(); } else { before_tokens.next(); @@ -536,14 +530,14 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // the end of the text is reached, advance left. (Some(before_token_kind), None) => { before_tokens.next(); - if let SimpleTokenKind::NotSeparator = before_token_kind { + if matches!(before_token_kind, SimpleTokenKind::NotSeparator) { remaining_words -= 1; } } // the start of the text is reached, advance right. 
(None, Some(after_token_kind)) => { after_tokens.next(); - if let SimpleTokenKind::NotSeparator = after_token_kind { + if matches!(after_token_kind, SimpleTokenKind::NotSeparator) { remaining_words -= 1; } } @@ -566,9 +560,9 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { while remaining_extra_words > 0 { let token_from_end_kind = tokens_from_end .peek() - .map(SimpleTokenKind::get) + .map(SimpleTokenKind::new) .expect("Expected iterator to not reach end"); - if token_from_end_kind.is_not_separator() { + if matches!(token_from_end_kind, SimpleTokenKind::NotSeparator) { remaining_extra_words -= 1; } From 2654ce6e6c4d256b37e098e8d1be12af0d82e2eb Mon Sep 17 00:00:00 2001 From: Timon Jurschitsch Date: Tue, 1 Oct 2024 17:01:47 +0200 Subject: [PATCH 039/111] use shared servers --- meilisearch/tests/index/delete_index.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/meilisearch/tests/index/delete_index.rs b/meilisearch/tests/index/delete_index.rs index 8e238dad3..a6d61882e 100644 --- a/meilisearch/tests/index/delete_index.rs +++ b/meilisearch/tests/index/delete_index.rs @@ -3,8 +3,8 @@ use crate::json; #[actix_rt::test] async fn create_and_delete_index() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (response, code) = index.create(None).await; assert_eq!(code, 202); @@ -24,14 +24,18 @@ async fn create_and_delete_index() { #[actix_rt::test] async fn error_delete_unexisting_index() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (task, code) = index.delete().await; assert_eq!(code, 202); + let msg = format!( + "Index `{}` not found.", + task["indexUid"].as_str().expect("indexUid should exist").trim_matches('"') + ); let expected_response = json!({ - "message": "Index `test` not found.", + "message": msg, "code": 
"index_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#index_not_found" @@ -44,8 +48,8 @@ async fn error_delete_unexisting_index() { #[actix_rt::test] async fn loop_delete_add_documents() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let documents = json!([{"id": 1, "field1": "hello"}]); let mut tasks = Vec::new(); for _ in 0..50 { From 17571805b47d9117fc75afc5abdded64fa9424e8 Mon Sep 17 00:00:00 2001 From: Timon Jurschitsch Date: Tue, 1 Oct 2024 17:27:27 +0200 Subject: [PATCH 040/111] use shared servers --- meilisearch/tests/common/server.rs | 5 +++ meilisearch/tests/index/create_index.rs | 44 ++++++++++++++----------- 2 files changed, 29 insertions(+), 20 deletions(-) diff --git a/meilisearch/tests/common/server.rs b/meilisearch/tests/common/server.rs index 6d331ebbc..200f6a141 100644 --- a/meilisearch/tests/common/server.rs +++ b/meilisearch/tests/common/server.rs @@ -309,6 +309,11 @@ impl Server { } } + pub fn unique_index_with_encoder(&self, encoder: Encoder) -> Index<'_> { + let uuid = Uuid::new_v4(); + Index { uid: uuid.to_string(), service: &self.service, encoder, marker: PhantomData } + } + pub(super) async fn _create_index(&self, body: Value) -> (Value, StatusCode) { self.service.post("/indexes", body).await } diff --git a/meilisearch/tests/index/create_index.rs b/meilisearch/tests/index/create_index.rs index 16a6e0678..528e84b8e 100644 --- a/meilisearch/tests/index/create_index.rs +++ b/meilisearch/tests/index/create_index.rs @@ -9,8 +9,8 @@ use crate::json; #[actix_rt::test] async fn create_index_no_primary_key() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (response, code) = index.create(None).await; assert_eq!(code, 202); @@ -26,8 +26,8 @@ async fn create_index_no_primary_key() { #[actix_rt::test] async fn 
create_index_with_gzip_encoded_request() { - let server = Server::new().await; - let index = server.index_with_encoder("test", Encoder::Gzip); + let server = Server::new_shared(); + let index = server.unique_index_with_encoder(Encoder::Gzip); let (response, code) = index.create(None).await; assert_eq!(code, 202); @@ -43,7 +43,7 @@ async fn create_index_with_gzip_encoded_request() { #[actix_rt::test] async fn create_index_with_gzip_encoded_request_and_receiving_brotli_encoded_response() { - let server = Server::new().await; + let server = Server::new_shared(); let app = server.init_web_app().await; let body = serde_json::to_string(&json!({ @@ -68,14 +68,14 @@ async fn create_index_with_gzip_encoded_request_and_receiving_brotli_encoded_res let parsed_response = serde_json::from_slice::(decoded.into().as_ref()).expect("Expecting valid json"); - assert_eq!(parsed_response["taskUid"], 0); + assert_eq!(parsed_response["taskUid"], 3); assert_eq!(parsed_response["indexUid"], "test"); } #[actix_rt::test] async fn create_index_with_zlib_encoded_request() { - let server = Server::new().await; - let index = server.index_with_encoder("test", Encoder::Deflate); + let server = Server::new_shared(); + let index = server.unique_index_with_encoder(Encoder::Deflate); let (response, code) = index.create(None).await; assert_eq!(code, 202); @@ -91,8 +91,8 @@ async fn create_index_with_zlib_encoded_request() { #[actix_rt::test] async fn create_index_with_brotli_encoded_request() { - let server = Server::new().await; - let index = server.index_with_encoder("test", Encoder::Brotli); + let server = Server::new_shared(); + let index = server.unique_index_with_encoder(Encoder::Brotli); let (response, code) = index.create(None).await; assert_eq!(code, 202); @@ -108,8 +108,8 @@ async fn create_index_with_brotli_encoded_request() { #[actix_rt::test] async fn create_index_with_primary_key() { - let server = Server::new().await; - let index = server.index("test"); + let server = 
Server::new_shared(); + let index = server.unique_index(); let (response, code) = index.create(Some("primary")).await; assert_eq!(code, 202); @@ -127,8 +127,8 @@ async fn create_index_with_primary_key() { async fn create_index_with_invalid_primary_key() { let document = json!([ { "id": 2, "title": "Pride and Prejudice" } ]); - let server = Server::new().await; - let index = server.index("movies"); + let server = Server::new_shared(); + let index = server.unique_index(); let (response, code) = index.add_documents(document, Some("title")).await; assert_eq!(code, 202); @@ -142,10 +142,10 @@ async fn create_index_with_invalid_primary_key() { #[actix_rt::test] async fn test_create_multiple_indexes() { let server = Server::new().await; - let index1 = server.index("test1"); - let index2 = server.index("test2"); - let index3 = server.index("test3"); - let index4 = server.index("test4"); + let index1 = server.unique_index(); + let index2 = server.unique_index(); + let index3 = server.unique_index(); + let index4 = server.unique_index(); let (task1, _) = index1.create(None).await; let (task2, _) = index2.create(None).await; @@ -164,7 +164,7 @@ async fn test_create_multiple_indexes() { #[actix_rt::test] async fn error_create_existing_index() { let server = Server::new().await; - let index = server.index("test"); + let index = server.unique_index(); let (_, code) = index.create(Some("primary")).await; assert_eq!(code, 202); @@ -172,9 +172,13 @@ async fn error_create_existing_index() { let (task, _) = index.create(Some("primary")).await; let response = index.wait_task(task.uid()).await; + let msg = format!( + "Index `{}` already exists.", + task["indexUid"].as_str().expect("indexUid should exist").trim_matches('"') + ); let expected_response = json!({ - "message": "Index `test` already exists.", + "message": msg, "code": "index_already_exists", "type": "invalid_request", "link":"https://docs.meilisearch.com/errors#index_already_exists" From 
4b598fa648944a5f5f1cdd7ecbdadd1cb8d3d659 Mon Sep 17 00:00:00 2001 From: Tamo Date: Mon, 30 Sep 2024 13:12:01 +0200 Subject: [PATCH 041/111] update arroy --- Cargo.lock | 5 +++-- index-scheduler/Cargo.toml | 2 +- milli/Cargo.toml | 2 +- milli/src/error.rs | 1 + milli/src/vector/mod.rs | 33 +++++++++++++++++++++------------ 5 files changed, 27 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3237d4e16..c85a59952 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -386,8 +386,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "arroy" -version = "0.4.0" -source = "git+https://github.com/meilisearch/arroy/?rev=2386594dfb009ce08821a925ccc89fb8e30bf73d#2386594dfb009ce08821a925ccc89fb8e30bf73d" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc5f272f38fa063bbff0a7ab5219404e221493de005e2b4078c62d626ef567e" dependencies = [ "bytemuck", "byteorder", diff --git a/index-scheduler/Cargo.toml b/index-scheduler/Cargo.toml index 432a86382..e80311005 100644 --- a/index-scheduler/Cargo.toml +++ b/index-scheduler/Cargo.toml @@ -40,7 +40,7 @@ ureq = "2.10.0" uuid = { version = "1.10.0", features = ["serde", "v4"] } [dev-dependencies] -arroy = { git = "https://github.com/meilisearch/arroy/", rev = "2386594dfb009ce08821a925ccc89fb8e30bf73d" } +arroy = "0.5.0" big_s = "1.0.2" crossbeam = "0.8.4" insta = { version = "1.39.0", features = ["json", "redactions"] } diff --git a/milli/Cargo.toml b/milli/Cargo.toml index 01384f496..df0e59496 100644 --- a/milli/Cargo.toml +++ b/milli/Cargo.toml @@ -80,7 +80,7 @@ hf-hub = { git = "https://github.com/dureuill/hf-hub.git", branch = "rust_tls", tiktoken-rs = "0.5.9" liquid = "0.26.6" rhai = { git = "https://github.com/rhaiscript/rhai", rev = "ef3df63121d27aacd838f366f2b83fd65f20a1e4", features = ["serde", "no_module", "no_custom_syntax", "no_time", "sync"] } -arroy = { git = "https://github.com/meilisearch/arroy/", rev = 
"2386594dfb009ce08821a925ccc89fb8e30bf73d" } +arroy = "0.5.0" rand = "0.8.5" tracing = "0.1.40" ureq = { version = "2.10.0", features = ["json"] } diff --git a/milli/src/error.rs b/milli/src/error.rs index 400d3d3be..840db7606 100644 --- a/milli/src/error.rs +++ b/milli/src/error.rs @@ -297,6 +297,7 @@ impl From for Error { arroy::Error::InvalidVecDimension { expected, received } => { Error::UserError(UserError::InvalidVectorDimensions { expected, found: received }) } + arroy::Error::BuildCancelled => Error::InternalError(InternalError::AbortedIndexation), arroy::Error::DatabaseFull | arroy::Error::InvalidItemAppend | arroy::Error::UnmatchingDistance { .. } diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index b6d6510af..097e93ad2 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use std::sync::Arc; -use arroy::distances::{Angular, BinaryQuantizedAngular}; +use arroy::distances::{BinaryQuantizedCosine, Cosine}; use arroy::ItemId; use deserr::{DeserializeError, Deserr}; use heed::{RoTxn, RwTxn, Unspecified}; @@ -87,7 +87,7 @@ impl ArroyWrapper { if self.quantized { let writer = arroy::Writer::new(self.quantized_db(), index, dimension); if writer.need_build(wtxn)? { - writer.build(wtxn, rng, None)? + writer.builder(rng).build(wtxn)? } else if writer.is_empty(wtxn)? { break; } @@ -99,11 +99,10 @@ impl ArroyWrapper { // only happens once in the life of an embedder, it's not very performances // sensitive. if quantizing && !self.quantized { - let writer = - writer.prepare_changing_distance::(wtxn)?; - writer.build(wtxn, rng, None)? + let writer = writer.prepare_changing_distance::(wtxn)?; + writer.builder(rng).build(wtxn)?; } else if writer.need_build(wtxn)? { - writer.build(wtxn, rng, None)? + writer.builder(rng).build(wtxn)?; } else if writer.is_empty(wtxn)? 
{ break; } @@ -323,8 +322,13 @@ impl ArroyWrapper { let mut results = Vec::new(); for reader in self.readers(rtxn, db) { - let ret = reader?.nns_by_item(rtxn, item, limit, None, None, filter)?; - if let Some(mut ret) = ret { + let reader = reader?; + let mut searcher = reader.nns(limit); + if let Some(filter) = filter { + searcher.candidates(filter); + } + + if let Some(mut ret) = searcher.by_item(rtxn, item)? { results.append(&mut ret); } else { break; @@ -359,8 +363,13 @@ impl ArroyWrapper { let mut results = Vec::new(); for reader in self.readers(rtxn, db) { - let mut ret = reader?.nns_by_vector(rtxn, vector, limit, None, None, filter)?; - results.append(&mut ret); + let reader = reader?; + let mut searcher = reader.nns(limit); + if let Some(filter) = filter { + searcher.candidates(filter); + } + + results.append(&mut searcher.by_vector(rtxn, vector)?); } results.sort_unstable_by_key(|(_, distance)| OrderedFloat(*distance)); @@ -391,11 +400,11 @@ impl ArroyWrapper { Ok(vectors) } - fn angular_db(&self) -> arroy::Database { + fn angular_db(&self) -> arroy::Database { self.database.remap_data_type() } - fn quantized_db(&self) -> arroy::Database { + fn quantized_db(&self) -> arroy::Database { self.database.remap_data_type() } } From b1dc10e771a757826fe400280c8bac84976ce95b Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 1 Oct 2024 17:45:49 +0200 Subject: [PATCH 042/111] uses the new cancellation method in arroy --- milli/src/update/index_documents/mod.rs | 3 ++- milli/src/vector/mod.rs | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/milli/src/update/index_documents/mod.rs b/milli/src/update/index_documents/mod.rs index e164a0817..88d20fff0 100644 --- a/milli/src/update/index_documents/mod.rs +++ b/milli/src/update/index_documents/mod.rs @@ -699,6 +699,7 @@ where for (embedder_name, dimension) in dimension { let wtxn = &mut *self.wtxn; let vector_arroy = self.index.vector_arroy; + let cancel = &self.should_abort; let embedder_index = 
self.index.embedder_category_id.get(wtxn, &embedder_name)?.ok_or( InternalError::DatabaseMissingEntry { db_name: "embedder_category_id", key: None }, @@ -713,7 +714,7 @@ where pool.install(|| { let mut writer = ArroyWrapper::new(vector_arroy, embedder_index, was_quantized); - writer.build_and_quantize(wtxn, &mut rng, dimension, is_quantizing)?; + writer.build_and_quantize(wtxn, &mut rng, dimension, is_quantizing, cancel)?; Result::Ok(()) }) .map_err(InternalError::from)??; diff --git a/milli/src/vector/mod.rs b/milli/src/vector/mod.rs index 097e93ad2..571c02c8c 100644 --- a/milli/src/vector/mod.rs +++ b/milli/src/vector/mod.rs @@ -82,6 +82,7 @@ impl ArroyWrapper { rng: &mut R, dimension: usize, quantizing: bool, + cancel: &(impl Fn() -> bool + Sync + Send), ) -> Result<(), arroy::Error> { for index in arroy_db_range_for_embedder(self.embedder_index) { if self.quantized { @@ -100,9 +101,9 @@ impl ArroyWrapper { // sensitive. if quantizing && !self.quantized { let writer = writer.prepare_changing_distance::(wtxn)?; - writer.builder(rng).build(wtxn)?; + writer.builder(rng).cancel(cancel).build(wtxn)?; } else if writer.need_build(wtxn)? { - writer.builder(rng).build(wtxn)?; + writer.builder(rng).cancel(cancel).build(wtxn)?; } else if writer.is_empty(wtxn)? { break; } From 37a9d64c4441bb6a4a199ad018ab4ddb44d4d958 Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Tue, 1 Oct 2024 22:52:01 +0300 Subject: [PATCH 043/111] Fix failing test, refactor --- milli/src/search/new/matches/mod.rs | 44 ++++++++++++++++++----------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 1552de8aa..ae1264482 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -245,8 +245,7 @@ struct MatchIntervalWithScore { impl MatchIntervalWithScore { /// Returns the matches interval where the score computed by match_interval_score is the best. fn find_best_match_interval(matches: &[Match], crop_size: usize) -> &[Match] { - let matches_len = matches.len(); - if matches_len <= 1 { + if matches.len() <= 1 { return matches; } @@ -303,7 +302,7 @@ impl MatchIntervalWithScore { } // compute the last interval score and compare it to the best one. - let interval_last = matches_len - 1; + let interval_last = matches.len() - 1; // if it's the last match with itself, we need to make sure it's // not a phrase longer than the crop window if interval_first != interval_last || matches[interval_first].get_word_count() < crop_size { @@ -451,28 +450,39 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { crop_size: usize, ) -> (usize, usize) { // if there is no match, we start from the beginning of the string by default. 
- let first_match_first_word_position = - matches.first().map(|m| m.get_first_word_pos()).unwrap_or(0); - let first_match_first_token_position = - matches.first().map(|m| m.get_first_token_pos()).unwrap_or(0); - let last_match_last_word_position = - matches.last().map(|m| m.get_last_word_pos()).unwrap_or(0); - let last_match_last_token_position = - matches.last().map(|m| m.get_last_token_pos()).unwrap_or(0); + let (matches_size, first_match_first_token_position, last_match_last_token_position) = + if !matches.is_empty() { + let matches_first = matches.first().unwrap(); + let matches_last = matches.last().unwrap(); - let matches_window_len = - last_match_last_word_position - first_match_first_word_position + 1; + ( + matches_last.get_last_word_pos() - matches_first.get_first_word_pos() + 1, + matches_first.get_first_token_pos(), + matches_last.get_last_token_pos(), + ) + } else { + (0, 0, 0) + }; - if crop_size >= matches_window_len { + if crop_size >= matches_size { // matches needs to be counted in the crop len. - let mut remaining_words = crop_size - matches_window_len; + let mut remaining_words = crop_size - matches_size; + + let last_match_last_token_position_plus_one = last_match_last_token_position + 1; + let after_tokens_starting_index = if matches_size == 0 { + 0 + } else if last_match_last_token_position_plus_one < tokens.len() { + last_match_last_token_position_plus_one + } else { + tokens.len() + }; // create the initial state of the crop window: 2 iterators starting from the matches positions, // a reverse iterator starting from the first match token position and going towards the beginning of the text, let mut before_tokens = tokens[..first_match_first_token_position].iter().rev().peekable(); // an iterator starting from the last match token position and going towards the end of the text. 
- let mut after_tokens = tokens[last_match_last_token_position + 1..].iter().peekable(); + let mut after_tokens = tokens[after_tokens_starting_index..].iter().peekable(); // grows the crop window peeking in both directions // until the window contains the good number of words: @@ -553,7 +563,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { (crop_byte_start, crop_byte_end) } else { // there's one match and it's longer than the crop window, so we have to advance inward - let mut remaining_extra_words = matches_window_len - crop_size; + let mut remaining_extra_words = matches_size - crop_size; let mut tokens_from_end = tokens[..=last_match_last_token_position].iter().rev().peekable(); From 62dfbd6255846db8fcfb7c515a9ad041999f7d3a Mon Sep 17 00:00:00 2001 From: Louis Dureuil Date: Wed, 2 Oct 2024 11:20:02 +0200 Subject: [PATCH 044/111] Add binary quantized to allowed fields for source adds its sources --- milli/src/vector/settings.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/milli/src/vector/settings.rs b/milli/src/vector/settings.rs index 3bb7f09e6..d1cf364a2 100644 --- a/milli/src/vector/settings.rs +++ b/milli/src/vector/settings.rs @@ -417,6 +417,8 @@ impl EmbeddingSettings { pub const DISTRIBUTION: &'static str = "distribution"; + pub const BINARY_QUANTIZED: &'static str = "binaryQuantized"; + pub fn allowed_sources_for_field(field: &'static str) -> &'static [EmbedderSource] { match field { Self::SOURCE => &[ @@ -456,6 +458,13 @@ impl EmbeddingSettings { EmbedderSource::Rest, EmbedderSource::UserProvided, ], + Self::BINARY_QUANTIZED => &[ + EmbedderSource::HuggingFace, + EmbedderSource::Ollama, + EmbedderSource::OpenAi, + EmbedderSource::Rest, + EmbedderSource::UserProvided, + ], _other => unreachable!("unknown field"), } } @@ -470,6 +479,7 @@ impl EmbeddingSettings { Self::DIMENSIONS, Self::DISTRIBUTION, Self::URL, + Self::BINARY_QUANTIZED, ], EmbedderSource::HuggingFace => &[ Self::SOURCE, @@ -477,6 +487,7 @@ 
impl EmbeddingSettings { Self::REVISION, Self::DOCUMENT_TEMPLATE, Self::DISTRIBUTION, + Self::BINARY_QUANTIZED, ], EmbedderSource::Ollama => &[ Self::SOURCE, @@ -486,8 +497,11 @@ impl EmbeddingSettings { Self::API_KEY, Self::DIMENSIONS, Self::DISTRIBUTION, + Self::BINARY_QUANTIZED, ], - EmbedderSource::UserProvided => &[Self::SOURCE, Self::DIMENSIONS, Self::DISTRIBUTION], + EmbedderSource::UserProvided => { + &[Self::SOURCE, Self::DIMENSIONS, Self::DISTRIBUTION, Self::BINARY_QUANTIZED] + } EmbedderSource::Rest => &[ Self::SOURCE, Self::API_KEY, @@ -498,6 +512,7 @@ impl EmbeddingSettings { Self::RESPONSE, Self::HEADERS, Self::DISTRIBUTION, + Self::BINARY_QUANTIZED, ], } } From 0c2661ea90f26d3269d0ed53cb47fa69bf9e5600 Mon Sep 17 00:00:00 2001 From: Louis Dureuil Date: Wed, 2 Oct 2024 11:20:29 +0200 Subject: [PATCH 045/111] Fix tests --- meilisearch/tests/vector/settings.rs | 47 ++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/meilisearch/tests/vector/settings.rs b/meilisearch/tests/vector/settings.rs index 4f07ca18b..ed45913a8 100644 --- a/meilisearch/tests/vector/settings.rs +++ b/meilisearch/tests/vector/settings.rs @@ -4,6 +4,53 @@ use crate::common::{GetAllDocumentsOptions, Server}; use crate::json; use crate::vector::generate_default_user_provided_documents; +#[actix_rt::test] +async fn field_unavailable_for_source() { + let server = Server::new().await; + let index = server.index("doggo"); + let (value, code) = server.set_features(json!({"vectorStore": true})).await; + snapshot!(code, @"200 OK"); + snapshot!(value, @r###" + { + "vectorStore": true, + "metrics": false, + "logsRoute": false, + "editDocumentsByFunction": false, + "containsFilter": false + } + "###); + + let (response, code) = index + .update_settings(json!({ + "embedders": { "manual": {"source": "userProvided", "documentTemplate": "{{doc.documentTemplate}}"}}, + })) + .await; + snapshot!(code, @"400 Bad Request"); + snapshot!(response, @r###" + { + "message": 
"`.embedders.manual`: Field `documentTemplate` unavailable for source `userProvided` (only available for sources: `huggingFace`, `openAi`, `ollama`, `rest`). Available fields: `source`, `dimensions`, `distribution`, `binaryQuantized`", + "code": "invalid_settings_embedders", + "type": "invalid_request", + "link": "https://docs.meilisearch.com/errors#invalid_settings_embedders" + } + "###); + + let (response, code) = index + .update_settings(json!({ + "embedders": { "default": {"source": "openAi", "revision": "42"}}, + })) + .await; + snapshot!(code, @"400 Bad Request"); + snapshot!(response, @r###" + { + "message": "`.embedders.default`: Field `revision` unavailable for source `openAi` (only available for sources: `huggingFace`). Available fields: `source`, `model`, `apiKey`, `documentTemplate`, `dimensions`, `distribution`, `url`, `binaryQuantized`", + "code": "invalid_settings_embedders", + "type": "invalid_request", + "link": "https://docs.meilisearch.com/errors#invalid_settings_embedders" + } + "###); +} + #[actix_rt::test] async fn update_embedder() { let server = Server::new().await; From 2a18917af309e053ef48cc09a6c875eddf03f5d8 Mon Sep 17 00:00:00 2001 From: Timon Jurschitsch Date: Wed, 2 Oct 2024 16:23:21 +0200 Subject: [PATCH 046/111] add delete_index_fail function --- meilisearch/tests/common/index.rs | 20 ++++++++++++++++++++ meilisearch/tests/index/delete_index.rs | 13 ++++--------- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/meilisearch/tests/common/index.rs b/meilisearch/tests/common/index.rs index 381bd1cb4..784067c2d 100644 --- a/meilisearch/tests/common/index.rs +++ b/meilisearch/tests/common/index.rs @@ -272,6 +272,20 @@ impl<'a> Index<'a, Shared> { } (task, code) } + + pub async fn delete_index_fail(&self) -> (Value, StatusCode) { + let (mut task, code) = self._delete().await; + if code.is_success() { + task = self.wait_task(task.uid()).await; + if task.is_success() { + panic!( + "`delete_index_fail` succeeded: {}", + 
serde_json::to_string_pretty(&task).unwrap() + ); + } + } + (task, code) + } } #[allow(dead_code)] @@ -314,6 +328,12 @@ impl Index<'_, State> { }); self.service.post_encoded("/indexes", body, self.encoder).await } + + pub(super) async fn _delete(&self) -> (Value, StatusCode) { + let url = format!("/indexes/{}", urlencode(self.uid.as_ref())); + self.service.delete(url).await + } + pub async fn wait_task(&self, update_id: u64) -> Value { // try several times to get status, or panic to not wait forever let url = format!("/tasks/{}", update_id); diff --git a/meilisearch/tests/index/delete_index.rs b/meilisearch/tests/index/delete_index.rs index a6d61882e..03185d21a 100644 --- a/meilisearch/tests/index/delete_index.rs +++ b/meilisearch/tests/index/delete_index.rs @@ -1,4 +1,4 @@ -use crate::common::Server; +use crate::common::{shared_does_not_exists_index, Server}; use crate::json; #[actix_rt::test] @@ -24,18 +24,13 @@ async fn create_and_delete_index() { #[actix_rt::test] async fn error_delete_unexisting_index() { - let server = Server::new_shared(); - let index = server.unique_index(); - let (task, code) = index.delete().await; + let index = shared_does_not_exists_index().await; + let (task, code) = index.delete_index_fail().await; assert_eq!(code, 202); - let msg = format!( - "Index `{}` not found.", - task["indexUid"].as_str().expect("indexUid should exist").trim_matches('"') - ); let expected_response = json!({ - "message": msg, + "message": "Index `DOES_NOT_EXISTS` not found.", "code": "index_not_found", "type": "invalid_request", "link": "https://docs.meilisearch.com/errors#index_not_found" From 40336ce87d46b43123d03cb343b4d3f785001a9c Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Thu, 3 Oct 2024 10:40:14 +0300 Subject: [PATCH 047/111] Fix and refactor crop_bounds --- milli/src/search/new/matches/mod.rs | 231 ++++++++++++++-------------- 1 file changed, 113 insertions(+), 118 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index ae1264482..f8d60ef54 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -1,6 +1,7 @@ use std::borrow::Cow; use charabia::{Language, SeparatorKind, Token, TokenKind, Tokenizer}; +use either::Either; pub use matching_words::MatchingWords; use matching_words::{MatchType, PartialMatch, WordId}; use serde::Serialize; @@ -450,147 +451,141 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { crop_size: usize, ) -> (usize, usize) { // if there is no match, we start from the beginning of the string by default. - let (matches_size, first_match_first_token_position, last_match_last_token_position) = - if !matches.is_empty() { - let matches_first = matches.first().unwrap(); - let matches_last = matches.last().unwrap(); + let ( + mut remaining_words, + is_iterating_forward, + before_tokens_starting_index, + after_tokens_starting_index, + ) = if !matches.is_empty() { + let matches_first = matches.first().unwrap(); + let matches_last = matches.last().unwrap(); - ( - matches_last.get_last_word_pos() - matches_first.get_first_word_pos() + 1, - matches_first.get_first_token_pos(), - matches_last.get_last_token_pos(), - ) + let matches_size = + matches_last.get_last_word_pos() - matches_first.get_first_word_pos() + 1; + + let is_crop_size_gte_match_size = crop_size >= matches_size; + let is_iterating_forward = matches_size == 0 || is_crop_size_gte_match_size; + + let remaining_words = if is_crop_size_gte_match_size { + crop_size - matches_size } else { - (0, 0, 0) + // in case matches size is greater than crop size, which implies there's only one match, + // we count words 
backwards, because we have to remove words, as they're extra words outside of + // crop window + matches_size - crop_size }; - if crop_size >= matches_size { - // matches needs to be counted in the crop len. - let mut remaining_words = crop_size - matches_size; - - let last_match_last_token_position_plus_one = last_match_last_token_position + 1; let after_tokens_starting_index = if matches_size == 0 { 0 - } else if last_match_last_token_position_plus_one < tokens.len() { - last_match_last_token_position_plus_one } else { - tokens.len() + let last_match_last_token_position_plus_one = matches_last.get_last_token_pos() + 1; + if last_match_last_token_position_plus_one < tokens.len() { + last_match_last_token_position_plus_one + } else { + // we have matched the end of possible tokens, there's nothing to advance + tokens.len() - 1 + } }; - // create the initial state of the crop window: 2 iterators starting from the matches positions, - // a reverse iterator starting from the first match token position and going towards the beginning of the text, - let mut before_tokens = - tokens[..first_match_first_token_position].iter().rev().peekable(); - // an iterator starting from the last match token position and going towards the end of the text. 
- let mut after_tokens = tokens[after_tokens_starting_index..].iter().peekable(); + ( + remaining_words, + is_iterating_forward, + if is_iterating_forward { matches_first.get_first_token_pos() } else { 0 }, + after_tokens_starting_index, + ) + } else { + (crop_size, true, 0, 0) + }; - // grows the crop window peeking in both directions - // until the window contains the good number of words: - while remaining_words > 0 { - let before_token_kind = before_tokens.peek().map(SimpleTokenKind::new); - let after_token_kind = after_tokens.peek().map(SimpleTokenKind::new); + // create the initial state of the crop window: 2 iterators starting from the matches positions, + // a reverse iterator starting from the first match token position and going towards the beginning of the text, + let mut before_tokens = tokens[..before_tokens_starting_index].iter().rev().peekable(); + // an iterator ... + let mut after_tokens = if is_iterating_forward { + // ... starting from the last match token position and going towards the end of the text. + Either::Left(tokens[after_tokens_starting_index..].iter().peekable()) + } else { + // ... starting from the last match token position and going towards the start of the text. + Either::Right(tokens[..=after_tokens_starting_index].iter().rev().peekable()) + }; - match (before_token_kind, after_token_kind) { - // we can expand both sides. - (Some(before_token_kind), Some(after_token_kind)) => { - match (before_token_kind, after_token_kind) { - // if they are both separators and are the same kind then advance both, - // or expand in the soft separator separator side. 
- ( - SimpleTokenKind::Separator(before_token_separator_kind), - SimpleTokenKind::Separator(after_token_separator_kind), - ) => { - if before_token_separator_kind == after_token_separator_kind { - before_tokens.next(); + // grows the crop window peeking in both directions + // until the window contains the good number of words: + while remaining_words > 0 { + let before_token_kind = before_tokens.peek().map(SimpleTokenKind::new); + let after_token_kind = + after_tokens.as_mut().either(|v| v.peek(), |v| v.peek()).map(SimpleTokenKind::new); - // this avoid having an ending separator before crop marker. - if remaining_words > 1 { - after_tokens.next(); - } - } else if matches!(before_token_separator_kind, SeparatorKind::Hard) - { - after_tokens.next(); - } else { - before_tokens.next(); - } - } - // if one of the tokens is a word, we expend in the side of the word. - // left is a word, advance left. - (SimpleTokenKind::NotSeparator, SimpleTokenKind::Separator(_)) => { + match (before_token_kind, after_token_kind) { + // we can expand both sides. + (Some(before_token_kind), Some(after_token_kind)) => { + match (before_token_kind, after_token_kind) { + // if they are both separators and are the same kind then advance both, + // or expand in the soft separator separator side. + ( + SimpleTokenKind::Separator(before_token_separator_kind), + SimpleTokenKind::Separator(after_token_separator_kind), + ) => { + if before_token_separator_kind == after_token_separator_kind { + before_tokens.next(); + + // this avoid having an ending separator before crop marker. + if remaining_words > 1 { + after_tokens.next(); + } + } else if matches!(before_token_separator_kind, SeparatorKind::Hard) { + after_tokens.next(); + } else { before_tokens.next(); - remaining_words -= 1; } - // right is a word, advance right. - (SimpleTokenKind::Separator(_), SimpleTokenKind::NotSeparator) => { + } + // if one of the tokens is a word, we expend in the side of the word. 
+ // left is a word, advance left. + (SimpleTokenKind::NotSeparator, SimpleTokenKind::Separator(_)) => { + before_tokens.next(); + remaining_words -= 1; + } + // right is a word, advance right. + (SimpleTokenKind::Separator(_), SimpleTokenKind::NotSeparator) => { + after_tokens.next(); + remaining_words -= 1; + } + // both are words, advance left then right if remaining_word > 0. + (SimpleTokenKind::NotSeparator, SimpleTokenKind::NotSeparator) => { + before_tokens.next(); + remaining_words -= 1; + + if remaining_words > 0 { after_tokens.next(); remaining_words -= 1; } - // both are words, advance left then right if remaining_word > 0. - (SimpleTokenKind::NotSeparator, SimpleTokenKind::NotSeparator) => { - before_tokens.next(); - remaining_words -= 1; - - if remaining_words > 0 { - after_tokens.next(); - remaining_words -= 1; - } - } } } - // the end of the text is reached, advance left. - (Some(before_token_kind), None) => { - before_tokens.next(); - if matches!(before_token_kind, SimpleTokenKind::NotSeparator) { - remaining_words -= 1; - } - } - // the start of the text is reached, advance right. - (None, Some(after_token_kind)) => { - after_tokens.next(); - if matches!(after_token_kind, SimpleTokenKind::NotSeparator) { - remaining_words -= 1; - } - } - // no more token to add. - (None, None) => break, } - } - - // finally, keep the byte index of each bound of the crop window. 
- let crop_byte_start = before_tokens.next().map_or(0, |t| t.byte_end); - let crop_byte_end = after_tokens.next().map_or(self.text.len(), |t| t.byte_start); - - (crop_byte_start, crop_byte_end) - } else { - // there's one match and it's longer than the crop window, so we have to advance inward - let mut remaining_extra_words = matches_size - crop_size; - let mut tokens_from_end = - tokens[..=last_match_last_token_position].iter().rev().peekable(); - - while remaining_extra_words > 0 { - let token_from_end_kind = tokens_from_end - .peek() - .map(SimpleTokenKind::new) - .expect("Expected iterator to not reach end"); - if matches!(token_from_end_kind, SimpleTokenKind::NotSeparator) { - remaining_extra_words -= 1; + // the end of the text is reached, advance left. + (Some(before_token_kind), None) => { + before_tokens.next(); + if matches!(before_token_kind, SimpleTokenKind::NotSeparator) { + remaining_words -= 1; + } } - - tokens_from_end.next(); + // the start of the text is reached, advance right. + (None, Some(after_token_kind)) => { + after_tokens.next(); + if matches!(after_token_kind, SimpleTokenKind::NotSeparator) { + remaining_words -= 1; + } + } + // no more token to add. + (None, None) => break, } - - let crop_byte_start = if first_match_first_token_position > 0 { - &tokens[first_match_first_token_position - 1].byte_end - } else { - &0 - }; - let crop_byte_end = tokens_from_end - .next() - .map(|t| t.byte_start) - .expect("Expected iterator to not reach end"); - - (*crop_byte_start, crop_byte_end) } + + // finally, keep the byte index of each bound of the crop window. + let crop_byte_start = before_tokens.next().map_or(0, |t| t.byte_end); + let crop_byte_end = after_tokens.next().map_or(self.text.len(), |t| t.byte_start); + + (crop_byte_start, crop_byte_end) } // Returns the formatted version of the original text. From 8221c94e7f5666c73944cc5f57211a0eb4035b59 Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Thu, 3 Oct 2024 15:37:51 +0300 Subject: [PATCH 048/111] Split into multiple files, refactor --- .../search/new/matches/best_match_interval.rs | 139 ++++++++++ milli/src/search/new/matches/match.rs | 62 +++++ milli/src/search/new/matches/mod.rs | 244 +----------------- .../search/new/matches/simple_token_kind.rs | 15 ++ 4 files changed, 230 insertions(+), 230 deletions(-) create mode 100644 milli/src/search/new/matches/best_match_interval.rs create mode 100644 milli/src/search/new/matches/match.rs create mode 100644 milli/src/search/new/matches/simple_token_kind.rs diff --git a/milli/src/search/new/matches/best_match_interval.rs b/milli/src/search/new/matches/best_match_interval.rs new file mode 100644 index 000000000..a6497f351 --- /dev/null +++ b/milli/src/search/new/matches/best_match_interval.rs @@ -0,0 +1,139 @@ +use super::matching_words::WordId; +use super::{Match, MatchPosition}; + +struct MatchIntervalWithScore { + interval: [usize; 2], + score: [i16; 3], +} + +// count score for phrases +fn tally_phrase_scores(fwp: &usize, lwp: &usize, order_score: &mut i16, distance_score: &mut i16) { + let words_in_phrase_minus_one = (lwp - fwp) as i16; + // will always be ordered, so +1 for each space between words + *order_score += words_in_phrase_minus_one; + // distance will always be 1, so -1 for each space between words + *distance_score -= words_in_phrase_minus_one; +} + +/// Compute the score of a match interval: +/// 1) count unique matches +/// 2) calculate distance between matches +/// 3) count ordered matches +fn get_interval_score(matches: &[Match]) -> [i16; 3] { + let mut ids: Vec = Vec::with_capacity(matches.len()); + let mut order_score = 0; + let mut distance_score = 0; + + let mut iter = matches.iter().peekable(); + while let Some(m) = iter.next() { + if let Some(next_match) = iter.peek() { + // if matches are ordered + if next_match.ids.iter().min() > m.ids.iter().min() { + order_score += 1; + } + 
+ let m_last_word_pos = match m.position { + MatchPosition::Word { word_position, .. } => word_position, + MatchPosition::Phrase { word_positions: [fwp, lwp], .. } => { + tally_phrase_scores(&fwp, &lwp, &mut order_score, &mut distance_score); + lwp + } + }; + let next_match_first_word_pos = next_match.get_first_word_pos(); + + // compute distance between matches + distance_score -= (next_match_first_word_pos - m_last_word_pos).min(7) as i16; + } else if let MatchPosition::Phrase { word_positions: [fwp, lwp], .. } = m.position { + // in case last match is a phrase, count score for its words + tally_phrase_scores(&fwp, &lwp, &mut order_score, &mut distance_score); + } + + ids.extend(m.ids.iter()); + } + + ids.sort_unstable(); + ids.dedup(); + let uniq_score = ids.len() as i16; + + // rank by unique match count, then by distance between matches, then by ordered match count. + [uniq_score, distance_score, order_score] +} + +/// Returns the first and last match where the score computed by match_interval_score is the best. +pub fn find_best_match_interval(matches: &[Match], crop_size: usize) -> [&Match; 2] { + if matches.is_empty() { + panic!("`matches` should not be empty at this point"); + } + + // positions of the first and the last match of the best matches interval in `matches`. + let mut best_interval: Option = None; + + let mut save_best_interval = |interval_first, interval_last| { + let interval_score = get_interval_score(&matches[interval_first..=interval_last]); + let is_interval_score_better = &best_interval + .as_ref() + .map_or(true, |MatchIntervalWithScore { score, .. }| interval_score > *score); + + if *is_interval_score_better { + best_interval = Some(MatchIntervalWithScore { + interval: [interval_first, interval_last], + score: interval_score, + }); + } + }; + + // we compute the matches interval if we have at least 2 matches. + // current interval positions. 
+ let mut interval_first = 0; + let mut interval_first_match_first_word_pos = matches[interval_first].get_first_word_pos(); + + for (index, next_match) in matches.iter().enumerate() { + // if next match would make interval gross more than crop_size, + // we compare the current interval with the best one, + // then we increase `interval_first` until next match can be added. + let next_match_last_word_pos = next_match.get_last_word_pos(); + + // if the next match would mean that we pass the crop size window, + // we take the last valid match, that didn't pass this boundry, which is `index` - 1, + // and calculate a score for it, and check if it's better than our best so far + if next_match_last_word_pos - interval_first_match_first_word_pos >= crop_size { + // if index is 0 there is no last viable match + if index != 0 { + let interval_last = index - 1; + // keep interval if it's the best + save_best_interval(interval_first, interval_last); + } + + // advance start of the interval while interval is longer than crop_size. + loop { + interval_first += 1; + if interval_first == matches.len() { + interval_first -= 1; + break; + } + + interval_first_match_first_word_pos = matches[interval_first].get_first_word_pos(); + + if interval_first_match_first_word_pos > next_match_last_word_pos + || next_match_last_word_pos - interval_first_match_first_word_pos < crop_size + { + break; + } + } + } + } + + // compute the last interval score and compare it to the best one. + let interval_last = matches.len() - 1; + // if it's the last match with itself, we need to make sure it's + // not a phrase longer than the crop window + if interval_first != interval_last || matches[interval_first].get_word_count() < crop_size { + save_best_interval(interval_first, interval_last); + } + + // if none of the matches fit the criteria above, default to the first one + best_interval.map_or( + [&matches[0], &matches[0]], + |MatchIntervalWithScore { interval: [first, last], .. 
}| [&matches[first], &matches[last]], + ) +} diff --git a/milli/src/search/new/matches/match.rs b/milli/src/search/new/matches/match.rs new file mode 100644 index 000000000..cc08b006c --- /dev/null +++ b/milli/src/search/new/matches/match.rs @@ -0,0 +1,62 @@ +use super::matching_words::WordId; + +#[derive(Clone, Debug)] +pub enum MatchPosition { + Word { + // position of the word in the whole text. + word_position: usize, + // position of the token in the whole text. + token_position: usize, + }, + Phrase { + // position of the first and last word in the phrase in the whole text. + word_positions: [usize; 2], + // position of the first and last token in the phrase in the whole text. + token_positions: [usize; 2], + }, +} + +#[derive(Clone, Debug)] +pub struct Match { + pub match_len: usize, + // ids of the query words that matches. + pub ids: Vec, + pub position: MatchPosition, +} + +impl Match { + pub(super) fn get_first_word_pos(&self) -> usize { + match self.position { + MatchPosition::Word { word_position, .. } => word_position, + MatchPosition::Phrase { word_positions: [fwp, _], .. } => fwp, + } + } + + pub(super) fn get_last_word_pos(&self) -> usize { + match self.position { + MatchPosition::Word { word_position, .. } => word_position, + MatchPosition::Phrase { word_positions: [_, lwp], .. } => lwp, + } + } + + pub(super) fn get_first_token_pos(&self) -> usize { + match self.position { + MatchPosition::Word { token_position, .. } => token_position, + MatchPosition::Phrase { token_positions: [ftp, _], .. } => ftp, + } + } + + pub(super) fn get_last_token_pos(&self) -> usize { + match self.position { + MatchPosition::Word { token_position, .. } => token_position, + MatchPosition::Phrase { token_positions: [_, ltp], .. } => ltp, + } + } + + pub(super) fn get_word_count(&self) -> usize { + match self.position { + MatchPosition::Word { .. } => 1, + MatchPosition::Phrase { word_positions: [fwp, lwp], .. 
} => lwp - fwp + 1, + } + } +} diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index f8d60ef54..3df361702 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -1,12 +1,16 @@ -use std::borrow::Cow; +mod best_match_interval; +mod r#match; +mod matching_words; +mod simple_token_kind; -use charabia::{Language, SeparatorKind, Token, TokenKind, Tokenizer}; +use charabia::{Language, SeparatorKind, Token, Tokenizer}; use either::Either; pub use matching_words::MatchingWords; -use matching_words::{MatchType, PartialMatch, WordId}; +use matching_words::{MatchType, PartialMatch}; +use r#match::{Match, MatchPosition}; use serde::Serialize; - -pub mod matching_words; +use simple_token_kind::SimpleTokenKind; +use std::borrow::Cow; const DEFAULT_CROP_MARKER: &str = "…"; const DEFAULT_HIGHLIGHT_PREFIX: &str = ""; @@ -94,228 +98,12 @@ impl FormatOptions { } } -#[derive(Clone, Debug)] -pub enum MatchPosition { - Word { - // position of the word in the whole text. - word_position: usize, - // position of the token in the whole text. - token_position: usize, - }, - Phrase { - // position of the first and last word in the phrase in the whole text. - word_positions: (usize, usize), - // position of the first and last token in the phrase in the whole text. - token_positions: (usize, usize), - }, -} - -#[derive(Clone, Debug)] -pub struct Match { - match_len: usize, - // ids of the query words that matches. - ids: Vec, - position: MatchPosition, -} - -impl Match { - fn get_first_word_pos(&self) -> usize { - match self.position { - MatchPosition::Word { word_position, .. } => word_position, - MatchPosition::Phrase { word_positions: (fwp, _), .. } => fwp, - } - } - - fn get_last_word_pos(&self) -> usize { - match self.position { - MatchPosition::Word { word_position, .. } => word_position, - MatchPosition::Phrase { word_positions: (_, lwp), .. 
} => lwp, - } - } - - fn get_first_token_pos(&self) -> usize { - match self.position { - MatchPosition::Word { token_position, .. } => token_position, - MatchPosition::Phrase { token_positions: (ftp, _), .. } => ftp, - } - } - - fn get_last_token_pos(&self) -> usize { - match self.position { - MatchPosition::Word { token_position, .. } => token_position, - MatchPosition::Phrase { token_positions: (_, ltp), .. } => ltp, - } - } - - fn get_word_count(&self) -> usize { - match self.position { - MatchPosition::Word { .. } => 1, - MatchPosition::Phrase { word_positions: (fwp, lwp), .. } => lwp - fwp + 1, - } - } -} - #[derive(Serialize, Debug, Clone, PartialEq, Eq)] pub struct MatchBounds { pub start: usize, pub length: usize, } -enum SimpleTokenKind { - Separator(SeparatorKind), - NotSeparator, -} - -impl SimpleTokenKind { - fn new(token: &&Token<'_>) -> Self { - match token.kind { - TokenKind::Separator(separaor_kind) => Self::Separator(separaor_kind), - _ => Self::NotSeparator, - } - } -} - -#[derive(PartialEq, PartialOrd)] -struct MatchIntervalScore(i16, i16, i16); - -impl MatchIntervalScore { - /// Compute the score of a match interval: - /// 1) count unique matches - /// 2) calculate distance between matches - /// 3) count ordered matches - fn new(matches: &[Match]) -> Self { - let mut ids: Vec = Vec::with_capacity(matches.len()); - let mut order_score = 0; - let mut distance_score = 0; - - // count score for phrases - fn tally_phrase_scores( - fwp: &usize, - lwp: &usize, - order_score: &mut i16, - distance_score: &mut i16, - ) { - let words_in_phrase_minus_one = (lwp - fwp) as i16; - // will always be ordered, so +1 for each space between words - *order_score += words_in_phrase_minus_one; - // distance will always be 1, so -1 for each space between words - *distance_score -= words_in_phrase_minus_one; - } - - let mut iter = matches.iter().peekable(); - while let Some(m) = iter.next() { - if let Some(next_match) = iter.peek() { - // if matches are ordered - if 
next_match.ids.iter().min() > m.ids.iter().min() { - order_score += 1; - } - - let m_last_word_pos = match m.position { - MatchPosition::Word { word_position, .. } => word_position, - MatchPosition::Phrase { word_positions: (fwp, lwp), .. } => { - tally_phrase_scores(&fwp, &lwp, &mut order_score, &mut distance_score); - lwp - } - }; - let next_match_first_word_pos = next_match.get_first_word_pos(); - - // compute distance between matches - distance_score -= (next_match_first_word_pos - m_last_word_pos).min(7) as i16; - } else if let MatchPosition::Phrase { word_positions: (fwp, lwp), .. } = m.position { - // in case last match is a phrase, count score for its words - tally_phrase_scores(&fwp, &lwp, &mut order_score, &mut distance_score); - } - - ids.extend(m.ids.iter()); - } - - ids.sort_unstable(); - ids.dedup(); - let uniq_score = ids.len() as i16; - - // rank by unique match count, then by distance between matches, then by ordered match count. - Self(uniq_score, distance_score, order_score) - } -} - -struct MatchIntervalWithScore { - interval: (usize, usize), - score: MatchIntervalScore, -} - -impl MatchIntervalWithScore { - /// Returns the matches interval where the score computed by match_interval_score is the best. - fn find_best_match_interval(matches: &[Match], crop_size: usize) -> &[Match] { - if matches.len() <= 1 { - return matches; - } - - // positions of the first and the last match of the best matches interval in `matches`. - let mut best_interval: Option = None; - - let mut save_best_interval = |interval_first, interval_last| { - let interval_score = MatchIntervalScore::new(&matches[interval_first..=interval_last]); - let is_interval_score_better = - &best_interval.as_ref().map_or(true, |Self { score, .. }| interval_score > *score); - - if *is_interval_score_better { - best_interval = - Some(Self { interval: (interval_first, interval_last), score: interval_score }); - } - }; - - // we compute the matches interval if we have at least 2 matches. 
- // current interval positions. - let mut interval_first = 0; - let mut interval_first_match_first_word_pos = matches[interval_first].get_first_word_pos(); - - for (index, next_match) in matches.iter().enumerate() { - // if next match would make interval gross more than crop_size, - // we compare the current interval with the best one, - // then we increase `interval_first` until next match can be added. - let next_match_last_word_pos = next_match.get_last_word_pos(); - - // if the next match would mean that we pass the crop size window, - // we take the last valid match, that didn't pass this boundry, which is `index` - 1, - // and calculate a score for it, and check if it's better than our best so far - if next_match_last_word_pos - interval_first_match_first_word_pos >= crop_size { - // if index is 0 there is no last viable match - if index != 0 { - let interval_last = index - 1; - // keep interval if it's the best - save_best_interval(interval_first, interval_last); - } - - // advance start of the interval while interval is longer than crop_size. - loop { - interval_first += 1; - interval_first_match_first_word_pos = - matches[interval_first].get_first_word_pos(); - - if interval_first_match_first_word_pos > next_match_last_word_pos - || next_match_last_word_pos - interval_first_match_first_word_pos - < crop_size - { - break; - } - } - } - } - - // compute the last interval score and compare it to the best one. 
- let interval_last = matches.len() - 1; - // if it's the last match with itself, we need to make sure it's - // not a phrase longer than the crop window - if interval_first != interval_last || matches[interval_first].get_word_count() < crop_size { - save_best_interval(interval_first, interval_last); - } - - // if none of the matches fit the criteria above, default to the first one - let best_interval = best_interval.map_or((0, 0), |v| v.interval); - &matches[best_interval.0..=best_interval.1] - } -} - /// Structure used to analyze a string, compute words that match, /// and format the source string, returning a highlighted and cropped sub-string. pub struct Matcher<'t, 'tokenizer, 'b, 'lang> { @@ -355,8 +143,8 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { match_len: word.char_end - *first_word_char_start, ids: ids.clone().collect(), position: MatchPosition::Phrase { - word_positions: (first_word_position, word_position), - token_positions: (first_token_position, token_position), + word_positions: [first_word_position, word_position], + token_positions: [first_token_position, token_position], }, }); @@ -450,15 +238,14 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { matches: &[Match], crop_size: usize, ) -> (usize, usize) { - // if there is no match, we start from the beginning of the string by default. let ( mut remaining_words, is_iterating_forward, before_tokens_starting_index, after_tokens_starting_index, ) = if !matches.is_empty() { - let matches_first = matches.first().unwrap(); - let matches_last = matches.last().unwrap(); + let [matches_first, matches_last] = + best_match_interval::find_best_match_interval(matches, crop_size); let matches_size = matches_last.get_last_word_pos() - matches_first.get_first_word_pos() + 1; @@ -600,9 +387,6 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { // crop around the best interval. 
let (byte_start, byte_end) = match format_options.crop { Some(crop_size) if crop_size > 0 => { - let matches = MatchIntervalWithScore::find_best_match_interval( - matches, crop_size, - ); self.crop_bounds(tokens, matches, crop_size) } _ => (0, self.text.len()), @@ -625,7 +409,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { let token = &tokens[token_position]; (&token.byte_start, &token.byte_end) } - MatchPosition::Phrase { token_positions: (ftp, ltp), .. } => { + MatchPosition::Phrase { token_positions: [ftp, ltp], .. } => { (&tokens[ftp].byte_start, &tokens[ltp].byte_end) } }; diff --git a/milli/src/search/new/matches/simple_token_kind.rs b/milli/src/search/new/matches/simple_token_kind.rs new file mode 100644 index 000000000..b34a8c985 --- /dev/null +++ b/milli/src/search/new/matches/simple_token_kind.rs @@ -0,0 +1,15 @@ +use charabia::{SeparatorKind, Token, TokenKind}; + +pub enum SimpleTokenKind { + Separator(SeparatorKind), + NotSeparator, +} + +impl SimpleTokenKind { + pub fn new(token: &&Token<'_>) -> Self { + match token.kind { + TokenKind::Separator(separaor_kind) => Self::Separator(separaor_kind), + _ => Self::NotSeparator, + } + } +} From c3de3a9ab75e6be99314400137b8329cdf46ff12 Mon Sep 17 00:00:00 2001 From: "F. 
Levi" <55688616+flevi29@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:30:31 +0300 Subject: [PATCH 049/111] Refactor --- milli/src/search/new/matches/matching_words.rs | 12 +++--------- milli/src/search/new/matches/mod.rs | 1 - 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/milli/src/search/new/matches/matching_words.rs b/milli/src/search/new/matches/matching_words.rs index 4deaff6a0..e4d2785ca 100644 --- a/milli/src/search/new/matches/matching_words.rs +++ b/milli/src/search/new/matches/matching_words.rs @@ -130,7 +130,7 @@ impl<'a> Iterator for MatchesIter<'a, '_> { word.map(|word| self.matching_words.word_interner.get(word).as_str()) }) .collect(); - let partial = PartialMatch { matching_words: words, ids, char_len: 0 }; + let partial = PartialMatch { matching_words: words, ids }; partial.match_token(self.token).or_else(|| self.next()) } @@ -158,7 +158,6 @@ pub enum MatchType<'a> { pub struct PartialMatch<'a> { matching_words: Vec>, ids: &'a RangeInclusive, - char_len: usize, } impl<'a> PartialMatch<'a> { @@ -176,25 +175,20 @@ impl<'a> PartialMatch<'a> { None => token.is_stopword(), }; - let char_len = token.char_end - token.char_start; // if there are remaining words to match in the phrase and the current token is matching, // return a new Partial match allowing the highlighter to continue. if is_matching && matching_words.len() > 1 { matching_words.remove(0); - Some(MatchType::Partial(Self { matching_words, ids, char_len })) + Some(MatchType::Partial(Self { matching_words, ids })) // if there is no remaining word to match in the phrase and the current token is matching, // return a Full match. } else if is_matching { - Some(MatchType::Full { char_len, ids }) + Some(MatchType::Full { char_len: token.char_end - token.char_start, ids }) // if the current token doesn't match, return None to break the match sequence. 
} else { None } } - - pub fn char_len(&self) -> usize { - self.char_len - } } impl fmt::Debug for MatchingWords { diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 3df361702..9ca560529 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -139,7 +139,6 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { Some(MatchType::Full { ids, .. }) => { // save the token that closes the partial match as a match. matches.push(Match { - // @TODO: Shouldn't this be +1? match_len: word.char_end - *first_word_char_start, ids: ids.clone().collect(), position: MatchPosition::Phrase { From 03579aba13853560059cec3c881e284b4f7a307a Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:38:47 +0300 Subject: [PATCH 050/111] Adjust test --- milli/src/search/new/matches/mod.rs | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index 9ca560529..ac0fb7e7b 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -798,12 +798,12 @@ mod tests { @"…the power to split the world between those who embraced…" ); - let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "\"power to\" \"and those\""); + let builder = MatcherBuilder::new_test(&rtxn, &temp_index, "those \"and those\""); let mut matcher = builder.build(text, None); // should highlight "those" and the phrase "and those". 
insta::assert_snapshot!( matcher.format(format_options), - @"…groundbreaking invention had the power to split the world between…" + @"…world between those who embraced progress and those who resisted…" ); let builder = MatcherBuilder::new_test( @@ -841,17 +841,6 @@ mod tests { @"…between those who embraced progress and those who resisted change…" ); - let builder = MatcherBuilder::new_test( - &rtxn, - &temp_index, - "\"The groundbreaking invention\" \"split the world between those\"", - ); - let mut matcher = builder.build(text, None); - insta::assert_snapshot!( - matcher.format(format_options), - @"…the power to split the world between those who embraced…" - ); - let builder = MatcherBuilder::new_test( &rtxn, &temp_index, From c0912aa6856f16b711df116d14f88e921e945faf Mon Sep 17 00:00:00 2001 From: Timon Jurschitsch Date: Mon, 7 Oct 2024 16:29:47 +0200 Subject: [PATCH 051/111] add missing shared servers --- meilisearch/tests/index/create_index.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meilisearch/tests/index/create_index.rs b/meilisearch/tests/index/create_index.rs index 47a2f0f4a..110690638 100644 --- a/meilisearch/tests/index/create_index.rs +++ b/meilisearch/tests/index/create_index.rs @@ -152,7 +152,7 @@ async fn create_index_with_invalid_primary_key() { #[actix_rt::test] async fn test_create_multiple_indexes() { - let server = Server::new().await; + let server = Server::new_shared(); let index1 = server.unique_index(); let index2 = server.unique_index(); let index3 = server.unique_index(); @@ -174,7 +174,7 @@ async fn test_create_multiple_indexes() { #[actix_rt::test] async fn error_create_existing_index() { - let server = Server::new().await; + let server = Server::new_shared(); let index = server.unique_index(); let (_, code) = index.create(Some("primary")).await; From 5b04189f7a37ad6f6c5bc8a153d6d177fa498b7f Mon Sep 17 00:00:00 2001 From: Timon Jurschitsch Date: Mon, 7 Oct 2024 16:50:57 +0200 Subject: [PATCH 052/111] remove 
flaky assert --- meilisearch/tests/index/create_index.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/meilisearch/tests/index/create_index.rs b/meilisearch/tests/index/create_index.rs index 110690638..9b9fbd039 100644 --- a/meilisearch/tests/index/create_index.rs +++ b/meilisearch/tests/index/create_index.rs @@ -68,7 +68,6 @@ async fn create_index_with_gzip_encoded_request_and_receiving_brotli_encoded_res let parsed_response = serde_json::from_slice::(decoded.into().as_ref()).expect("Expecting valid json"); - assert_eq!(parsed_response["taskUid"], 3); assert_eq!(parsed_response["indexUid"], "test"); } From 6af55b1a80fb1d0f988232d82f90dca14d3ede76 Mon Sep 17 00:00:00 2001 From: Marc Date: Tue, 8 Oct 2024 11:59:43 +0200 Subject: [PATCH 053/111] Update Dockerfile --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index 84d1da8f5..20f4c27c7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -21,6 +21,7 @@ RUN set -eux; \ # Run FROM alpine:3.20 +LABEL org.opencontainers.image.source="https://github.com/meilisearch/meilisearch" ENV MEILI_HTTP_ADDR 0.0.0.0:7700 ENV MEILI_SERVER_PROVIDER docker From ed267fa0634b0306b96ccfa8c724c6b7a7d02645 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9mentine?= Date: Tue, 8 Oct 2024 14:14:16 +0200 Subject: [PATCH 054/111] Apply suggestions from code review --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 20f4c27c7..04557df59 100644 --- a/Dockerfile +++ b/Dockerfile @@ -21,7 +21,7 @@ RUN set -eux; \ # Run FROM alpine:3.20 -LABEL org.opencontainers.image.source="https://github.com/meilisearch/meilisearch" +LABEL org.opencontainers.image.source="https://github.com/meilisearch/meilisearch" ENV MEILI_HTTP_ADDR 0.0.0.0:7700 ENV MEILI_SERVER_PROVIDER docker From 7f5d0837c3343b9ce154197867bd153b12390e5c Mon Sep 17 00:00:00 2001 From: Tamo Date: Wed, 9 Oct 2024 11:46:57 +0200 Subject: [PATCH 055/111] fix the bad experimental search queue size --- 
meilisearch/src/option.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/meilisearch/src/option.rs b/meilisearch/src/option.rs index 3799bdcb7..82c783115 100644 --- a/meilisearch/src/option.rs +++ b/meilisearch/src/option.rs @@ -357,8 +357,8 @@ pub struct Opt { /// Lets you customize the size of the search queue. Meilisearch processes your search requests as fast as possible but once the /// queue is full it starts returning HTTP 503, Service Unavailable. /// The default value is 1000. - #[clap(long, env = MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE, default_value_t = 1000)] - #[serde(default)] + #[clap(long, env = MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE, default_value_t = default_experimental_search_queue_size())] + #[serde(default = "default_experimental_search_queue_size")] pub experimental_search_queue_size: usize, /// Experimental logs mode feature. For more information, see: @@ -890,6 +890,10 @@ fn default_dump_dir() -> PathBuf { PathBuf::from(DEFAULT_DUMP_DIR) } +fn default_experimental_search_queue_size() -> usize { + 1000 +} + /// Indicates if a snapshot was scheduled, and if yes with which interval. 
#[derive(Debug, Default, Copy, Clone, Deserialize, Serialize)] pub enum ScheduleSnapshot { From 6e37ae8619ebf52aa1f9a703fc12723764f4ebe5 Mon Sep 17 00:00:00 2001 From: curquiza Date: Wed, 9 Oct 2024 19:13:14 +0200 Subject: [PATCH 056/111] Update mini-dashboard --- meilisearch/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meilisearch/Cargo.toml b/meilisearch/Cargo.toml index c193c89d4..6c2fb4060 100644 --- a/meilisearch/Cargo.toml +++ b/meilisearch/Cargo.toml @@ -157,5 +157,5 @@ german = ["meilisearch-types/german"] turkish = ["meilisearch-types/turkish"] [package.metadata.mini-dashboard] -assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.14/build.zip" -sha1 = "592d1b5a3459d621d0aae1dded8fe3154f5c38fe" +assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.15/build.zip" +sha1 = "d057600b4a839a2e0c0be7a372cd1b2683f3ca7e" From 466604725ec017234db3e61c58c957a3802d2bb9 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 10 Oct 2024 23:47:15 +0200 Subject: [PATCH 057/111] Do not send empty edit document by function --- meilisearch/src/analytics/segment_analytics.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index f8d6a0fdc..0ea0de572 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -1572,6 +1572,10 @@ impl EditDocumentsByFunctionAggregator { pub fn into_event(self, user: &User, event_name: &str) -> Option { let Self { timestamp, user_agents, index_creation, filtered, with_context } = self; + // if we had no timestamp it means we never encountered any events and + // thus we don't need to send this event. 
+ let timestamp = timestamp?; + let properties = json!({ "user-agent": user_agents, "filtered": filtered, @@ -1580,7 +1584,7 @@ impl EditDocumentsByFunctionAggregator { }); Some(Track { - timestamp, + timestamp: Some(timestamp), user: user.clone(), event: event_name.to_string(), properties, From 92070a3578ded5a78bb42e8fb0ab02242fd11bc4 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 10 Oct 2024 13:17:25 +0200 Subject: [PATCH 058/111] Implement the experimental drop search after and nb search per core --- .../src/analytics/segment_analytics.rs | 6 +++ meilisearch/src/main.rs | 11 +++++- meilisearch/src/option.rs | 38 ++++++++++++++++++- 3 files changed, 52 insertions(+), 3 deletions(-) diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 0ea0de572..476b3264e 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -265,6 +265,8 @@ struct Infos { experimental_contains_filter: bool, experimental_enable_metrics: bool, experimental_search_queue_size: usize, + experimental_drop_search_after: usize, + experimental_nb_searches_per_core: usize, experimental_logs_mode: LogMode, experimental_replication_parameters: bool, experimental_enable_logs_route: bool, @@ -308,6 +310,8 @@ impl From for Infos { experimental_contains_filter, experimental_enable_metrics, experimental_search_queue_size, + experimental_drop_search_after, + experimental_nb_searches_per_core, experimental_logs_mode, experimental_replication_parameters, experimental_enable_logs_route, @@ -359,6 +363,8 @@ impl From for Infos { experimental_contains_filter, experimental_enable_metrics, experimental_search_queue_size, + experimental_drop_search_after: experimental_drop_search_after.into(), + experimental_nb_searches_per_core: experimental_nb_searches_per_core.into(), experimental_logs_mode, experimental_replication_parameters, experimental_enable_logs_route, diff --git a/meilisearch/src/main.rs 
b/meilisearch/src/main.rs index b66bfc5b8..de9784d15 100644 --- a/meilisearch/src/main.rs +++ b/meilisearch/src/main.rs @@ -5,6 +5,7 @@ use std::path::PathBuf; use std::str::FromStr; use std::sync::Arc; use std::thread::available_parallelism; +use std::time::Duration; use actix_web::http::KeepAlive; use actix_web::web::Data; @@ -153,8 +154,14 @@ async fn run_http( let auth_controller = Data::from(auth_controller); let search_queue = SearchQueue::new( opt.experimental_search_queue_size, - available_parallelism().unwrap_or(NonZeroUsize::new(2).unwrap()), - ); + available_parallelism() + .unwrap_or(NonZeroUsize::new(2).unwrap()) + .checked_mul(opt.experimental_nb_searches_per_core) + .unwrap_or(NonZeroUsize::MAX), + ) + .with_time_to_abort(Duration::from_secs( + usize::from(opt.experimental_drop_search_after) as u64 + )); let search_queue = Data::new(search_queue); let http_server = HttpServer::new(move || { diff --git a/meilisearch/src/option.rs b/meilisearch/src/option.rs index 82c783115..bbeb94577 100644 --- a/meilisearch/src/option.rs +++ b/meilisearch/src/option.rs @@ -2,7 +2,7 @@ use std::env::VarError; use std::ffi::OsStr; use std::fmt::Display; use std::io::{BufReader, Read}; -use std::num::ParseIntError; +use std::num::{NonZeroUsize, ParseIntError}; use std::ops::Deref; use std::path::PathBuf; use std::str::FromStr; @@ -55,6 +55,8 @@ const MEILI_EXPERIMENTAL_ENABLE_LOGS_ROUTE: &str = "MEILI_EXPERIMENTAL_ENABLE_LO const MEILI_EXPERIMENTAL_CONTAINS_FILTER: &str = "MEILI_EXPERIMENTAL_CONTAINS_FILTER"; const MEILI_EXPERIMENTAL_ENABLE_METRICS: &str = "MEILI_EXPERIMENTAL_ENABLE_METRICS"; const MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE: &str = "MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE"; +const MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER: &str = "MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER"; +const MEILI_EXPERIMENTAL_NB_SEARCHES_PER_CORE: &str = "MEILI_EXPERIMENTAL_NB_SEARCHES_PER_CORE"; const MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE: &str = 
"MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE"; const MEILI_EXPERIMENTAL_MAX_NUMBER_OF_BATCHED_TASKS: &str = @@ -361,6 +363,22 @@ pub struct Opt { #[serde(default = "default_experimental_search_queue_size")] pub experimental_search_queue_size: usize, + /// Experimental drop search after. For more information, see: + /// + /// Lets you customize after how much seconds should Meilisearch consider a search as irrelevant and drop it. + /// The default value is 60. + #[clap(long, env = MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER, default_value_t = default_drop_search_after())] + #[serde(default = "default_drop_search_after")] + pub experimental_drop_search_after: NonZeroUsize, + + /// Experimental number of searches per core. For more information, see: + /// + /// Lets you customize after how many search requests can run on each cores. + /// The default value is 4. + #[clap(long, env = MEILI_EXPERIMENTAL_NB_SEARCHES_PER_CORE, default_value_t = default_nb_searches_per_core())] + #[serde(default = "default_drop_search_after")] + pub experimental_nb_searches_per_core: NonZeroUsize, + /// Experimental logs mode feature. For more information, see: /// /// Change the mode of the logs on the console. 
@@ -492,6 +510,8 @@ impl Opt { experimental_contains_filter, experimental_enable_metrics, experimental_search_queue_size, + experimental_drop_search_after, + experimental_nb_searches_per_core, experimental_logs_mode, experimental_enable_logs_route, experimental_replication_parameters, @@ -559,6 +579,14 @@ impl Opt { MEILI_EXPERIMENTAL_SEARCH_QUEUE_SIZE, experimental_search_queue_size.to_string(), ); + export_to_env_if_not_present( + MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER, + experimental_drop_search_after.to_string(), + ); + export_to_env_if_not_present( + MEILI_EXPERIMENTAL_NB_SEARCHES_PER_CORE, + experimental_nb_searches_per_core.to_string(), + ); export_to_env_if_not_present( MEILI_EXPERIMENTAL_LOGS_MODE, experimental_logs_mode.to_string(), @@ -894,6 +922,14 @@ fn default_experimental_search_queue_size() -> usize { 1000 } +fn default_drop_search_after() -> NonZeroUsize { + NonZeroUsize::new(60).unwrap() +} + +fn default_nb_searches_per_core() -> NonZeroUsize { + NonZeroUsize::new(4).unwrap() +} + /// Indicates if a snapshot was scheduled, and if yes with which interval. #[derive(Debug, Default, Copy, Clone, Deserialize, Serialize)] pub enum ScheduleSnapshot { From c32282acb1f14e65bb124003c34fa1de9c01f869 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 10 Oct 2024 13:21:18 +0200 Subject: [PATCH 059/111] improve doc --- meilisearch/src/option.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meilisearch/src/option.rs b/meilisearch/src/option.rs index bbeb94577..a231eb058 100644 --- a/meilisearch/src/option.rs +++ b/meilisearch/src/option.rs @@ -365,7 +365,7 @@ pub struct Opt { /// Experimental drop search after. For more information, see: /// - /// Lets you customize after how much seconds should Meilisearch consider a search as irrelevant and drop it. + /// Let you customize after how many seconds Meilisearch should consider a search as irrelevant and drop it. /// The default value is 60. 
#[clap(long, env = MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER, default_value_t = default_drop_search_after())] #[serde(default = "default_drop_search_after")] @@ -373,7 +373,7 @@ pub struct Opt { /// Experimental number of searches per core. For more information, see: /// - /// Lets you customize after how many search requests can run on each cores. + /// Lets you customize how many search requests can run on each core. /// The default value is 4. #[clap(long, env = MEILI_EXPERIMENTAL_NB_SEARCHES_PER_CORE, default_value_t = default_nb_searches_per_core())] #[serde(default = "default_drop_search_after")] From c4efd1df4e70b2929ee1cb1c22b535b7ff163cc7 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 10 Oct 2024 13:40:21 +0200 Subject: [PATCH 060/111] Update meilisearch/src/option.rs Co-authored-by: Louis Dureuil --- meilisearch/src/option.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meilisearch/src/option.rs b/meilisearch/src/option.rs index a231eb058..cef787e1a 100644 --- a/meilisearch/src/option.rs +++ b/meilisearch/src/option.rs @@ -376,7 +376,7 @@ pub struct Opt { /// Lets you customize how many search requests can run on each core. /// The default value is 4. #[clap(long, env = MEILI_EXPERIMENTAL_NB_SEARCHES_PER_CORE, default_value_t = default_nb_searches_per_core())] - #[serde(default = "default_drop_search_after")] + #[serde(default = "default_nb_searches_per_core")] pub experimental_nb_searches_per_core: NonZeroUsize, /// Experimental logs mode feature. 
For more information, see: From 3085092e04cbc909601b8b290d883b35ff541f89 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 10 Oct 2024 13:40:28 +0200 Subject: [PATCH 061/111] Update meilisearch/src/option.rs Co-authored-by: Louis Dureuil --- meilisearch/src/option.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meilisearch/src/option.rs b/meilisearch/src/option.rs index cef787e1a..b3f01d208 100644 --- a/meilisearch/src/option.rs +++ b/meilisearch/src/option.rs @@ -373,7 +373,7 @@ pub struct Opt { /// Experimental number of searches per core. For more information, see: /// - /// Lets you customize how many search requests can run on each core. + /// Lets you customize how many search requests can run on each core concurrently. /// The default value is 4. #[clap(long, env = MEILI_EXPERIMENTAL_NB_SEARCHES_PER_CORE, default_value_t = default_nb_searches_per_core())] #[serde(default = "default_nb_searches_per_core")] From 4b4a6c78638573721d7b88869fd443236f90d29a Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 10 Oct 2024 15:24:24 +0200 Subject: [PATCH 062/111] Update meilisearch/src/option.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Renault --- meilisearch/src/option.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meilisearch/src/option.rs b/meilisearch/src/option.rs index b3f01d208..02dc660a4 100644 --- a/meilisearch/src/option.rs +++ b/meilisearch/src/option.rs @@ -365,7 +365,7 @@ pub struct Opt { /// Experimental drop search after. For more information, see: /// - /// Let you customize after how many seconds Meilisearch should consider a search as irrelevant and drop it. + /// Let you customize after how many seconds Meilisearch should consider a search request irrelevant and drop it. /// The default value is 60. 
#[clap(long, env = MEILI_EXPERIMENTAL_DROP_SEARCH_AFTER, default_value_t = default_drop_search_after())] #[serde(default = "default_drop_search_after")] From e44e7b5e81e8644ae1c95d3a3b28f530fcc52eb2 Mon Sep 17 00:00:00 2001 From: Louis Dureuil Date: Mon, 14 Oct 2024 16:17:19 +0200 Subject: [PATCH 063/111] Fix retrieveVectors when explicitly passed in displayed attributes without any document containing _vectors --- meilisearch/src/search/mod.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/meilisearch/src/search/mod.rs b/meilisearch/src/search/mod.rs index 66b6e56de..7832c1761 100644 --- a/meilisearch/src/search/mod.rs +++ b/meilisearch/src/search/mod.rs @@ -1195,8 +1195,13 @@ impl<'a> HitMaker<'a> { let vectors_is_hidden = match (&displayed_ids, vectors_fid) { // displayed_ids is a wildcard, so `_vectors` can be displayed regardless of its fid (None, _) => false, - // displayed_ids is a finite list, and `_vectors` cannot be part of it because it is not an existing field - (Some(_), None) => true, + // vectors has no fid, so check its explicit name + (Some(_), None) => { + // unwrap as otherwise we'd go to the first one + let displayed_names = index.displayed_fields(rtxn)?.unwrap(); + !displayed_names + .contains(&milli::vector::parsed_vectors::RESERVED_VECTORS_FIELD_NAME) + } // displayed_ids is a finit list, so hide if `_vectors` is not part of it (Some(map), Some(vectors_fid)) => map.contains(&vectors_fid), }; From 5a74d4729cdc02a3cea011d4ab6a0f608be867f9 Mon Sep 17 00:00:00 2001 From: Louis Dureuil Date: Mon, 14 Oct 2024 16:23:28 +0200 Subject: [PATCH 064/111] Add test failing before this PR, OK now --- meilisearch/tests/search/hybrid.rs | 51 ++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/meilisearch/tests/search/hybrid.rs b/meilisearch/tests/search/hybrid.rs index e301c0b05..00a65d9aa 100644 --- a/meilisearch/tests/search/hybrid.rs +++ b/meilisearch/tests/search/hybrid.rs @@ -568,6 +568,57 @@ async fn 
retrieve_vectors() { ] "###); + // use explicit `_vectors` in displayed attributes + let (response, code) = index + .update_settings(json!({ "displayedAttributes": ["id", "title", "desc", "_vectors"]} )) + .await; + assert_eq!(202, code, "{:?}", response); + index.wait_task(response.uid()).await; + + let (response, code) = index + .search_post( + json!({"q": "Captain", "hybrid": {"embedder": "default", "semanticRatio": 0.2}, "retrieveVectors": true}), + ) + .await; + snapshot!(code, @"200 OK"); + insta::assert_json_snapshot!(response["hits"], {"[]._vectors.default.embeddings" => "[vectors]"}, @r###" + [ + { + "title": "Captain Planet", + "desc": "He's not part of the Marvel Cinematic Universe", + "id": "2", + "_vectors": { + "default": { + "embeddings": "[vectors]", + "regenerate": true + } + } + }, + { + "title": "Captain Marvel", + "desc": "a Shazam ersatz", + "id": "3", + "_vectors": { + "default": { + "embeddings": "[vectors]", + "regenerate": true + } + } + }, + { + "title": "Shazam!", + "desc": "a Captain Marvel ersatz", + "id": "1", + "_vectors": { + "default": { + "embeddings": "[vectors]", + "regenerate": true + } + } + } + ] + "###); + // remove `_vectors` from displayed attributes let (response, code) = index.update_settings(json!({ "displayedAttributes": ["id", "title", "desc"]} )).await; From 73e87c152a4bd35fd4309141615676210c6b279c Mon Sep 17 00:00:00 2001 From: Tamo Date: Wed, 16 Oct 2024 15:43:27 +0200 Subject: [PATCH 065/111] rewrite most of the analytics especially the settings --- meilisearch/src/analytics/mock_analytics.rs | 109 -- meilisearch/src/analytics/mod.rs | 179 ++-- .../src/analytics/segment_analytics.rs | 211 ++-- meilisearch/src/lib.rs | 4 +- meilisearch/src/routes/dump.rs | 7 +- meilisearch/src/routes/features.rs | 58 +- meilisearch/src/routes/indexes/documents.rs | 318 +++++- .../src/routes/indexes/facet_search.rs | 112 +- meilisearch/src/routes/indexes/mod.rs | 53 +- meilisearch/src/routes/indexes/search.rs | 13 +- 
meilisearch/src/routes/indexes/settings.rs | 962 +++++++++++++----- meilisearch/src/routes/swap_indexes.rs | 2 +- 12 files changed, 1381 insertions(+), 647 deletions(-) delete mode 100644 meilisearch/src/analytics/mock_analytics.rs diff --git a/meilisearch/src/analytics/mock_analytics.rs b/meilisearch/src/analytics/mock_analytics.rs deleted file mode 100644 index 54b8d4f1b..000000000 --- a/meilisearch/src/analytics/mock_analytics.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::any::Any; -use std::sync::Arc; - -use actix_web::HttpRequest; -use meilisearch_types::InstanceUid; -use serde_json::Value; - -use super::{find_user_id, Analytics, DocumentDeletionKind, DocumentFetchKind}; -use crate::routes::indexes::documents::{DocumentEditionByFunction, UpdateDocumentsQuery}; -use crate::Opt; - -pub struct MockAnalytics { - instance_uid: Option, -} - -#[derive(Default)] -pub struct SearchAggregator; - -#[allow(dead_code)] -impl SearchAggregator { - pub fn from_query(_: &dyn Any, _: &dyn Any) -> Self { - Self - } - - pub fn succeed(&mut self, _: &dyn Any) {} -} - -#[derive(Default)] -pub struct SimilarAggregator; - -#[allow(dead_code)] -impl SimilarAggregator { - pub fn from_query(_: &dyn Any, _: &dyn Any) -> Self { - Self - } - - pub fn succeed(&mut self, _: &dyn Any) {} -} - -#[derive(Default)] -pub struct MultiSearchAggregator; - -#[allow(dead_code)] -impl MultiSearchAggregator { - pub fn from_federated_search(_: &dyn Any, _: &dyn Any) -> Self { - Self - } - - pub fn succeed(&mut self) {} -} - -#[derive(Default)] -pub struct FacetSearchAggregator; - -#[allow(dead_code)] -impl FacetSearchAggregator { - pub fn from_query(_: &dyn Any, _: &dyn Any) -> Self { - Self - } - - pub fn succeed(&mut self, _: &dyn Any) {} -} - -impl MockAnalytics { - #[allow(clippy::new_ret_no_self)] - pub fn new(opt: &Opt) -> Arc { - let instance_uid = find_user_id(&opt.db_path); - Arc::new(Self { instance_uid }) - } -} - -impl Analytics for MockAnalytics { - fn instance_uid(&self) -> 
Option<&meilisearch_types::InstanceUid> { - self.instance_uid.as_ref() - } - - // These methods are noop and should be optimized out - fn publish(&self, _event_name: String, _send: Value, _request: Option<&HttpRequest>) {} - fn get_search(&self, _aggregate: super::SearchAggregator) {} - fn post_search(&self, _aggregate: super::SearchAggregator) {} - fn get_similar(&self, _aggregate: super::SimilarAggregator) {} - fn post_similar(&self, _aggregate: super::SimilarAggregator) {} - fn post_multi_search(&self, _aggregate: super::MultiSearchAggregator) {} - fn post_facet_search(&self, _aggregate: super::FacetSearchAggregator) {} - fn add_documents( - &self, - _documents_query: &UpdateDocumentsQuery, - _index_creation: bool, - _request: &HttpRequest, - ) { - } - fn delete_documents(&self, _kind: DocumentDeletionKind, _request: &HttpRequest) {} - fn update_documents( - &self, - _documents_query: &UpdateDocumentsQuery, - _index_creation: bool, - _request: &HttpRequest, - ) { - } - fn update_documents_by_function( - &self, - _documents_query: &DocumentEditionByFunction, - _index_creation: bool, - _request: &HttpRequest, - ) { - } - fn get_fetch_documents(&self, _documents_query: &DocumentFetchKind, _request: &HttpRequest) {} - fn post_fetch_documents(&self, _documents_query: &DocumentFetchKind, _request: &HttpRequest) {} -} diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index 3c7ca0ed3..a8658d830 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -1,45 +1,51 @@ -mod mock_analytics; -#[cfg(feature = "analytics")] -mod segment_analytics; +pub mod segment_analytics; +use std::any::TypeId; +use std::collections::HashMap; use std::fs; use std::path::{Path, PathBuf}; use std::str::FromStr; use actix_web::HttpRequest; use meilisearch_types::InstanceUid; -pub use mock_analytics::MockAnalytics; use once_cell::sync::Lazy; use platform_dirs::AppDirs; -use serde_json::Value; - -use 
crate::routes::indexes::documents::{DocumentEditionByFunction, UpdateDocumentsQuery}; - -// if the analytics feature is disabled -// the `SegmentAnalytics` point to the mock instead of the real analytics -#[cfg(not(feature = "analytics"))] -pub type SegmentAnalytics = mock_analytics::MockAnalytics; -#[cfg(not(feature = "analytics"))] -pub type SearchAggregator = mock_analytics::SearchAggregator; -#[cfg(not(feature = "analytics"))] -pub type SimilarAggregator = mock_analytics::SimilarAggregator; -#[cfg(not(feature = "analytics"))] -pub type MultiSearchAggregator = mock_analytics::MultiSearchAggregator; -#[cfg(not(feature = "analytics"))] -pub type FacetSearchAggregator = mock_analytics::FacetSearchAggregator; +use segment::message::User; +use serde::Serialize; // if the feature analytics is enabled we use the real analytics -#[cfg(feature = "analytics")] pub type SegmentAnalytics = segment_analytics::SegmentAnalytics; -#[cfg(feature = "analytics")] -pub type SearchAggregator = segment_analytics::SearchAggregator; -#[cfg(feature = "analytics")] +pub use segment_analytics::SearchAggregator; pub type SimilarAggregator = segment_analytics::SimilarAggregator; -#[cfg(feature = "analytics")] pub type MultiSearchAggregator = segment_analytics::MultiSearchAggregator; -#[cfg(feature = "analytics")] pub type FacetSearchAggregator = segment_analytics::FacetSearchAggregator; +/// A macro used to quickly define events that don't aggregate or send anything besides an empty event with its name. +#[macro_export] +macro_rules! 
empty_analytics { + ($struct_name:ident, $event_name:literal) => { + #[derive(Default)] + struct $struct_name {} + + impl $crate::analytics::Aggregate for $struct_name { + fn event_name(&self) -> &'static str { + $event_name + } + + fn aggregate(self, other: Self) -> Self + where + Self: Sized, + { + self + } + + fn into_event(self) -> serde_json::Value { + serde_json::json!({}) + } + } + }; +} + /// The Meilisearch config dir: /// `~/.config/Meilisearch` on *NIX or *BSD. /// `~/Library/ApplicationSupport` on macOS. @@ -78,60 +84,73 @@ pub enum DocumentFetchKind { Normal { with_filter: bool, limit: usize, offset: usize, retrieve_vectors: bool }, } -pub trait Analytics: Sync + Send { - fn instance_uid(&self) -> Option<&InstanceUid>; +pub trait Aggregate { + fn event_name(&self) -> &'static str; + + fn aggregate(self, other: Self) -> Self + where + Self: Sized; + + fn into_event(self) -> impl Serialize + where + Self: Sized; +} + +/// Helper trait to define multiple aggregate with the same content but a different name. +/// Commonly used when you must aggregate a search with POST or with GET for example. +pub trait AggregateMethod { + fn event_name() -> &'static str; +} + +/// A macro used to quickly define multiple aggregate method with their name +#[macro_export] +macro_rules! 
aggregate_methods { + ($method:ident => $event_name:literal) => { + pub enum $method {} + + impl $crate::analytics::AggregateMethod for $method { + fn event_name() -> &'static str { + $event_name + } + } + }; + ($($method:ident => $event_name:literal,)+) => { + $( + aggregate_methods!($method => $event_name); + )+ + + }; +} + +pub struct Analytics { + // TODO: TAMO: remove + inner: Option, + + instance_uid: Option, + user: Option, + events: HashMap>, +} + +impl Analytics { + fn no_analytics() -> Self { + Self { inner: None, events: HashMap::new(), instance_uid: None, user: None } + } + + fn segment_analytics(segment: SegmentAnalytics) -> Self { + Self { + instance_uid: Some(segment.instance_uid), + user: Some(segment.user), + inner: Some(segment), + events: HashMap::new(), + } + } + + pub fn instance_uid(&self) -> Option<&InstanceUid> { + self.instance_uid + } /// The method used to publish most analytics that do not need to be batched every hours - fn publish(&self, event_name: String, send: Value, request: Option<&HttpRequest>); - - /// This method should be called to aggregate a get search - fn get_search(&self, aggregate: SearchAggregator); - - /// This method should be called to aggregate a post search - fn post_search(&self, aggregate: SearchAggregator); - - /// This method should be called to aggregate a get similar request - fn get_similar(&self, aggregate: SimilarAggregator); - - /// This method should be called to aggregate a post similar request - fn post_similar(&self, aggregate: SimilarAggregator); - - /// This method should be called to aggregate a post array of searches - fn post_multi_search(&self, aggregate: MultiSearchAggregator); - - /// This method should be called to aggregate post facet values searches - fn post_facet_search(&self, aggregate: FacetSearchAggregator); - - // this method should be called to aggregate an add documents request - fn add_documents( - &self, - documents_query: &UpdateDocumentsQuery, - index_creation: bool, - request: 
&HttpRequest, - ); - - // this method should be called to aggregate a fetch documents request - fn get_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest); - - // this method should be called to aggregate a fetch documents request - fn post_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest); - - // this method should be called to aggregate a add documents request - fn delete_documents(&self, kind: DocumentDeletionKind, request: &HttpRequest); - - // this method should be called to batch an update documents request - fn update_documents( - &self, - documents_query: &UpdateDocumentsQuery, - index_creation: bool, - request: &HttpRequest, - ); - - // this method should be called to batch an update documents by function request - fn update_documents_by_function( - &self, - documents_query: &DocumentEditionByFunction, - index_creation: bool, - request: &HttpRequest, - ); + pub fn publish(&self, send: impl Aggregate, request: Option<&HttpRequest>) { + let Some(segment) = self.inner else { return }; + } } diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 476b3264e..8a6dfd780 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -25,7 +25,8 @@ use tokio::sync::mpsc::{self, Receiver, Sender}; use uuid::Uuid; use super::{ - config_user_id_path, DocumentDeletionKind, DocumentFetchKind, MEILISEARCH_CONFIG_PATH, + config_user_id_path, Aggregate, AggregateMethod, DocumentDeletionKind, DocumentFetchKind, + MEILISEARCH_CONFIG_PATH, }; use crate::analytics::Analytics; use crate::option::{ @@ -40,7 +41,7 @@ use crate::search::{ DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, DEFAULT_SEMANTIC_RATIO, }; -use crate::Opt; +use crate::{aggregate_methods, Opt}; const ANALYTICS_HEADER: &str = "X-Meilisearch-Client"; @@ -87,9 +88,9 @@ pub enum AnalyticsMsg { } pub struct 
SegmentAnalytics { - instance_uid: InstanceUid, + pub instance_uid: InstanceUid, sender: Sender, - user: User, + pub user: User, } impl SegmentAnalytics { @@ -98,7 +99,7 @@ impl SegmentAnalytics { opt: &Opt, index_scheduler: Arc, auth_controller: Arc, - ) -> Arc { + ) -> Arc { let instance_uid = super::find_user_id(&opt.db_path); let first_time_run = instance_uid.is_none(); let instance_uid = instance_uid.unwrap_or_else(Uuid::new_v4); @@ -108,7 +109,7 @@ impl SegmentAnalytics { // if reqwest throws an error we won't be able to send analytics if client.is_err() { - return super::MockAnalytics::new(opt); + return Arc::new(Analytics::no_analytics()); } let client = @@ -161,10 +162,11 @@ impl SegmentAnalytics { let this = Self { instance_uid, sender, user: user.clone() }; - Arc::new(this) + Arc::new(Analytics::segment_analytics(this)) } } +/* impl super::Analytics for SegmentAnalytics { fn instance_uid(&self) -> Option<&InstanceUid> { Some(&self.instance_uid) @@ -253,6 +255,7 @@ impl super::Analytics for SegmentAnalytics { let _ = self.sender.try_send(AnalyticsMsg::AggregatePostFetchDocuments(aggregate)); } } +*/ /// This structure represent the `infos` field we send in the analytics. 
/// It's quite close to the `Opt` structure except all sensitive informations @@ -607,12 +610,7 @@ impl Segment { } #[derive(Default)] -pub struct SearchAggregator { - timestamp: Option, - - // context - user_agents: HashSet, - +pub struct SearchAggregator { // requests total_received: usize, total_succeeded: usize, @@ -684,9 +682,11 @@ pub struct SearchAggregator { show_ranking_score: bool, show_ranking_score_details: bool, ranking_score_threshold: bool, + + marker: std::marker::PhantomData, } -impl SearchAggregator { +impl SearchAggregator { #[allow(clippy::field_reassign_with_default)] pub fn from_query(query: &SearchQuery, request: &HttpRequest) -> Self { let SearchQuery { @@ -827,12 +827,21 @@ impl SearchAggregator { } self.time_spent.push(*processing_time_ms as usize); } +} - /// Aggregate one [SearchAggregator] into another. - pub fn aggregate(&mut self, mut other: Self) { +aggregate_methods!( + SearchGET => "Documents Searched GET", + SearchPOST => "Documents Searched POST", + +); + +impl Aggregate for SearchAggregator { + fn event_name(&self) -> &'static str { + Method::event_name() + } + + fn aggregate(mut self, mut other: Self) -> Self { let Self { - timestamp, - user_agents, total_received, total_succeeded, ref mut time_spent, @@ -871,17 +880,9 @@ impl SearchAggregator { total_used_negative_operator, ranking_score_threshold, ref mut locales, + marker: _, } = other; - if self.timestamp.is_none() { - self.timestamp = timestamp; - } - - // context - for user_agent in user_agents.into_iter() { - self.user_agents.insert(user_agent); - } - // request self.total_received = self.total_received.saturating_add(total_received); self.total_succeeded = self.total_succeeded.saturating_add(total_succeeded); @@ -961,12 +962,12 @@ impl SearchAggregator { // locales self.locales.append(locales); + + self } - pub fn into_event(self, user: &User, event_name: &str) -> Option { + fn into_event(self) -> Option { let Self { - timestamp, - user_agents, total_received, 
total_succeeded, time_spent, @@ -1005,90 +1006,78 @@ impl SearchAggregator { total_used_negative_operator, ranking_score_threshold, locales, + marker: _, } = self; - if total_received == 0 { - None - } else { - // we get all the values in a sorted manner - let time_spent = time_spent.into_sorted_vec(); - // the index of the 99th percentage of value - let percentile_99th = time_spent.len() * 99 / 100; - // We are only interested by the slowest value of the 99th fastest results - let time_spent = time_spent.get(percentile_99th); + // we get all the values in a sorted manner + let time_spent = time_spent.into_sorted_vec(); + // the index of the 99th percentage of value + let percentile_99th = time_spent.len() * 99 / 100; + // We are only interested by the slowest value of the 99th fastest results + let time_spent = time_spent.get(percentile_99th); - let properties = json!({ - "user-agent": user_agents, - "requests": { - "99th_response_time": time_spent.map(|t| format!("{:.2}", t)), - "total_succeeded": total_succeeded, - "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics - "total_received": total_received, - "total_degraded": total_degraded, - "total_used_negative_operator": total_used_negative_operator, - }, - "sort": { - "with_geoPoint": sort_with_geo_point, - "avg_criteria_number": format!("{:.2}", sort_sum_of_criteria_terms as f64 / sort_total_number_of_criteria as f64), - }, - "distinct": distinct, - "filter": { - "with_geoRadius": filter_with_geo_radius, - "with_geoBoundingBox": filter_with_geo_bounding_box, - "avg_criteria_number": format!("{:.2}", filter_sum_of_criteria_terms as f64 / filter_total_number_of_criteria as f64), - "most_used_syntax": used_syntax.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), - }, - "attributes_to_search_on": { - "total_number_of_uses": attributes_to_search_on_total_number_of_uses, - }, - "q": { - "max_terms_number": max_terms_number, - }, - 
"vector": { - "max_vector_size": max_vector_size, - "retrieve_vectors": retrieve_vectors, - }, - "hybrid": { - "enabled": hybrid, - "semantic_ratio": semantic_ratio, - }, - "pagination": { - "max_limit": max_limit, - "max_offset": max_offset, - "most_used_navigation": if finite_pagination > (total_received / 2) { "exhaustive" } else { "estimated" }, - }, - "formatting": { - "max_attributes_to_retrieve": max_attributes_to_retrieve, - "max_attributes_to_highlight": max_attributes_to_highlight, - "highlight_pre_tag": highlight_pre_tag, - "highlight_post_tag": highlight_post_tag, - "max_attributes_to_crop": max_attributes_to_crop, - "crop_marker": crop_marker, - "show_matches_position": show_matches_position, - "crop_length": crop_length, - }, - "facets": { - "avg_facets_number": format!("{:.2}", facets_sum_of_terms as f64 / facets_total_number_of_facets as f64), - }, - "matching_strategy": { - "most_used_strategy": matching_strategy.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), - }, - "locales": locales, - "scoring": { - "show_ranking_score": show_ranking_score, - "show_ranking_score_details": show_ranking_score_details, - "ranking_score_threshold": ranking_score_threshold, - }, - }); - - Some(Track { - timestamp, - user: user.clone(), - event: event_name.to_string(), - properties, - ..Default::default() - }) - } + json!({ + "requests": { + "99th_response_time": time_spent.map(|t| format!("{:.2}", t)), + "total_succeeded": total_succeeded, + "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics + "total_received": total_received, + "total_degraded": total_degraded, + "total_used_negative_operator": total_used_negative_operator, + }, + "sort": { + "with_geoPoint": sort_with_geo_point, + "avg_criteria_number": format!("{:.2}", sort_sum_of_criteria_terms as f64 / sort_total_number_of_criteria as f64), + }, + "distinct": distinct, + "filter": { + "with_geoRadius": filter_with_geo_radius, 
+ "with_geoBoundingBox": filter_with_geo_bounding_box, + "avg_criteria_number": format!("{:.2}", filter_sum_of_criteria_terms as f64 / filter_total_number_of_criteria as f64), + "most_used_syntax": used_syntax.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), + }, + "attributes_to_search_on": { + "total_number_of_uses": attributes_to_search_on_total_number_of_uses, + }, + "q": { + "max_terms_number": max_terms_number, + }, + "vector": { + "max_vector_size": max_vector_size, + "retrieve_vectors": retrieve_vectors, + }, + "hybrid": { + "enabled": hybrid, + "semantic_ratio": semantic_ratio, + }, + "pagination": { + "max_limit": max_limit, + "max_offset": max_offset, + "most_used_navigation": if finite_pagination > (total_received / 2) { "exhaustive" } else { "estimated" }, + }, + "formatting": { + "max_attributes_to_retrieve": max_attributes_to_retrieve, + "max_attributes_to_highlight": max_attributes_to_highlight, + "highlight_pre_tag": highlight_pre_tag, + "highlight_post_tag": highlight_post_tag, + "max_attributes_to_crop": max_attributes_to_crop, + "crop_marker": crop_marker, + "show_matches_position": show_matches_position, + "crop_length": crop_length, + }, + "facets": { + "avg_facets_number": format!("{:.2}", facets_sum_of_terms as f64 / facets_total_number_of_facets as f64), + }, + "matching_strategy": { + "most_used_strategy": matching_strategy.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), + }, + "locales": locales, + "scoring": { + "show_ranking_score": show_ranking_score, + "show_ranking_score_details": show_ranking_score_details, + "ranking_score_threshold": ranking_score_threshold, + }, + }) } } diff --git a/meilisearch/src/lib.rs b/meilisearch/src/lib.rs index b24f18fae..80177876a 100644 --- a/meilisearch/src/lib.rs +++ b/meilisearch/src/lib.rs @@ -120,7 +120,7 @@ pub fn create_app( search_queue: Data, opt: Opt, logs: (LogRouteHandle, LogStderrHandle), - analytics: Arc, + 
analytics: Arc, enable_dashboard: bool, ) -> actix_web::App< impl ServiceFactory< @@ -473,7 +473,7 @@ pub fn configure_data( search_queue: Data, opt: &Opt, (logs_route, logs_stderr): (LogRouteHandle, LogStderrHandle), - analytics: Arc, + analytics: Arc, ) { let http_payload_size_limit = opt.http_payload_size_limit.as_u64() as usize; config diff --git a/meilisearch/src/routes/dump.rs b/meilisearch/src/routes/dump.rs index 7f3cd06a5..0fdeef5ed 100644 --- a/meilisearch/src/routes/dump.rs +++ b/meilisearch/src/routes/dump.rs @@ -4,7 +4,6 @@ use index_scheduler::IndexScheduler; use meilisearch_auth::AuthController; use meilisearch_types::error::ResponseError; use meilisearch_types::tasks::KindWithContent; -use serde_json::json; use tracing::debug; use crate::analytics::Analytics; @@ -18,14 +17,16 @@ pub fn configure(cfg: &mut web::ServiceConfig) { cfg.service(web::resource("").route(web::post().to(SeqHandler(create_dump)))); } +crate::empty_analytics!(DumpAnalytics, "Dump Created"); + pub async fn create_dump( index_scheduler: GuardedData, Data>, auth_controller: GuardedData, Data>, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { - analytics.publish("Dump Created".to_string(), json!({}), Some(&req)); + analytics.publish(DumpAnalytics::default(), Some(&req)); let task = KindWithContent::DumpCreation { keys: auth_controller.list_keys()?, diff --git a/meilisearch/src/routes/features.rs b/meilisearch/src/routes/features.rs index bc656bdbb..24c89938d 100644 --- a/meilisearch/src/routes/features.rs +++ b/meilisearch/src/routes/features.rs @@ -6,10 +6,11 @@ use index_scheduler::IndexScheduler; use meilisearch_types::deserr::DeserrJsonError; use meilisearch_types::error::ResponseError; use meilisearch_types::keys::actions; +use serde::Serialize; use serde_json::json; use tracing::debug; -use crate::analytics::Analytics; +use crate::analytics::{Aggregate, Analytics}; use crate::extractors::authentication::policies::ActionPolicy; 
use crate::extractors::authentication::GuardedData; use crate::extractors::sequential_extractor::SeqHandler; @@ -22,17 +23,19 @@ pub fn configure(cfg: &mut web::ServiceConfig) { ); } +crate::empty_analytics!(GetExperimentalFeatureAnalytics, "Experimental features Seen"); + async fn get_features( index_scheduler: GuardedData< ActionPolicy<{ actions::EXPERIMENTAL_FEATURES_GET }>, Data, >, req: HttpRequest, - analytics: Data, + analytics: Data, ) -> HttpResponse { let features = index_scheduler.features(); - analytics.publish("Experimental features Seen".to_string(), json!(null), Some(&req)); + analytics.publish(GetExperimentalFeatureAnalytics::default(), Some(&req)); let features = features.runtime_features(); debug!(returns = ?features, "Get features"); HttpResponse::Ok().json(features) @@ -53,6 +56,38 @@ pub struct RuntimeTogglableFeatures { pub contains_filter: Option, } +#[derive(Serialize)] +pub struct PatchExperimentalFeatureAnalytics { + vector_store: bool, + metrics: bool, + logs_route: bool, + edit_documents_by_function: bool, + contains_filter: bool, +} + +impl Aggregate for PatchExperimentalFeatureAnalytics { + fn event_name(&self) -> &'static str { + "Experimental features Updated" + } + + fn aggregate(self, other: Self) -> Self + where + Self: Sized, + { + Self { + vector_store: other.vector_store, + metrics: other.metrics, + logs_route: other.logs_route, + edit_documents_by_function: other.edit_documents_by_function, + contains_filter: other.contains_filter, + } + } + + fn into_event(self) -> serde_json::Value { + serde_json::to_value(self).unwrap() + } +} + async fn patch_features( index_scheduler: GuardedData< ActionPolicy<{ actions::EXPERIMENTAL_FEATURES_UPDATE }>, @@ -60,7 +95,7 @@ async fn patch_features( >, new_features: AwebJson, req: HttpRequest, - analytics: Data, + analytics: Data, ) -> Result { let features = index_scheduler.features(); debug!(parameters = ?new_features, "Patch features"); @@ -89,14 +124,13 @@ async fn patch_features( } = 
new_features; analytics.publish( - "Experimental features Updated".to_string(), - json!({ - "vector_store": vector_store, - "metrics": metrics, - "logs_route": logs_route, - "edit_documents_by_function": edit_documents_by_function, - "contains_filter": contains_filter, - }), + PatchExperimentalFeatureAnalytics { + vector_store, + metrics, + logs_route, + edit_documents_by_function, + contains_filter, + }, Some(&req), ); index_scheduler.put_runtime_features(new_features)?; diff --git a/meilisearch/src/routes/indexes/documents.rs b/meilisearch/src/routes/indexes/documents.rs index 85cf33c54..8f4cd026d 100644 --- a/meilisearch/src/routes/indexes/documents.rs +++ b/meilisearch/src/routes/indexes/documents.rs @@ -1,4 +1,6 @@ +use std::collections::HashSet; use std::io::ErrorKind; +use std::marker::PhantomData; use actix_web::http::header::CONTENT_TYPE; use actix_web::web::Data; @@ -23,14 +25,14 @@ use meilisearch_types::tasks::KindWithContent; use meilisearch_types::{milli, Document, Index}; use mime::Mime; use once_cell::sync::Lazy; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use serde_json::Value; use tempfile::tempfile; use tokio::fs::File; use tokio::io::{AsyncSeekExt, AsyncWriteExt, BufWriter}; use tracing::debug; -use crate::analytics::{Analytics, DocumentDeletionKind, DocumentFetchKind}; +use crate::analytics::{Aggregate, AggregateMethod, Analytics, DocumentDeletionKind}; use crate::error::MeilisearchHttpError; use crate::error::PayloadError::ReceivePayload; use crate::extractors::authentication::policies::*; @@ -41,7 +43,7 @@ use crate::routes::{ get_task_id, is_dry_run, PaginationView, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT, }; use crate::search::{parse_filter, RetrieveVectors}; -use crate::Opt; +use crate::{aggregate_methods, Opt}; static ACCEPTED_CONTENT_TYPE: Lazy> = Lazy::new(|| { vec!["application/json".to_string(), "application/x-ndjson".to_string(), "text/csv".to_string()] @@ -100,12 +102,82 @@ pub struct GetDocument { 
retrieve_vectors: Param, } +#[derive(Default, Serialize)] +pub struct DocumentsFetchAggregator { + #[serde(rename = "requests.total_received")] + total_received: usize, + + // a call on ../documents/:doc_id + per_document_id: bool, + // if a filter was used + per_filter: bool, + + #[serde(rename = "vector.retrieve_vectors")] + retrieve_vectors: bool, + + // pagination + #[serde(rename = "pagination.max_limit")] + max_limit: usize, + #[serde(rename = "pagination.max_offset")] + max_offset: usize, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum DocumentFetchKind { + PerDocumentId { retrieve_vectors: bool }, + Normal { with_filter: bool, limit: usize, offset: usize, retrieve_vectors: bool }, +} + +impl DocumentsFetchAggregator { + pub fn from_query(query: &DocumentFetchKind) -> Self { + let (limit, offset, retrieve_vectors) = match query { + DocumentFetchKind::PerDocumentId { retrieve_vectors } => (1, 0, *retrieve_vectors), + DocumentFetchKind::Normal { limit, offset, retrieve_vectors, .. } => { + (*limit, *offset, *retrieve_vectors) + } + }; + Self { + total_received: 1, + per_document_id: matches!(query, DocumentFetchKind::PerDocumentId { .. }), + per_filter: matches!(query, DocumentFetchKind::Normal { with_filter, .. 
} if *with_filter), + max_limit: limit, + max_offset: offset, + retrieve_vectors, + } + } +} + +impl Aggregate for DocumentsFetchAggregator { + // TODO: TAMO: Should we do the same event for the GET requests + fn event_name(&self) -> &'static str { + "Documents Fetched POST" + } + + fn aggregate(self, other: Self) -> Self + where + Self: Sized, + { + Self { + total_received: self.total_received.saturating_add(other.total_received), + per_document_id: self.per_document_id | other.per_document_id, + per_filter: self.per_filter | other.per_filter, + retrieve_vectors: self.retrieve_vectors | other.retrieve_vectors, + max_limit: self.max_limit.max(other.max_limit), + max_offset: self.max_offset.max(other.max_offset), + } + } + + fn into_event(self) -> Value { + serde_json::to_value(self).unwrap() + } +} + pub async fn get_document( index_scheduler: GuardedData, Data>, document_param: web::Path, params: AwebQueryParameter, req: HttpRequest, - analytics: web::Data, + analytics: web::Data, ) -> Result { let DocumentParam { index_uid, document_id } = document_param.into_inner(); debug!(parameters = ?params, "Get document"); @@ -117,9 +189,12 @@ pub async fn get_document( let features = index_scheduler.features(); let retrieve_vectors = RetrieveVectors::new(param_retrieve_vectors.0, features)?; - analytics.get_fetch_documents( - &DocumentFetchKind::PerDocumentId { retrieve_vectors: param_retrieve_vectors.0 }, - &req, + analytics.publish( + DocumentsFetchAggregator { + retrieve_vectors: param_retrieve_vectors.0, + ..Default::default() + }, + Some(&req), ); let index = index_scheduler.index(&index_uid)?; @@ -129,17 +204,57 @@ pub async fn get_document( Ok(HttpResponse::Ok().json(document)) } +#[derive(Default, Serialize)] +pub struct DocumentsDeletionAggregator { + #[serde(rename = "requests.total_received")] + total_received: usize, + per_document_id: bool, + clear_all: bool, + per_batch: bool, + per_filter: bool, +} + +impl Aggregate for DocumentsDeletionAggregator { + fn 
event_name(&self) -> &'static str { + "Documents Deleted" + } + + fn aggregate(self, other: Self) -> Self + where + Self: Sized, + { + Self { + total_received: self.total_received.saturating_add(other.total_received), + per_document_id: self.per_document_id | other.per_document_id, + clear_all: self.clear_all | other.clear_all, + per_batch: self.per_batch | other.per_batch, + per_filter: self.per_filter | other.per_filter, + } + } + + fn into_event(self) -> Value { + serde_json::to_value(self).unwrap() + } +} + pub async fn delete_document( index_scheduler: GuardedData, Data>, path: web::Path, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { let DocumentParam { index_uid, document_id } = path.into_inner(); let index_uid = IndexUid::try_from(index_uid)?; - analytics.delete_documents(DocumentDeletionKind::PerDocumentId, &req); + analytics.publish( + DocumentsDeletionAggregator { + total_received: 1, + per_document_id: true, + ..Default::default() + }, + Some(&req), + ); let task = KindWithContent::DocumentDeletion { index_uid: index_uid.to_string(), @@ -190,19 +305,21 @@ pub async fn documents_by_query_post( index_uid: web::Path, body: AwebJson, req: HttpRequest, - analytics: web::Data, + analytics: web::Data, ) -> Result { let body = body.into_inner(); debug!(parameters = ?body, "Get documents POST"); - analytics.post_fetch_documents( - &DocumentFetchKind::Normal { - with_filter: body.filter.is_some(), - limit: body.limit, - offset: body.offset, + analytics.publish( + DocumentsFetchAggregator { + total_received: 1, + per_filter: body.filter.is_some(), retrieve_vectors: body.retrieve_vectors, + max_limit: body.limit, + max_offset: body.offset, + ..Default::default() }, - &req, + Some(&req), ); documents_by_query(&index_scheduler, index_uid, body) @@ -213,7 +330,7 @@ pub async fn get_documents( index_uid: web::Path, params: AwebQueryParameter, req: HttpRequest, - analytics: web::Data, + analytics: web::Data, ) -> Result { 
debug!(parameters = ?params, "Get documents GET"); @@ -235,14 +352,16 @@ pub async fn get_documents( filter, }; - analytics.get_fetch_documents( - &DocumentFetchKind::Normal { - with_filter: query.filter.is_some(), - limit: query.limit, - offset: query.offset, + analytics.publish( + DocumentsFetchAggregator { + total_received: 1, + per_filter: query.filter.is_some(), retrieve_vectors: query.retrieve_vectors, + max_limit: query.limit, + max_offset: query.offset, + ..Default::default() }, - &req, + Some(&req), ); documents_by_query(&index_scheduler, index_uid, query) @@ -298,6 +417,42 @@ fn from_char_csv_delimiter( } } +aggregate_methods!( + Replaced => "Documents Added", + Updated => "Documents Updated", +); + +#[derive(Default, Serialize)] +pub struct DocumentsAggregator { + payload_types: HashSet, + primary_key: HashSet, + index_creation: bool, + #[serde(skip)] + method: PhantomData, +} + +impl Aggregate for DocumentsAggregator { + fn event_name(&self) -> &'static str { + Method::event_name() + } + + fn aggregate(mut self, other: Self) -> Self + where + Self: Sized, + { + Self { + payload_types: self.payload_types.union(&other.payload_types).collect(), + primary_key: self.primary_key.union(&other.primary_key).collect(), + index_creation: self.index_creation | other.index_creation, + method: PhantomData, + } + } + + fn into_event(self) -> Value { + serde_json::to_value(self).unwrap() + } +} + pub async fn replace_documents( index_scheduler: GuardedData, Data>, index_uid: web::Path, @@ -305,17 +460,33 @@ pub async fn replace_documents( body: Payload, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { let index_uid = IndexUid::try_from(index_uid.into_inner())?; debug!(parameters = ?params, "Replace documents"); let params = params.into_inner(); - analytics.add_documents( - ¶ms, - index_scheduler.index_exists(&index_uid).map_or(true, |x| !x), - &req, + let mut content_types = HashSet::new(); + let content_type = req + 
.headers() + .get(CONTENT_TYPE) + .and_then(|s| s.to_str().ok()) + .unwrap_or("unknown") + .to_string(); + content_types.insert(content_type); + let mut primary_keys = HashSet::new(); + if let Some(primary_key) = params.primary_key.clone() { + primary_keys.insert(primary_key); + } + analytics.publish( + DocumentsAggregator:: { + payload_types: content_types, + primary_key: primary_keys, + index_creation: index_scheduler.index_exists(&index_uid).map_or(true, |x| !x), + method: PhantomData, + }, + Some(&req), ); let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid); @@ -346,17 +517,33 @@ pub async fn update_documents( body: Payload, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { let index_uid = IndexUid::try_from(index_uid.into_inner())?; let params = params.into_inner(); debug!(parameters = ?params, "Update documents"); - analytics.add_documents( - ¶ms, - index_scheduler.index_exists(&index_uid).map_or(true, |x| !x), - &req, + let mut content_types = HashSet::new(); + let content_type = req + .headers() + .get(CONTENT_TYPE) + .and_then(|s| s.to_str().ok()) + .unwrap_or("unknown") + .to_string(); + content_types.insert(content_type); + let mut primary_keys = HashSet::new(); + if let Some(primary_key) = params.primary_key.clone() { + primary_keys.insert(primary_key); + } + analytics.publish( + DocumentsAggregator:: { + payload_types: content_types, + primary_key: primary_keys, + index_creation: index_scheduler.index_exists(&index_uid).map_or(true, |x| !x), + method: PhantomData, + }, + Some(&req), ); let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid); @@ -524,12 +711,15 @@ pub async fn delete_documents_batch( body: web::Json>, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { debug!(parameters = ?body, "Delete documents by batch"); let index_uid = IndexUid::try_from(index_uid.into_inner())?; - 
analytics.delete_documents(DocumentDeletionKind::PerBatch, &req); + analytics.publish( + DocumentsDeletionAggregator { total_received: 1, per_batch: true, ..Default::default() }, + Some(&req), + ); let ids = body .iter() @@ -562,14 +752,17 @@ pub async fn delete_documents_by_filter( body: AwebJson, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { debug!(parameters = ?body, "Delete documents by filter"); let index_uid = IndexUid::try_from(index_uid.into_inner())?; let index_uid = index_uid.into_inner(); let filter = body.into_inner().filter; - analytics.delete_documents(DocumentDeletionKind::PerFilter, &req); + analytics.publish( + DocumentsDeletionAggregator { total_received: 1, per_filter: true, ..Default::default() }, + Some(&req), + ); // we ensure the filter is well formed before enqueuing it crate::search::parse_filter(&filter, Code::InvalidDocumentFilter, index_scheduler.features())? @@ -599,13 +792,44 @@ pub struct DocumentEditionByFunction { pub function: String, } +#[derive(Default, Serialize)] +struct EditDocumentsByFunctionAggregator { + // Set to true if at least one request was filtered + filtered: bool, + // Set to true if at least one request contained a context + with_context: bool, + + index_creation: bool, +} + +impl Aggregate for EditDocumentsByFunctionAggregator { + fn event_name(&self) -> &'static str { + "Documents Edited By Function" + } + + fn aggregate(self, other: Self) -> Self + where + Self: Sized, + { + Self { + filtered: self.filtered | other.filtered, + with_context: self.with_context | other.with_context, + index_creation: self.index_creation | other.index_creation, + } + } + + fn into_event(self) -> Value { + serde_json::to_value(self).unwrap() + } +} + pub async fn edit_documents_by_function( index_scheduler: GuardedData, Data>, index_uid: web::Path, params: AwebJson, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { debug!(parameters = 
?params, "Edit documents by function"); @@ -617,10 +841,13 @@ pub async fn edit_documents_by_function( let index_uid = index_uid.into_inner(); let params = params.into_inner(); - analytics.update_documents_by_function( - ¶ms, - index_scheduler.index(&index_uid).is_err(), - &req, + analytics.publish( + EditDocumentsByFunctionAggregator { + filtered: params.filter.is_some(), + with_context: params.context.is_some(), + index_creation: index_scheduler.index(&index_uid).is_err(), + }, + Some(&req), ); let DocumentEditionByFunction { filter, context, function } = params; @@ -670,10 +897,13 @@ pub async fn clear_all_documents( index_uid: web::Path, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { let index_uid = IndexUid::try_from(index_uid.into_inner())?; - analytics.delete_documents(DocumentDeletionKind::ClearAll, &req); + analytics.publish( + DocumentsDeletionAggregator { total_received: 1, clear_all: true, ..Default::default() }, + Some(&req), + ); let task = KindWithContent::DocumentClear { index_uid: index_uid.to_string() }; let uid = get_task_id(&req, &opt)?; diff --git a/meilisearch/src/routes/indexes/facet_search.rs b/meilisearch/src/routes/indexes/facet_search.rs index 1df80711d..1e9d0e15e 100644 --- a/meilisearch/src/routes/indexes/facet_search.rs +++ b/meilisearch/src/routes/indexes/facet_search.rs @@ -1,3 +1,5 @@ +use std::collections::{BinaryHeap, HashSet}; + use actix_web::web::Data; use actix_web::{web, HttpRequest, HttpResponse}; use deserr::actix_web::AwebJson; @@ -10,14 +12,15 @@ use meilisearch_types::locales::Locale; use serde_json::Value; use tracing::debug; -use crate::analytics::{Analytics, FacetSearchAggregator}; +use crate::analytics::{Aggregate, Analytics}; use crate::extractors::authentication::policies::*; use crate::extractors::authentication::GuardedData; use crate::routes::indexes::search::search_kind; use crate::search::{ - add_search_rules, perform_facet_search, HybridQuery, MatchingStrategy, 
RankingScoreThreshold, - SearchQuery, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG, - DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET, + add_search_rules, perform_facet_search, FacetSearchResult, HybridQuery, MatchingStrategy, + RankingScoreThreshold, SearchQuery, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, + DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, + DEFAULT_SEARCH_OFFSET, }; use crate::search_queue::SearchQueue; @@ -53,13 +56,110 @@ pub struct FacetSearchQuery { pub locales: Option>, } +#[derive(Default)] +pub struct FacetSearchAggregator { + // requests + total_received: usize, + total_succeeded: usize, + time_spent: BinaryHeap, + + // The set of all facetNames that were used + facet_names: HashSet, + + // As there been any other parameter than the facetName or facetQuery ones? + additional_search_parameters_provided: bool, +} + +impl FacetSearchAggregator { + #[allow(clippy::field_reassign_with_default)] + pub fn from_query(query: &FacetSearchQuery, request: &HttpRequest) -> Self { + let FacetSearchQuery { + facet_query: _, + facet_name, + vector, + q, + filter, + matching_strategy, + attributes_to_search_on, + hybrid, + ranking_score_threshold, + locales, + } = query; + + Self { + total_received: 1, + facet_names: Some(facet_name.clone()).into_iter().collect(), + additional_search_parameters_provided: q.is_some() + || vector.is_some() + || filter.is_some() + || *matching_strategy != MatchingStrategy::default() + || attributes_to_search_on.is_some() + || hybrid.is_some() + || ranking_score_threshold.is_some() + || locales.is_some(), + ..Default::default() + } + } + + pub fn succeed(&mut self, result: &FacetSearchResult) { + let FacetSearchResult { facet_hits: _, facet_query: _, processing_time_ms } = result; + self.total_succeeded = 1; + self.time_spent.push(*processing_time_ms as usize); + } +} + +impl Aggregate for FacetSearchAggregator { + fn event_name(&self) -> &'static str { 
+ "Facet Searched POST" + } + + fn aggregate(mut self, other: Self) -> Self + where + Self: Sized, + { + self.time_spent.insert(other.time_spent); + + Self { + total_received: self.total_received.saturating_add(other.total_received), + total_succeeded: self.total_succeeded.saturating_add(other.total_succeeded), + time_spent: self.time_spent, + facet_names: self.facet_names.union(&other.facet_names).collect(), + additional_search_parameters_provided: self.additional_search_parameters_provided + | other.additional_search_parameters_provided, + } + } + + fn into_event(self) -> Value { + let Self { + total_received, + total_succeeded, + time_spent, + facet_names, + additional_search_parameters_provided, + } = self; + + serde_json::json!({ + "requests": { + "99th_response_time": time_spent.map(|t| format!("{:.2}", t)), + "total_succeeded": total_succeeded, + "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics + "total_received": total_received, + }, + "facets": { + "total_distinct_facet_count": facet_names.len(), + "additional_search_parameters_provided": additional_search_parameters_provided, + }, + }) + } +} + pub async fn search( index_scheduler: GuardedData, Data>, search_queue: Data, index_uid: web::Path, params: AwebJson, req: HttpRequest, - analytics: web::Data, + analytics: web::Data, ) -> Result { let index_uid = IndexUid::try_from(index_uid.into_inner())?; @@ -100,7 +200,7 @@ pub async fn search( if let Ok(ref search_result) = search_result { aggregate.succeed(search_result); } - analytics.post_facet_search(aggregate); + analytics.publish(aggregate, Some(&req)); let search_result = search_result?; diff --git a/meilisearch/src/routes/indexes/mod.rs b/meilisearch/src/routes/indexes/mod.rs index 35b747ccf..483a48a16 100644 --- a/meilisearch/src/routes/indexes/mod.rs +++ b/meilisearch/src/routes/indexes/mod.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeSet; use std::convert::Infallible; use actix_web::web::Data; @@ 
-18,7 +19,7 @@ use time::OffsetDateTime; use tracing::debug; use super::{get_task_id, Pagination, SummarizedTaskView, PAGINATION_DEFAULT_LIMIT}; -use crate::analytics::Analytics; +use crate::analytics::{Aggregate, Analytics}; use crate::extractors::authentication::policies::*; use crate::extractors::authentication::{AuthenticationError, GuardedData}; use crate::extractors::sequential_extractor::SeqHandler; @@ -123,12 +124,34 @@ pub struct IndexCreateRequest { primary_key: Option, } +#[derive(Serialize)] +struct IndexCreatedAggregate { + primary_key: BTreeSet, +} + +impl Aggregate for IndexCreatedAggregate { + fn event_name(&self) -> &'static str { + "Index Created" + } + + fn aggregate(self, other: Self) -> Self + where + Self: Sized, + { + Self { primary_key: self.primary_key.union(&other.primary_key).collect() } + } + + fn into_event(self) -> impl Serialize { + self + } +} + pub async fn create_index( index_scheduler: GuardedData, Data>, body: AwebJson, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { debug!(parameters = ?body, "Create index"); let IndexCreateRequest { primary_key, uid } = body.into_inner(); @@ -136,8 +159,7 @@ pub async fn create_index( let allow_index_creation = index_scheduler.filters().allow_index_creation(&uid); if allow_index_creation { analytics.publish( - "Index Created".to_string(), - json!({ "primary_key": primary_key }), + IndexCreatedAggregate { primary_key: primary_key.iter().cloned().collect() }, Some(&req), ); @@ -194,20 +216,37 @@ pub async fn get_index( Ok(HttpResponse::Ok().json(index_view)) } +#[derive(Serialize)] +struct IndexUpdatedAggregate { + primary_key: BTreeSet, +} + +impl Aggregate for IndexUpdatedAggregate { + fn event_name(&self) -> &'static str { + "Index Updated" + } + + fn aggregate(self, other: Self) -> Self { + Self { primary_key: self.primary_key.union(&other.primary_key).collect() } + } + + fn into_event(self) -> impl Serialize { + self + } +} pub async fn 
update_index( index_scheduler: GuardedData, Data>, index_uid: web::Path, body: AwebJson, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { debug!(parameters = ?body, "Update index"); let index_uid = IndexUid::try_from(index_uid.into_inner())?; let body = body.into_inner(); analytics.publish( - "Index Updated".to_string(), - json!({ "primary_key": body.primary_key }), + IndexUpdatedAggregate { primary_key: body.primary_key.iter().cloned().collect() }, Some(&req), ); diff --git a/meilisearch/src/routes/indexes/search.rs b/meilisearch/src/routes/indexes/search.rs index 6a8eee521..f833a57d2 100644 --- a/meilisearch/src/routes/indexes/search.rs +++ b/meilisearch/src/routes/indexes/search.rs @@ -13,6 +13,7 @@ use meilisearch_types::serde_cs::vec::CS; use serde_json::Value; use tracing::debug; +use crate::analytics::segment_analytics::{SearchGET, SearchPOST}; use crate::analytics::{Analytics, SearchAggregator}; use crate::error::MeilisearchHttpError; use crate::extractors::authentication::policies::*; @@ -225,7 +226,7 @@ pub async fn search_with_url_query( index_uid: web::Path, params: AwebQueryParameter, req: HttpRequest, - analytics: web::Data, + analytics: web::Data, ) -> Result { debug!(parameters = ?params, "Search get"); let index_uid = IndexUid::try_from(index_uid.into_inner())?; @@ -237,7 +238,7 @@ pub async fn search_with_url_query( add_search_rules(&mut query.filter, search_rules); } - let mut aggregate = SearchAggregator::from_query(&query, &req); + let mut aggregate = SearchAggregator::::from_query(&query, &req); let index = index_scheduler.index(&index_uid)?; let features = index_scheduler.features(); @@ -254,7 +255,7 @@ pub async fn search_with_url_query( if let Ok(ref search_result) = search_result { aggregate.succeed(search_result); } - analytics.get_search(aggregate); + analytics.publish(aggregate, Some(&req)); let search_result = search_result?; @@ -268,7 +269,7 @@ pub async fn search_with_post( index_uid: 
web::Path, params: AwebJson, req: HttpRequest, - analytics: web::Data, + analytics: web::Data, ) -> Result { let index_uid = IndexUid::try_from(index_uid.into_inner())?; @@ -280,7 +281,7 @@ pub async fn search_with_post( add_search_rules(&mut query.filter, search_rules); } - let mut aggregate = SearchAggregator::from_query(&query, &req); + let mut aggregate = SearchAggregator::::from_query(&query, &req); let index = index_scheduler.index(&index_uid)?; @@ -302,7 +303,7 @@ pub async fn search_with_post( MEILISEARCH_DEGRADED_SEARCH_REQUESTS.inc(); } } - analytics.post_search(aggregate); + analytics.publish(aggregate, Some(&req)); let search_result = search_result?; diff --git a/meilisearch/src/routes/indexes/settings.rs b/meilisearch/src/routes/indexes/settings.rs index aaf8673d0..112f8671b 100644 --- a/meilisearch/src/routes/indexes/settings.rs +++ b/meilisearch/src/routes/indexes/settings.rs @@ -1,3 +1,5 @@ +use std::collections::{BTreeSet, HashSet}; + use actix_web::web::Data; use actix_web::{web, HttpRequest, HttpResponse}; use deserr::actix_web::AwebJson; @@ -7,12 +9,15 @@ use meilisearch_types::error::ResponseError; use meilisearch_types::facet_values_sort::FacetValuesSort; use meilisearch_types::index_uid::IndexUid; use meilisearch_types::milli::update::Setting; -use meilisearch_types::settings::{settings, RankingRuleView, SecretPolicy, Settings, Unchecked}; +use meilisearch_types::settings::{ + settings, ProximityPrecisionView, RankingRuleView, SecretPolicy, Settings, Unchecked, +}; use meilisearch_types::tasks::KindWithContent; +use serde::Serialize; use serde_json::json; use tracing::debug; -use crate::analytics::Analytics; +use crate::analytics::{Aggregate, Analytics}; use crate::extractors::authentication::policies::*; use crate::extractors::authentication::GuardedData; use crate::routes::{get_task_id, is_dry_run, SummarizedTaskView}; @@ -80,7 +85,7 @@ macro_rules! 
make_setting_route { body: deserr::actix_web::AwebJson, $err_ty>, req: HttpRequest, opt: web::Data, - $analytics_var: web::Data, + $analytics_var: web::Data, ) -> std::result::Result { let index_uid = IndexUid::try_from(index_uid.into_inner())?; @@ -162,16 +167,8 @@ make_setting_route!( "filterableAttributes", analytics, |setting: &Option>, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "FilterableAttributes Updated".to_string(), - json!({ - "filterable_attributes": { - "total": setting.as_ref().map(|filter| filter.len()).unwrap_or(0), - "has_geo": setting.as_ref().map(|filter| filter.contains("_geo")).unwrap_or(false), - } - }), + crate::routes::indexes::settings::FilterableAttributesAnalytics::new(setting.as_ref()).to_settings(), Some(req), ); } @@ -188,16 +185,8 @@ make_setting_route!( "sortableAttributes", analytics, |setting: &Option>, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "SortableAttributes Updated".to_string(), - json!({ - "sortable_attributes": { - "total": setting.as_ref().map(|sort| sort.len()), - "has_geo": setting.as_ref().map(|sort| sort.contains("_geo")), - }, - }), + crate::routes::indexes::settings::SortableAttributesAnalytics::new(setting.as_ref()).to_settings(), Some(req), ); } @@ -214,16 +203,8 @@ make_setting_route!( "displayedAttributes", analytics, |displayed: &Option>, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "DisplayedAttributes Updated".to_string(), - json!({ - "displayed_attributes": { - "total": displayed.as_ref().map(|displayed| displayed.len()), - "with_wildcard": displayed.as_ref().map(|displayed| displayed.iter().any(|displayed| displayed == "*")), - }, - }), + crate::routes::indexes::settings::DisplayedAttributesAnalytics::new(displayed.as_ref()).to_settings(), Some(req), ); } @@ -240,35 +221,8 @@ make_setting_route!( "typoTolerance", analytics, |setting: &Option, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "TypoTolerance 
Updated".to_string(), - json!({ - "typo_tolerance": { - "enabled": setting.as_ref().map(|s| !matches!(s.enabled, Setting::Set(false))), - "disable_on_attributes": setting - .as_ref() - .and_then(|s| s.disable_on_attributes.as_ref().set().map(|m| !m.is_empty())), - "disable_on_words": setting - .as_ref() - .and_then(|s| s.disable_on_words.as_ref().set().map(|m| !m.is_empty())), - "min_word_size_for_one_typo": setting - .as_ref() - .and_then(|s| s.min_word_size_for_typos - .as_ref() - .set() - .map(|s| s.one_typo.set())) - .flatten(), - "min_word_size_for_two_typos": setting - .as_ref() - .and_then(|s| s.min_word_size_for_typos - .as_ref() - .set() - .map(|s| s.two_typos.set())) - .flatten(), - }, - }), + crate::routes::indexes::settings::TypoToleranceAnalytics::new(setting.as_ref()).to_settings(), Some(req), ); } @@ -285,16 +239,8 @@ make_setting_route!( "searchableAttributes", analytics, |setting: &Option>, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "SearchableAttributes Updated".to_string(), - json!({ - "searchable_attributes": { - "total": setting.as_ref().map(|searchable| searchable.len()), - "with_wildcard": setting.as_ref().map(|searchable| searchable.iter().any(|searchable| searchable == "*")), - }, - }), + crate::routes::indexes::settings::SearchableAttributesAnalytics::new(setting.as_ref()).to_settings(), Some(req), ); } @@ -311,15 +257,8 @@ make_setting_route!( "stopWords", analytics, |stop_words: &Option>, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "StopWords Updated".to_string(), - json!({ - "stop_words": { - "total": stop_words.as_ref().map(|stop_words| stop_words.len()), - }, - }), + crate::routes::indexes::settings::StopWordsAnalytics::new(stop_words.as_ref()).to_settings(), Some(req), ); } @@ -336,15 +275,8 @@ make_setting_route!( "nonSeparatorTokens", analytics, |non_separator_tokens: &Option>, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "nonSeparatorTokens 
Updated".to_string(), - json!({ - "non_separator_tokens": { - "total": non_separator_tokens.as_ref().map(|non_separator_tokens| non_separator_tokens.len()), - }, - }), + crate::routes::indexes::settings::NonSeparatorTokensAnalytics::new(non_separator_tokens.as_ref()).to_settings(), Some(req), ); } @@ -361,15 +293,8 @@ make_setting_route!( "separatorTokens", analytics, |separator_tokens: &Option>, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "separatorTokens Updated".to_string(), - json!({ - "separator_tokens": { - "total": separator_tokens.as_ref().map(|separator_tokens| separator_tokens.len()), - }, - }), + crate::routes::indexes::settings::SeparatorTokensAnalytics::new(separator_tokens.as_ref()).to_settings(), Some(req), ); } @@ -386,15 +311,8 @@ make_setting_route!( "dictionary", analytics, |dictionary: &Option>, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "dictionary Updated".to_string(), - json!({ - "dictionary": { - "total": dictionary.as_ref().map(|dictionary| dictionary.len()), - }, - }), + crate::routes::indexes::settings::DictionaryAnalytics::new(dictionary.as_ref()).to_settings(), Some(req), ); } @@ -411,15 +329,8 @@ make_setting_route!( "synonyms", analytics, |synonyms: &Option>>, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "Synonyms Updated".to_string(), - json!({ - "synonyms": { - "total": synonyms.as_ref().map(|synonyms| synonyms.len()), - }, - }), + crate::routes::indexes::settings::SynonymsAnalytics::new(synonyms.as_ref()).to_settings(), Some(req), ); } @@ -436,14 +347,8 @@ make_setting_route!( "distinctAttribute", analytics, |distinct: &Option, req: &HttpRequest| { - use serde_json::json; analytics.publish( - "DistinctAttribute Updated".to_string(), - json!({ - "distinct_attribute": { - "set": distinct.is_some(), - } - }), + crate::routes::indexes::settings::DistinctAttributeAnalytics::new(distinct.as_ref()).to_settings(), Some(req), ); } @@ -460,15 +365,8 @@ 
make_setting_route!( "proximityPrecision", analytics, |precision: &Option, req: &HttpRequest| { - use serde_json::json; analytics.publish( - "ProximityPrecision Updated".to_string(), - json!({ - "proximity_precision": { - "set": precision.is_some(), - "value": precision.unwrap_or_default(), - } - }), + crate::routes::indexes::settings::ProximityPrecisionAnalytics::new(precision.as_ref()).to_settings(), Some(req), ); } @@ -485,12 +383,8 @@ make_setting_route!( "localizedAttributes", analytics, |rules: &Option>, req: &HttpRequest| { - use serde_json::json; analytics.publish( - "LocalizedAttributesRules Updated".to_string(), - json!({ - "locales": rules.as_ref().map(|rules| rules.iter().flat_map(|rule| rule.locales.iter().cloned()).collect::>()) - }), + crate::routes::indexes::settings::LocalesAnalytics::new(rules.as_ref()).to_settings(), Some(req), ); } @@ -507,21 +401,8 @@ make_setting_route!( "rankingRules", analytics, |setting: &Option>, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "RankingRules Updated".to_string(), - json!({ - "ranking_rules": { - "words_position": setting.as_ref().map(|rr| rr.iter().position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Words))), - "typo_position": setting.as_ref().map(|rr| rr.iter().position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Typo))), - "proximity_position": setting.as_ref().map(|rr| rr.iter().position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Proximity))), - "attribute_position": setting.as_ref().map(|rr| rr.iter().position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Attribute))), - "sort_position": setting.as_ref().map(|rr| rr.iter().position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Sort))), - "exactness_position": setting.as_ref().map(|rr| rr.iter().position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Exactness))), - "values": setting.as_ref().map(|rr| rr.iter().filter(|s| 
matches!(s, meilisearch_types::settings::RankingRuleView::Asc(_) | meilisearch_types::settings::RankingRuleView::Desc(_)) ).map(|x| x.to_string()).collect::>().join(", ")), - } - }), + crate::routes::indexes::settings::RankingRulesAnalytics::new(setting.as_ref()).to_settings(), Some(req), ); } @@ -538,20 +419,8 @@ make_setting_route!( "faceting", analytics, |setting: &Option, req: &HttpRequest| { - use serde_json::json; - use meilisearch_types::facet_values_sort::FacetValuesSort; - analytics.publish( - "Faceting Updated".to_string(), - json!({ - "faceting": { - "max_values_per_facet": setting.as_ref().and_then(|s| s.max_values_per_facet.set()), - "sort_facet_values_by_star_count": setting.as_ref().and_then(|s| { - s.sort_facet_values_by.as_ref().set().map(|s| s.iter().any(|(k, v)| k == "*" && v == &FacetValuesSort::Count)) - }), - "sort_facet_values_by_total": setting.as_ref().and_then(|s| s.sort_facet_values_by.as_ref().set().map(|s| s.len())), - }, - }), + crate::routes::indexes::settings::FacetingAnalytics::new(setting.as_ref()).to_settings(), Some(req), ); } @@ -568,15 +437,8 @@ make_setting_route!( "pagination", analytics, |setting: &Option, req: &HttpRequest| { - use serde_json::json; - analytics.publish( - "Pagination Updated".to_string(), - json!({ - "pagination": { - "max_total_hits": setting.as_ref().and_then(|s| s.max_total_hits.set()), - }, - }), + crate::routes::indexes::settings::PaginationAnalytics::new(setting.as_ref()).to_settings(), Some(req), ); } @@ -593,11 +455,8 @@ make_setting_route!( "embedders", analytics, |setting: &Option>>, req: &HttpRequest| { - - analytics.publish( - "Embedders Updated".to_string(), - serde_json::json!({"embedders": crate::routes::indexes::settings::embedder_analytics(setting.as_ref())}), + crate::routes::indexes::settings::EmbeddersAnalytics::new(setting.as_ref()).to_settings(), Some(req), ); } @@ -651,10 +510,15 @@ fn embedder_analytics( json!( { + // last "total": setting.as_ref().map(|s| s.len()), + // Merge the 
sources "sources": sources, + // |= "document_template_used": document_template_used, + // max "document_template_max_bytes": document_template_max_bytes, + // |= "binary_quantization_used": binary_quantization_used, } ) @@ -672,8 +536,7 @@ make_setting_route!( analytics, |setting: &Option, req: &HttpRequest| { analytics.publish( - "Search Cutoff Updated".to_string(), - serde_json::json!({"search_cutoff_ms": setting }), + crate::routes::indexes::settings::SearchCutoffMsAnalytics::new(setting.as_ref()).to_settings(), Some(req), ); } @@ -714,13 +577,639 @@ generate_configure!( search_cutoff_ms ); +#[derive(Serialize, Default)] +struct SettingsAnalytics { + ranking_rules: RankingRulesAnalytics, + searchable_attributes: SearchableAttributesAnalytics, + displayed_attributes: DisplayedAttributesAnalytics, + sortable_attributes: SortableAttributesAnalytics, + filterable_attributes: FilterableAttributesAnalytics, + distinct_attribute: DistinctAttributeAnalytics, + proximity_precision: ProximityPrecisionAnalytics, + typo_tolerance: TypoToleranceAnalytics, + faceting: FacetingAnalytics, + pagination: PaginationAnalytics, + stop_words: StopWordsAnalytics, + synonyms: SynonymsAnalytics, + embedders: EmbeddersAnalytics, + search_cutoff_ms: SearchCutoffMsAnalytics, + locales: LocalesAnalytics, + dictionary: DictionaryAnalytics, + separator_tokens: SeparatorTokensAnalytics, + non_separator_tokens: NonSeparatorTokensAnalytics, +} + +impl Aggregate for SettingsAnalytics { + fn event_name(&self) -> &'static str { + "Settings Updated" + } + + fn aggregate(self, other: Self) -> Self + where + Self: Sized, + { + Self { + ranking_rules: RankingRulesAnalytics { + words_position: self + .ranking_rules + .words_position + .or(other.ranking_rules.words_position), + typo_position: self + .ranking_rules + .typo_position + .or(other.ranking_rules.typo_position), + proximity_position: self + .ranking_rules + .proximity_position + .or(other.ranking_rules.proximity_position), + 
attribute_position: self + .ranking_rules + .attribute_position + .or(other.ranking_rules.attribute_position), + sort_position: self + .ranking_rules + .sort_position + .or(other.ranking_rules.sort_position), + exactness_position: self + .ranking_rules + .exactness_position + .or(other.ranking_rules.exactness_position), + values: self.ranking_rules.values.or(other.ranking_rules.values), + }, + searchable_attributes: SearchableAttributesAnalytics { + total: self.searchable_attributes.total.or(other.searchable_attributes.total), + with_wildcard: self + .searchable_attributes + .with_wildcard + .or(other.searchable_attributes.with_wildcard), + }, + displayed_attributes: DisplayedAttributesAnalytics { + total: self.displayed_attributes.total.or(other.displayed_attributes.total), + with_wildcard: self + .displayed_attributes + .with_wildcard + .or(other.displayed_attributes.with_wildcard), + }, + sortable_attributes: SortableAttributesAnalytics { + total: self.sortable_attributes.total.or(other.sortable_attributes.total), + has_geo: self.sortable_attributes.has_geo.or(other.sortable_attributes.has_geo), + }, + filterable_attributes: FilterableAttributesAnalytics { + total: self.filterable_attributes.total.or(other.filterable_attributes.total), + has_geo: self.filterable_attributes.has_geo.or(other.filterable_attributes.has_geo), + }, + distinct_attribute: DistinctAttributeAnalytics { + set: self.distinct_attribute.set.or(other.distinct_attribute.set), + }, + proximity_precision: ProximityPrecisionAnalytics { + set: self.proximity_precision.set(other.proximity_precision.set), + value: self.proximity_precision.value(other.proximity_precision.value), + }, + typo_tolerance: TypoToleranceAnalytics { + enabled: self.typo_tolerance.enabled.or(other.typo_tolerance.enabled), + disable_on_attributes: self + .typo_tolerance + .disable_on_attributes + .or(other.typo_tolerance.disable_on_attributes), + disable_on_words: self + .typo_tolerance + .disable_on_words + 
.or(other.typo_tolerance.disable_on_words), + min_word_size_for_one_typo: self + .typo_tolerance + .min_word_size_for_one_typo + .or(other.typo_tolerance.min_word_size_for_one_typo), + min_word_size_for_two_typos: self + .typo_tolerance + .min_word_size_for_two_typos + .or(other.typo_tolerance.min_word_size_for_two_typos), + }, + faceting: FacetingAnalytics { + max_values_per_facet: self + .faceting + .max_values_per_facet + .or(other.faceting.max_values_per_facet), + sort_facet_values_by_star_count: self + .faceting + .sort_facet_values_by_star_count + .or(other.faceting.sort_facet_values_by_star_count), + sort_facet_values_by_total: self + .faceting + .sort_facet_values_by_total + .or(other.faceting.sort_facet_values_by_total), + }, + pagination: PaginationAnalytics { + max_total_hits: self.pagination.max_total_hits.or(other.pagination.max_total_hits), + }, + stop_words: StopWordsAnalytics { + total: self.stop_words.total.or(other.stop_words.total), + }, + synonyms: SynonymsAnalytics { total: self.synonyms.total.or(other.synonyms.total) }, + embedders: EmbeddersAnalytics { + total: self.embedders.total.or(other.embedders.total), + sources: match (self.embedders.sources, other.embedders.sources) { + (None, None) => None, + (Some(sources), None) | (None, Some(sources)) => Some(sources), + (Some(this), Some(other)) => Some(this.union(&other).collect()), + }, + document_template_used: match ( + self.embedders.document_template_used, + other.embedders.document_template_used, + ) { + (None, None) => None, + (Some(used), None) | (None, Some(used)) => Some(used), + (Some(this), Some(other)) => Some(this | other), + }, + document_template_max_bytes: match ( + self.embedders.document_template_max_bytes, + other.embedders.document_template_max_bytes, + ) { + (None, None) => None, + (Some(bytes), None) | (None, Some(bytes)) => Some(bytes), + (Some(this), Some(other)) => Some(this.max(other)), + }, + binary_quantization_used: match ( + self.embedders.binary_quantization_used, 
+ other.embedders.binary_quantization_used, + ) { + (None, None) => None, + (Some(bq), None) | (None, Some(bq)) => Some(bq), + (Some(this), Some(other)) => Some(this | other), + }, + }, + search_cutoff_ms: SearchCutoffMsAnalytics { + search_cutoff_ms: self + .search_cutoff_ms + .search_cutoff_ms + .or(other.search_cutoff_ms.search_cutoff_ms), + }, + locales: LocalesAnalytics { locales: self.locales.locales.or(other.locales.locales) }, + dictionary: DictionaryAnalytics { + total: self.dictionary.total.or(other.dictionary.total), + }, + separator_tokens: SeparatorTokensAnalytics { + total: self.separator_tokens.total.or(other.non_separator_tokens.total), + }, + non_separator_tokens: NonSeparatorTokensAnalytics { + total: self.non_separator_tokens.total.or(other.non_separator_tokens.total), + }, + } + } + + fn into_event(self) -> impl Serialize + where + Self: Sized, + { + self + } +} + +#[derive(Serialize, Default)] +struct RankingRulesAnalytics { + words_position: Option, + typo_position: Option, + proximity_position: Option, + attribute_position: Option, + sort_position: Option, + exactness_position: Option, + values: Option, +} + +impl RankingRulesAnalytics { + pub fn new(rr: Option<&Vec>) -> Self { + RankingRulesAnalytics { + words_position: rr.as_ref().map(|rr| { + rr.iter() + .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Words)) + }), + typo_position: rr.as_ref().map(|rr| { + rr.iter() + .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Typo)) + }), + proximity_position: rr.as_ref().map(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Proximity) + }) + }), + attribute_position: rr.as_ref().map(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Attribute) + }) + }), + sort_position: rr.as_ref().map(|rr| { + rr.iter() + .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Sort)) + }), + exactness_position: 
rr.as_ref().map(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Exactness) + }) + }), + values: rr.as_ref().map(|rr| { + rr.iter() + .filter(|s| { + matches!( + s, + meilisearch_types::settings::RankingRuleView::Asc(_) + | meilisearch_types::settings::RankingRuleView::Desc(_) + ) + }) + .map(|x| x.to_string()) + .collect::>() + .join(", ") + }), + } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { ranking_rules: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct SearchableAttributesAnalytics { + total: Option, + with_wildcard: bool, +} + +impl SearchableAttributesAnalytics { + pub fn new(setting: Option<&Vec>) -> Self { + Self { + total: setting.as_ref().map(|searchable| searchable.len()), + with_wildcard: setting + .as_ref() + .map(|searchable| searchable.iter().any(|searchable| searchable == "*")), + } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { searchable_attributes: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct DisplayedAttributesAnalytics { + total: usize, + with_wildcard: bool, +} + +impl DisplayedAttributesAnalytics { + pub fn new(displayed: Option<&Vec>) -> Self { + Self { + total: displayed.as_ref().map(|displayed| displayed.len()), + with_wildcard: displayed + .as_ref() + .map(|displayed| displayed.iter().any(|displayed| displayed == "*")), + } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { displayed_attributes: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct SortableAttributesAnalytics { + total: usize, + has_geo: bool, +} + +impl SortableAttributesAnalytics { + pub fn new(setting: Option<&std::collections::BTreeSet>) -> Self { + Self { + total: setting.as_ref().map(|sort| sort.len()), + has_geo: setting.as_ref().map(|sort| sort.contains("_geo")), + } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { 
sortable_attributes: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct FilterableAttributesAnalytics { + total: usize, + has_geo: bool, +} + +impl FilterableAttributesAnalytics { + pub fn new(setting: Option<&std::collections::BTreeSet>) -> Self { + Self { + total: setting.as_ref().map(|filter| filter.len()).unwrap_or(0), + has_geo: setting.as_ref().map(|filter| filter.contains("_geo")).unwrap_or(false), + } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { filterable_attributes: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct DistinctAttributeAnalytics { + set: bool, +} + +impl DistinctAttributeAnalytics { + pub fn new(distinct: Option<&String>) -> Self { + Self { set: distinct.is_some() } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { distinct_attribute: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct ProximityPrecisionAnalytics { + set: bool, + value: Option, +} + +impl ProximityPrecisionAnalytics { + pub fn new(precision: Option<&meilisearch_types::settings::ProximityPrecisionView>) -> Self { + Self { set: precision.is_some(), value: precision.unwrap_or_default() } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { proximity_precision: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct TypoToleranceAnalytics { + enabled: Option, + disable_on_attributes: Option, + disable_on_words: Option, + min_word_size_for_one_typo: Option, + min_word_size_for_two_typos: Option, +} + +impl TypoToleranceAnalytics { + pub fn new(setting: Option<&meilisearch_types::settings::TypoSettings>) -> Self { + Self { + enabled: setting.as_ref().map(|s| !matches!(s.enabled, Setting::Set(false))), + disable_on_attributes: setting + .as_ref() + .and_then(|s| s.disable_on_attributes.as_ref().set().map(|m| !m.is_empty())), + disable_on_words: setting + .as_ref() + .and_then(|s| 
s.disable_on_words.as_ref().set().map(|m| !m.is_empty())), + min_word_size_for_one_typo: setting + .as_ref() + .and_then(|s| s.min_word_size_for_typos.as_ref().set().map(|s| s.one_typo.set())) + .flatten(), + min_word_size_for_two_typos: setting + .as_ref() + .and_then(|s| s.min_word_size_for_typos.as_ref().set().map(|s| s.two_typos.set())) + .flatten(), + } + } + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { typo_tolerance: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct FacetingAnalytics { + max_values_per_facet: Option, + sort_facet_values_by_star_count: Option, + sort_facet_values_by_total: Option, +} + +impl FacetingAnalytics { + pub fn new(setting: Option<&meilisearch_types::settings::FacetingSettings>) -> Self { + Self { + max_values_per_facet: setting.as_ref().and_then(|s| s.max_values_per_facet.set()), + sort_facet_values_by_star_count: setting.as_ref().and_then(|s| { + s.sort_facet_values_by + .as_ref() + .set() + .map(|s| s.iter().any(|(k, v)| k == "*" && v == &FacetValuesSort::Count)) + }), + sort_facet_values_by_total: setting + .as_ref() + .and_then(|s| s.sort_facet_values_by.as_ref().set().map(|s| s.len())), + } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { faceting: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct PaginationAnalytics { + max_total_hits: Option, +} + +impl PaginationAnalytics { + pub fn new(setting: Option<&meilisearch_types::settings::PaginationSettings>) -> Self { + Self { max_total_hits: setting.as_ref().and_then(|s| s.max_total_hits.set()) } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { pagination: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct StopWordsAnalytics { + total: Option, +} + +impl StopWordsAnalytics { + pub fn new(stop_words: Option<&BTreeSet>) -> Self { + Self { total: stop_words.as_ref().map(|stop_words| stop_words.len()) } + } + + pub fn 
to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { stop_words: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct SynonymsAnalytics { + total: Option, +} + +impl SynonymsAnalytics { + pub fn new(synonyms: Option<&std::collections::BTreeMap>>) -> Self { + Self { total: synonyms.as_ref().map(|synonyms| synonyms.len()) } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { synonyms: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct EmbeddersAnalytics { + // last + total: Option, + // Merge the sources + sources: Option>, + // |= + document_template_used: Option, + // max + document_template_max_bytes: Option, + // |= + binary_quantization_used: Option, +} + +impl EmbeddersAnalytics { + pub fn new( + setting: Option< + &std::collections::BTreeMap< + String, + Setting, + >, + >, + ) -> Self { + let mut sources = std::collections::HashSet::new(); + + if let Some(s) = &setting { + for source in s + .values() + .filter_map(|config| config.clone().set()) + .filter_map(|config| config.source.set()) + { + use meilisearch_types::milli::vector::settings::EmbedderSource; + match source { + EmbedderSource::OpenAi => sources.insert("openAi"), + EmbedderSource::HuggingFace => sources.insert("huggingFace"), + EmbedderSource::UserProvided => sources.insert("userProvided"), + EmbedderSource::Ollama => sources.insert("ollama"), + EmbedderSource::Rest => sources.insert("rest"), + }; + } + }; + + Self { + total: setting.as_ref().map(|s| s.len()), + sources, + document_template_used: setting.as_ref().map(|map| { + map.values() + .filter_map(|config| config.clone().set()) + .any(|config| config.document_template.set().is_some()) + }), + document_template_max_bytes: setting.as_ref().and_then(|map| { + map.values() + .filter_map(|config| config.clone().set()) + .filter_map(|config| config.document_template_max_bytes.set()) + .max() + }), + binary_quantization_used: setting.as_ref().map(|map| { + 
map.values() + .filter_map(|config| config.clone().set()) + .any(|config| config.binary_quantized.set().is_some()) + }), + } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { embedders: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +#[serde(transparent)] +struct SearchCutoffMsAnalytics { + search_cutoff_ms: Option, +} + +impl SearchCutoffMsAnalytics { + pub fn new(setting: Option<&u64>) -> Self { + Self { search_cutoff_ms: setting } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { search_cutoff_ms: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +#[serde(transparent)] +struct LocalesAnalytics { + locales: BTreeSet, +} + +impl LocalesAnalytics { + pub fn new( + rules: Option<&Vec>, + ) -> Self { + LocalesAnalytics { + locales: rules.as_ref().map(|rules| { + rules + .iter() + .flat_map(|rule| rule.locales.iter().cloned()) + .collect::>() + }), + } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { locales: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct DictionaryAnalytics { + total: usize, +} + +impl DictionaryAnalytics { + pub fn new(dictionary: Option<&std::collections::BTreeSet>) -> Self { + Self { total: dictionary.as_ref().map(|dictionary| dictionary.len()) } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { dictionary: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct SeparatorTokensAnalytics { + total: usize, +} + +impl SeparatorTokensAnalytics { + pub fn new(separator_tokens: Option<&std::collections::BTreeSet>) -> Self { + Self { total: separator_tokens.as_ref().map(|separator_tokens| separator_tokens.len()) } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { separator_tokens: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +struct NonSeparatorTokensAnalytics { + total: usize, +} + +impl 
NonSeparatorTokensAnalytics { + pub fn new(non_separator_tokens: Option<&std::collections::BTreeSet>) -> Self { + Self { + total: non_separator_tokens + .as_ref() + .map(|non_separator_tokens| non_separator_tokens.len()), + } + } + + pub fn to_settings(self) -> SettingsAnalytics { + SettingsAnalytics { non_separator_tokens: self, ..Default::default() } + } +} + pub async fn update_all( index_scheduler: GuardedData, Data>, index_uid: web::Path, body: AwebJson, DeserrJsonError>, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { let index_uid = IndexUid::try_from(index_uid.into_inner())?; @@ -729,103 +1218,44 @@ pub async fn update_all( let new_settings = validate_settings(new_settings, &index_scheduler)?; analytics.publish( - "Settings Updated".to_string(), - json!({ - "ranking_rules": { - "words_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| matches!(s, RankingRuleView::Words))), - "typo_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| matches!(s, RankingRuleView::Typo))), - "proximity_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| matches!(s, RankingRuleView::Proximity))), - "attribute_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| matches!(s, RankingRuleView::Attribute))), - "sort_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| matches!(s, RankingRuleView::Sort))), - "exactness_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| matches!(s, RankingRuleView::Exactness))), - "values": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().filter(|s| !matches!(s, RankingRuleView::Asc(_) | RankingRuleView::Desc(_)) ).map(|x| x.to_string()).collect::>().join(", ")), - }, - "searchable_attributes": { - "total": new_settings.searchable_attributes.as_ref().set().map(|searchable| searchable.len()), - 
"with_wildcard": new_settings.searchable_attributes.as_ref().set().map(|searchable| searchable.iter().any(|searchable| searchable == "*")), - }, - "displayed_attributes": { - "total": new_settings.displayed_attributes.as_ref().set().map(|displayed| displayed.len()), - "with_wildcard": new_settings.displayed_attributes.as_ref().set().map(|displayed| displayed.iter().any(|displayed| displayed == "*")), - }, - "sortable_attributes": { - "total": new_settings.sortable_attributes.as_ref().set().map(|sort| sort.len()), - "has_geo": new_settings.sortable_attributes.as_ref().set().map(|sort| sort.iter().any(|s| s == "_geo")), - }, - "filterable_attributes": { - "total": new_settings.filterable_attributes.as_ref().set().map(|filter| filter.len()), - "has_geo": new_settings.filterable_attributes.as_ref().set().map(|filter| filter.iter().any(|s| s == "_geo")), - }, - "distinct_attribute": { - "set": new_settings.distinct_attribute.as_ref().set().is_some() - }, - "proximity_precision": { - "set": new_settings.proximity_precision.as_ref().set().is_some(), - "value": new_settings.proximity_precision.as_ref().set().copied().unwrap_or_default() - }, - "typo_tolerance": { - "enabled": new_settings.typo_tolerance - .as_ref() - .set() - .and_then(|s| s.enabled.as_ref().set()) - .copied(), - "disable_on_attributes": new_settings.typo_tolerance - .as_ref() - .set() - .and_then(|s| s.disable_on_attributes.as_ref().set().map(|m| !m.is_empty())), - "disable_on_words": new_settings.typo_tolerance - .as_ref() - .set() - .and_then(|s| s.disable_on_words.as_ref().set().map(|m| !m.is_empty())), - "min_word_size_for_one_typo": new_settings.typo_tolerance - .as_ref() - .set() - .and_then(|s| s.min_word_size_for_typos - .as_ref() - .set() - .map(|s| s.one_typo.set())) - .flatten(), - "min_word_size_for_two_typos": new_settings.typo_tolerance - .as_ref() - .set() - .and_then(|s| s.min_word_size_for_typos - .as_ref() - .set() - .map(|s| s.two_typos.set())) - .flatten(), - }, - "faceting": { - 
"max_values_per_facet": new_settings.faceting - .as_ref() - .set() - .and_then(|s| s.max_values_per_facet.as_ref().set()), - "sort_facet_values_by_star_count": new_settings.faceting - .as_ref() - .set() - .and_then(|s| { - s.sort_facet_values_by.as_ref().set().map(|s| s.iter().any(|(k, v)| k == "*" && v == &FacetValuesSort::Count)) - }), - "sort_facet_values_by_total": new_settings.faceting - .as_ref() - .set() - .and_then(|s| s.sort_facet_values_by.as_ref().set().map(|s| s.len())), - }, - "pagination": { - "max_total_hits": new_settings.pagination - .as_ref() - .set() - .and_then(|s| s.max_total_hits.as_ref().set()), - }, - "stop_words": { - "total": new_settings.stop_words.as_ref().set().map(|stop_words| stop_words.len()), - }, - "synonyms": { - "total": new_settings.synonyms.as_ref().set().map(|synonyms| synonyms.len()), - }, - "embedders": crate::routes::indexes::settings::embedder_analytics(new_settings.embedders.as_ref().set()), - "search_cutoff_ms": new_settings.search_cutoff_ms.as_ref().set(), - "locales": new_settings.localized_attributes.as_ref().set().map(|rules| rules.iter().flat_map(|rule| rule.locales.iter().cloned()).collect::>()), - }), + SettingsAnalytics { + ranking_rules: RankingRulesAnalytics::new(new_settings.ranking_rules.as_ref().set()), + searchable_attributes: SearchableAttributesAnalytics::new( + new_settings.searchable_attributes.as_ref().set(), + ), + displayed_attributes: DisplayedAttributesAnalytics::new( + new_settings.displayed_attributes.as_ref().set(), + ), + sortable_attributes: SortableAttributesAnalytics::new( + new_settings.sortable_attributes.as_ref().set(), + ), + filterable_attributes: FilterableAttributesAnalytics::new( + new_settings.filterable_attributes.as_ref().set(), + ), + distinct_attribute: DistinctAttributeAnalytics::new( + new_settings.distinct_attribute.as_ref().set(), + ), + proximity_precision: ProximityPrecisionAnalytics::new( + new_settings.proximity_precision.as_ref().set(), + ), + typo_tolerance: 
TypoToleranceAnalytics::new(new_settings.typo_tolerance.as_ref().set()), + faceting: FacetingAnalytics::new(new_settings.faceting.as_ref().set()), + pagination: PaginationAnalytics::new(new_settings.pagination.as_ref().set()), + stop_words: StopWordsAnalytics::new(new_settings.stop_words.as_ref().set()), + synonyms: SynonymsAnalytics::new(new_settings.synonyms.as_ref().set()), + embedders: EmbeddersAnalytics::new(new_settings.embedders.as_ref().set()), + search_cutoff_ms: SearchCutoffMsAnalytics::new( + new_settings.search_cutoff_ms.as_ref().set(), + ), + locales: LocalesAnalytics::new(new_settings.localized_attributes.as_ref().set()), + dictionary: DictionaryAnalytics::new(new_settings.dictionary.as_ref().set()), + separator_tokens: SeparatorTokensAnalytics::new( + new_settings.separator_tokens.as_ref().set(), + ), + non_separator_tokens: NonSeparatorTokensAnalytics::new( + new_settings.non_separator_tokens.as_ref().set(), + ), + }, Some(&req), ); diff --git a/meilisearch/src/routes/swap_indexes.rs b/meilisearch/src/routes/swap_indexes.rs index 51a7b0707..34e904230 100644 --- a/meilisearch/src/routes/swap_indexes.rs +++ b/meilisearch/src/routes/swap_indexes.rs @@ -40,7 +40,7 @@ pub async fn swap_indexes( analytics.publish( "Indexes Swapped".to_string(), json!({ - "swap_operation_number": params.len(), + "swap_operation_number": params.len(), // Return the max ever encountered }), Some(&req), ); From e66fccc3f2e8c9ef9f576f9484d1135bf02716e6 Mon Sep 17 00:00:00 2001 From: Tamo Date: Wed, 16 Oct 2024 15:51:48 +0200 Subject: [PATCH 066/111] get rids of the analytics closure --- meilisearch/src/routes/indexes/settings.rs | 216 +++------------------ 1 file changed, 24 insertions(+), 192 deletions(-) diff --git a/meilisearch/src/routes/indexes/settings.rs b/meilisearch/src/routes/indexes/settings.rs index 112f8671b..db83cb39b 100644 --- a/meilisearch/src/routes/indexes/settings.rs +++ b/meilisearch/src/routes/indexes/settings.rs @@ -14,7 +14,6 @@ use 
meilisearch_types::settings::{ }; use meilisearch_types::tasks::KindWithContent; use serde::Serialize; -use serde_json::json; use tracing::debug; use crate::analytics::{Aggregate, Analytics}; @@ -25,7 +24,7 @@ use crate::Opt; #[macro_export] macro_rules! make_setting_route { - ($route:literal, $update_verb:ident, $type:ty, $err_ty:ty, $attr:ident, $camelcase_attr:literal, $analytics_var:ident, $analytics:expr) => { + ($route:literal, $update_verb:ident, $type:ty, $err_ty:ty, $attr:ident, $camelcase_attr:literal, $analytics:ident) => { pub mod $attr { use actix_web::web::Data; use actix_web::{web, HttpRequest, HttpResponse, Resource}; @@ -85,7 +84,7 @@ macro_rules! make_setting_route { body: deserr::actix_web::AwebJson, $err_ty>, req: HttpRequest, opt: web::Data, - $analytics_var: web::Data, + analytics: web::Data, ) -> std::result::Result { let index_uid = IndexUid::try_from(index_uid.into_inner())?; @@ -93,7 +92,10 @@ macro_rules! make_setting_route { debug!(parameters = ?body, "Update settings"); #[allow(clippy::redundant_closure_call)] - $analytics(&body, &req); + analytics.publish( + $crate::routes::indexes::settings::$analytics::new(body.as_ref()).to_settings(), + Some(&req), + ); let new_settings = Settings { $attr: match body { @@ -165,13 +167,7 @@ make_setting_route!( >, filterable_attributes, "filterableAttributes", - analytics, - |setting: &Option>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::FilterableAttributesAnalytics::new(setting.as_ref()).to_settings(), - Some(req), - ); - } + FilterableAttributesAnalytics ); make_setting_route!( @@ -183,13 +179,7 @@ make_setting_route!( >, sortable_attributes, "sortableAttributes", - analytics, - |setting: &Option>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::SortableAttributesAnalytics::new(setting.as_ref()).to_settings(), - Some(req), - ); - } + SortableAttributesAnalytics ); make_setting_route!( @@ -201,13 +191,7 @@ make_setting_route!( >, 
displayed_attributes, "displayedAttributes", - analytics, - |displayed: &Option>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::DisplayedAttributesAnalytics::new(displayed.as_ref()).to_settings(), - Some(req), - ); - } + DisplayedAttributesAnalytics ); make_setting_route!( @@ -219,13 +203,7 @@ make_setting_route!( >, typo_tolerance, "typoTolerance", - analytics, - |setting: &Option, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::TypoToleranceAnalytics::new(setting.as_ref()).to_settings(), - Some(req), - ); - } + TypoToleranceAnalytics ); make_setting_route!( @@ -237,13 +215,7 @@ make_setting_route!( >, searchable_attributes, "searchableAttributes", - analytics, - |setting: &Option>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::SearchableAttributesAnalytics::new(setting.as_ref()).to_settings(), - Some(req), - ); - } + SearchableAttributesAnalytics ); make_setting_route!( @@ -255,13 +227,7 @@ make_setting_route!( >, stop_words, "stopWords", - analytics, - |stop_words: &Option>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::StopWordsAnalytics::new(stop_words.as_ref()).to_settings(), - Some(req), - ); - } + StopWordsAnalytics ); make_setting_route!( @@ -273,13 +239,7 @@ make_setting_route!( >, non_separator_tokens, "nonSeparatorTokens", - analytics, - |non_separator_tokens: &Option>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::NonSeparatorTokensAnalytics::new(non_separator_tokens.as_ref()).to_settings(), - Some(req), - ); - } + NonSeparatorTokensAnalytics ); make_setting_route!( @@ -291,13 +251,7 @@ make_setting_route!( >, separator_tokens, "separatorTokens", - analytics, - |separator_tokens: &Option>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::SeparatorTokensAnalytics::new(separator_tokens.as_ref()).to_settings(), - Some(req), - ); - } + SeparatorTokensAnalytics ); 
make_setting_route!( @@ -309,13 +263,7 @@ make_setting_route!( >, dictionary, "dictionary", - analytics, - |dictionary: &Option>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::DictionaryAnalytics::new(dictionary.as_ref()).to_settings(), - Some(req), - ); - } + DictionaryAnalytics ); make_setting_route!( @@ -327,13 +275,7 @@ make_setting_route!( >, synonyms, "synonyms", - analytics, - |synonyms: &Option>>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::SynonymsAnalytics::new(synonyms.as_ref()).to_settings(), - Some(req), - ); - } + SynonymsAnalytics ); make_setting_route!( @@ -345,13 +287,7 @@ make_setting_route!( >, distinct_attribute, "distinctAttribute", - analytics, - |distinct: &Option, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::DistinctAttributeAnalytics::new(distinct.as_ref()).to_settings(), - Some(req), - ); - } + DistinctAttributeAnalytics ); make_setting_route!( @@ -363,13 +299,7 @@ make_setting_route!( >, proximity_precision, "proximityPrecision", - analytics, - |precision: &Option, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::ProximityPrecisionAnalytics::new(precision.as_ref()).to_settings(), - Some(req), - ); - } + ProximityPrecisionAnalytics ); make_setting_route!( @@ -381,13 +311,7 @@ make_setting_route!( >, localized_attributes, "localizedAttributes", - analytics, - |rules: &Option>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::LocalesAnalytics::new(rules.as_ref()).to_settings(), - Some(req), - ); - } + LocalesAnalytics ); make_setting_route!( @@ -399,13 +323,7 @@ make_setting_route!( >, ranking_rules, "rankingRules", - analytics, - |setting: &Option>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::RankingRulesAnalytics::new(setting.as_ref()).to_settings(), - Some(req), - ); - } + RankingRulesAnalytics ); make_setting_route!( @@ -417,13 +335,7 @@ 
make_setting_route!( >, faceting, "faceting", - analytics, - |setting: &Option, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::FacetingAnalytics::new(setting.as_ref()).to_settings(), - Some(req), - ); - } + FacetingAnalytics ); make_setting_route!( @@ -435,13 +347,7 @@ make_setting_route!( >, pagination, "pagination", - analytics, - |setting: &Option, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::PaginationAnalytics::new(setting.as_ref()).to_settings(), - Some(req), - ); - } + PaginationAnalytics ); make_setting_route!( @@ -453,77 +359,9 @@ make_setting_route!( >, embedders, "embedders", - analytics, - |setting: &Option>>, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::EmbeddersAnalytics::new(setting.as_ref()).to_settings(), - Some(req), - ); - } + EmbeddersAnalytics ); -fn embedder_analytics( - setting: Option< - &std::collections::BTreeMap< - String, - Setting, - >, - >, -) -> serde_json::Value { - let mut sources = std::collections::HashSet::new(); - - if let Some(s) = &setting { - for source in s - .values() - .filter_map(|config| config.clone().set()) - .filter_map(|config| config.source.set()) - { - use meilisearch_types::milli::vector::settings::EmbedderSource; - match source { - EmbedderSource::OpenAi => sources.insert("openAi"), - EmbedderSource::HuggingFace => sources.insert("huggingFace"), - EmbedderSource::UserProvided => sources.insert("userProvided"), - EmbedderSource::Ollama => sources.insert("ollama"), - EmbedderSource::Rest => sources.insert("rest"), - }; - } - }; - - let document_template_used = setting.as_ref().map(|map| { - map.values() - .filter_map(|config| config.clone().set()) - .any(|config| config.document_template.set().is_some()) - }); - - let document_template_max_bytes = setting.as_ref().and_then(|map| { - map.values() - .filter_map(|config| config.clone().set()) - .filter_map(|config| config.document_template_max_bytes.set()) - .max() - }); - 
- let binary_quantization_used = setting.as_ref().map(|map| { - map.values() - .filter_map(|config| config.clone().set()) - .any(|config| config.binary_quantized.set().is_some()) - }); - - json!( - { - // last - "total": setting.as_ref().map(|s| s.len()), - // Merge the sources - "sources": sources, - // |= - "document_template_used": document_template_used, - // max - "document_template_max_bytes": document_template_max_bytes, - // |= - "binary_quantization_used": binary_quantization_used, - } - ) -} - make_setting_route!( "/search-cutoff-ms", put, @@ -533,13 +371,7 @@ make_setting_route!( >, search_cutoff_ms, "searchCutoffMs", - analytics, - |setting: &Option, req: &HttpRequest| { - analytics.publish( - crate::routes::indexes::settings::SearchCutoffMsAnalytics::new(setting.as_ref()).to_settings(), - Some(req), - ); - } + SearchCutoffMsAnalytics ); macro_rules! generate_configure { From fdeb47fb549a242d318a17195e1a804e50aef5dd Mon Sep 17 00:00:00 2001 From: Tamo Date: Wed, 16 Oct 2024 17:16:33 +0200 Subject: [PATCH 067/111] implements all routes --- meilisearch/src/analytics/mod.rs | 14 +- .../src/analytics/segment_analytics.rs | 239 +++++++----------- meilisearch/src/routes/dump.rs | 2 +- meilisearch/src/routes/features.rs | 8 +- meilisearch/src/routes/indexes/documents.rs | 20 +- .../src/routes/indexes/facet_search.rs | 2 +- meilisearch/src/routes/indexes/mod.rs | 4 +- meilisearch/src/routes/indexes/search.rs | 4 +- meilisearch/src/routes/indexes/settings.rs | 152 ++++++----- meilisearch/src/routes/indexes/similar.rs | 13 +- meilisearch/src/routes/multi_search.rs | 6 +- meilisearch/src/routes/snapshot.rs | 7 +- meilisearch/src/routes/swap_indexes.rs | 32 ++- meilisearch/src/routes/tasks.rs | 129 +++++++--- 14 files changed, 337 insertions(+), 295 deletions(-) diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index a8658d830..a0ca47d8f 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -16,7 
+16,9 @@ use serde::Serialize; // if the feature analytics is enabled we use the real analytics pub type SegmentAnalytics = segment_analytics::SegmentAnalytics; pub use segment_analytics::SearchAggregator; -pub type SimilarAggregator = segment_analytics::SimilarAggregator; +pub use segment_analytics::SimilarAggregator; + +use self::segment_analytics::extract_user_agents; pub type MultiSearchAggregator = segment_analytics::MultiSearchAggregator; pub type FacetSearchAggregator = segment_analytics::FacetSearchAggregator; @@ -32,14 +34,11 @@ macro_rules! empty_analytics { $event_name } - fn aggregate(self, other: Self) -> Self - where - Self: Sized, - { + fn aggregate(self, _other: Self) -> Self { self } - fn into_event(self) -> serde_json::Value { + fn into_event(self) -> impl serde::Serialize { serde_json::json!({}) } } @@ -150,7 +149,8 @@ impl Analytics { } /// The method used to publish most analytics that do not need to be batched every hours - pub fn publish(&self, send: impl Aggregate, request: Option<&HttpRequest>) { + pub fn publish(&self, send: impl Aggregate, request: &HttpRequest) { let Some(segment) = self.inner else { return }; + let user_agents = extract_user_agents(request); } } diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 8a6dfd780..0572267e1 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -71,25 +71,8 @@ pub fn extract_user_agents(request: &HttpRequest) -> Vec { .collect() } -pub enum AnalyticsMsg { - BatchMessage(Track), - AggregateGetSearch(SearchAggregator), - AggregatePostSearch(SearchAggregator), - AggregateGetSimilar(SimilarAggregator), - AggregatePostSimilar(SimilarAggregator), - AggregatePostMultiSearch(MultiSearchAggregator), - AggregatePostFacetSearch(FacetSearchAggregator), - AggregateAddDocuments(DocumentsAggregator), - AggregateDeleteDocuments(DocumentsDeletionAggregator), - 
AggregateUpdateDocuments(DocumentsAggregator), - AggregateEditDocumentsByFunction(EditDocumentsByFunctionAggregator), - AggregateGetFetchDocuments(DocumentsFetchAggregator), - AggregatePostFetchDocuments(DocumentsFetchAggregator), -} - pub struct SegmentAnalytics { pub instance_uid: InstanceUid, - sender: Sender, pub user: User, } @@ -1083,8 +1066,6 @@ impl Aggregate for SearchAggregator { #[derive(Default)] pub struct MultiSearchAggregator { - timestamp: Option, - // requests total_received: usize, total_succeeded: usize, @@ -1103,9 +1084,6 @@ pub struct MultiSearchAggregator { // federation use_federation: bool, - - // context - user_agents: HashSet, } impl MultiSearchAggregator { @@ -1113,10 +1091,6 @@ impl MultiSearchAggregator { federated_search: &FederatedSearch, request: &HttpRequest, ) -> Self { - let timestamp = Some(OffsetDateTime::now_utc()); - - let user_agents = extract_user_agents(request).into_iter().collect(); - let use_federation = federated_search.federation.is_some(); let distinct_indexes: HashSet<_> = federated_search @@ -1166,7 +1140,6 @@ impl MultiSearchAggregator { federated_search.queries.iter().any(|query| query.show_ranking_score_details); Self { - timestamp, total_received: 1, total_succeeded: 0, total_distinct_index_count: distinct_indexes.len(), @@ -1174,7 +1147,6 @@ impl MultiSearchAggregator { total_search_count: federated_search.queries.len(), show_ranking_score, show_ranking_score_details, - user_agents, use_federation, } } @@ -1182,15 +1154,20 @@ impl MultiSearchAggregator { pub fn succeed(&mut self) { self.total_succeeded = self.total_succeeded.saturating_add(1); } +} + +impl Aggregate for MultiSearchAggregator { + fn event_name(&self) -> &'static str { + "Documents Searched by Multi-Search POST" + } /// Aggregate one [MultiSearchAggregator] into another. 
- pub fn aggregate(&mut self, other: Self) { + fn aggregate(mut self, other: Self) -> Self { // write the aggregate in a way that will cause a compilation error if a field is added. // get ownership of self, replacing it by a default value. - let this = std::mem::take(self); + let this = self; - let timestamp = this.timestamp.or(other.timestamp); let total_received = this.total_received.saturating_add(other.total_received); let total_succeeded = this.total_succeeded.saturating_add(other.total_succeeded); let total_distinct_index_count = @@ -1207,75 +1184,53 @@ impl MultiSearchAggregator { user_agents.insert(user_agent); } - // need all fields or compile error - let mut aggregated = Self { - timestamp, + Self { total_received, total_succeeded, total_distinct_index_count, total_single_index, total_search_count, - user_agents, show_ranking_score, show_ranking_score_details, use_federation, - // do not add _ or ..Default::default() here - }; - - // replace the default self with the aggregated value - std::mem::swap(self, &mut aggregated); + } } - pub fn into_event(self, user: &User, event_name: &str) -> Option { + fn into_event(self) -> impl Serialize { let Self { - timestamp, total_received, total_succeeded, total_distinct_index_count, total_single_index, total_search_count, - user_agents, show_ranking_score, show_ranking_score_details, use_federation, } = self; - if total_received == 0 { - None - } else { - let properties = json!({ - "user-agent": user_agents, - "requests": { - "total_succeeded": total_succeeded, - "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics - "total_received": total_received, - }, - "indexes": { - "total_single_index": total_single_index, - "total_distinct_index_count": total_distinct_index_count, - "avg_distinct_index_count": (total_distinct_index_count as f64) / (total_received as f64), // not 0 else returned early - }, - "searches": { - "total_search_count": total_search_count, - 
"avg_search_count": (total_search_count as f64) / (total_received as f64), - }, - "scoring": { - "show_ranking_score": show_ranking_score, - "show_ranking_score_details": show_ranking_score_details, - }, - "federation": { - "use_federation": use_federation, - } - }); - - Some(Track { - timestamp, - user: user.clone(), - event: event_name.to_string(), - properties, - ..Default::default() - }) - } + json!({ + "requests": { + "total_succeeded": total_succeeded, + "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics + "total_received": total_received, + }, + "indexes": { + "total_single_index": total_single_index, + "total_distinct_index_count": total_distinct_index_count, + "avg_distinct_index_count": (total_distinct_index_count as f64) / (total_received as f64), // not 0 else returned early + }, + "searches": { + "total_search_count": total_search_count, + "avg_search_count": (total_search_count as f64) / (total_received as f64), + }, + "scoring": { + "show_ranking_score": show_ranking_score, + "show_ranking_score_details": show_ranking_score_details, + }, + "federation": { + "use_federation": use_federation, + } + }) } } @@ -1752,13 +1707,13 @@ impl DocumentsFetchAggregator { } } +aggregate_methods!( + SimilarPOST => "Similar POST", + SimilarGET => "Similar GET", +); + #[derive(Default)] -pub struct SimilarAggregator { - timestamp: Option, - - // context - user_agents: HashSet, - +pub struct SimilarAggregator { // requests total_received: usize, total_succeeded: usize, @@ -1787,9 +1742,11 @@ pub struct SimilarAggregator { show_ranking_score: bool, show_ranking_score_details: bool, ranking_score_threshold: bool, + + marker: std::marker::PhantomData, } -impl SimilarAggregator { +impl SimilarAggregator { #[allow(clippy::field_reassign_with_default)] pub fn from_query(query: &SimilarQuery, request: &HttpRequest) -> Self { let SimilarQuery { @@ -1854,12 +1811,16 @@ impl SimilarAggregator { 
self.time_spent.push(*processing_time_ms as usize); } +} + +impl Aggregate for SimilarAggregator { + fn event_name(&self) -> &'static str { + Method::event_name() + } /// Aggregate one [SimilarAggregator] into another. - pub fn aggregate(&mut self, mut other: Self) { + fn aggregate(mut self, mut other: Self) -> Self { let Self { - timestamp, - user_agents, total_received, total_succeeded, ref mut time_spent, @@ -1875,17 +1836,9 @@ impl SimilarAggregator { show_ranking_score_details, ranking_score_threshold, retrieve_vectors, + marker: _, } = other; - if self.timestamp.is_none() { - self.timestamp = timestamp; - } - - // context - for user_agent in user_agents.into_iter() { - self.user_agents.insert(user_agent); - } - // request self.total_received = self.total_received.saturating_add(total_received); self.total_succeeded = self.total_succeeded.saturating_add(total_succeeded); @@ -1917,12 +1870,12 @@ impl SimilarAggregator { self.show_ranking_score |= show_ranking_score; self.show_ranking_score_details |= show_ranking_score_details; self.ranking_score_threshold |= ranking_score_threshold; + + self } - pub fn into_event(self, user: &User, event_name: &str) -> Option { + fn into_event(self) -> impl Serialize { let Self { - timestamp, - user_agents, total_received, total_succeeded, time_spent, @@ -1938,56 +1891,44 @@ impl SimilarAggregator { show_ranking_score_details, ranking_score_threshold, retrieve_vectors, + marker: _, } = self; - if total_received == 0 { - None - } else { - // we get all the values in a sorted manner - let time_spent = time_spent.into_sorted_vec(); - // the index of the 99th percentage of value - let percentile_99th = time_spent.len() * 99 / 100; - // We are only interested by the slowest value of the 99th fastest results - let time_spent = time_spent.get(percentile_99th); + // we get all the values in a sorted manner + let time_spent = time_spent.into_sorted_vec(); + // the index of the 99th percentage of value + let percentile_99th = 
time_spent.len() * 99 / 100; + // We are only interested by the slowest value of the 99th fastest results + let time_spent = time_spent.get(percentile_99th); - let properties = json!({ - "user-agent": user_agents, - "requests": { - "99th_response_time": time_spent.map(|t| format!("{:.2}", t)), - "total_succeeded": total_succeeded, - "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics - "total_received": total_received, - }, - "filter": { - "with_geoRadius": filter_with_geo_radius, - "with_geoBoundingBox": filter_with_geo_bounding_box, - "avg_criteria_number": format!("{:.2}", filter_sum_of_criteria_terms as f64 / filter_total_number_of_criteria as f64), - "most_used_syntax": used_syntax.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), - }, - "vector": { - "retrieve_vectors": retrieve_vectors, - }, - "pagination": { - "max_limit": max_limit, - "max_offset": max_offset, - }, - "formatting": { - "max_attributes_to_retrieve": max_attributes_to_retrieve, - }, - "scoring": { - "show_ranking_score": show_ranking_score, - "show_ranking_score_details": show_ranking_score_details, - "ranking_score_threshold": ranking_score_threshold, - }, - }); - - Some(Track { - timestamp, - user: user.clone(), - event: event_name.to_string(), - properties, - ..Default::default() - }) - } + json!({ + "requests": { + "99th_response_time": time_spent.map(|t| format!("{:.2}", t)), + "total_succeeded": total_succeeded, + "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics + "total_received": total_received, + }, + "filter": { + "with_geoRadius": filter_with_geo_radius, + "with_geoBoundingBox": filter_with_geo_bounding_box, + "avg_criteria_number": format!("{:.2}", filter_sum_of_criteria_terms as f64 / filter_total_number_of_criteria as f64), + "most_used_syntax": used_syntax.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), + }, + 
"vector": { + "retrieve_vectors": retrieve_vectors, + }, + "pagination": { + "max_limit": max_limit, + "max_offset": max_offset, + }, + "formatting": { + "max_attributes_to_retrieve": max_attributes_to_retrieve, + }, + "scoring": { + "show_ranking_score": show_ranking_score, + "show_ranking_score_details": show_ranking_score_details, + "ranking_score_threshold": ranking_score_threshold, + } + }) } } diff --git a/meilisearch/src/routes/dump.rs b/meilisearch/src/routes/dump.rs index 0fdeef5ed..c78dc4dad 100644 --- a/meilisearch/src/routes/dump.rs +++ b/meilisearch/src/routes/dump.rs @@ -26,7 +26,7 @@ pub async fn create_dump( opt: web::Data, analytics: web::Data, ) -> Result { - analytics.publish(DumpAnalytics::default(), Some(&req)); + analytics.publish(DumpAnalytics::default(), &req); let task = KindWithContent::DumpCreation { keys: auth_controller.list_keys()?, diff --git a/meilisearch/src/routes/features.rs b/meilisearch/src/routes/features.rs index 24c89938d..4ee5b37b0 100644 --- a/meilisearch/src/routes/features.rs +++ b/meilisearch/src/routes/features.rs @@ -35,7 +35,7 @@ async fn get_features( ) -> HttpResponse { let features = index_scheduler.features(); - analytics.publish(GetExperimentalFeatureAnalytics::default(), Some(&req)); + analytics.publish(GetExperimentalFeatureAnalytics::default(), &req); let features = features.runtime_features(); debug!(returns = ?features, "Get features"); HttpResponse::Ok().json(features) @@ -83,8 +83,8 @@ impl Aggregate for PatchExperimentalFeatureAnalytics { } } - fn into_event(self) -> serde_json::Value { - serde_json::to_value(self).unwrap() + fn into_event(self) -> impl Serialize { + self } } @@ -131,7 +131,7 @@ async fn patch_features( edit_documents_by_function, contains_filter, }, - Some(&req), + &req, ); index_scheduler.put_runtime_features(new_features)?; debug!(returns = ?new_features, "Patch features"); diff --git a/meilisearch/src/routes/indexes/documents.rs b/meilisearch/src/routes/indexes/documents.rs index 
8f4cd026d..6dece61e6 100644 --- a/meilisearch/src/routes/indexes/documents.rs +++ b/meilisearch/src/routes/indexes/documents.rs @@ -194,7 +194,7 @@ pub async fn get_document( retrieve_vectors: param_retrieve_vectors.0, ..Default::default() }, - Some(&req), + &req, ); let index = index_scheduler.index(&index_uid)?; @@ -253,7 +253,7 @@ pub async fn delete_document( per_document_id: true, ..Default::default() }, - Some(&req), + &req, ); let task = KindWithContent::DocumentDeletion { @@ -319,7 +319,7 @@ pub async fn documents_by_query_post( max_offset: body.offset, ..Default::default() }, - Some(&req), + &req, ); documents_by_query(&index_scheduler, index_uid, body) @@ -361,7 +361,7 @@ pub async fn get_documents( max_offset: query.offset, ..Default::default() }, - Some(&req), + &req, ); documents_by_query(&index_scheduler, index_uid, query) @@ -486,7 +486,7 @@ pub async fn replace_documents( index_creation: index_scheduler.index_exists(&index_uid).map_or(true, |x| !x), method: PhantomData, }, - Some(&req), + &req, ); let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid); @@ -543,7 +543,7 @@ pub async fn update_documents( index_creation: index_scheduler.index_exists(&index_uid).map_or(true, |x| !x), method: PhantomData, }, - Some(&req), + &req, ); let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid); @@ -718,7 +718,7 @@ pub async fn delete_documents_batch( analytics.publish( DocumentsDeletionAggregator { total_received: 1, per_batch: true, ..Default::default() }, - Some(&req), + &req, ); let ids = body @@ -761,7 +761,7 @@ pub async fn delete_documents_by_filter( analytics.publish( DocumentsDeletionAggregator { total_received: 1, per_filter: true, ..Default::default() }, - Some(&req), + &req, ); // we ensure the filter is well formed before enqueuing it @@ -847,7 +847,7 @@ pub async fn edit_documents_by_function( with_context: params.context.is_some(), index_creation: 
index_scheduler.index(&index_uid).is_err(), }, - Some(&req), + &req, ); let DocumentEditionByFunction { filter, context, function } = params; @@ -902,7 +902,7 @@ pub async fn clear_all_documents( let index_uid = IndexUid::try_from(index_uid.into_inner())?; analytics.publish( DocumentsDeletionAggregator { total_received: 1, clear_all: true, ..Default::default() }, - Some(&req), + &req, ); let task = KindWithContent::DocumentClear { index_uid: index_uid.to_string() }; diff --git a/meilisearch/src/routes/indexes/facet_search.rs b/meilisearch/src/routes/indexes/facet_search.rs index 1e9d0e15e..f3c74a388 100644 --- a/meilisearch/src/routes/indexes/facet_search.rs +++ b/meilisearch/src/routes/indexes/facet_search.rs @@ -200,7 +200,7 @@ pub async fn search( if let Ok(ref search_result) = search_result { aggregate.succeed(search_result); } - analytics.publish(aggregate, Some(&req)); + analytics.publish(aggregate, &req); let search_result = search_result?; diff --git a/meilisearch/src/routes/indexes/mod.rs b/meilisearch/src/routes/indexes/mod.rs index 483a48a16..f926f663c 100644 --- a/meilisearch/src/routes/indexes/mod.rs +++ b/meilisearch/src/routes/indexes/mod.rs @@ -160,7 +160,7 @@ pub async fn create_index( if allow_index_creation { analytics.publish( IndexCreatedAggregate { primary_key: primary_key.iter().cloned().collect() }, - Some(&req), + &req, ); let task = KindWithContent::IndexCreation { index_uid: uid.to_string(), primary_key }; @@ -247,7 +247,7 @@ pub async fn update_index( let body = body.into_inner(); analytics.publish( IndexUpdatedAggregate { primary_key: body.primary_key.iter().cloned().collect() }, - Some(&req), + &req, ); let task = KindWithContent::IndexUpdate { diff --git a/meilisearch/src/routes/indexes/search.rs b/meilisearch/src/routes/indexes/search.rs index f833a57d2..538c46fd0 100644 --- a/meilisearch/src/routes/indexes/search.rs +++ b/meilisearch/src/routes/indexes/search.rs @@ -255,7 +255,7 @@ pub async fn search_with_url_query( if let Ok(ref 
search_result) = search_result { aggregate.succeed(search_result); } - analytics.publish(aggregate, Some(&req)); + analytics.publish(aggregate, &req); let search_result = search_result?; @@ -303,7 +303,7 @@ pub async fn search_with_post( MEILISEARCH_DEGRADED_SEARCH_REQUESTS.inc(); } } - analytics.publish(aggregate, Some(&req)); + analytics.publish(aggregate, &req); let search_result = search_result?; diff --git a/meilisearch/src/routes/indexes/settings.rs b/meilisearch/src/routes/indexes/settings.rs index db83cb39b..bb2f6792d 100644 --- a/meilisearch/src/routes/indexes/settings.rs +++ b/meilisearch/src/routes/indexes/settings.rs @@ -8,6 +8,7 @@ use meilisearch_types::deserr::DeserrJsonError; use meilisearch_types::error::ResponseError; use meilisearch_types::facet_values_sort::FacetValuesSort; use meilisearch_types::index_uid::IndexUid; +use meilisearch_types::locales::Locale; use meilisearch_types::milli::update::Setting; use meilisearch_types::settings::{ settings, ProximityPrecisionView, RankingRuleView, SecretPolicy, Settings, Unchecked, @@ -94,7 +95,7 @@ macro_rules! 
make_setting_route { #[allow(clippy::redundant_closure_call)] analytics.publish( $crate::routes::indexes::settings::$analytics::new(body.as_ref()).to_settings(), - Some(&req), + &req, ); let new_settings = Settings { @@ -491,11 +492,11 @@ impl Aggregate for SettingsAnalytics { has_geo: self.filterable_attributes.has_geo.or(other.filterable_attributes.has_geo), }, distinct_attribute: DistinctAttributeAnalytics { - set: self.distinct_attribute.set.or(other.distinct_attribute.set), + set: self.distinct_attribute.set | other.distinct_attribute.set, }, proximity_precision: ProximityPrecisionAnalytics { - set: self.proximity_precision.set(other.proximity_precision.set), - value: self.proximity_precision.value(other.proximity_precision.value), + set: self.proximity_precision.set | other.proximity_precision.set, + value: self.proximity_precision.value.or(other.proximity_precision.value), }, typo_tolerance: TypoToleranceAnalytics { enabled: self.typo_tolerance.enabled.or(other.typo_tolerance.enabled), @@ -542,7 +543,7 @@ impl Aggregate for SettingsAnalytics { sources: match (self.embedders.sources, other.embedders.sources) { (None, None) => None, (Some(sources), None) | (None, Some(sources)) => Some(sources), - (Some(this), Some(other)) => Some(this.union(&other).collect()), + (Some(this), Some(other)) => Some(this.union(&other).cloned().collect()), }, document_template_used: match ( self.embedders.document_template_used, @@ -598,45 +599,70 @@ impl Aggregate for SettingsAnalytics { #[derive(Serialize, Default)] struct RankingRulesAnalytics { - words_position: Option, - typo_position: Option, - proximity_position: Option, - attribute_position: Option, - sort_position: Option, - exactness_position: Option, - values: Option, + words_position: Option, + typo_position: Option, + proximity_position: Option, + attribute_position: Option, + sort_position: Option, + exactness_position: Option, + values: Option, } impl RankingRulesAnalytics { pub fn new(rr: Option<&Vec>) -> Self { 
RankingRulesAnalytics { - words_position: rr.as_ref().map(|rr| { - rr.iter() - .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Words)) - }), - typo_position: rr.as_ref().map(|rr| { - rr.iter() - .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Typo)) - }), - proximity_position: rr.as_ref().map(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Proximity) + words_position: rr + .as_ref() + .map(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Words) + }) }) - }), - attribute_position: rr.as_ref().map(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Attribute) + .flatten(), + + typo_position: rr + .as_ref() + .map(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Typo) + }) }) - }), - sort_position: rr.as_ref().map(|rr| { - rr.iter() - .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Sort)) - }), - exactness_position: rr.as_ref().map(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Exactness) + .flatten(), + + proximity_position: rr + .as_ref() + .map(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Proximity) + }) }) - }), + .flatten(), + + attribute_position: rr + .as_ref() + .map(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Attribute) + }) + }) + .flatten(), + sort_position: rr + .as_ref() + .map(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Sort) + }) + }) + .flatten(), + exactness_position: rr + .as_ref() + .map(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Exactness) + }) + }) + .flatten(), + values: rr.as_ref().map(|rr| { rr.iter() .filter(|s| { @@ -661,7 +687,7 @@ impl 
RankingRulesAnalytics { #[derive(Serialize, Default)] struct SearchableAttributesAnalytics { total: Option, - with_wildcard: bool, + with_wildcard: Option, } impl SearchableAttributesAnalytics { @@ -681,8 +707,8 @@ impl SearchableAttributesAnalytics { #[derive(Serialize, Default)] struct DisplayedAttributesAnalytics { - total: usize, - with_wildcard: bool, + total: Option, + with_wildcard: Option, } impl DisplayedAttributesAnalytics { @@ -702,8 +728,8 @@ impl DisplayedAttributesAnalytics { #[derive(Serialize, Default)] struct SortableAttributesAnalytics { - total: usize, - has_geo: bool, + total: Option, + has_geo: Option, } impl SortableAttributesAnalytics { @@ -721,15 +747,15 @@ impl SortableAttributesAnalytics { #[derive(Serialize, Default)] struct FilterableAttributesAnalytics { - total: usize, - has_geo: bool, + total: Option, + has_geo: Option, } impl FilterableAttributesAnalytics { pub fn new(setting: Option<&std::collections::BTreeSet>) -> Self { Self { - total: setting.as_ref().map(|filter| filter.len()).unwrap_or(0), - has_geo: setting.as_ref().map(|filter| filter.contains("_geo")).unwrap_or(false), + total: setting.as_ref().map(|filter| filter.len()), + has_geo: setting.as_ref().map(|filter| filter.contains("_geo")), } } @@ -761,7 +787,7 @@ struct ProximityPrecisionAnalytics { impl ProximityPrecisionAnalytics { pub fn new(precision: Option<&meilisearch_types::settings::ProximityPrecisionView>) -> Self { - Self { set: precision.is_some(), value: precision.unwrap_or_default() } + Self { set: precision.is_some(), value: precision.cloned() } } pub fn to_settings(self) -> SettingsAnalytics { @@ -774,8 +800,8 @@ struct TypoToleranceAnalytics { enabled: Option, disable_on_attributes: Option, disable_on_words: Option, - min_word_size_for_one_typo: Option, - min_word_size_for_two_typos: Option, + min_word_size_for_one_typo: Option, + min_word_size_for_two_typos: Option, } impl TypoToleranceAnalytics { @@ -805,9 +831,9 @@ impl TypoToleranceAnalytics { 
#[derive(Serialize, Default)] struct FacetingAnalytics { - max_values_per_facet: Option, + max_values_per_facet: Option, sort_facet_values_by_star_count: Option, - sort_facet_values_by_total: Option, + sort_facet_values_by_total: Option, } impl FacetingAnalytics { @@ -833,7 +859,7 @@ impl FacetingAnalytics { #[derive(Serialize, Default)] struct PaginationAnalytics { - max_total_hits: Option, + max_total_hits: Option, } impl PaginationAnalytics { @@ -909,18 +935,18 @@ impl EmbeddersAnalytics { { use meilisearch_types::milli::vector::settings::EmbedderSource; match source { - EmbedderSource::OpenAi => sources.insert("openAi"), - EmbedderSource::HuggingFace => sources.insert("huggingFace"), - EmbedderSource::UserProvided => sources.insert("userProvided"), - EmbedderSource::Ollama => sources.insert("ollama"), - EmbedderSource::Rest => sources.insert("rest"), + EmbedderSource::OpenAi => sources.insert("openAi".to_string()), + EmbedderSource::HuggingFace => sources.insert("huggingFace".to_string()), + EmbedderSource::UserProvided => sources.insert("userProvided".to_string()), + EmbedderSource::Ollama => sources.insert("ollama".to_string()), + EmbedderSource::Rest => sources.insert("rest".to_string()), }; } }; Self { total: setting.as_ref().map(|s| s.len()), - sources, + sources: Some(sources), document_template_used: setting.as_ref().map(|map| { map.values() .filter_map(|config| config.clone().set()) @@ -953,7 +979,7 @@ struct SearchCutoffMsAnalytics { impl SearchCutoffMsAnalytics { pub fn new(setting: Option<&u64>) -> Self { - Self { search_cutoff_ms: setting } + Self { search_cutoff_ms: setting.copied() } } pub fn to_settings(self) -> SettingsAnalytics { @@ -964,7 +990,7 @@ impl SearchCutoffMsAnalytics { #[derive(Serialize, Default)] #[serde(transparent)] struct LocalesAnalytics { - locales: BTreeSet, + locales: Option>, } impl LocalesAnalytics { @@ -988,7 +1014,7 @@ impl LocalesAnalytics { #[derive(Serialize, Default)] struct DictionaryAnalytics { - total: usize, + 
total: Option, } impl DictionaryAnalytics { @@ -1003,7 +1029,7 @@ impl DictionaryAnalytics { #[derive(Serialize, Default)] struct SeparatorTokensAnalytics { - total: usize, + total: Option, } impl SeparatorTokensAnalytics { @@ -1018,7 +1044,7 @@ impl SeparatorTokensAnalytics { #[derive(Serialize, Default)] struct NonSeparatorTokensAnalytics { - total: usize, + total: Option, } impl NonSeparatorTokensAnalytics { @@ -1088,7 +1114,7 @@ pub async fn update_all( new_settings.non_separator_tokens.as_ref().set(), ), }, - Some(&req), + &req, ); let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid); diff --git a/meilisearch/src/routes/indexes/similar.rs b/meilisearch/src/routes/indexes/similar.rs index f94a02987..91c435254 100644 --- a/meilisearch/src/routes/indexes/similar.rs +++ b/meilisearch/src/routes/indexes/similar.rs @@ -13,6 +13,7 @@ use serde_json::Value; use tracing::debug; use super::ActionPolicy; +use crate::analytics::segment_analytics::{SimilarGET, SimilarPOST}; use crate::analytics::{Analytics, SimilarAggregator}; use crate::extractors::authentication::GuardedData; use crate::extractors::sequential_extractor::SeqHandler; @@ -34,13 +35,13 @@ pub async fn similar_get( index_uid: web::Path, params: AwebQueryParameter, req: HttpRequest, - analytics: web::Data, + analytics: web::Data, ) -> Result { let index_uid = IndexUid::try_from(index_uid.into_inner())?; let query = params.0.try_into()?; - let mut aggregate = SimilarAggregator::from_query(&query, &req); + let mut aggregate = SimilarAggregator::::from_query(&query, &req); debug!(parameters = ?query, "Similar get"); @@ -49,7 +50,7 @@ pub async fn similar_get( if let Ok(similar) = &similar { aggregate.succeed(similar); } - analytics.get_similar(aggregate); + analytics.publish(aggregate, &req); let similar = similar?; @@ -62,21 +63,21 @@ pub async fn similar_post( index_uid: web::Path, params: AwebJson, req: HttpRequest, - analytics: web::Data, + analytics: web::Data, ) -> Result { 
let index_uid = IndexUid::try_from(index_uid.into_inner())?; let query = params.into_inner(); debug!(parameters = ?query, "Similar post"); - let mut aggregate = SimilarAggregator::from_query(&query, &req); + let mut aggregate = SimilarAggregator::::from_query(&query, &req); let similar = similar(index_scheduler, index_uid, query).await; if let Ok(similar) = &similar { aggregate.succeed(similar); } - analytics.post_similar(aggregate); + analytics.publish(aggregate, &req); let similar = similar?; diff --git a/meilisearch/src/routes/multi_search.rs b/meilisearch/src/routes/multi_search.rs index 5fcb868c6..994c256d2 100644 --- a/meilisearch/src/routes/multi_search.rs +++ b/meilisearch/src/routes/multi_search.rs @@ -35,7 +35,7 @@ pub async fn multi_search_with_post( search_queue: Data, params: AwebJson, req: HttpRequest, - analytics: web::Data, + analytics: web::Data, ) -> Result { // Since we don't want to process half of the search requests and then get a permit refused // we're going to get one permit for the whole duration of the multi-search request. @@ -87,7 +87,7 @@ pub async fn multi_search_with_post( multi_aggregate.succeed(); } - analytics.post_multi_search(multi_aggregate); + analytics.publish(multi_aggregate, &req); HttpResponse::Ok().json(search_result??) } None => { @@ -149,7 +149,7 @@ pub async fn multi_search_with_post( if search_results.is_ok() { multi_aggregate.succeed(); } - analytics.post_multi_search(multi_aggregate); + analytics.publish(multi_aggregate, &req); let search_results = search_results.map_err(|(mut err, query_index)| { // Add the query index that failed as context for the error message. 
diff --git a/meilisearch/src/routes/snapshot.rs b/meilisearch/src/routes/snapshot.rs index 84673729f..cacbc41af 100644 --- a/meilisearch/src/routes/snapshot.rs +++ b/meilisearch/src/routes/snapshot.rs @@ -3,7 +3,6 @@ use actix_web::{web, HttpRequest, HttpResponse}; use index_scheduler::IndexScheduler; use meilisearch_types::error::ResponseError; use meilisearch_types::tasks::KindWithContent; -use serde_json::json; use tracing::debug; use crate::analytics::Analytics; @@ -17,13 +16,15 @@ pub fn configure(cfg: &mut web::ServiceConfig) { cfg.service(web::resource("").route(web::post().to(SeqHandler(create_snapshot)))); } +crate::empty_analytics!(SnapshotAnalytics, "Snapshot Created"); + pub async fn create_snapshot( index_scheduler: GuardedData, Data>, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { - analytics.publish("Snapshot Created".to_string(), json!({}), Some(&req)); + analytics.publish(SnapshotAnalytics::default(), &req); let task = KindWithContent::SnapshotCreation; let uid = get_task_id(&req, &opt)?; diff --git a/meilisearch/src/routes/swap_indexes.rs b/meilisearch/src/routes/swap_indexes.rs index 34e904230..42ebd7858 100644 --- a/meilisearch/src/routes/swap_indexes.rs +++ b/meilisearch/src/routes/swap_indexes.rs @@ -8,10 +8,11 @@ use meilisearch_types::error::deserr_codes::InvalidSwapIndexes; use meilisearch_types::error::ResponseError; use meilisearch_types::index_uid::IndexUid; use meilisearch_types::tasks::{IndexSwap, KindWithContent}; +use serde::Serialize; use serde_json::json; use super::{get_task_id, is_dry_run, SummarizedTaskView}; -use crate::analytics::Analytics; +use crate::analytics::{Aggregate, Analytics}; use crate::error::MeilisearchHttpError; use crate::extractors::authentication::policies::*; use crate::extractors::authentication::{AuthenticationError, GuardedData}; @@ -29,21 +30,34 @@ pub struct SwapIndexesPayload { indexes: Vec, } +#[derive(Serialize)] +struct IndexSwappedAnalytics { + 
swap_operation_number: usize, +} + +impl Aggregate for IndexSwappedAnalytics { + fn event_name(&self) -> &'static str { + "Indexes Swapped" + } + + fn aggregate(self, other: Self) -> Self { + Self { swap_operation_number: self.swap_operation_number.max(other.swap_operation_number) } + } + + fn into_event(self) -> impl Serialize { + self + } +} + pub async fn swap_indexes( index_scheduler: GuardedData, Data>, params: AwebJson, DeserrJsonError>, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { let params = params.into_inner(); - analytics.publish( - "Indexes Swapped".to_string(), - json!({ - "swap_operation_number": params.len(), // Return the max ever encountered - }), - Some(&req), - ); + analytics.publish(IndexSwappedAnalytics { swap_operation_number: params.len() }, &req); let filters = index_scheduler.filters(); let mut swaps = vec![]; diff --git a/meilisearch/src/routes/tasks.rs b/meilisearch/src/routes/tasks.rs index 3dc6520af..162d19ca1 100644 --- a/meilisearch/src/routes/tasks.rs +++ b/meilisearch/src/routes/tasks.rs @@ -12,18 +12,17 @@ use meilisearch_types::star_or::{OptionStarOr, OptionStarOrList}; use meilisearch_types::task_view::TaskView; use meilisearch_types::tasks::{Kind, KindWithContent, Status}; use serde::Serialize; -use serde_json::json; use time::format_description::well_known::Rfc3339; use time::macros::format_description; use time::{Date, Duration, OffsetDateTime, Time}; use tokio::task; use super::{get_task_id, is_dry_run, SummarizedTaskView}; -use crate::analytics::Analytics; +use crate::analytics::{Aggregate, AggregateMethod, Analytics}; use crate::extractors::authentication::policies::*; use crate::extractors::authentication::GuardedData; use crate::extractors::sequential_extractor::SeqHandler; -use crate::Opt; +use crate::{aggregate_methods, Opt}; const DEFAULT_LIMIT: u32 = 20; @@ -158,12 +157,69 @@ impl TaskDeletionOrCancelationQuery { } } +aggregate_methods!( + CancelTasks => "Tasks 
Canceled", + DeleteTasks => "Tasks Deleted", +); + +#[derive(Serialize)] +struct TaskFilterAnalytics { + filtered_by_uid: bool, + filtered_by_index_uid: bool, + filtered_by_type: bool, + filtered_by_status: bool, + filtered_by_canceled_by: bool, + filtered_by_before_enqueued_at: bool, + filtered_by_after_enqueued_at: bool, + filtered_by_before_started_at: bool, + filtered_by_after_started_at: bool, + filtered_by_before_finished_at: bool, + filtered_by_after_finished_at: bool, + + #[serde(skip)] + marker: std::marker::PhantomData, +} + +impl Aggregate for TaskFilterAnalytics { + fn event_name(&self) -> &'static str { + Method::event_name() + } + + fn aggregate(self, other: Self) -> Self { + Self { + filtered_by_uid: self.filtered_by_uid | other.filtered_by_uid, + filtered_by_index_uid: self.filtered_by_index_uid | other.filtered_by_index_uid, + filtered_by_type: self.filtered_by_type | other.filtered_by_type, + filtered_by_status: self.filtered_by_status | other.filtered_by_status, + filtered_by_canceled_by: self.filtered_by_canceled_by | other.filtered_by_canceled_by, + filtered_by_before_enqueued_at: self.filtered_by_before_enqueued_at + | other.filtered_by_before_enqueued_at, + filtered_by_after_enqueued_at: self.filtered_by_after_enqueued_at + | other.filtered_by_after_enqueued_at, + filtered_by_before_started_at: self.filtered_by_before_started_at + | other.filtered_by_before_started_at, + filtered_by_after_started_at: self.filtered_by_after_started_at + | other.filtered_by_after_started_at, + filtered_by_before_finished_at: self.filtered_by_before_finished_at + | other.filtered_by_before_finished_at, + filtered_by_after_finished_at: self.filtered_by_after_finished_at + | other.filtered_by_after_finished_at, + + marker: std::marker::PhantomData, + } + } + + fn into_event(self) -> impl Serialize { + self + } +} + async fn cancel_tasks( index_scheduler: GuardedData, Data>, params: AwebQueryParameter, req: HttpRequest, opt: web::Data, - analytics: web::Data, + 
analytics: web::Data, ) -> Result { let params = params.into_inner(); @@ -172,21 +228,22 @@ async fn cancel_tasks( } analytics.publish( - "Tasks Canceled".to_string(), - json!({ - "filtered_by_uid": params.uids.is_some(), - "filtered_by_index_uid": params.index_uids.is_some(), - "filtered_by_type": params.types.is_some(), - "filtered_by_status": params.statuses.is_some(), - "filtered_by_canceled_by": params.canceled_by.is_some(), - "filtered_by_before_enqueued_at": params.before_enqueued_at.is_some(), - "filtered_by_after_enqueued_at": params.after_enqueued_at.is_some(), - "filtered_by_before_started_at": params.before_started_at.is_some(), - "filtered_by_after_started_at": params.after_started_at.is_some(), - "filtered_by_before_finished_at": params.before_finished_at.is_some(), - "filtered_by_after_finished_at": params.after_finished_at.is_some(), - }), - Some(&req), + TaskFilterAnalytics:: { + filtered_by_uid: params.uids.is_some(), + filtered_by_index_uid: params.index_uids.is_some(), + filtered_by_type: params.types.is_some(), + filtered_by_status: params.statuses.is_some(), + filtered_by_canceled_by: params.canceled_by.is_some(), + filtered_by_before_enqueued_at: params.before_enqueued_at.is_some(), + filtered_by_after_enqueued_at: params.after_enqueued_at.is_some(), + filtered_by_before_started_at: params.before_started_at.is_some(), + filtered_by_after_started_at: params.after_started_at.is_some(), + filtered_by_before_finished_at: params.before_finished_at.is_some(), + filtered_by_after_finished_at: params.after_finished_at.is_some(), + + marker: std::marker::PhantomData, + }, + &req, ); let query = params.into_query(); @@ -214,7 +271,7 @@ async fn delete_tasks( params: AwebQueryParameter, req: HttpRequest, opt: web::Data, - analytics: web::Data, + analytics: web::Data, ) -> Result { let params = params.into_inner(); @@ -223,22 +280,24 @@ async fn delete_tasks( } analytics.publish( - "Tasks Deleted".to_string(), - json!({ - "filtered_by_uid": 
params.uids.is_some(), - "filtered_by_index_uid": params.index_uids.is_some(), - "filtered_by_type": params.types.is_some(), - "filtered_by_status": params.statuses.is_some(), - "filtered_by_canceled_by": params.canceled_by.is_some(), - "filtered_by_before_enqueued_at": params.before_enqueued_at.is_some(), - "filtered_by_after_enqueued_at": params.after_enqueued_at.is_some(), - "filtered_by_before_started_at": params.before_started_at.is_some(), - "filtered_by_after_started_at": params.after_started_at.is_some(), - "filtered_by_before_finished_at": params.before_finished_at.is_some(), - "filtered_by_after_finished_at": params.after_finished_at.is_some(), - }), - Some(&req), + TaskFilterAnalytics:: { + filtered_by_uid: params.uids.is_some(), + filtered_by_index_uid: params.index_uids.is_some(), + filtered_by_type: params.types.is_some(), + filtered_by_status: params.statuses.is_some(), + filtered_by_canceled_by: params.canceled_by.is_some(), + filtered_by_before_enqueued_at: params.before_enqueued_at.is_some(), + filtered_by_after_enqueued_at: params.after_enqueued_at.is_some(), + filtered_by_before_started_at: params.before_started_at.is_some(), + filtered_by_after_started_at: params.after_started_at.is_some(), + filtered_by_before_finished_at: params.before_finished_at.is_some(), + filtered_by_after_finished_at: params.after_finished_at.is_some(), + + marker: std::marker::PhantomData, + }, + &req, ); + let query = params.into_query(); let (tasks, _) = index_scheduler.get_task_ids_from_authorized_indexes( From ea6883189ef73429b748473d436b71ea4a7a5a52 Mon Sep 17 00:00:00 2001 From: Tamo Date: Wed, 16 Oct 2024 21:17:06 +0200 Subject: [PATCH 068/111] finish the analytics in all the routes --- meilisearch/src/analytics/mod.rs | 33 ++-- .../src/analytics/segment_analytics.rs | 153 +++--------------- meilisearch/src/routes/features.rs | 1 - meilisearch/src/routes/indexes/documents.rs | 58 +++---- .../src/routes/indexes/facet_search.rs | 24 +-- 
meilisearch/src/routes/indexes/mod.rs | 5 +- meilisearch/src/routes/indexes/similar.rs | 4 +- meilisearch/src/routes/multi_search.rs | 2 +- meilisearch/src/routes/swap_indexes.rs | 1 - meilisearch/src/routes/tasks.rs | 2 +- 10 files changed, 84 insertions(+), 199 deletions(-) diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index a0ca47d8f..ab6fd9993 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -1,7 +1,5 @@ pub mod segment_analytics; -use std::any::TypeId; -use std::collections::HashMap; use std::fs; use std::path::{Path, PathBuf}; use std::str::FromStr; @@ -10,7 +8,6 @@ use actix_web::HttpRequest; use meilisearch_types::InstanceUid; use once_cell::sync::Lazy; use platform_dirs::AppDirs; -use segment::message::User; use serde::Serialize; // if the feature analytics is enabled we use the real analytics @@ -83,7 +80,7 @@ pub enum DocumentFetchKind { Normal { with_filter: bool, limit: usize, offset: usize, retrieve_vectors: bool }, } -pub trait Aggregate { +pub trait Aggregate: 'static { fn event_name(&self) -> &'static str; fn aggregate(self, other: Self) -> Self @@ -97,7 +94,7 @@ pub trait Aggregate { /// Helper trait to define multiple aggregate with the same content but a different name. /// Commonly used when you must aggregate a search with POST or with GET for example. -pub trait AggregateMethod { +pub trait AggregateMethod: 'static + Default { fn event_name() -> &'static str; } @@ -105,7 +102,8 @@ pub trait AggregateMethod { #[macro_export] macro_rules! aggregate_methods { ($method:ident => $event_name:literal) => { - pub enum $method {} + #[derive(Default)] + pub struct $method {} impl $crate::analytics::AggregateMethod for $method { fn event_name() -> &'static str { @@ -122,35 +120,26 @@ macro_rules! 
aggregate_methods { } pub struct Analytics { - // TODO: TAMO: remove - inner: Option, - - instance_uid: Option, - user: Option, - events: HashMap>, + segment: Option, } impl Analytics { fn no_analytics() -> Self { - Self { inner: None, events: HashMap::new(), instance_uid: None, user: None } + Self { segment: None } } fn segment_analytics(segment: SegmentAnalytics) -> Self { - Self { - instance_uid: Some(segment.instance_uid), - user: Some(segment.user), - inner: Some(segment), - events: HashMap::new(), - } + Self { segment: Some(segment) } } pub fn instance_uid(&self) -> Option<&InstanceUid> { - self.instance_uid + self.segment.as_ref().map(|segment| segment.instance_uid.as_ref()) } /// The method used to publish most analytics that do not need to be batched every hours - pub fn publish(&self, send: impl Aggregate, request: &HttpRequest) { - let Some(segment) = self.inner else { return }; + pub fn publish(&self, event: impl Aggregate, request: &HttpRequest) { + let Some(ref segment) = self.segment else { return }; let user_agents = extract_user_agents(request); + let _ = segment.sender.try_send(Box::new(event)); } } diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 0572267e1..601fefa1e 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -1,3 +1,4 @@ +use std::any::{Any, TypeId}; use std::collections::{BTreeSet, BinaryHeap, HashMap, HashSet}; use std::fs; use std::mem::take; @@ -74,6 +75,7 @@ pub fn extract_user_agents(request: &HttpRequest) -> Vec { pub struct SegmentAnalytics { pub instance_uid: InstanceUid, pub user: User, + pub sender: Sender>, } impl SegmentAnalytics { @@ -128,18 +130,7 @@ impl SegmentAnalytics { user: user.clone(), opt: opt.clone(), batcher, - post_search_aggregator: SearchAggregator::default(), - post_multi_search_aggregator: MultiSearchAggregator::default(), - post_facet_search_aggregator: 
FacetSearchAggregator::default(), - get_search_aggregator: SearchAggregator::default(), - add_documents_aggregator: DocumentsAggregator::default(), - delete_documents_aggregator: DocumentsDeletionAggregator::default(), - update_documents_aggregator: DocumentsAggregator::default(), - edit_documents_by_function_aggregator: EditDocumentsByFunctionAggregator::default(), - get_fetch_documents_aggregator: DocumentsFetchAggregator::default(), - post_fetch_documents_aggregator: DocumentsFetchAggregator::default(), - get_similar_aggregator: SimilarAggregator::default(), - post_similar_aggregator: SimilarAggregator::default(), + events: todo!(), }); tokio::spawn(segment.run(index_scheduler.clone(), auth_controller.clone())); @@ -387,22 +378,11 @@ impl From for Infos { } pub struct Segment { - inbox: Receiver, + inbox: Receiver>, user: User, opt: Opt, batcher: AutoBatcher, - get_search_aggregator: SearchAggregator, - post_search_aggregator: SearchAggregator, - post_multi_search_aggregator: MultiSearchAggregator, - post_facet_search_aggregator: FacetSearchAggregator, - add_documents_aggregator: DocumentsAggregator, - delete_documents_aggregator: DocumentsDeletionAggregator, - update_documents_aggregator: DocumentsAggregator, - edit_documents_by_function_aggregator: EditDocumentsByFunctionAggregator, - get_fetch_documents_aggregator: DocumentsFetchAggregator, - post_fetch_documents_aggregator: DocumentsFetchAggregator, - get_similar_aggregator: SimilarAggregator, - post_similar_aggregator: SimilarAggregator, + events: HashMap>, } impl Segment { @@ -455,19 +435,8 @@ impl Segment { }, msg = self.inbox.recv() => { match msg { - Some(AnalyticsMsg::BatchMessage(msg)) => drop(self.batcher.push(msg).await), - Some(AnalyticsMsg::AggregateGetSearch(agreg)) => self.get_search_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregatePostSearch(agreg)) => self.post_search_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregatePostMultiSearch(agreg)) => 
self.post_multi_search_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregatePostFacetSearch(agreg)) => self.post_facet_search_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregateAddDocuments(agreg)) => self.add_documents_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregateDeleteDocuments(agreg)) => self.delete_documents_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregateUpdateDocuments(agreg)) => self.update_documents_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregateEditDocumentsByFunction(agreg)) => self.edit_documents_by_function_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregateGetFetchDocuments(agreg)) => self.get_fetch_documents_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregatePostFetchDocuments(agreg)) => self.post_fetch_documents_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregateGetSimilar(agreg)) => self.get_similar_aggregator.aggregate(agreg), - Some(AnalyticsMsg::AggregatePostSimilar(agreg)) => self.post_similar_aggregator.aggregate(agreg), + // Some(AnalyticsMsg::BatchMessage(msg)) => drop(self.batcher.push(msg).await), + Some(_) => todo!(), None => (), } } @@ -507,87 +476,19 @@ impl Segment { .await; } - let Segment { - inbox: _, - opt: _, - batcher: _, - user, - get_search_aggregator, - post_search_aggregator, - post_multi_search_aggregator, - post_facet_search_aggregator, - add_documents_aggregator, - delete_documents_aggregator, - update_documents_aggregator, - edit_documents_by_function_aggregator, - get_fetch_documents_aggregator, - post_fetch_documents_aggregator, - get_similar_aggregator, - post_similar_aggregator, - } = self; + // We empty the list of events + let events = std::mem::take(&mut self.events); - if let Some(get_search) = - take(get_search_aggregator).into_event(user, "Documents Searched GET") - { - let _ = self.batcher.push(get_search).await; - } - if let Some(post_search) = - take(post_search_aggregator).into_event(user, "Documents Searched POST") - { - let _ = 
self.batcher.push(post_search).await; - } - if let Some(post_multi_search) = take(post_multi_search_aggregator) - .into_event(user, "Documents Searched by Multi-Search POST") - { - let _ = self.batcher.push(post_multi_search).await; - } - if let Some(post_facet_search) = - take(post_facet_search_aggregator).into_event(user, "Facet Searched POST") - { - let _ = self.batcher.push(post_facet_search).await; - } - if let Some(add_documents) = - take(add_documents_aggregator).into_event(user, "Documents Added") - { - let _ = self.batcher.push(add_documents).await; - } - if let Some(delete_documents) = - take(delete_documents_aggregator).into_event(user, "Documents Deleted") - { - let _ = self.batcher.push(delete_documents).await; - } - if let Some(update_documents) = - take(update_documents_aggregator).into_event(user, "Documents Updated") - { - let _ = self.batcher.push(update_documents).await; - } - if let Some(edit_documents_by_function) = take(edit_documents_by_function_aggregator) - .into_event(user, "Documents Edited By Function") - { - let _ = self.batcher.push(edit_documents_by_function).await; - } - if let Some(get_fetch_documents) = - take(get_fetch_documents_aggregator).into_event(user, "Documents Fetched GET") - { - let _ = self.batcher.push(get_fetch_documents).await; - } - if let Some(post_fetch_documents) = - take(post_fetch_documents_aggregator).into_event(user, "Documents Fetched POST") - { - let _ = self.batcher.push(post_fetch_documents).await; + for (_, mut event) in events { + self.batcher.push(Track { + user: self.user, + event: event.event_name().to_string(), + properties: event.into_event(), + timestamp: todo!(), + ..Default::default() + }); } - if let Some(get_similar_documents) = - take(get_similar_aggregator).into_event(user, "Similar GET") - { - let _ = self.batcher.push(get_similar_documents).await; - } - - if let Some(post_similar_documents) = - take(post_similar_aggregator).into_event(user, "Similar POST") - { - let _ = 
self.batcher.push(post_similar_documents).await; - } let _ = self.batcher.flush().await; } } @@ -702,10 +603,8 @@ impl SearchAggregator { } = query; let mut ret = Self::default(); - ret.timestamp = Some(OffsetDateTime::now_utc()); ret.total_received = 1; - ret.user_agents = extract_user_agents(request).into_iter().collect(); if let Some(ref sort) = sort { ret.sort_total_number_of_criteria = 1; @@ -949,7 +848,7 @@ impl Aggregate for SearchAggregator { self } - fn into_event(self) -> Option { + fn into_event(self) -> impl Serialize { let Self { total_received, total_succeeded, @@ -1087,10 +986,7 @@ pub struct MultiSearchAggregator { } impl MultiSearchAggregator { - pub fn from_federated_search( - federated_search: &FederatedSearch, - request: &HttpRequest, - ) -> Self { + pub fn from_federated_search(federated_search: &FederatedSearch) -> Self { let use_federation = federated_search.federation.is_some(); let distinct_indexes: HashSet<_> = federated_search @@ -1162,7 +1058,7 @@ impl Aggregate for MultiSearchAggregator { } /// Aggregate one [MultiSearchAggregator] into another. - fn aggregate(mut self, other: Self) -> Self { + fn aggregate(self, other: Self) -> Self { // write the aggregate in a way that will cause a compilation error if a field is added. // get ownership of self, replacing it by a default value. 
@@ -1177,13 +1073,8 @@ impl Aggregate for MultiSearchAggregator { let show_ranking_score = this.show_ranking_score || other.show_ranking_score; let show_ranking_score_details = this.show_ranking_score_details || other.show_ranking_score_details; - let mut user_agents = this.user_agents; let use_federation = this.use_federation || other.use_federation; - for user_agent in other.user_agents.into_iter() { - user_agents.insert(user_agent); - } - Self { total_received, total_succeeded, @@ -1748,7 +1639,7 @@ pub struct SimilarAggregator { impl SimilarAggregator { #[allow(clippy::field_reassign_with_default)] - pub fn from_query(query: &SimilarQuery, request: &HttpRequest) -> Self { + pub fn from_query(query: &SimilarQuery) -> Self { let SimilarQuery { id: _, embedder: _, @@ -1763,10 +1654,8 @@ impl SimilarAggregator { } = query; let mut ret = Self::default(); - ret.timestamp = Some(OffsetDateTime::now_utc()); ret.total_received = 1; - ret.user_agents = extract_user_agents(request).into_iter().collect(); if let Some(ref filter) = filter { static RE: Lazy = Lazy::new(|| Regex::new("AND | OR").unwrap()); diff --git a/meilisearch/src/routes/features.rs b/meilisearch/src/routes/features.rs index 4ee5b37b0..0b43c3f13 100644 --- a/meilisearch/src/routes/features.rs +++ b/meilisearch/src/routes/features.rs @@ -7,7 +7,6 @@ use meilisearch_types::deserr::DeserrJsonError; use meilisearch_types::error::ResponseError; use meilisearch_types::keys::actions; use serde::Serialize; -use serde_json::json; use tracing::debug; use crate::analytics::{Aggregate, Analytics}; diff --git a/meilisearch/src/routes/indexes/documents.rs b/meilisearch/src/routes/indexes/documents.rs index 6dece61e6..1573b768b 100644 --- a/meilisearch/src/routes/indexes/documents.rs +++ b/meilisearch/src/routes/indexes/documents.rs @@ -32,7 +32,7 @@ use tokio::fs::File; use tokio::io::{AsyncSeekExt, AsyncWriteExt, BufWriter}; use tracing::debug; -use crate::analytics::{Aggregate, AggregateMethod, Analytics, 
DocumentDeletionKind}; +use crate::analytics::{Aggregate, AggregateMethod, Analytics}; use crate::error::MeilisearchHttpError; use crate::error::PayloadError::ReceivePayload; use crate::extractors::authentication::policies::*; @@ -102,8 +102,13 @@ pub struct GetDocument { retrieve_vectors: Param, } +aggregate_methods!( + DocumentsGET => "Documents Fetched GET", + DocumentsPOST => "Documents Fetched POST", +); + #[derive(Default, Serialize)] -pub struct DocumentsFetchAggregator { +pub struct DocumentsFetchAggregator { #[serde(rename = "requests.total_received")] total_received: usize, @@ -120,6 +125,8 @@ pub struct DocumentsFetchAggregator { max_limit: usize, #[serde(rename = "pagination.max_offset")] max_offset: usize, + + marker: std::marker::PhantomData, } #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -128,7 +135,7 @@ pub enum DocumentFetchKind { Normal { with_filter: bool, limit: usize, offset: usize, retrieve_vectors: bool }, } -impl DocumentsFetchAggregator { +impl DocumentsFetchAggregator { pub fn from_query(query: &DocumentFetchKind) -> Self { let (limit, offset, retrieve_vectors) = match query { DocumentFetchKind::PerDocumentId { retrieve_vectors } => (1, 0, *retrieve_vectors), @@ -136,6 +143,7 @@ impl DocumentsFetchAggregator { (*limit, *offset, *retrieve_vectors) } }; + Self { total_received: 1, per_document_id: matches!(query, DocumentFetchKind::PerDocumentId { .. 
}), @@ -143,20 +151,18 @@ impl DocumentsFetchAggregator { max_limit: limit, max_offset: offset, retrieve_vectors, + + marker: PhantomData, } } } -impl Aggregate for DocumentsFetchAggregator { - // TODO: TAMO: Should we do the same event for the GET requests +impl Aggregate for DocumentsFetchAggregator { fn event_name(&self) -> &'static str { - "Documents Fetched POST" + Method::event_name() } - fn aggregate(self, other: Self) -> Self - where - Self: Sized, - { + fn aggregate(self, other: Self) -> Self { Self { total_received: self.total_received.saturating_add(other.total_received), per_document_id: self.per_document_id | other.per_document_id, @@ -164,11 +170,12 @@ impl Aggregate for DocumentsFetchAggregator { retrieve_vectors: self.retrieve_vectors | other.retrieve_vectors, max_limit: self.max_limit.max(other.max_limit), max_offset: self.max_offset.max(other.max_offset), + marker: PhantomData, } } - fn into_event(self) -> Value { - serde_json::to_value(self).unwrap() + fn into_event(self) -> impl Serialize { + self } } @@ -190,7 +197,7 @@ pub async fn get_document( let retrieve_vectors = RetrieveVectors::new(param_retrieve_vectors.0, features)?; analytics.publish( - DocumentsFetchAggregator { + DocumentsFetchAggregator:: { retrieve_vectors: param_retrieve_vectors.0, ..Default::default() }, @@ -232,8 +239,8 @@ impl Aggregate for DocumentsDeletionAggregator { } } - fn into_event(self) -> Value { - serde_json::to_value(self).unwrap() + fn into_event(self) -> impl Serialize { + self } } @@ -311,7 +318,7 @@ pub async fn documents_by_query_post( debug!(parameters = ?body, "Get documents POST"); analytics.publish( - DocumentsFetchAggregator { + DocumentsFetchAggregator:: { total_received: 1, per_filter: body.filter.is_some(), retrieve_vectors: body.retrieve_vectors, @@ -353,7 +360,7 @@ pub async fn get_documents( }; analytics.publish( - DocumentsFetchAggregator { + DocumentsFetchAggregator:: { total_received: 1, per_filter: query.filter.is_some(), retrieve_vectors: 
query.retrieve_vectors, @@ -436,20 +443,17 @@ impl Aggregate for DocumentsAggregator { Method::event_name() } - fn aggregate(mut self, other: Self) -> Self - where - Self: Sized, - { + fn aggregate(self, other: Self) -> Self { Self { - payload_types: self.payload_types.union(&other.payload_types).collect(), - primary_key: self.primary_key.union(&other.primary_key).collect(), + payload_types: self.payload_types.union(&other.payload_types).cloned().collect(), + primary_key: self.primary_key.union(&other.primary_key).cloned().collect(), index_creation: self.index_creation | other.index_creation, method: PhantomData, } } - fn into_event(self) -> Value { - serde_json::to_value(self).unwrap() + fn into_event(self) -> impl Serialize { + self } } @@ -818,8 +822,8 @@ impl Aggregate for EditDocumentsByFunctionAggregator { } } - fn into_event(self) -> Value { - serde_json::to_value(self).unwrap() + fn into_event(self) -> impl Serialize { + self } } diff --git a/meilisearch/src/routes/indexes/facet_search.rs b/meilisearch/src/routes/indexes/facet_search.rs index f3c74a388..08618970d 100644 --- a/meilisearch/src/routes/indexes/facet_search.rs +++ b/meilisearch/src/routes/indexes/facet_search.rs @@ -9,6 +9,7 @@ use meilisearch_types::error::deserr_codes::*; use meilisearch_types::error::ResponseError; use meilisearch_types::index_uid::IndexUid; use meilisearch_types::locales::Locale; +use serde::Serialize; use serde_json::Value; use tracing::debug; @@ -72,7 +73,7 @@ pub struct FacetSearchAggregator { impl FacetSearchAggregator { #[allow(clippy::field_reassign_with_default)] - pub fn from_query(query: &FacetSearchQuery, request: &HttpRequest) -> Self { + pub fn from_query(query: &FacetSearchQuery) -> Self { let FacetSearchQuery { facet_query: _, facet_name, @@ -113,23 +114,22 @@ impl Aggregate for FacetSearchAggregator { "Facet Searched POST" } - fn aggregate(mut self, other: Self) -> Self - where - Self: Sized, - { - self.time_spent.insert(other.time_spent); + fn aggregate(mut 
self, other: Self) -> Self { + for time in other.time_spent { + self.time_spent.push(time); + } Self { total_received: self.total_received.saturating_add(other.total_received), total_succeeded: self.total_succeeded.saturating_add(other.total_succeeded), time_spent: self.time_spent, - facet_names: self.facet_names.union(&other.facet_names).collect(), + facet_names: self.facet_names.union(&other.facet_names).cloned().collect(), additional_search_parameters_provided: self.additional_search_parameters_provided | other.additional_search_parameters_provided, } } - fn into_event(self) -> Value { + fn into_event(self) -> impl Serialize { let Self { total_received, total_succeeded, @@ -137,6 +137,12 @@ impl Aggregate for FacetSearchAggregator { facet_names, additional_search_parameters_provided, } = self; + // the index of the 99th percentage of value + let percentile_99th = 0.99 * (total_succeeded as f64 - 1.) + 1.; + // we get all the values in a sorted manner + let time_spent = time_spent.into_sorted_vec(); + // We are only interested by the slowest value of the 99th fastest results + let time_spent = time_spent.get(percentile_99th as usize); serde_json::json!({ "requests": { @@ -166,7 +172,7 @@ pub async fn search( let query = params.into_inner(); debug!(parameters = ?query, "Facet search"); - let mut aggregate = FacetSearchAggregator::from_query(&query, &req); + let mut aggregate = FacetSearchAggregator::from_query(&query); let facet_query = query.facet_query.clone(); let facet_name = query.facet_name.clone(); diff --git a/meilisearch/src/routes/indexes/mod.rs b/meilisearch/src/routes/indexes/mod.rs index f926f663c..3c41f36fe 100644 --- a/meilisearch/src/routes/indexes/mod.rs +++ b/meilisearch/src/routes/indexes/mod.rs @@ -14,7 +14,6 @@ use meilisearch_types::index_uid::IndexUid; use meilisearch_types::milli::{self, FieldDistribution, Index}; use meilisearch_types::tasks::KindWithContent; use serde::Serialize; -use serde_json::json; use time::OffsetDateTime; use 
tracing::debug; @@ -138,7 +137,7 @@ impl Aggregate for IndexCreatedAggregate { where Self: Sized, { - Self { primary_key: self.primary_key.union(&other.primary_key).collect() } + Self { primary_key: self.primary_key.union(&other.primary_key).cloned().collect() } } fn into_event(self) -> impl Serialize { @@ -227,7 +226,7 @@ impl Aggregate for IndexUpdatedAggregate { } fn aggregate(self, other: Self) -> Self { - Self { primary_key: self.primary_key.union(&other.primary_key).collect() } + Self { primary_key: self.primary_key.union(&other.primary_key).cloned().collect() } } fn into_event(self) -> impl Serialize { diff --git a/meilisearch/src/routes/indexes/similar.rs b/meilisearch/src/routes/indexes/similar.rs index 91c435254..33df6bdad 100644 --- a/meilisearch/src/routes/indexes/similar.rs +++ b/meilisearch/src/routes/indexes/similar.rs @@ -41,7 +41,7 @@ pub async fn similar_get( let query = params.0.try_into()?; - let mut aggregate = SimilarAggregator::::from_query(&query, &req); + let mut aggregate = SimilarAggregator::::from_query(&query); debug!(parameters = ?query, "Similar get"); @@ -70,7 +70,7 @@ pub async fn similar_post( let query = params.into_inner(); debug!(parameters = ?query, "Similar post"); - let mut aggregate = SimilarAggregator::::from_query(&query, &req); + let mut aggregate = SimilarAggregator::::from_query(&query); let similar = similar(index_scheduler, index_uid, query).await; diff --git a/meilisearch/src/routes/multi_search.rs b/meilisearch/src/routes/multi_search.rs index 994c256d2..13a39cb44 100644 --- a/meilisearch/src/routes/multi_search.rs +++ b/meilisearch/src/routes/multi_search.rs @@ -43,7 +43,7 @@ pub async fn multi_search_with_post( let federated_search = params.into_inner(); - let mut multi_aggregate = MultiSearchAggregator::from_federated_search(&federated_search, &req); + let mut multi_aggregate = MultiSearchAggregator::from_federated_search(&federated_search); let FederatedSearch { mut queries, federation } = federated_search; diff 
--git a/meilisearch/src/routes/swap_indexes.rs b/meilisearch/src/routes/swap_indexes.rs index 42ebd7858..abdffbb73 100644 --- a/meilisearch/src/routes/swap_indexes.rs +++ b/meilisearch/src/routes/swap_indexes.rs @@ -9,7 +9,6 @@ use meilisearch_types::error::ResponseError; use meilisearch_types::index_uid::IndexUid; use meilisearch_types::tasks::{IndexSwap, KindWithContent}; use serde::Serialize; -use serde_json::json; use super::{get_task_id, is_dry_run, SummarizedTaskView}; use crate::analytics::{Aggregate, Analytics}; diff --git a/meilisearch/src/routes/tasks.rs b/meilisearch/src/routes/tasks.rs index 162d19ca1..f04e2ead2 100644 --- a/meilisearch/src/routes/tasks.rs +++ b/meilisearch/src/routes/tasks.rs @@ -180,7 +180,7 @@ struct TaskFilterAnalytics { marker: std::marker::PhantomData, } -impl Aggregate for TaskFilterAnalytics { +impl Aggregate for TaskFilterAnalytics { fn event_name(&self) -> &'static str { Method::event_name() } From 6728cfbfac2a1b3e56b7bb7f13687dc610b48ca3 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 00:38:18 +0200 Subject: [PATCH 069/111] fix the analytics --- Cargo.lock | 7 ++ meilisearch/Cargo.toml | 1 + meilisearch/src/analytics/mod.rs | 34 ++++++--- .../src/analytics/segment_analytics.rs | 76 ++++++++++++------- 4 files changed, 81 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c85a59952..733470384 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3415,6 +3415,7 @@ dependencies = [ "meilisearch-types", "mimalloc", "mime", + "mopa", "num_cpus", "obkv", "once_cell", @@ -3681,6 +3682,12 @@ dependencies = [ "syn 2.0.60", ] +[[package]] +name = "mopa" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a785740271256c230f57462d3b83e52f998433a7062fc18f96d5999474a9f915" + [[package]] name = "mutually_exclusive_features" version = "0.0.3" diff --git a/meilisearch/Cargo.toml b/meilisearch/Cargo.toml index 6c2fb4060..322b333ac 100644 --- a/meilisearch/Cargo.toml +++ 
b/meilisearch/Cargo.toml @@ -104,6 +104,7 @@ tracing-trace = { version = "0.1.0", path = "../tracing-trace" } tracing-actix-web = "0.7.11" build-info = { version = "1.7.0", path = "../build-info" } roaring = "0.10.2" +mopa = "0.2.2" [dev-dependencies] actix-rt = "2.10.0" diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index ab6fd9993..8a0a68bad 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -6,9 +6,9 @@ use std::str::FromStr; use actix_web::HttpRequest; use meilisearch_types::InstanceUid; +use mopa::mopafy; use once_cell::sync::Lazy; use platform_dirs::AppDirs; -use serde::Serialize; // if the feature analytics is enabled we use the real analytics pub type SegmentAnalytics = segment_analytics::SegmentAnalytics; @@ -31,11 +31,11 @@ macro_rules! empty_analytics { $event_name } - fn aggregate(self, _other: Self) -> Self { + fn aggregate(self: Box, _other: Box) -> Box { self } - fn into_event(self) -> impl serde::Serialize { + fn into_event(self: Box) -> serde_json::Value { serde_json::json!({}) } } @@ -80,18 +80,34 @@ pub enum DocumentFetchKind { Normal { with_filter: bool, limit: usize, offset: usize, retrieve_vectors: bool }, } -pub trait Aggregate: 'static { +pub trait Aggregate: 'static + mopa::Any + Send { fn event_name(&self) -> &'static str; - fn aggregate(self, other: Self) -> Self + fn aggregate(self: Box, other: Box) -> Box where Self: Sized; - fn into_event(self) -> impl Serialize + fn downcast_aggregate( + this: Box, + other: Box, + ) -> Option> where - Self: Sized; + Self: Sized, + { + if this.is::() && other.is::() { + let this = this.downcast::().ok()?; + let other = other.downcast::().ok()?; + Some(Self::aggregate(this, other)) + } else { + None + } + } + + fn into_event(self: Box) -> serde_json::Value; } +mopafy!(Aggregate); + /// Helper trait to define multiple aggregate with the same content but a different name. 
/// Commonly used when you must aggregate a search with POST or with GET for example. pub trait AggregateMethod: 'static + Default { @@ -137,9 +153,9 @@ impl Analytics { } /// The method used to publish most analytics that do not need to be batched every hours - pub fn publish(&self, event: impl Aggregate, request: &HttpRequest) { + pub fn publish(&self, event: T, request: &HttpRequest) { let Some(ref segment) = self.segment else { return }; let user_agents = extract_user_agents(request); - let _ = segment.sender.try_send(Box::new(event)); + let _ = segment.sender.try_send(segment_analytics::Message::new(event)); } } diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 601fefa1e..1a1bb9226 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -1,7 +1,6 @@ -use std::any::{Any, TypeId}; +use std::any::TypeId; use std::collections::{BTreeSet, BinaryHeap, HashMap, HashSet}; use std::fs; -use std::mem::take; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -72,10 +71,26 @@ pub fn extract_user_agents(request: &HttpRequest) -> Vec { .collect() } +pub struct Message { + type_id: TypeId, + event: Box, + aggregator_function: fn(Box, Box) -> Option>, +} + +impl Message { + pub fn new(event: T) -> Self { + Self { + type_id: TypeId::of::(), + event: Box::new(event), + aggregator_function: T::downcast_aggregate, + } + } +} + pub struct SegmentAnalytics { pub instance_uid: InstanceUid, pub user: User, - pub sender: Sender>, + pub sender: Sender, } impl SegmentAnalytics { @@ -378,7 +393,7 @@ impl From for Infos { } pub struct Segment { - inbox: Receiver>, + inbox: Receiver, user: User, opt: Opt, batcher: AutoBatcher, @@ -435,8 +450,13 @@ impl Segment { }, msg = self.inbox.recv() => { match msg { - // Some(AnalyticsMsg::BatchMessage(msg)) => drop(self.batcher.push(msg).await), - Some(_) => todo!(), + Some(Message { 
type_id, event, aggregator_function }) => { + let new_event = match self.events.remove(&type_id) { + Some(old) => (aggregator_function)(old, event).unwrap(), + None => event, + }; + self.events.insert(type_id, new_event); + }, None => (), } } @@ -479,9 +499,9 @@ impl Segment { // We empty the list of events let events = std::mem::take(&mut self.events); - for (_, mut event) in events { + for (_, event) in events { self.batcher.push(Track { - user: self.user, + user: self.user.clone(), event: event.event_name().to_string(), properties: event.into_event(), timestamp: todo!(), @@ -722,11 +742,11 @@ impl Aggregate for SearchAggregator { Method::event_name() } - fn aggregate(mut self, mut other: Self) -> Self { + fn aggregate(mut self: Box, other: Box) -> Box { let Self { total_received, total_succeeded, - ref mut time_spent, + mut time_spent, sort_with_geo_point, sort_sum_of_criteria_terms, sort_total_number_of_criteria, @@ -761,9 +781,9 @@ impl Aggregate for SearchAggregator { total_degraded, total_used_negative_operator, ranking_score_threshold, - ref mut locales, + mut locales, marker: _, - } = other; + } = *other; // request self.total_received = self.total_received.saturating_add(total_received); @@ -771,7 +791,7 @@ impl Aggregate for SearchAggregator { self.total_degraded = self.total_degraded.saturating_add(total_degraded); self.total_used_negative_operator = self.total_used_negative_operator.saturating_add(total_used_negative_operator); - self.time_spent.append(time_spent); + self.time_spent.append(&mut time_spent); // sort self.sort_with_geo_point |= sort_with_geo_point; @@ -843,12 +863,12 @@ impl Aggregate for SearchAggregator { self.ranking_score_threshold |= ranking_score_threshold; // locales - self.locales.append(locales); + self.locales.append(&mut locales); self } - fn into_event(self) -> impl Serialize { + fn into_event(self: Box) -> serde_json::Value { let Self { total_received, total_succeeded, @@ -889,7 +909,7 @@ impl Aggregate for SearchAggregator 
{ ranking_score_threshold, locales, marker: _, - } = self; + } = *self; // we get all the values in a sorted manner let time_spent = time_spent.into_sorted_vec(); @@ -1058,11 +1078,11 @@ impl Aggregate for MultiSearchAggregator { } /// Aggregate one [MultiSearchAggregator] into another. - fn aggregate(self, other: Self) -> Self { + fn aggregate(self: Box, other: Box) -> Box { // write the aggregate in a way that will cause a compilation error if a field is added. // get ownership of self, replacing it by a default value. - let this = self; + let this = *self; let total_received = this.total_received.saturating_add(other.total_received); let total_succeeded = this.total_succeeded.saturating_add(other.total_succeeded); @@ -1075,7 +1095,7 @@ impl Aggregate for MultiSearchAggregator { this.show_ranking_score_details || other.show_ranking_score_details; let use_federation = this.use_federation || other.use_federation; - Self { + Box::new(Self { total_received, total_succeeded, total_distinct_index_count, @@ -1084,10 +1104,10 @@ impl Aggregate for MultiSearchAggregator { show_ranking_score, show_ranking_score_details, use_federation, - } + }) } - fn into_event(self) -> impl Serialize { + fn into_event(self: Box) -> serde_json::Value { let Self { total_received, total_succeeded, @@ -1097,7 +1117,7 @@ impl Aggregate for MultiSearchAggregator { show_ranking_score, show_ranking_score_details, use_federation, - } = self; + } = *self; json!({ "requests": { @@ -1708,11 +1728,11 @@ impl Aggregate for SimilarAggregator { } /// Aggregate one [SimilarAggregator] into another. 
- fn aggregate(mut self, mut other: Self) -> Self { + fn aggregate(mut self: Box, other: Box) -> Box { let Self { total_received, total_succeeded, - ref mut time_spent, + mut time_spent, filter_with_geo_radius, filter_with_geo_bounding_box, filter_sum_of_criteria_terms, @@ -1726,12 +1746,12 @@ impl Aggregate for SimilarAggregator { ranking_score_threshold, retrieve_vectors, marker: _, - } = other; + } = *other; // request self.total_received = self.total_received.saturating_add(total_received); self.total_succeeded = self.total_succeeded.saturating_add(total_succeeded); - self.time_spent.append(time_spent); + self.time_spent.append(&mut time_spent); // filter self.filter_with_geo_radius |= filter_with_geo_radius; @@ -1763,7 +1783,7 @@ impl Aggregate for SimilarAggregator { self } - fn into_event(self) -> impl Serialize { + fn into_event(self: Box) -> serde_json::Value { let Self { total_received, total_succeeded, @@ -1781,7 +1801,7 @@ impl Aggregate for SimilarAggregator { ranking_score_threshold, retrieve_vectors, marker: _, - } = self; + } = *self; // we get all the values in a sorted manner let time_spent = time_spent.into_sorted_vec(); From aa7a34ffe8b9572c44b4bd36c30f7cf3805a9ed7 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 00:43:34 +0200 Subject: [PATCH 070/111] make the aggregate method send --- meilisearch/src/analytics/mod.rs | 2 +- meilisearch/src/analytics/segment_analytics.rs | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index 8a0a68bad..f8a589901 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -110,7 +110,7 @@ mopafy!(Aggregate); /// Helper trait to define multiple aggregate with the same content but a different name. /// Commonly used when you must aggregate a search with POST or with GET for example. 
-pub trait AggregateMethod: 'static + Default { +pub trait AggregateMethod: 'static + Default + Send { fn event_name() -> &'static str; } diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 1a1bb9226..92f03e48e 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -72,9 +72,12 @@ pub fn extract_user_agents(request: &HttpRequest) -> Vec { } pub struct Message { + // Since the type_id is solved statically we cannot retrieve it from the Box. + // Thus we have to send it in the message directly. type_id: TypeId, - event: Box, + // Same for the aggregate function. aggregator_function: fn(Box, Box) -> Option>, + event: Box, } impl Message { From e4ace98004fff86e35fe8dd4a2cdccfa8b03ce9f Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 01:04:25 +0200 Subject: [PATCH 071/111] fix all the routes + move to a better version of mopa --- Cargo.lock | 8 ++-- meilisearch/Cargo.toml | 2 +- meilisearch/src/analytics/mod.rs | 2 + meilisearch/src/routes/features.rs | 13 ++---- meilisearch/src/routes/indexes/documents.rs | 46 ++++++++----------- .../src/routes/indexes/facet_search.rs | 10 ++-- meilisearch/src/routes/indexes/mod.rs | 23 +++++----- meilisearch/src/routes/indexes/settings.rs | 16 ++----- meilisearch/src/routes/swap_indexes.rs | 10 ++-- meilisearch/src/routes/tasks.rs | 10 ++-- 10 files changed, 65 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 733470384..500f28454 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3415,7 +3415,7 @@ dependencies = [ "meilisearch-types", "mimalloc", "mime", - "mopa", + "mopa-maintained", "num_cpus", "obkv", "once_cell", @@ -3683,10 +3683,10 @@ dependencies = [ ] [[package]] -name = "mopa" -version = "0.2.2" +name = "mopa-maintained" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a785740271256c230f57462d3b83e52f998433a7062fc18f96d5999474a9f915" +checksum = "79b7f3e22167862cc7c95b21a6f326c22e4bf40da59cbf000b368a310173ba11" [[package]] name = "mutually_exclusive_features" diff --git a/meilisearch/Cargo.toml b/meilisearch/Cargo.toml index 322b333ac..07357e724 100644 --- a/meilisearch/Cargo.toml +++ b/meilisearch/Cargo.toml @@ -104,7 +104,7 @@ tracing-trace = { version = "0.1.0", path = "../tracing-trace" } tracing-actix-web = "0.7.11" build-info = { version = "1.7.0", path = "../build-info" } roaring = "0.10.2" -mopa = "0.2.2" +mopa-maintained = "0.2.3" [dev-dependencies] actix-rt = "2.10.0" diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index f8a589901..b3e8109a3 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -1,3 +1,5 @@ +#![allow(clippy::transmute_ptr_to_ref)] // mopify isn't updated with the latest version of clippy yet + pub mod segment_analytics; use std::fs; diff --git a/meilisearch/src/routes/features.rs b/meilisearch/src/routes/features.rs index 0b43c3f13..1de00717d 100644 --- a/meilisearch/src/routes/features.rs +++ b/meilisearch/src/routes/features.rs @@ -69,21 +69,18 @@ impl Aggregate for PatchExperimentalFeatureAnalytics { "Experimental features Updated" } - fn aggregate(self, other: Self) -> Self - where - Self: Sized, - { - Self { + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { vector_store: other.vector_store, metrics: other.metrics, logs_route: other.logs_route, edit_documents_by_function: other.edit_documents_by_function, contains_filter: other.contains_filter, - } + }) } - fn into_event(self) -> impl Serialize { - self + fn into_event(self: Box) -> serde_json::Value { + serde_json::to_value(*self).unwrap_or_default() } } diff --git a/meilisearch/src/routes/indexes/documents.rs b/meilisearch/src/routes/indexes/documents.rs index 1573b768b..854fa5b69 100644 --- a/meilisearch/src/routes/indexes/documents.rs +++ 
b/meilisearch/src/routes/indexes/documents.rs @@ -162,8 +162,8 @@ impl Aggregate for DocumentsFetchAggregator { Method::event_name() } - fn aggregate(self, other: Self) -> Self { - Self { + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { total_received: self.total_received.saturating_add(other.total_received), per_document_id: self.per_document_id | other.per_document_id, per_filter: self.per_filter | other.per_filter, @@ -171,11 +171,11 @@ impl Aggregate for DocumentsFetchAggregator { max_limit: self.max_limit.max(other.max_limit), max_offset: self.max_offset.max(other.max_offset), marker: PhantomData, - } + }) } - fn into_event(self) -> impl Serialize { - self + fn into_event(self: Box) -> serde_json::Value { + serde_json::to_value(*self).unwrap_or_default() } } @@ -226,21 +226,18 @@ impl Aggregate for DocumentsDeletionAggregator { "Documents Deleted" } - fn aggregate(self, other: Self) -> Self - where - Self: Sized, - { - Self { + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { total_received: self.total_received.saturating_add(other.total_received), per_document_id: self.per_document_id | other.per_document_id, clear_all: self.clear_all | other.clear_all, per_batch: self.per_batch | other.per_batch, per_filter: self.per_filter | other.per_filter, - } + }) } - fn into_event(self) -> impl Serialize { - self + fn into_event(self: Box) -> serde_json::Value { + serde_json::to_value(*self).unwrap_or_default() } } @@ -443,17 +440,17 @@ impl Aggregate for DocumentsAggregator { Method::event_name() } - fn aggregate(self, other: Self) -> Self { - Self { + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { payload_types: self.payload_types.union(&other.payload_types).cloned().collect(), primary_key: self.primary_key.union(&other.primary_key).cloned().collect(), index_creation: self.index_creation | other.index_creation, method: PhantomData, - } + }) } - fn into_event(self) -> impl Serialize { - self + fn into_event(self: Box) -> 
serde_json::Value { + serde_json::to_value(self).unwrap_or_default() } } @@ -811,19 +808,16 @@ impl Aggregate for EditDocumentsByFunctionAggregator { "Documents Edited By Function" } - fn aggregate(self, other: Self) -> Self - where - Self: Sized, - { - Self { + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { filtered: self.filtered | other.filtered, with_context: self.with_context | other.with_context, index_creation: self.index_creation | other.index_creation, - } + }) } - fn into_event(self) -> impl Serialize { - self + fn into_event(self: Box) -> serde_json::Value { + serde_json::to_value(*self).unwrap_or_default() } } diff --git a/meilisearch/src/routes/indexes/facet_search.rs b/meilisearch/src/routes/indexes/facet_search.rs index 08618970d..715eaaaa7 100644 --- a/meilisearch/src/routes/indexes/facet_search.rs +++ b/meilisearch/src/routes/indexes/facet_search.rs @@ -114,29 +114,29 @@ impl Aggregate for FacetSearchAggregator { "Facet Searched POST" } - fn aggregate(mut self, other: Self) -> Self { + fn aggregate(mut self: Box, other: Box) -> Box { for time in other.time_spent { self.time_spent.push(time); } - Self { + Box::new(Self { total_received: self.total_received.saturating_add(other.total_received), total_succeeded: self.total_succeeded.saturating_add(other.total_succeeded), time_spent: self.time_spent, facet_names: self.facet_names.union(&other.facet_names).cloned().collect(), additional_search_parameters_provided: self.additional_search_parameters_provided | other.additional_search_parameters_provided, - } + }) } - fn into_event(self) -> impl Serialize { + fn into_event(self: Box) -> serde_json::Value { let Self { total_received, total_succeeded, time_spent, facet_names, additional_search_parameters_provided, - } = self; + } = *self; // the index of the 99th percentage of value let percentile_99th = 0.99 * (total_succeeded as f64 - 1.) 
+ 1.; // we get all the values in a sorted manner diff --git a/meilisearch/src/routes/indexes/mod.rs b/meilisearch/src/routes/indexes/mod.rs index 3c41f36fe..8972119d7 100644 --- a/meilisearch/src/routes/indexes/mod.rs +++ b/meilisearch/src/routes/indexes/mod.rs @@ -133,15 +133,14 @@ impl Aggregate for IndexCreatedAggregate { "Index Created" } - fn aggregate(self, other: Self) -> Self - where - Self: Sized, - { - Self { primary_key: self.primary_key.union(&other.primary_key).cloned().collect() } + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { + primary_key: self.primary_key.union(&other.primary_key).cloned().collect(), + }) } - fn into_event(self) -> impl Serialize { - self + fn into_event(self: Box) -> serde_json::Value { + serde_json::to_value(*self).unwrap_or_default() } } @@ -225,12 +224,14 @@ impl Aggregate for IndexUpdatedAggregate { "Index Updated" } - fn aggregate(self, other: Self) -> Self { - Self { primary_key: self.primary_key.union(&other.primary_key).cloned().collect() } + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { + primary_key: self.primary_key.union(&other.primary_key).cloned().collect(), + }) } - fn into_event(self) -> impl Serialize { - self + fn into_event(self: Box) -> serde_json::Value { + serde_json::to_value(*self).unwrap_or_default() } } pub async fn update_index( diff --git a/meilisearch/src/routes/indexes/settings.rs b/meilisearch/src/routes/indexes/settings.rs index bb2f6792d..f31f52dc1 100644 --- a/meilisearch/src/routes/indexes/settings.rs +++ b/meilisearch/src/routes/indexes/settings.rs @@ -437,11 +437,8 @@ impl Aggregate for SettingsAnalytics { "Settings Updated" } - fn aggregate(self, other: Self) -> Self - where - Self: Sized, - { - Self { + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { ranking_rules: RankingRulesAnalytics { words_position: self .ranking_rules @@ -586,14 +583,11 @@ impl Aggregate for SettingsAnalytics { non_separator_tokens: NonSeparatorTokensAnalytics { total: 
self.non_separator_tokens.total.or(other.non_separator_tokens.total), }, - } + }) } - fn into_event(self) -> impl Serialize - where - Self: Sized, - { - self + fn into_event(self: Box) -> serde_json::Value { + serde_json::to_value(*self).unwrap_or_default() } } diff --git a/meilisearch/src/routes/swap_indexes.rs b/meilisearch/src/routes/swap_indexes.rs index abdffbb73..f7d8f4eff 100644 --- a/meilisearch/src/routes/swap_indexes.rs +++ b/meilisearch/src/routes/swap_indexes.rs @@ -39,12 +39,14 @@ impl Aggregate for IndexSwappedAnalytics { "Indexes Swapped" } - fn aggregate(self, other: Self) -> Self { - Self { swap_operation_number: self.swap_operation_number.max(other.swap_operation_number) } + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { + swap_operation_number: self.swap_operation_number.max(other.swap_operation_number), + }) } - fn into_event(self) -> impl Serialize { - self + fn into_event(self: Box) -> serde_json::Value { + serde_json::to_value(*self).unwrap_or_default() } } diff --git a/meilisearch/src/routes/tasks.rs b/meilisearch/src/routes/tasks.rs index f04e2ead2..ff4aee998 100644 --- a/meilisearch/src/routes/tasks.rs +++ b/meilisearch/src/routes/tasks.rs @@ -185,8 +185,8 @@ impl Aggregate for TaskFilterAnalytics Self { - Self { + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { filtered_by_uid: self.filtered_by_uid | other.filtered_by_uid, filtered_by_index_uid: self.filtered_by_index_uid | other.filtered_by_index_uid, filtered_by_type: self.filtered_by_type | other.filtered_by_type, @@ -206,11 +206,11 @@ impl Aggregate for TaskFilterAnalytics impl Serialize { - self + fn into_event(self: Box) -> serde_json::Value { + serde_json::to_value(*self).unwrap_or_default() } } From 7382fb21e41719a6be6dbf5f25b6c47ad7afc581 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 08:38:11 +0200 Subject: [PATCH 072/111] fix the main --- meilisearch/src/analytics/mod.rs | 24 +++++++++++++------ .../src/analytics/segment_analytics.rs | 
10 ++++---- meilisearch/src/lib.rs | 6 ++--- meilisearch/src/main.rs | 22 +++++------------ meilisearch/src/routes/indexes/search.rs | 4 ++-- 5 files changed, 33 insertions(+), 33 deletions(-) diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index b3e8109a3..91139e1dd 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -5,8 +5,11 @@ pub mod segment_analytics; use std::fs; use std::path::{Path, PathBuf}; use std::str::FromStr; +use std::sync::Arc; use actix_web::HttpRequest; +use index_scheduler::IndexScheduler; +use meilisearch_auth::AuthController; use meilisearch_types::InstanceUid; use mopa::mopafy; use once_cell::sync::Lazy; @@ -17,6 +20,8 @@ pub type SegmentAnalytics = segment_analytics::SegmentAnalytics; pub use segment_analytics::SearchAggregator; pub use segment_analytics::SimilarAggregator; +use crate::Opt; + use self::segment_analytics::extract_user_agents; pub type MultiSearchAggregator = segment_analytics::MultiSearchAggregator; pub type FacetSearchAggregator = segment_analytics::FacetSearchAggregator; @@ -137,17 +142,22 @@ macro_rules! 
aggregate_methods { }; } +#[derive(Clone)] pub struct Analytics { - segment: Option, + segment: Option>, } impl Analytics { - fn no_analytics() -> Self { - Self { segment: None } - } - - fn segment_analytics(segment: SegmentAnalytics) -> Self { - Self { segment: Some(segment) } + pub async fn new( + opt: &Opt, + index_scheduler: Arc, + auth_controller: Arc, + ) -> Self { + if opt.no_analytics { + Self { segment: None } + } else { + Self { segment: SegmentAnalytics::new(opt, index_scheduler, auth_controller).await } + } } pub fn instance_uid(&self) -> Option<&InstanceUid> { diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 92f03e48e..3496853ff 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -102,7 +102,7 @@ impl SegmentAnalytics { opt: &Opt, index_scheduler: Arc, auth_controller: Arc, - ) -> Arc { + ) -> Option> { let instance_uid = super::find_user_id(&opt.db_path); let first_time_run = instance_uid.is_none(); let instance_uid = instance_uid.unwrap_or_else(Uuid::new_v4); @@ -112,7 +112,7 @@ impl SegmentAnalytics { // if reqwest throws an error we won't be able to send analytics if client.is_err() { - return Arc::new(Analytics::no_analytics()); + return None; } let client = @@ -148,13 +148,13 @@ impl SegmentAnalytics { user: user.clone(), opt: opt.clone(), batcher, - events: todo!(), + events: HashMap::new(), }); tokio::spawn(segment.run(index_scheduler.clone(), auth_controller.clone())); let this = Self { instance_uid, sender, user: user.clone() }; - Arc::new(Analytics::segment_analytics(this)) + Some(Arc::new(this)) } } @@ -595,7 +595,7 @@ pub struct SearchAggregator { impl SearchAggregator { #[allow(clippy::field_reassign_with_default)] - pub fn from_query(query: &SearchQuery, request: &HttpRequest) -> Self { + pub fn from_query(query: &SearchQuery) -> Self { let SearchQuery { q, vector, diff --git a/meilisearch/src/lib.rs 
b/meilisearch/src/lib.rs index 80177876a..633ad2776 100644 --- a/meilisearch/src/lib.rs +++ b/meilisearch/src/lib.rs @@ -120,7 +120,7 @@ pub fn create_app( search_queue: Data, opt: Opt, logs: (LogRouteHandle, LogStderrHandle), - analytics: Arc, + analytics: Data, enable_dashboard: bool, ) -> actix_web::App< impl ServiceFactory< @@ -473,14 +473,14 @@ pub fn configure_data( search_queue: Data, opt: &Opt, (logs_route, logs_stderr): (LogRouteHandle, LogStderrHandle), - analytics: Arc, + analytics: Data, ) { let http_payload_size_limit = opt.http_payload_size_limit.as_u64() as usize; config .app_data(index_scheduler) .app_data(auth) .app_data(search_queue) - .app_data(web::Data::from(analytics)) + .app_data(analytics) .app_data(web::Data::new(logs_route)) .app_data(web::Data::new(logs_stderr)) .app_data(web::Data::new(opt.clone())) diff --git a/meilisearch/src/main.rs b/meilisearch/src/main.rs index de9784d15..eebea3b6d 100644 --- a/meilisearch/src/main.rs +++ b/meilisearch/src/main.rs @@ -124,19 +124,12 @@ async fn try_main() -> anyhow::Result<()> { let (index_scheduler, auth_controller) = setup_meilisearch(&opt)?; - #[cfg(all(not(debug_assertions), feature = "analytics"))] - let analytics = if !opt.no_analytics { - analytics::SegmentAnalytics::new(&opt, index_scheduler.clone(), auth_controller.clone()) - .await - } else { - analytics::MockAnalytics::new(&opt) - }; - #[cfg(any(debug_assertions, not(feature = "analytics")))] - let analytics = analytics::MockAnalytics::new(&opt); + let analytics = + analytics::Analytics::new(&opt, index_scheduler.clone(), auth_controller.clone()).await; print_launch_resume(&opt, analytics.clone(), config_read_from); - run_http(index_scheduler, auth_controller, opt, log_handle, analytics).await?; + run_http(index_scheduler, auth_controller, opt, log_handle, Arc::new(analytics)).await?; Ok(()) } @@ -146,12 +139,13 @@ async fn run_http( auth_controller: Arc, opt: Opt, logs: (LogRouteHandle, LogStderrHandle), - analytics: Arc, + analytics: 
Arc, ) -> anyhow::Result<()> { let enable_dashboard = &opt.env == "development"; let opt_clone = opt.clone(); let index_scheduler = Data::from(index_scheduler); let auth_controller = Data::from(auth_controller); + let analytics = Data::from(analytics); let search_queue = SearchQueue::new( opt.experimental_search_queue_size, available_parallelism() @@ -187,11 +181,7 @@ async fn run_http( Ok(()) } -pub fn print_launch_resume( - opt: &Opt, - analytics: Arc, - config_read_from: Option, -) { +pub fn print_launch_resume(opt: &Opt, analytics: Analytics, config_read_from: Option) { let build_info = build_info::BuildInfo::from_build(); let protocol = diff --git a/meilisearch/src/routes/indexes/search.rs b/meilisearch/src/routes/indexes/search.rs index 538c46fd0..ac6e23c8f 100644 --- a/meilisearch/src/routes/indexes/search.rs +++ b/meilisearch/src/routes/indexes/search.rs @@ -238,7 +238,7 @@ pub async fn search_with_url_query( add_search_rules(&mut query.filter, search_rules); } - let mut aggregate = SearchAggregator::::from_query(&query, &req); + let mut aggregate = SearchAggregator::::from_query(&query); let index = index_scheduler.index(&index_uid)?; let features = index_scheduler.features(); @@ -281,7 +281,7 @@ pub async fn search_with_post( add_search_rules(&mut query.filter, search_rules); } - let mut aggregate = SearchAggregator::::from_query(&query, &req); + let mut aggregate = SearchAggregator::::from_query(&query); let index = index_scheduler.index(&index_uid)?; From ef77c7699b21422b4857878d072494e1bfc49d6b Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 09:06:23 +0200 Subject: [PATCH 073/111] add the required shared values between all the events and fix the timestamp --- meilisearch/src/analytics/mod.rs | 6 +- .../src/analytics/segment_analytics.rs | 75 +++++++++++++------ 2 files changed, 57 insertions(+), 24 deletions(-) diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index 91139e1dd..a3b8d6d1d 100644 --- 
a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -166,8 +166,8 @@ impl Analytics { /// The method used to publish most analytics that do not need to be batched every hours pub fn publish(&self, event: T, request: &HttpRequest) { - let Some(ref segment) = self.segment else { return }; - let user_agents = extract_user_agents(request); - let _ = segment.sender.try_send(segment_analytics::Message::new(event)); + if let Some(ref segment) = self.segment { + let _ = segment.sender.try_send(segment_analytics::Message::new(event, request)); + } } } diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 3496853ff..00a3adaaf 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -28,7 +28,6 @@ use super::{ config_user_id_path, Aggregate, AggregateMethod, DocumentDeletionKind, DocumentFetchKind, MEILISEARCH_CONFIG_PATH, }; -use crate::analytics::Analytics; use crate::option::{ default_http_addr, IndexerOpts, LogMode, MaxMemory, MaxThreads, ScheduleSnapshot, }; @@ -58,7 +57,7 @@ fn write_user_id(db_path: &Path, user_id: &InstanceUid) { const SEGMENT_API_KEY: &str = "P3FWhhEsJiEDCuEHpmcN9DHcK4hVfBvb"; -pub fn extract_user_agents(request: &HttpRequest) -> Vec { +pub fn extract_user_agents(request: &HttpRequest) -> HashSet { request .headers() .get(ANALYTICS_HEADER) @@ -77,14 +76,26 @@ pub struct Message { type_id: TypeId, // Same for the aggregate function. 
aggregator_function: fn(Box, Box) -> Option>, - event: Box, + event: Event, +} + +pub struct Event { + original: Box, + timestamp: OffsetDateTime, + user_agents: HashSet, + total: usize, } impl Message { - pub fn new(event: T) -> Self { + pub fn new(event: T, request: &HttpRequest) -> Self { Self { type_id: TypeId::of::(), - event: Box::new(event), + event: Event { + original: Box::new(event), + timestamp: OffsetDateTime::now_utc(), + user_agents: extract_user_agents(request), + total: 1, + }, aggregator_function: T::downcast_aggregate, } } @@ -400,7 +411,7 @@ pub struct Segment { user: User, opt: Opt, batcher: AutoBatcher, - events: HashMap>, + events: HashMap, } impl Segment { @@ -451,22 +462,34 @@ impl Segment { _ = interval.tick() => { self.tick(index_scheduler.clone(), auth_controller.clone()).await; }, - msg = self.inbox.recv() => { - match msg { - Some(Message { type_id, event, aggregator_function }) => { - let new_event = match self.events.remove(&type_id) { - Some(old) => (aggregator_function)(old, event).unwrap(), - None => event, - }; - self.events.insert(type_id, new_event); - }, - None => (), - } - } + Some(msg) = self.inbox.recv() => { + self.handle_msg(msg); + } } } } + fn handle_msg(&mut self, Message { type_id, aggregator_function, event }: Message) { + let new_event = match self.events.remove(&type_id) { + Some(old) => { + // The function should never fail since we retrieved the corresponding TypeId in the map. 
But in the unfortunate + // case it could happens we're going to silently ignore the error + let Some(original) = (aggregator_function)(old.original, event.original) else { + return; + }; + Event { + original, + // We always want to return the FIRST timestamp ever encountered + timestamp: old.timestamp, + user_agents: old.user_agents.union(&event.user_agents).cloned().collect(), + total: old.total.saturating_add(event.total), + } + } + None => event, + }; + self.events.insert(type_id, new_event); + } + async fn tick( &mut self, index_scheduler: Arc, @@ -503,11 +526,21 @@ impl Segment { let events = std::mem::take(&mut self.events); for (_, event) in events { + let Event { original, timestamp, user_agents, total } = event; + let name = original.event_name(); + let mut properties = original.into_event(); + if properties["user-agent"].is_null() { + properties["user-agent"] = json!(user_agents); + }; + if properties["requests"]["total_received"].is_null() { + properties["requests"]["total_received"] = total.into(); + }; + self.batcher.push(Track { user: self.user.clone(), - event: event.event_name().to_string(), - properties: event.into_event(), - timestamp: todo!(), + event: name.to_string(), + properties, + timestamp: Some(timestamp), ..Default::default() }); } From 4ee65d870eab55f0c5098aaad659aa98fbd9d500 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 09:14:34 +0200 Subject: [PATCH 074/111] remove a lot of ununsed code --- meilisearch/src/analytics/mod.rs | 4 +- .../src/analytics/segment_analytics.rs | 598 +----------------- .../src/routes/indexes/facet_search.rs | 1 - 3 files changed, 17 insertions(+), 586 deletions(-) diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index a3b8d6d1d..d08f3307c 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -22,9 +22,7 @@ pub use segment_analytics::SimilarAggregator; use crate::Opt; -use self::segment_analytics::extract_user_agents; -pub type 
MultiSearchAggregator = segment_analytics::MultiSearchAggregator; -pub type FacetSearchAggregator = segment_analytics::FacetSearchAggregator; +pub use self::segment_analytics::MultiSearchAggregator; /// A macro used to quickly define events that don't aggregate or send anything besides an empty event with its name. #[macro_export] diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 00a3adaaf..1edfa1bdd 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -5,7 +5,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::{Duration, Instant}; -use actix_web::http::header::{CONTENT_TYPE, USER_AGENT}; +use actix_web::http::header::USER_AGENT; use actix_web::HttpRequest; use byte_unit::Byte; use index_scheduler::IndexScheduler; @@ -24,21 +24,15 @@ use tokio::select; use tokio::sync::mpsc::{self, Receiver, Sender}; use uuid::Uuid; -use super::{ - config_user_id_path, Aggregate, AggregateMethod, DocumentDeletionKind, DocumentFetchKind, - MEILISEARCH_CONFIG_PATH, -}; +use super::{config_user_id_path, Aggregate, AggregateMethod, MEILISEARCH_CONFIG_PATH}; use crate::option::{ default_http_addr, IndexerOpts, LogMode, MaxMemory, MaxThreads, ScheduleSnapshot, }; -use crate::routes::indexes::documents::{DocumentEditionByFunction, UpdateDocumentsQuery}; -use crate::routes::indexes::facet_search::FacetSearchQuery; use crate::routes::{create_all_stats, Stats}; use crate::search::{ - FacetSearchResult, FederatedSearch, MatchingStrategy, SearchQuery, SearchQueryWithIndex, - SearchResult, SimilarQuery, SimilarResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, - DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, - DEFAULT_SEMANTIC_RATIO, + FederatedSearch, SearchQuery, SearchQueryWithIndex, SearchResult, SimilarQuery, SimilarResult, + DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG, + 
DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, DEFAULT_SEMANTIC_RATIO, }; use crate::{aggregate_methods, Opt}; @@ -75,6 +69,7 @@ pub struct Message { // Thus we have to send it in the message directly. type_id: TypeId, // Same for the aggregate function. + #[allow(clippy::type_complexity)] aggregator_function: fn(Box, Box) -> Option>, event: Event, } @@ -169,97 +164,6 @@ impl SegmentAnalytics { } } -/* -impl super::Analytics for SegmentAnalytics { - fn instance_uid(&self) -> Option<&InstanceUid> { - Some(&self.instance_uid) - } - - fn publish(&self, event_name: String, mut send: Value, request: Option<&HttpRequest>) { - let user_agent = request.map(extract_user_agents); - - send["user-agent"] = json!(user_agent); - let event = Track { - user: self.user.clone(), - event: event_name.clone(), - properties: send, - ..Default::default() - }; - let _ = self.sender.try_send(AnalyticsMsg::BatchMessage(event)); - } - - fn get_search(&self, aggregate: SearchAggregator) { - let _ = self.sender.try_send(AnalyticsMsg::AggregateGetSearch(aggregate)); - } - - fn post_search(&self, aggregate: SearchAggregator) { - let _ = self.sender.try_send(AnalyticsMsg::AggregatePostSearch(aggregate)); - } - - fn get_similar(&self, aggregate: SimilarAggregator) { - let _ = self.sender.try_send(AnalyticsMsg::AggregateGetSimilar(aggregate)); - } - - fn post_similar(&self, aggregate: SimilarAggregator) { - let _ = self.sender.try_send(AnalyticsMsg::AggregatePostSimilar(aggregate)); - } - - fn post_facet_search(&self, aggregate: FacetSearchAggregator) { - let _ = self.sender.try_send(AnalyticsMsg::AggregatePostFacetSearch(aggregate)); - } - - fn post_multi_search(&self, aggregate: MultiSearchAggregator) { - let _ = self.sender.try_send(AnalyticsMsg::AggregatePostMultiSearch(aggregate)); - } - - fn add_documents( - &self, - documents_query: &UpdateDocumentsQuery, - index_creation: bool, - request: &HttpRequest, - ) { - let aggregate = DocumentsAggregator::from_query(documents_query, 
index_creation, request); - let _ = self.sender.try_send(AnalyticsMsg::AggregateAddDocuments(aggregate)); - } - - fn delete_documents(&self, kind: DocumentDeletionKind, request: &HttpRequest) { - let aggregate = DocumentsDeletionAggregator::from_query(kind, request); - let _ = self.sender.try_send(AnalyticsMsg::AggregateDeleteDocuments(aggregate)); - } - - fn update_documents( - &self, - documents_query: &UpdateDocumentsQuery, - index_creation: bool, - request: &HttpRequest, - ) { - let aggregate = DocumentsAggregator::from_query(documents_query, index_creation, request); - let _ = self.sender.try_send(AnalyticsMsg::AggregateUpdateDocuments(aggregate)); - } - - fn update_documents_by_function( - &self, - documents_query: &DocumentEditionByFunction, - index_creation: bool, - request: &HttpRequest, - ) { - let aggregate = - EditDocumentsByFunctionAggregator::from_query(documents_query, index_creation, request); - let _ = self.sender.try_send(AnalyticsMsg::AggregateEditDocumentsByFunction(aggregate)); - } - - fn get_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest) { - let aggregate = DocumentsFetchAggregator::from_query(documents_query, request); - let _ = self.sender.try_send(AnalyticsMsg::AggregateGetFetchDocuments(aggregate)); - } - - fn post_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest) { - let aggregate = DocumentsFetchAggregator::from_query(documents_query, request); - let _ = self.sender.try_send(AnalyticsMsg::AggregatePostFetchDocuments(aggregate)); - } -} -*/ - /// This structure represent the `infos` field we send in the analytics. /// It's quite close to the `Opt` structure except all sensitive informations /// have been simplified to a boolean. 
@@ -536,13 +440,16 @@ impl Segment { properties["requests"]["total_received"] = total.into(); }; - self.batcher.push(Track { - user: self.user.clone(), - event: name.to_string(), - properties, - timestamp: Some(timestamp), - ..Default::default() - }); + let _ = self + .batcher + .push(Track { + user: self.user.clone(), + event: name.to_string(), + properties, + timestamp: Some(timestamp), + ..Default::default() + }) + .await; } let _ = self.batcher.flush().await; @@ -1181,479 +1088,6 @@ impl Aggregate for MultiSearchAggregator { } } -#[derive(Default)] -pub struct FacetSearchAggregator { - timestamp: Option, - - // context - user_agents: HashSet, - - // requests - total_received: usize, - total_succeeded: usize, - time_spent: BinaryHeap, - - // The set of all facetNames that were used - facet_names: HashSet, - - // As there been any other parameter than the facetName or facetQuery ones? - additional_search_parameters_provided: bool, -} - -impl FacetSearchAggregator { - #[allow(clippy::field_reassign_with_default)] - pub fn from_query(query: &FacetSearchQuery, request: &HttpRequest) -> Self { - let FacetSearchQuery { - facet_query: _, - facet_name, - vector, - q, - filter, - matching_strategy, - attributes_to_search_on, - hybrid, - ranking_score_threshold, - locales, - } = query; - - let mut ret = Self::default(); - ret.timestamp = Some(OffsetDateTime::now_utc()); - - ret.total_received = 1; - ret.user_agents = extract_user_agents(request).into_iter().collect(); - ret.facet_names = Some(facet_name.clone()).into_iter().collect(); - - ret.additional_search_parameters_provided = q.is_some() - || vector.is_some() - || filter.is_some() - || *matching_strategy != MatchingStrategy::default() - || attributes_to_search_on.is_some() - || hybrid.is_some() - || ranking_score_threshold.is_some() - || locales.is_some(); - - ret - } - - pub fn succeed(&mut self, result: &FacetSearchResult) { - let FacetSearchResult { facet_hits: _, facet_query: _, processing_time_ms } = result; - 
self.total_succeeded = self.total_succeeded.saturating_add(1); - self.time_spent.push(*processing_time_ms as usize); - } - - /// Aggregate one [FacetSearchAggregator] into another. - pub fn aggregate(&mut self, mut other: Self) { - let Self { - timestamp, - user_agents, - total_received, - total_succeeded, - ref mut time_spent, - facet_names, - additional_search_parameters_provided, - } = other; - - if self.timestamp.is_none() { - self.timestamp = timestamp; - } - - // context - for user_agent in user_agents.into_iter() { - self.user_agents.insert(user_agent); - } - - // request - self.total_received = self.total_received.saturating_add(total_received); - self.total_succeeded = self.total_succeeded.saturating_add(total_succeeded); - self.time_spent.append(time_spent); - - // facet_names - for facet_name in facet_names.into_iter() { - self.facet_names.insert(facet_name); - } - - // additional_search_parameters_provided - self.additional_search_parameters_provided |= additional_search_parameters_provided; - } - - pub fn into_event(self, user: &User, event_name: &str) -> Option { - let Self { - timestamp, - user_agents, - total_received, - total_succeeded, - time_spent, - facet_names, - additional_search_parameters_provided, - } = self; - - if total_received == 0 { - None - } else { - // the index of the 99th percentage of value - let percentile_99th = 0.99 * (total_succeeded as f64 - 1.) 
+ 1.; - // we get all the values in a sorted manner - let time_spent = time_spent.into_sorted_vec(); - // We are only interested by the slowest value of the 99th fastest results - let time_spent = time_spent.get(percentile_99th as usize); - - let properties = json!({ - "user-agent": user_agents, - "requests": { - "99th_response_time": time_spent.map(|t| format!("{:.2}", t)), - "total_succeeded": total_succeeded, - "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics - "total_received": total_received, - }, - "facets": { - "total_distinct_facet_count": facet_names.len(), - "additional_search_parameters_provided": additional_search_parameters_provided, - }, - }); - - Some(Track { - timestamp, - user: user.clone(), - event: event_name.to_string(), - properties, - ..Default::default() - }) - } - } -} - -#[derive(Default)] -pub struct DocumentsAggregator { - timestamp: Option, - - // set to true when at least one request was received - updated: bool, - - // context - user_agents: HashSet, - - content_types: HashSet, - primary_keys: HashSet, - index_creation: bool, -} - -impl DocumentsAggregator { - pub fn from_query( - documents_query: &UpdateDocumentsQuery, - index_creation: bool, - request: &HttpRequest, - ) -> Self { - let UpdateDocumentsQuery { primary_key, csv_delimiter: _ } = documents_query; - - let mut primary_keys = HashSet::new(); - if let Some(primary_key) = primary_key.clone() { - primary_keys.insert(primary_key); - } - - let mut content_types = HashSet::new(); - let content_type = request - .headers() - .get(CONTENT_TYPE) - .and_then(|s| s.to_str().ok()) - .unwrap_or("unknown") - .to_string(); - content_types.insert(content_type); - - Self { - timestamp: Some(OffsetDateTime::now_utc()), - updated: true, - user_agents: extract_user_agents(request).into_iter().collect(), - content_types, - primary_keys, - index_creation, - } - } - - /// Aggregate one [DocumentsAggregator] into another. 
- pub fn aggregate(&mut self, other: Self) { - let Self { timestamp, user_agents, primary_keys, content_types, index_creation, updated } = - other; - - if self.timestamp.is_none() { - self.timestamp = timestamp; - } - - self.updated |= updated; - // we can't create a union because there is no `into_union` method - for user_agent in user_agents { - self.user_agents.insert(user_agent); - } - for primary_key in primary_keys { - self.primary_keys.insert(primary_key); - } - for content_type in content_types { - self.content_types.insert(content_type); - } - self.index_creation |= index_creation; - } - - pub fn into_event(self, user: &User, event_name: &str) -> Option { - let Self { timestamp, user_agents, primary_keys, content_types, index_creation, updated } = - self; - - if !updated { - None - } else { - let properties = json!({ - "user-agent": user_agents, - "payload_type": content_types, - "primary_key": primary_keys, - "index_creation": index_creation, - }); - - Some(Track { - timestamp, - user: user.clone(), - event: event_name.to_string(), - properties, - ..Default::default() - }) - } - } -} - -#[derive(Default)] -pub struct EditDocumentsByFunctionAggregator { - timestamp: Option, - - // Set to true if at least one request was filtered - filtered: bool, - // Set to true if at least one request contained a context - with_context: bool, - - // context - user_agents: HashSet, - - index_creation: bool, -} - -impl EditDocumentsByFunctionAggregator { - pub fn from_query( - documents_query: &DocumentEditionByFunction, - index_creation: bool, - request: &HttpRequest, - ) -> Self { - let DocumentEditionByFunction { filter, context, function: _ } = documents_query; - - Self { - timestamp: Some(OffsetDateTime::now_utc()), - user_agents: extract_user_agents(request).into_iter().collect(), - filtered: filter.is_some(), - with_context: context.is_some(), - index_creation, - } - } - - /// Aggregate one [DocumentsAggregator] into another. 
- pub fn aggregate(&mut self, other: Self) { - let Self { timestamp, user_agents, index_creation, filtered, with_context } = other; - - if self.timestamp.is_none() { - self.timestamp = timestamp; - } - - // we can't create a union because there is no `into_union` method - for user_agent in user_agents { - self.user_agents.insert(user_agent); - } - self.index_creation |= index_creation; - self.filtered |= filtered; - self.with_context |= with_context; - } - - pub fn into_event(self, user: &User, event_name: &str) -> Option { - let Self { timestamp, user_agents, index_creation, filtered, with_context } = self; - - // if we had no timestamp it means we never encountered any events and - // thus we don't need to send this event. - let timestamp = timestamp?; - - let properties = json!({ - "user-agent": user_agents, - "filtered": filtered, - "with_context": with_context, - "index_creation": index_creation, - }); - - Some(Track { - timestamp: Some(timestamp), - user: user.clone(), - event: event_name.to_string(), - properties, - ..Default::default() - }) - } -} - -#[derive(Default, Serialize)] -pub struct DocumentsDeletionAggregator { - #[serde(skip)] - timestamp: Option, - - // context - #[serde(rename = "user-agent")] - user_agents: HashSet, - - #[serde(rename = "requests.total_received")] - total_received: usize, - per_document_id: bool, - clear_all: bool, - per_batch: bool, - per_filter: bool, -} - -impl DocumentsDeletionAggregator { - pub fn from_query(kind: DocumentDeletionKind, request: &HttpRequest) -> Self { - Self { - timestamp: Some(OffsetDateTime::now_utc()), - user_agents: extract_user_agents(request).into_iter().collect(), - total_received: 1, - per_document_id: matches!(kind, DocumentDeletionKind::PerDocumentId), - clear_all: matches!(kind, DocumentDeletionKind::ClearAll), - per_batch: matches!(kind, DocumentDeletionKind::PerBatch), - per_filter: matches!(kind, DocumentDeletionKind::PerFilter), - } - } - - /// Aggregate one [DocumentsAggregator] into 
another. - pub fn aggregate(&mut self, other: Self) { - let Self { - timestamp, - user_agents, - total_received, - per_document_id, - clear_all, - per_batch, - per_filter, - } = other; - - if self.timestamp.is_none() { - self.timestamp = timestamp; - } - - // we can't create a union because there is no `into_union` method - for user_agent in user_agents { - self.user_agents.insert(user_agent); - } - self.total_received = self.total_received.saturating_add(total_received); - self.per_document_id |= per_document_id; - self.clear_all |= clear_all; - self.per_batch |= per_batch; - self.per_filter |= per_filter; - } - - pub fn into_event(self, user: &User, event_name: &str) -> Option { - // if we had no timestamp it means we never encountered any events and - // thus we don't need to send this event. - let timestamp = self.timestamp?; - - Some(Track { - timestamp: Some(timestamp), - user: user.clone(), - event: event_name.to_string(), - properties: serde_json::to_value(self).ok()?, - ..Default::default() - }) - } -} - -#[derive(Default, Serialize)] -pub struct DocumentsFetchAggregator { - #[serde(skip)] - timestamp: Option, - - // context - #[serde(rename = "user-agent")] - user_agents: HashSet, - - #[serde(rename = "requests.total_received")] - total_received: usize, - - // a call on ../documents/:doc_id - per_document_id: bool, - // if a filter was used - per_filter: bool, - - #[serde(rename = "vector.retrieve_vectors")] - retrieve_vectors: bool, - - // pagination - #[serde(rename = "pagination.max_limit")] - max_limit: usize, - #[serde(rename = "pagination.max_offset")] - max_offset: usize, -} - -impl DocumentsFetchAggregator { - pub fn from_query(query: &DocumentFetchKind, request: &HttpRequest) -> Self { - let (limit, offset, retrieve_vectors) = match query { - DocumentFetchKind::PerDocumentId { retrieve_vectors } => (1, 0, *retrieve_vectors), - DocumentFetchKind::Normal { limit, offset, retrieve_vectors, .. 
} => { - (*limit, *offset, *retrieve_vectors) - } - }; - Self { - timestamp: Some(OffsetDateTime::now_utc()), - user_agents: extract_user_agents(request).into_iter().collect(), - total_received: 1, - per_document_id: matches!(query, DocumentFetchKind::PerDocumentId { .. }), - per_filter: matches!(query, DocumentFetchKind::Normal { with_filter, .. } if *with_filter), - max_limit: limit, - max_offset: offset, - retrieve_vectors, - } - } - - /// Aggregate one [DocumentsFetchAggregator] into another. - pub fn aggregate(&mut self, other: Self) { - let Self { - timestamp, - user_agents, - total_received, - per_document_id, - per_filter, - max_limit, - max_offset, - retrieve_vectors, - } = other; - - if self.timestamp.is_none() { - self.timestamp = timestamp; - } - for user_agent in user_agents { - self.user_agents.insert(user_agent); - } - - self.total_received = self.total_received.saturating_add(total_received); - self.per_document_id |= per_document_id; - self.per_filter |= per_filter; - - self.max_limit = self.max_limit.max(max_limit); - self.max_offset = self.max_offset.max(max_offset); - - self.retrieve_vectors |= retrieve_vectors; - } - - pub fn into_event(self, user: &User, event_name: &str) -> Option { - // if we had no timestamp it means we never encountered any events and - // thus we don't need to send this event. 
- let timestamp = self.timestamp?; - - Some(Track { - timestamp: Some(timestamp), - user: user.clone(), - event: event_name.to_string(), - properties: serde_json::to_value(self).ok()?, - ..Default::default() - }) - } -} - aggregate_methods!( SimilarPOST => "Similar POST", SimilarGET => "Similar GET", diff --git a/meilisearch/src/routes/indexes/facet_search.rs b/meilisearch/src/routes/indexes/facet_search.rs index 715eaaaa7..8e40397c7 100644 --- a/meilisearch/src/routes/indexes/facet_search.rs +++ b/meilisearch/src/routes/indexes/facet_search.rs @@ -9,7 +9,6 @@ use meilisearch_types::error::deserr_codes::*; use meilisearch_types::error::ResponseError; use meilisearch_types::index_uid::IndexUid; use meilisearch_types::locales::Locale; -use serde::Serialize; use serde_json::Value; use tracing::debug; From 0fde49640a3f76cce57414e88b6690aa90ff8523 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 09:18:25 +0200 Subject: [PATCH 075/111] make clippy happy --- meilisearch/src/main.rs | 1 - meilisearch/src/routes/indexes/settings.rs | 111 ++++++++------------- 2 files changed, 43 insertions(+), 69 deletions(-) diff --git a/meilisearch/src/main.rs b/meilisearch/src/main.rs index eebea3b6d..c0652bf1e 100644 --- a/meilisearch/src/main.rs +++ b/meilisearch/src/main.rs @@ -223,7 +223,6 @@ pub fn print_launch_resume(opt: &Opt, analytics: Analytics, config_read_from: Op eprintln!("Prototype:\t\t{:?}", prototype); } - #[cfg(all(not(debug_assertions), feature = "analytics"))] { if !opt.no_analytics { eprintln!( diff --git a/meilisearch/src/routes/indexes/settings.rs b/meilisearch/src/routes/indexes/settings.rs index f31f52dc1..745ad5c78 100644 --- a/meilisearch/src/routes/indexes/settings.rs +++ b/meilisearch/src/routes/indexes/settings.rs @@ -94,7 +94,7 @@ macro_rules! 
make_setting_route { #[allow(clippy::redundant_closure_call)] analytics.publish( - $crate::routes::indexes::settings::$analytics::new(body.as_ref()).to_settings(), + $crate::routes::indexes::settings::$analytics::new(body.as_ref()).into_settings(), &req, ); @@ -605,58 +605,33 @@ struct RankingRulesAnalytics { impl RankingRulesAnalytics { pub fn new(rr: Option<&Vec>) -> Self { RankingRulesAnalytics { - words_position: rr - .as_ref() - .map(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Words) - }) + words_position: rr.as_ref().and_then(|rr| { + rr.iter() + .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Words)) + }), + typo_position: rr.as_ref().and_then(|rr| { + rr.iter() + .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Typo)) + }), + proximity_position: rr.as_ref().and_then(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Proximity) }) - .flatten(), - - typo_position: rr - .as_ref() - .map(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Typo) - }) + }), + attribute_position: rr.as_ref().and_then(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Attribute) }) - .flatten(), - - proximity_position: rr - .as_ref() - .map(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Proximity) - }) + }), + sort_position: rr.as_ref().and_then(|rr| { + rr.iter() + .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Sort)) + }), + exactness_position: rr.as_ref().and_then(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Exactness) }) - .flatten(), - - attribute_position: rr - .as_ref() - .map(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Attribute) - }) - }) - .flatten(), - sort_position: rr - .as_ref() 
- .map(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Sort) - }) - }) - .flatten(), - exactness_position: rr - .as_ref() - .map(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Exactness) - }) - }) - .flatten(), - + }), values: rr.as_ref().map(|rr| { rr.iter() .filter(|s| { @@ -673,7 +648,7 @@ impl RankingRulesAnalytics { } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { ranking_rules: self, ..Default::default() } } } @@ -694,7 +669,7 @@ impl SearchableAttributesAnalytics { } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { searchable_attributes: self, ..Default::default() } } } @@ -715,7 +690,7 @@ impl DisplayedAttributesAnalytics { } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { displayed_attributes: self, ..Default::default() } } } @@ -734,7 +709,7 @@ impl SortableAttributesAnalytics { } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { sortable_attributes: self, ..Default::default() } } } @@ -753,7 +728,7 @@ impl FilterableAttributesAnalytics { } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { filterable_attributes: self, ..Default::default() } } } @@ -768,7 +743,7 @@ impl DistinctAttributeAnalytics { Self { set: distinct.is_some() } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { distinct_attribute: self, ..Default::default() } } } @@ -784,7 +759,7 @@ impl ProximityPrecisionAnalytics { Self { set: precision.is_some(), value: precision.cloned() } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn 
into_settings(self) -> SettingsAnalytics { SettingsAnalytics { proximity_precision: self, ..Default::default() } } } @@ -818,7 +793,7 @@ impl TypoToleranceAnalytics { .flatten(), } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { typo_tolerance: self, ..Default::default() } } } @@ -846,7 +821,7 @@ impl FacetingAnalytics { } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { faceting: self, ..Default::default() } } } @@ -861,7 +836,7 @@ impl PaginationAnalytics { Self { max_total_hits: setting.as_ref().and_then(|s| s.max_total_hits.set()) } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { pagination: self, ..Default::default() } } } @@ -876,7 +851,7 @@ impl StopWordsAnalytics { Self { total: stop_words.as_ref().map(|stop_words| stop_words.len()) } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { stop_words: self, ..Default::default() } } } @@ -891,7 +866,7 @@ impl SynonymsAnalytics { Self { total: synonyms.as_ref().map(|synonyms| synonyms.len()) } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { synonyms: self, ..Default::default() } } } @@ -960,7 +935,7 @@ impl EmbeddersAnalytics { } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { embedders: self, ..Default::default() } } } @@ -976,7 +951,7 @@ impl SearchCutoffMsAnalytics { Self { search_cutoff_ms: setting.copied() } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { search_cutoff_ms: self, ..Default::default() } } } @@ -1001,7 +976,7 @@ impl LocalesAnalytics { } } - pub fn to_settings(self) -> 
SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { locales: self, ..Default::default() } } } @@ -1016,7 +991,7 @@ impl DictionaryAnalytics { Self { total: dictionary.as_ref().map(|dictionary| dictionary.len()) } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { dictionary: self, ..Default::default() } } } @@ -1031,7 +1006,7 @@ impl SeparatorTokensAnalytics { Self { total: separator_tokens.as_ref().map(|separator_tokens| separator_tokens.len()) } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { separator_tokens: self, ..Default::default() } } } @@ -1050,7 +1025,7 @@ impl NonSeparatorTokensAnalytics { } } - pub fn to_settings(self) -> SettingsAnalytics { + pub fn into_settings(self) -> SettingsAnalytics { SettingsAnalytics { non_separator_tokens: self, ..Default::default() } } } From d9115b74f09118b3bc687f9c0853bb74469b0d87 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 09:32:54 +0200 Subject: [PATCH 076/111] move the analytics settings code to a dedicated file --- meilisearch/src/routes/indexes/mod.rs | 1 + meilisearch/src/routes/indexes/settings.rs | 634 +----------------- .../src/routes/indexes/settings_analytics.rs | 627 +++++++++++++++++ 3 files changed, 632 insertions(+), 630 deletions(-) create mode 100644 meilisearch/src/routes/indexes/settings_analytics.rs diff --git a/meilisearch/src/routes/indexes/mod.rs b/meilisearch/src/routes/indexes/mod.rs index 8972119d7..65c81a57e 100644 --- a/meilisearch/src/routes/indexes/mod.rs +++ b/meilisearch/src/routes/indexes/mod.rs @@ -29,6 +29,7 @@ pub mod documents; pub mod facet_search; pub mod search; pub mod settings; +mod settings_analytics; pub mod similar; pub fn configure(cfg: &mut web::ServiceConfig) { diff --git a/meilisearch/src/routes/indexes/settings.rs b/meilisearch/src/routes/indexes/settings.rs index 
745ad5c78..bca763a99 100644 --- a/meilisearch/src/routes/indexes/settings.rs +++ b/meilisearch/src/routes/indexes/settings.rs @@ -1,23 +1,17 @@ -use std::collections::{BTreeSet, HashSet}; - +use super::settings_analytics::*; use actix_web::web::Data; use actix_web::{web, HttpRequest, HttpResponse}; use deserr::actix_web::AwebJson; use index_scheduler::IndexScheduler; use meilisearch_types::deserr::DeserrJsonError; use meilisearch_types::error::ResponseError; -use meilisearch_types::facet_values_sort::FacetValuesSort; use meilisearch_types::index_uid::IndexUid; -use meilisearch_types::locales::Locale; use meilisearch_types::milli::update::Setting; -use meilisearch_types::settings::{ - settings, ProximityPrecisionView, RankingRuleView, SecretPolicy, Settings, Unchecked, -}; +use meilisearch_types::settings::{settings, SecretPolicy, Settings, Unchecked}; use meilisearch_types::tasks::KindWithContent; -use serde::Serialize; use tracing::debug; -use crate::analytics::{Aggregate, Analytics}; +use crate::analytics::Analytics; use crate::extractors::authentication::policies::*; use crate::extractors::authentication::GuardedData; use crate::routes::{get_task_id, is_dry_run, SummarizedTaskView}; @@ -94,7 +88,7 @@ macro_rules! 
make_setting_route { #[allow(clippy::redundant_closure_call)] analytics.publish( - $crate::routes::indexes::settings::$analytics::new(body.as_ref()).into_settings(), + $crate::routes::indexes::settings_analytics::$analytics::new(body.as_ref()).into_settings(), &req, ); @@ -410,626 +404,6 @@ generate_configure!( search_cutoff_ms ); -#[derive(Serialize, Default)] -struct SettingsAnalytics { - ranking_rules: RankingRulesAnalytics, - searchable_attributes: SearchableAttributesAnalytics, - displayed_attributes: DisplayedAttributesAnalytics, - sortable_attributes: SortableAttributesAnalytics, - filterable_attributes: FilterableAttributesAnalytics, - distinct_attribute: DistinctAttributeAnalytics, - proximity_precision: ProximityPrecisionAnalytics, - typo_tolerance: TypoToleranceAnalytics, - faceting: FacetingAnalytics, - pagination: PaginationAnalytics, - stop_words: StopWordsAnalytics, - synonyms: SynonymsAnalytics, - embedders: EmbeddersAnalytics, - search_cutoff_ms: SearchCutoffMsAnalytics, - locales: LocalesAnalytics, - dictionary: DictionaryAnalytics, - separator_tokens: SeparatorTokensAnalytics, - non_separator_tokens: NonSeparatorTokensAnalytics, -} - -impl Aggregate for SettingsAnalytics { - fn event_name(&self) -> &'static str { - "Settings Updated" - } - - fn aggregate(self: Box, other: Box) -> Box { - Box::new(Self { - ranking_rules: RankingRulesAnalytics { - words_position: self - .ranking_rules - .words_position - .or(other.ranking_rules.words_position), - typo_position: self - .ranking_rules - .typo_position - .or(other.ranking_rules.typo_position), - proximity_position: self - .ranking_rules - .proximity_position - .or(other.ranking_rules.proximity_position), - attribute_position: self - .ranking_rules - .attribute_position - .or(other.ranking_rules.attribute_position), - sort_position: self - .ranking_rules - .sort_position - .or(other.ranking_rules.sort_position), - exactness_position: self - .ranking_rules - .exactness_position - 
.or(other.ranking_rules.exactness_position), - values: self.ranking_rules.values.or(other.ranking_rules.values), - }, - searchable_attributes: SearchableAttributesAnalytics { - total: self.searchable_attributes.total.or(other.searchable_attributes.total), - with_wildcard: self - .searchable_attributes - .with_wildcard - .or(other.searchable_attributes.with_wildcard), - }, - displayed_attributes: DisplayedAttributesAnalytics { - total: self.displayed_attributes.total.or(other.displayed_attributes.total), - with_wildcard: self - .displayed_attributes - .with_wildcard - .or(other.displayed_attributes.with_wildcard), - }, - sortable_attributes: SortableAttributesAnalytics { - total: self.sortable_attributes.total.or(other.sortable_attributes.total), - has_geo: self.sortable_attributes.has_geo.or(other.sortable_attributes.has_geo), - }, - filterable_attributes: FilterableAttributesAnalytics { - total: self.filterable_attributes.total.or(other.filterable_attributes.total), - has_geo: self.filterable_attributes.has_geo.or(other.filterable_attributes.has_geo), - }, - distinct_attribute: DistinctAttributeAnalytics { - set: self.distinct_attribute.set | other.distinct_attribute.set, - }, - proximity_precision: ProximityPrecisionAnalytics { - set: self.proximity_precision.set | other.proximity_precision.set, - value: self.proximity_precision.value.or(other.proximity_precision.value), - }, - typo_tolerance: TypoToleranceAnalytics { - enabled: self.typo_tolerance.enabled.or(other.typo_tolerance.enabled), - disable_on_attributes: self - .typo_tolerance - .disable_on_attributes - .or(other.typo_tolerance.disable_on_attributes), - disable_on_words: self - .typo_tolerance - .disable_on_words - .or(other.typo_tolerance.disable_on_words), - min_word_size_for_one_typo: self - .typo_tolerance - .min_word_size_for_one_typo - .or(other.typo_tolerance.min_word_size_for_one_typo), - min_word_size_for_two_typos: self - .typo_tolerance - .min_word_size_for_two_typos - 
.or(other.typo_tolerance.min_word_size_for_two_typos), - }, - faceting: FacetingAnalytics { - max_values_per_facet: self - .faceting - .max_values_per_facet - .or(other.faceting.max_values_per_facet), - sort_facet_values_by_star_count: self - .faceting - .sort_facet_values_by_star_count - .or(other.faceting.sort_facet_values_by_star_count), - sort_facet_values_by_total: self - .faceting - .sort_facet_values_by_total - .or(other.faceting.sort_facet_values_by_total), - }, - pagination: PaginationAnalytics { - max_total_hits: self.pagination.max_total_hits.or(other.pagination.max_total_hits), - }, - stop_words: StopWordsAnalytics { - total: self.stop_words.total.or(other.stop_words.total), - }, - synonyms: SynonymsAnalytics { total: self.synonyms.total.or(other.synonyms.total) }, - embedders: EmbeddersAnalytics { - total: self.embedders.total.or(other.embedders.total), - sources: match (self.embedders.sources, other.embedders.sources) { - (None, None) => None, - (Some(sources), None) | (None, Some(sources)) => Some(sources), - (Some(this), Some(other)) => Some(this.union(&other).cloned().collect()), - }, - document_template_used: match ( - self.embedders.document_template_used, - other.embedders.document_template_used, - ) { - (None, None) => None, - (Some(used), None) | (None, Some(used)) => Some(used), - (Some(this), Some(other)) => Some(this | other), - }, - document_template_max_bytes: match ( - self.embedders.document_template_max_bytes, - other.embedders.document_template_max_bytes, - ) { - (None, None) => None, - (Some(bytes), None) | (None, Some(bytes)) => Some(bytes), - (Some(this), Some(other)) => Some(this.max(other)), - }, - binary_quantization_used: match ( - self.embedders.binary_quantization_used, - other.embedders.binary_quantization_used, - ) { - (None, None) => None, - (Some(bq), None) | (None, Some(bq)) => Some(bq), - (Some(this), Some(other)) => Some(this | other), - }, - }, - search_cutoff_ms: SearchCutoffMsAnalytics { - search_cutoff_ms: self - 
.search_cutoff_ms - .search_cutoff_ms - .or(other.search_cutoff_ms.search_cutoff_ms), - }, - locales: LocalesAnalytics { locales: self.locales.locales.or(other.locales.locales) }, - dictionary: DictionaryAnalytics { - total: self.dictionary.total.or(other.dictionary.total), - }, - separator_tokens: SeparatorTokensAnalytics { - total: self.separator_tokens.total.or(other.non_separator_tokens.total), - }, - non_separator_tokens: NonSeparatorTokensAnalytics { - total: self.non_separator_tokens.total.or(other.non_separator_tokens.total), - }, - }) - } - - fn into_event(self: Box) -> serde_json::Value { - serde_json::to_value(*self).unwrap_or_default() - } -} - -#[derive(Serialize, Default)] -struct RankingRulesAnalytics { - words_position: Option, - typo_position: Option, - proximity_position: Option, - attribute_position: Option, - sort_position: Option, - exactness_position: Option, - values: Option, -} - -impl RankingRulesAnalytics { - pub fn new(rr: Option<&Vec>) -> Self { - RankingRulesAnalytics { - words_position: rr.as_ref().and_then(|rr| { - rr.iter() - .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Words)) - }), - typo_position: rr.as_ref().and_then(|rr| { - rr.iter() - .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Typo)) - }), - proximity_position: rr.as_ref().and_then(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Proximity) - }) - }), - attribute_position: rr.as_ref().and_then(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Attribute) - }) - }), - sort_position: rr.as_ref().and_then(|rr| { - rr.iter() - .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Sort)) - }), - exactness_position: rr.as_ref().and_then(|rr| { - rr.iter().position(|s| { - matches!(s, meilisearch_types::settings::RankingRuleView::Exactness) - }) - }), - values: rr.as_ref().map(|rr| { - rr.iter() - .filter(|s| { - matches!( - 
s, - meilisearch_types::settings::RankingRuleView::Asc(_) - | meilisearch_types::settings::RankingRuleView::Desc(_) - ) - }) - .map(|x| x.to_string()) - .collect::>() - .join(", ") - }), - } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { ranking_rules: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct SearchableAttributesAnalytics { - total: Option, - with_wildcard: Option, -} - -impl SearchableAttributesAnalytics { - pub fn new(setting: Option<&Vec>) -> Self { - Self { - total: setting.as_ref().map(|searchable| searchable.len()), - with_wildcard: setting - .as_ref() - .map(|searchable| searchable.iter().any(|searchable| searchable == "*")), - } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { searchable_attributes: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct DisplayedAttributesAnalytics { - total: Option, - with_wildcard: Option, -} - -impl DisplayedAttributesAnalytics { - pub fn new(displayed: Option<&Vec>) -> Self { - Self { - total: displayed.as_ref().map(|displayed| displayed.len()), - with_wildcard: displayed - .as_ref() - .map(|displayed| displayed.iter().any(|displayed| displayed == "*")), - } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { displayed_attributes: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct SortableAttributesAnalytics { - total: Option, - has_geo: Option, -} - -impl SortableAttributesAnalytics { - pub fn new(setting: Option<&std::collections::BTreeSet>) -> Self { - Self { - total: setting.as_ref().map(|sort| sort.len()), - has_geo: setting.as_ref().map(|sort| sort.contains("_geo")), - } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { sortable_attributes: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct FilterableAttributesAnalytics { - total: Option, - has_geo: Option, -} - -impl 
FilterableAttributesAnalytics { - pub fn new(setting: Option<&std::collections::BTreeSet>) -> Self { - Self { - total: setting.as_ref().map(|filter| filter.len()), - has_geo: setting.as_ref().map(|filter| filter.contains("_geo")), - } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { filterable_attributes: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct DistinctAttributeAnalytics { - set: bool, -} - -impl DistinctAttributeAnalytics { - pub fn new(distinct: Option<&String>) -> Self { - Self { set: distinct.is_some() } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { distinct_attribute: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct ProximityPrecisionAnalytics { - set: bool, - value: Option, -} - -impl ProximityPrecisionAnalytics { - pub fn new(precision: Option<&meilisearch_types::settings::ProximityPrecisionView>) -> Self { - Self { set: precision.is_some(), value: precision.cloned() } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { proximity_precision: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct TypoToleranceAnalytics { - enabled: Option, - disable_on_attributes: Option, - disable_on_words: Option, - min_word_size_for_one_typo: Option, - min_word_size_for_two_typos: Option, -} - -impl TypoToleranceAnalytics { - pub fn new(setting: Option<&meilisearch_types::settings::TypoSettings>) -> Self { - Self { - enabled: setting.as_ref().map(|s| !matches!(s.enabled, Setting::Set(false))), - disable_on_attributes: setting - .as_ref() - .and_then(|s| s.disable_on_attributes.as_ref().set().map(|m| !m.is_empty())), - disable_on_words: setting - .as_ref() - .and_then(|s| s.disable_on_words.as_ref().set().map(|m| !m.is_empty())), - min_word_size_for_one_typo: setting - .as_ref() - .and_then(|s| s.min_word_size_for_typos.as_ref().set().map(|s| s.one_typo.set())) - .flatten(), - 
min_word_size_for_two_typos: setting - .as_ref() - .and_then(|s| s.min_word_size_for_typos.as_ref().set().map(|s| s.two_typos.set())) - .flatten(), - } - } - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { typo_tolerance: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct FacetingAnalytics { - max_values_per_facet: Option, - sort_facet_values_by_star_count: Option, - sort_facet_values_by_total: Option, -} - -impl FacetingAnalytics { - pub fn new(setting: Option<&meilisearch_types::settings::FacetingSettings>) -> Self { - Self { - max_values_per_facet: setting.as_ref().and_then(|s| s.max_values_per_facet.set()), - sort_facet_values_by_star_count: setting.as_ref().and_then(|s| { - s.sort_facet_values_by - .as_ref() - .set() - .map(|s| s.iter().any(|(k, v)| k == "*" && v == &FacetValuesSort::Count)) - }), - sort_facet_values_by_total: setting - .as_ref() - .and_then(|s| s.sort_facet_values_by.as_ref().set().map(|s| s.len())), - } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { faceting: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct PaginationAnalytics { - max_total_hits: Option, -} - -impl PaginationAnalytics { - pub fn new(setting: Option<&meilisearch_types::settings::PaginationSettings>) -> Self { - Self { max_total_hits: setting.as_ref().and_then(|s| s.max_total_hits.set()) } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { pagination: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct StopWordsAnalytics { - total: Option, -} - -impl StopWordsAnalytics { - pub fn new(stop_words: Option<&BTreeSet>) -> Self { - Self { total: stop_words.as_ref().map(|stop_words| stop_words.len()) } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { stop_words: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct SynonymsAnalytics { - total: Option, -} - -impl 
SynonymsAnalytics { - pub fn new(synonyms: Option<&std::collections::BTreeMap>>) -> Self { - Self { total: synonyms.as_ref().map(|synonyms| synonyms.len()) } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { synonyms: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct EmbeddersAnalytics { - // last - total: Option, - // Merge the sources - sources: Option>, - // |= - document_template_used: Option, - // max - document_template_max_bytes: Option, - // |= - binary_quantization_used: Option, -} - -impl EmbeddersAnalytics { - pub fn new( - setting: Option< - &std::collections::BTreeMap< - String, - Setting, - >, - >, - ) -> Self { - let mut sources = std::collections::HashSet::new(); - - if let Some(s) = &setting { - for source in s - .values() - .filter_map(|config| config.clone().set()) - .filter_map(|config| config.source.set()) - { - use meilisearch_types::milli::vector::settings::EmbedderSource; - match source { - EmbedderSource::OpenAi => sources.insert("openAi".to_string()), - EmbedderSource::HuggingFace => sources.insert("huggingFace".to_string()), - EmbedderSource::UserProvided => sources.insert("userProvided".to_string()), - EmbedderSource::Ollama => sources.insert("ollama".to_string()), - EmbedderSource::Rest => sources.insert("rest".to_string()), - }; - } - }; - - Self { - total: setting.as_ref().map(|s| s.len()), - sources: Some(sources), - document_template_used: setting.as_ref().map(|map| { - map.values() - .filter_map(|config| config.clone().set()) - .any(|config| config.document_template.set().is_some()) - }), - document_template_max_bytes: setting.as_ref().and_then(|map| { - map.values() - .filter_map(|config| config.clone().set()) - .filter_map(|config| config.document_template_max_bytes.set()) - .max() - }), - binary_quantization_used: setting.as_ref().map(|map| { - map.values() - .filter_map(|config| config.clone().set()) - .any(|config| config.binary_quantized.set().is_some()) - }), - } - } - 
- pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { embedders: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -#[serde(transparent)] -struct SearchCutoffMsAnalytics { - search_cutoff_ms: Option, -} - -impl SearchCutoffMsAnalytics { - pub fn new(setting: Option<&u64>) -> Self { - Self { search_cutoff_ms: setting.copied() } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { search_cutoff_ms: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -#[serde(transparent)] -struct LocalesAnalytics { - locales: Option>, -} - -impl LocalesAnalytics { - pub fn new( - rules: Option<&Vec>, - ) -> Self { - LocalesAnalytics { - locales: rules.as_ref().map(|rules| { - rules - .iter() - .flat_map(|rule| rule.locales.iter().cloned()) - .collect::>() - }), - } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { locales: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct DictionaryAnalytics { - total: Option, -} - -impl DictionaryAnalytics { - pub fn new(dictionary: Option<&std::collections::BTreeSet>) -> Self { - Self { total: dictionary.as_ref().map(|dictionary| dictionary.len()) } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { dictionary: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct SeparatorTokensAnalytics { - total: Option, -} - -impl SeparatorTokensAnalytics { - pub fn new(separator_tokens: Option<&std::collections::BTreeSet>) -> Self { - Self { total: separator_tokens.as_ref().map(|separator_tokens| separator_tokens.len()) } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { separator_tokens: self, ..Default::default() } - } -} - -#[derive(Serialize, Default)] -struct NonSeparatorTokensAnalytics { - total: Option, -} - -impl NonSeparatorTokensAnalytics { - pub fn new(non_separator_tokens: Option<&std::collections::BTreeSet>) -> Self { - Self { - 
total: non_separator_tokens - .as_ref() - .map(|non_separator_tokens| non_separator_tokens.len()), - } - } - - pub fn into_settings(self) -> SettingsAnalytics { - SettingsAnalytics { non_separator_tokens: self, ..Default::default() } - } -} - pub async fn update_all( index_scheduler: GuardedData, Data>, index_uid: web::Path, diff --git a/meilisearch/src/routes/indexes/settings_analytics.rs b/meilisearch/src/routes/indexes/settings_analytics.rs new file mode 100644 index 000000000..636ef3c57 --- /dev/null +++ b/meilisearch/src/routes/indexes/settings_analytics.rs @@ -0,0 +1,627 @@ +//! All the structures used to make the analytics on the settings works. +//! The signatures of the `new` functions are not very rust idiomatic because they must match the types received +//! through the sub-settings route directly without any manipulation. +//! This is why we often use a `Option<&Vec<_>>` instead of a `Option<&[_]>`. + +use meilisearch_types::locales::{Locale, LocalizedAttributesRuleView}; +use meilisearch_types::milli::update::Setting; +use meilisearch_types::milli::vector::settings::EmbeddingSettings; +use meilisearch_types::settings::{ + FacetingSettings, PaginationSettings, ProximityPrecisionView, TypoSettings, +}; +use meilisearch_types::{facet_values_sort::FacetValuesSort, settings::RankingRuleView}; +use serde::Serialize; +use std::collections::{BTreeMap, BTreeSet, HashSet}; + +use crate::analytics::Aggregate; + +#[derive(Serialize, Default)] +pub struct SettingsAnalytics { + pub ranking_rules: RankingRulesAnalytics, + pub searchable_attributes: SearchableAttributesAnalytics, + pub displayed_attributes: DisplayedAttributesAnalytics, + pub sortable_attributes: SortableAttributesAnalytics, + pub filterable_attributes: FilterableAttributesAnalytics, + pub distinct_attribute: DistinctAttributeAnalytics, + pub proximity_precision: ProximityPrecisionAnalytics, + pub typo_tolerance: TypoToleranceAnalytics, + pub faceting: FacetingAnalytics, + pub pagination: 
PaginationAnalytics, + pub stop_words: StopWordsAnalytics, + pub synonyms: SynonymsAnalytics, + pub embedders: EmbeddersAnalytics, + pub search_cutoff_ms: SearchCutoffMsAnalytics, + pub locales: LocalesAnalytics, + pub dictionary: DictionaryAnalytics, + pub separator_tokens: SeparatorTokensAnalytics, + pub non_separator_tokens: NonSeparatorTokensAnalytics, +} + +impl Aggregate for SettingsAnalytics { + fn event_name(&self) -> &'static str { + "Settings Updated" + } + + fn aggregate(self: Box, other: Box) -> Box { + Box::new(Self { + ranking_rules: RankingRulesAnalytics { + words_position: self + .ranking_rules + .words_position + .or(other.ranking_rules.words_position), + typo_position: self + .ranking_rules + .typo_position + .or(other.ranking_rules.typo_position), + proximity_position: self + .ranking_rules + .proximity_position + .or(other.ranking_rules.proximity_position), + attribute_position: self + .ranking_rules + .attribute_position + .or(other.ranking_rules.attribute_position), + sort_position: self + .ranking_rules + .sort_position + .or(other.ranking_rules.sort_position), + exactness_position: self + .ranking_rules + .exactness_position + .or(other.ranking_rules.exactness_position), + values: self.ranking_rules.values.or(other.ranking_rules.values), + }, + searchable_attributes: SearchableAttributesAnalytics { + total: self.searchable_attributes.total.or(other.searchable_attributes.total), + with_wildcard: self + .searchable_attributes + .with_wildcard + .or(other.searchable_attributes.with_wildcard), + }, + displayed_attributes: DisplayedAttributesAnalytics { + total: self.displayed_attributes.total.or(other.displayed_attributes.total), + with_wildcard: self + .displayed_attributes + .with_wildcard + .or(other.displayed_attributes.with_wildcard), + }, + sortable_attributes: SortableAttributesAnalytics { + total: self.sortable_attributes.total.or(other.sortable_attributes.total), + has_geo: 
self.sortable_attributes.has_geo.or(other.sortable_attributes.has_geo), + }, + filterable_attributes: FilterableAttributesAnalytics { + total: self.filterable_attributes.total.or(other.filterable_attributes.total), + has_geo: self.filterable_attributes.has_geo.or(other.filterable_attributes.has_geo), + }, + distinct_attribute: DistinctAttributeAnalytics { + set: self.distinct_attribute.set | other.distinct_attribute.set, + }, + proximity_precision: ProximityPrecisionAnalytics { + set: self.proximity_precision.set | other.proximity_precision.set, + value: self.proximity_precision.value.or(other.proximity_precision.value), + }, + typo_tolerance: TypoToleranceAnalytics { + enabled: self.typo_tolerance.enabled.or(other.typo_tolerance.enabled), + disable_on_attributes: self + .typo_tolerance + .disable_on_attributes + .or(other.typo_tolerance.disable_on_attributes), + disable_on_words: self + .typo_tolerance + .disable_on_words + .or(other.typo_tolerance.disable_on_words), + min_word_size_for_one_typo: self + .typo_tolerance + .min_word_size_for_one_typo + .or(other.typo_tolerance.min_word_size_for_one_typo), + min_word_size_for_two_typos: self + .typo_tolerance + .min_word_size_for_two_typos + .or(other.typo_tolerance.min_word_size_for_two_typos), + }, + faceting: FacetingAnalytics { + max_values_per_facet: self + .faceting + .max_values_per_facet + .or(other.faceting.max_values_per_facet), + sort_facet_values_by_star_count: self + .faceting + .sort_facet_values_by_star_count + .or(other.faceting.sort_facet_values_by_star_count), + sort_facet_values_by_total: self + .faceting + .sort_facet_values_by_total + .or(other.faceting.sort_facet_values_by_total), + }, + pagination: PaginationAnalytics { + max_total_hits: self.pagination.max_total_hits.or(other.pagination.max_total_hits), + }, + stop_words: StopWordsAnalytics { + total: self.stop_words.total.or(other.stop_words.total), + }, + synonyms: SynonymsAnalytics { total: self.synonyms.total.or(other.synonyms.total) }, + 
embedders: EmbeddersAnalytics { + total: self.embedders.total.or(other.embedders.total), + sources: match (self.embedders.sources, other.embedders.sources) { + (None, None) => None, + (Some(sources), None) | (None, Some(sources)) => Some(sources), + (Some(this), Some(other)) => Some(this.union(&other).cloned().collect()), + }, + document_template_used: match ( + self.embedders.document_template_used, + other.embedders.document_template_used, + ) { + (None, None) => None, + (Some(used), None) | (None, Some(used)) => Some(used), + (Some(this), Some(other)) => Some(this | other), + }, + document_template_max_bytes: match ( + self.embedders.document_template_max_bytes, + other.embedders.document_template_max_bytes, + ) { + (None, None) => None, + (Some(bytes), None) | (None, Some(bytes)) => Some(bytes), + (Some(this), Some(other)) => Some(this.max(other)), + }, + binary_quantization_used: match ( + self.embedders.binary_quantization_used, + other.embedders.binary_quantization_used, + ) { + (None, None) => None, + (Some(bq), None) | (None, Some(bq)) => Some(bq), + (Some(this), Some(other)) => Some(this | other), + }, + }, + search_cutoff_ms: SearchCutoffMsAnalytics { + search_cutoff_ms: self + .search_cutoff_ms + .search_cutoff_ms + .or(other.search_cutoff_ms.search_cutoff_ms), + }, + locales: LocalesAnalytics { locales: self.locales.locales.or(other.locales.locales) }, + dictionary: DictionaryAnalytics { + total: self.dictionary.total.or(other.dictionary.total), + }, + separator_tokens: SeparatorTokensAnalytics { + total: self.separator_tokens.total.or(other.separator_tokens.total), + }, + non_separator_tokens: NonSeparatorTokensAnalytics { + total: self.non_separator_tokens.total.or(other.non_separator_tokens.total), + }, + }) + } + + fn into_event(self: Box) -> serde_json::Value { + serde_json::to_value(*self).unwrap_or_default() + } +} + +#[derive(Serialize, Default)] +pub struct RankingRulesAnalytics { + pub words_position: Option, + pub typo_position: Option,
+ pub proximity_position: Option, + pub attribute_position: Option, + pub sort_position: Option, + pub exactness_position: Option, + pub values: Option, +} + +impl RankingRulesAnalytics { + pub fn new(rr: Option<&Vec>) -> Self { + RankingRulesAnalytics { + words_position: rr.as_ref().and_then(|rr| { + rr.iter() + .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Words)) + }), + typo_position: rr.as_ref().and_then(|rr| { + rr.iter() + .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Typo)) + }), + proximity_position: rr.as_ref().and_then(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Proximity) + }) + }), + attribute_position: rr.as_ref().and_then(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Attribute) + }) + }), + sort_position: rr.as_ref().and_then(|rr| { + rr.iter() + .position(|s| matches!(s, meilisearch_types::settings::RankingRuleView::Sort)) + }), + exactness_position: rr.as_ref().and_then(|rr| { + rr.iter().position(|s| { + matches!(s, meilisearch_types::settings::RankingRuleView::Exactness) + }) + }), + values: rr.as_ref().map(|rr| { + rr.iter() + .filter(|s| { + matches!( + s, + meilisearch_types::settings::RankingRuleView::Asc(_) + | meilisearch_types::settings::RankingRuleView::Desc(_) + ) + }) + .map(|x| x.to_string()) + .collect::>() + .join(", ") + }), + } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { ranking_rules: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct SearchableAttributesAnalytics { + pub total: Option, + pub with_wildcard: Option, +} + +impl SearchableAttributesAnalytics { + pub fn new(setting: Option<&Vec>) -> Self { + Self { + total: setting.as_ref().map(|searchable| searchable.len()), + with_wildcard: setting + .as_ref() + .map(|searchable| searchable.iter().any(|searchable| searchable == "*")), + } + } + + pub fn 
into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { searchable_attributes: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct DisplayedAttributesAnalytics { + pub total: Option, + pub with_wildcard: Option, +} + +impl DisplayedAttributesAnalytics { + pub fn new(displayed: Option<&Vec>) -> Self { + Self { + total: displayed.as_ref().map(|displayed| displayed.len()), + with_wildcard: displayed + .as_ref() + .map(|displayed| displayed.iter().any(|displayed| displayed == "*")), + } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { displayed_attributes: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct SortableAttributesAnalytics { + pub total: Option, + pub has_geo: Option, +} + +impl SortableAttributesAnalytics { + pub fn new(setting: Option<&BTreeSet>) -> Self { + Self { + total: setting.as_ref().map(|sort| sort.len()), + has_geo: setting.as_ref().map(|sort| sort.contains("_geo")), + } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { sortable_attributes: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct FilterableAttributesAnalytics { + pub total: Option, + pub has_geo: Option, +} + +impl FilterableAttributesAnalytics { + pub fn new(setting: Option<&BTreeSet>) -> Self { + Self { + total: setting.as_ref().map(|filter| filter.len()), + has_geo: setting.as_ref().map(|filter| filter.contains("_geo")), + } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { filterable_attributes: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct DistinctAttributeAnalytics { + pub set: bool, +} + +impl DistinctAttributeAnalytics { + pub fn new(distinct: Option<&String>) -> Self { + Self { set: distinct.is_some() } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { distinct_attribute: self, ..Default::default() } + } +} + 
+#[derive(Serialize, Default)] +pub struct ProximityPrecisionAnalytics { + pub set: bool, + pub value: Option, +} + +impl ProximityPrecisionAnalytics { + pub fn new(precision: Option<&ProximityPrecisionView>) -> Self { + Self { set: precision.is_some(), value: precision.cloned() } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { proximity_precision: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct TypoToleranceAnalytics { + pub enabled: Option, + pub disable_on_attributes: Option, + pub disable_on_words: Option, + pub min_word_size_for_one_typo: Option, + pub min_word_size_for_two_typos: Option, +} + +impl TypoToleranceAnalytics { + pub fn new(setting: Option<&TypoSettings>) -> Self { + Self { + enabled: setting.as_ref().map(|s| !matches!(s.enabled, Setting::Set(false))), + disable_on_attributes: setting + .as_ref() + .and_then(|s| s.disable_on_attributes.as_ref().set().map(|m| !m.is_empty())), + disable_on_words: setting + .as_ref() + .and_then(|s| s.disable_on_words.as_ref().set().map(|m| !m.is_empty())), + min_word_size_for_one_typo: setting + .as_ref() + .and_then(|s| s.min_word_size_for_typos.as_ref().set().map(|s| s.one_typo.set())) + .flatten(), + min_word_size_for_two_typos: setting + .as_ref() + .and_then(|s| s.min_word_size_for_typos.as_ref().set().map(|s| s.two_typos.set())) + .flatten(), + } + } + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { typo_tolerance: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct FacetingAnalytics { + pub max_values_per_facet: Option, + pub sort_facet_values_by_star_count: Option, + pub sort_facet_values_by_total: Option, +} + +impl FacetingAnalytics { + pub fn new(setting: Option<&FacetingSettings>) -> Self { + Self { + max_values_per_facet: setting.as_ref().and_then(|s| s.max_values_per_facet.set()), + sort_facet_values_by_star_count: setting.as_ref().and_then(|s| { + s.sort_facet_values_by + .as_ref() + 
.set() + .map(|s| s.iter().any(|(k, v)| k == "*" && v == &FacetValuesSort::Count)) + }), + sort_facet_values_by_total: setting + .as_ref() + .and_then(|s| s.sort_facet_values_by.as_ref().set().map(|s| s.len())), + } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { faceting: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct PaginationAnalytics { + pub max_total_hits: Option, +} + +impl PaginationAnalytics { + pub fn new(setting: Option<&PaginationSettings>) -> Self { + Self { max_total_hits: setting.as_ref().and_then(|s| s.max_total_hits.set()) } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { pagination: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct StopWordsAnalytics { + pub total: Option, +} + +impl StopWordsAnalytics { + pub fn new(stop_words: Option<&BTreeSet>) -> Self { + Self { total: stop_words.as_ref().map(|stop_words| stop_words.len()) } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { stop_words: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct SynonymsAnalytics { + pub total: Option, +} + +impl SynonymsAnalytics { + pub fn new(synonyms: Option<&BTreeMap>>) -> Self { + Self { total: synonyms.as_ref().map(|synonyms| synonyms.len()) } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { synonyms: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct EmbeddersAnalytics { + // last + pub total: Option, + // Merge the sources + pub sources: Option>, + // |= + pub document_template_used: Option, + // max + pub document_template_max_bytes: Option, + // |= + pub binary_quantization_used: Option, +} + +impl EmbeddersAnalytics { + pub fn new(setting: Option<&BTreeMap>>) -> Self { + let mut sources = std::collections::HashSet::new(); + + if let Some(s) = &setting { + for source in s + .values() + .filter_map(|config| 
config.clone().set()) + .filter_map(|config| config.source.set()) + { + use meilisearch_types::milli::vector::settings::EmbedderSource; + match source { + EmbedderSource::OpenAi => sources.insert("openAi".to_string()), + EmbedderSource::HuggingFace => sources.insert("huggingFace".to_string()), + EmbedderSource::UserProvided => sources.insert("userProvided".to_string()), + EmbedderSource::Ollama => sources.insert("ollama".to_string()), + EmbedderSource::Rest => sources.insert("rest".to_string()), + }; + } + }; + + Self { + total: setting.as_ref().map(|s| s.len()), + sources: Some(sources), + document_template_used: setting.as_ref().map(|map| { + map.values() + .filter_map(|config| config.clone().set()) + .any(|config| config.document_template.set().is_some()) + }), + document_template_max_bytes: setting.as_ref().and_then(|map| { + map.values() + .filter_map(|config| config.clone().set()) + .filter_map(|config| config.document_template_max_bytes.set()) + .max() + }), + binary_quantization_used: setting.as_ref().map(|map| { + map.values() + .filter_map(|config| config.clone().set()) + .any(|config| config.binary_quantized.set().is_some()) + }), + } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { embedders: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +#[serde(transparent)] +pub struct SearchCutoffMsAnalytics { + pub search_cutoff_ms: Option, +} + +impl SearchCutoffMsAnalytics { + pub fn new(setting: Option<&u64>) -> Self { + Self { search_cutoff_ms: setting.copied() } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { search_cutoff_ms: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +#[serde(transparent)] +pub struct LocalesAnalytics { + pub locales: Option>, +} + +impl LocalesAnalytics { + pub fn new(rules: Option<&Vec>) -> Self { + LocalesAnalytics { + locales: rules.as_ref().map(|rules| { + rules + .iter() + .flat_map(|rule| rule.locales.iter().cloned()) + 
.collect::>() + }), + } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { locales: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct DictionaryAnalytics { + pub total: Option, +} + +impl DictionaryAnalytics { + pub fn new(dictionary: Option<&BTreeSet>) -> Self { + Self { total: dictionary.as_ref().map(|dictionary| dictionary.len()) } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { dictionary: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct SeparatorTokensAnalytics { + pub total: Option, +} + +impl SeparatorTokensAnalytics { + pub fn new(separator_tokens: Option<&BTreeSet>) -> Self { + Self { total: separator_tokens.as_ref().map(|separator_tokens| separator_tokens.len()) } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { separator_tokens: self, ..Default::default() } + } +} + +#[derive(Serialize, Default)] +pub struct NonSeparatorTokensAnalytics { + pub total: Option, +} + +impl NonSeparatorTokensAnalytics { + pub fn new(non_separator_tokens: Option<&BTreeSet>) -> Self { + Self { + total: non_separator_tokens + .as_ref() + .map(|non_separator_tokens| non_separator_tokens.len()), + } + } + + pub fn into_settings(self) -> SettingsAnalytics { + SettingsAnalytics { non_separator_tokens: self, ..Default::default() } + } +} From 18ac4032aa5512c96b0068d0603f4db285f81bd9 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 09:35:11 +0200 Subject: [PATCH 077/111] Remove the experimental feature seen --- meilisearch/src/routes/features.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/meilisearch/src/routes/features.rs b/meilisearch/src/routes/features.rs index 1de00717d..8bdb3ffb3 100644 --- a/meilisearch/src/routes/features.rs +++ b/meilisearch/src/routes/features.rs @@ -17,24 +17,19 @@ use crate::extractors::sequential_extractor::SeqHandler; pub fn configure(cfg: &mut web::ServiceConfig) 
{ cfg.service( web::resource("") - .route(web::get().to(SeqHandler(get_features))) + .route(web::get().to(get_features)) .route(web::patch().to(SeqHandler(patch_features))), ); } -crate::empty_analytics!(GetExperimentalFeatureAnalytics, "Experimental features Seen"); - async fn get_features( index_scheduler: GuardedData< ActionPolicy<{ actions::EXPERIMENTAL_FEATURES_GET }>, Data, >, - req: HttpRequest, - analytics: Data, ) -> HttpResponse { let features = index_scheduler.features(); - analytics.publish(GetExperimentalFeatureAnalytics::default(), &req); let features = features.runtime_features(); debug!(returns = ?features, "Get features"); HttpResponse::Ok().json(features) From 1ab6fec9030351956fd2462dc5afb3b2b317860c Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 09:49:21 +0200 Subject: [PATCH 078/111] send all experimental features in the info event including the runtime one --- .../src/analytics/segment_analytics.rs | 44 +++++++++++++------ 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 1edfa1bdd..c0c2b64d8 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -10,6 +10,7 @@ use actix_web::HttpRequest; use byte_unit::Byte; use index_scheduler::IndexScheduler; use meilisearch_auth::{AuthController, AuthFilter}; +use meilisearch_types::features::RuntimeTogglableFeatures; use meilisearch_types::locales::Locale; use meilisearch_types::InstanceUid; use once_cell::sync::Lazy; @@ -173,7 +174,9 @@ impl SegmentAnalytics { struct Infos { env: String, experimental_contains_filter: bool, + experimental_vector_store: bool, experimental_enable_metrics: bool, + experimental_edit_documents_by_function: bool, experimental_search_queue_size: usize, experimental_drop_search_after: usize, experimental_nb_searches_per_core: usize, @@ -210,8 +213,8 @@ struct Infos { ssl_tickets: bool, } -impl 
From for Infos { - fn from(options: Opt) -> Self { +impl Infos { + pub fn new(options: Opt, features: RuntimeTogglableFeatures) -> Self { // We wants to decompose this whole struct by hand to be sure we don't forget // to add analytics when we add a field in the Opt. // Thus we must not insert `..` at the end. @@ -254,8 +257,7 @@ impl From for Infos { log_level, indexer_options, config_file_path, - #[cfg(feature = "analytics")] - no_analytics: _, + no_analytics: _, } = options; let schedule_snapshot = match schedule_snapshot { @@ -266,18 +268,28 @@ impl From for Infos { let IndexerOpts { max_indexing_memory, max_indexing_threads, skip_index_budget: _ } = indexer_options; + let RuntimeTogglableFeatures { + vector_store, + metrics, + logs_route, + edit_documents_by_function, + contains_filter, + } = features; + // We're going to override every sensible information. // We consider information sensible if it contains a path, an address, or a key. Self { env, - experimental_contains_filter, - experimental_enable_metrics, + experimental_contains_filter: experimental_contains_filter | contains_filter, + experimental_vector_store: vector_store, + experimental_edit_documents_by_function: edit_documents_by_function, + experimental_enable_metrics: experimental_enable_metrics | metrics, experimental_search_queue_size, experimental_drop_search_after: experimental_drop_search_after.into(), experimental_nb_searches_per_core: experimental_nb_searches_per_core.into(), experimental_logs_mode, experimental_replication_parameters, - experimental_enable_logs_route, + experimental_enable_logs_route: experimental_enable_logs_route | logs_route, experimental_reduce_indexing_memory_usage, gpu_enabled: meilisearch_types::milli::vector::is_cuda_enabled(), db_path: db_path != PathBuf::from("./data.ms"), @@ -319,7 +331,7 @@ pub struct Segment { } impl Segment { - fn compute_traits(opt: &Opt, stats: Stats) -> Value { + fn compute_traits(opt: &Opt, stats: Stats, features: 
RuntimeTogglableFeatures) -> Value { static FIRST_START_TIMESTAMP: Lazy = Lazy::new(Instant::now); static SYSTEM: Lazy = Lazy::new(|| { let disks = Disks::new_with_refreshed_list(); @@ -347,7 +359,7 @@ impl Segment { "indexes_number": stats.indexes.len(), "documents_number": number_of_documents, }, - "infos": Infos::from(opt.clone()), + "infos": Infos::new(opt.clone(), features), }) } @@ -399,9 +411,11 @@ impl Segment { index_scheduler: Arc, auth_controller: Arc, ) { - if let Ok(stats) = - create_all_stats(index_scheduler.into(), auth_controller.into(), &AuthFilter::default()) - { + if let Ok(stats) = create_all_stats( + index_scheduler.clone().into(), + auth_controller.into(), + &AuthFilter::default(), + ) { // Replace the version number with the prototype name if any. let version = if let Some(prototype) = build_info::DescribeResult::from_build() .and_then(|describe| describe.as_prototype()) @@ -420,7 +434,11 @@ impl Segment { }, })), user: self.user.clone(), - traits: Self::compute_traits(&self.opt, stats), + traits: Self::compute_traits( + &self.opt, + stats, + index_scheduler.features().runtime_features(), + ), ..Default::default() }) .await; From fa1db6b7216fce5e9727dfacbcdccc770ef80f16 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 09:55:30 +0200 Subject: [PATCH 079/111] fix the tests --- meilisearch/src/analytics/mod.rs | 4 ++++ meilisearch/tests/common/service.rs | 5 +++-- meilisearch/tests/logs/mod.rs | 5 +++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index d08f3307c..75e8083c5 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -158,6 +158,10 @@ impl Analytics { } } + pub fn no_analytics() -> Self { + Self { segment: None } + } + pub fn instance_uid(&self) -> Option<&InstanceUid> { self.segment.as_ref().map(|segment| segment.instance_uid.as_ref()) } diff --git a/meilisearch/tests/common/service.rs 
b/meilisearch/tests/common/service.rs index 8addbacf8..c0b07c217 100644 --- a/meilisearch/tests/common/service.rs +++ b/meilisearch/tests/common/service.rs @@ -9,8 +9,9 @@ use actix_web::test; use actix_web::test::TestRequest; use actix_web::web::Data; use index_scheduler::IndexScheduler; +use meilisearch::analytics::Analytics; use meilisearch::search_queue::SearchQueue; -use meilisearch::{analytics, create_app, Opt, SubscriberForSecondLayer}; +use meilisearch::{create_app, Opt, SubscriberForSecondLayer}; use meilisearch_auth::AuthController; use tracing::level_filters::LevelFilter; use tracing_subscriber::Layer; @@ -141,7 +142,7 @@ impl Service { Data::new(search_queue), self.options.clone(), (route_layer_handle, stderr_layer_handle), - analytics::MockAnalytics::new(&self.options), + Data::new(Analytics::no_analytics()), true, )) .await diff --git a/meilisearch/tests/logs/mod.rs b/meilisearch/tests/logs/mod.rs index 9f4649dca..26482b561 100644 --- a/meilisearch/tests/logs/mod.rs +++ b/meilisearch/tests/logs/mod.rs @@ -7,8 +7,9 @@ use std::str::FromStr; use actix_web::http::header::ContentType; use actix_web::web::Data; use meili_snap::snapshot; +use meilisearch::analytics::Analytics; use meilisearch::search_queue::SearchQueue; -use meilisearch::{analytics, create_app, Opt, SubscriberForSecondLayer}; +use meilisearch::{create_app, Opt, SubscriberForSecondLayer}; use tracing::level_filters::LevelFilter; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::Layer; @@ -54,7 +55,7 @@ async fn basic_test_log_stream_route() { Data::new(search_queue), server.service.options.clone(), (route_layer_handle, stderr_layer_handle), - analytics::MockAnalytics::new(&server.service.options), + Data::new(Analytics::no_analytics()), true, )) .await; From 3a7a20c7162b728a99327eb32b012f6651e7186b Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 11:14:33 +0200 Subject: [PATCH 080/111] remove the segment feature and always import segment --- 
meilisearch/Cargo.toml | 5 ++--- meilisearch/src/analytics/mod.rs | 21 +++++++++++++++---- .../src/analytics/segment_analytics.rs | 1 - meilisearch/src/option.rs | 9 +------- meilisearch/tests/common/server.rs | 1 - 5 files changed, 20 insertions(+), 17 deletions(-) diff --git a/meilisearch/Cargo.toml b/meilisearch/Cargo.toml index 07357e724..57202f59f 100644 --- a/meilisearch/Cargo.toml +++ b/meilisearch/Cargo.toml @@ -75,7 +75,7 @@ reqwest = { version = "0.12.5", features = [ rustls = { version = "0.23.11", features = ["ring"], default-features = false } rustls-pki-types = { version = "1.7.0", features = ["alloc"] } rustls-pemfile = "2.1.2" -segment = { version = "0.2.4", optional = true } +segment = { version = "0.2.4" } serde = { version = "1.0.204", features = ["derive"] } serde_json = { version = "1.0.120", features = ["preserve_order"] } sha2 = "0.10.8" @@ -132,8 +132,7 @@ tempfile = { version = "3.10.1", optional = true } zip = { version = "2.1.3", optional = true } [features] -default = ["analytics", "meilisearch-types/all-tokenizations", "mini-dashboard"] -analytics = ["segment"] +default = ["meilisearch-types/all-tokenizations", "mini-dashboard"] mini-dashboard = [ "static-files", "anyhow", diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index 75e8083c5..67b830204 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -1,5 +1,3 @@ -#![allow(clippy::transmute_ptr_to_ref)] // mopify isn't updated with the latest version of clippy yet - pub mod segment_analytics; use std::fs; @@ -85,13 +83,19 @@ pub enum DocumentFetchKind { Normal { with_filter: bool, limit: usize, offset: usize, retrieve_vectors: bool }, } +/// To send an event to segment, your event must be able to aggregate itself with another event of the same type. pub trait Aggregate: 'static + mopa::Any + Send { + /// The name of the event that will be sent to segment. 
fn event_name(&self) -> &'static str; + /// Will be called every time an event has been used twice before segment flushed its buffer. fn aggregate(self: Box, other: Box) -> Box where Self: Sized; + /// An internal helper function, you shouldn't implement it yourself. + /// This function should always be called on the same type. If `this` and `other` + /// aren't the same type behind the function will do nothing and return `None`. fn downcast_aggregate( this: Box, other: Box, @@ -100,6 +104,7 @@ pub trait Aggregate: 'static + mopa::Any + Send { Self: Sized, { if this.is::() && other.is::() { + // Both the two following lines cannot fail, but just to be sure we don't crash, we're still avoiding unwrapping let this = this.downcast::().ok()?; let other = other.downcast::().ok()?; Some(Self::aggregate(this, other)) @@ -108,18 +113,26 @@ pub trait Aggregate: 'static + mopa::Any + Send { } } + /// Converts your structure to the final event that'll be sent to segment. fn into_event(self: Box) -> serde_json::Value; } mopafy!(Aggregate); -/// Helper trait to define multiple aggregate with the same content but a different name. -/// Commonly used when you must aggregate a search with POST or with GET for example. +/// Helper trait to define multiple aggregates with the same content but a different name. +/// Commonly used when you must aggregate a search with POST or with GET, for example. pub trait AggregateMethod: 'static + Default + Send { fn event_name() -> &'static str; } /// A macro used to quickly define multiple aggregate method with their name +/// Usage: +/// ```rust +/// aggregate_methods!( +/// SearchGET => "Documents Searched GET", +/// SearchPOST => "Documents Searched POST", +/// ); +/// ``` #[macro_export] macro_rules! 
aggregate_methods { ($method:ident => $event_name:literal) => { diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index c0c2b64d8..10927f49b 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -695,7 +695,6 @@ impl SearchAggregator { aggregate_methods!( SearchGET => "Documents Searched GET", SearchPOST => "Documents Searched POST", - ); impl Aggregate for SearchAggregator { diff --git a/meilisearch/src/option.rs b/meilisearch/src/option.rs index 02dc660a4..7e87a5a2c 100644 --- a/meilisearch/src/option.rs +++ b/meilisearch/src/option.rs @@ -29,7 +29,6 @@ const MEILI_MASTER_KEY: &str = "MEILI_MASTER_KEY"; const MEILI_ENV: &str = "MEILI_ENV"; const MEILI_TASK_WEBHOOK_URL: &str = "MEILI_TASK_WEBHOOK_URL"; const MEILI_TASK_WEBHOOK_AUTHORIZATION_HEADER: &str = "MEILI_TASK_WEBHOOK_AUTHORIZATION_HEADER"; -#[cfg(feature = "analytics")] const MEILI_NO_ANALYTICS: &str = "MEILI_NO_ANALYTICS"; const MEILI_HTTP_PAYLOAD_SIZE_LIMIT: &str = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT"; const MEILI_SSL_CERT_PATH: &str = "MEILI_SSL_CERT_PATH"; @@ -210,7 +209,6 @@ pub struct Opt { /// Meilisearch automatically collects data from all instances that do not opt out using this flag. /// All gathered data is used solely for the purpose of improving Meilisearch, and can be deleted /// at any time. - #[cfg(feature = "analytics")] #[serde(default)] // we can't send true #[clap(long, env = MEILI_NO_ANALYTICS)] pub no_analytics: bool, @@ -425,7 +423,6 @@ pub struct Opt { impl Opt { /// Whether analytics should be enabled or not. 
- #[cfg(all(not(debug_assertions), feature = "analytics"))] pub fn analytics(&self) -> bool { !self.no_analytics } @@ -505,7 +502,6 @@ impl Opt { ignore_missing_dump: _, ignore_dump_if_db_exists: _, config_file_path: _, - #[cfg(feature = "analytics")] no_analytics, experimental_contains_filter, experimental_enable_metrics, @@ -533,10 +529,7 @@ impl Opt { ); } - #[cfg(feature = "analytics")] - { - export_to_env_if_not_present(MEILI_NO_ANALYTICS, no_analytics.to_string()); - } + export_to_env_if_not_present(MEILI_NO_ANALYTICS, no_analytics.to_string()); export_to_env_if_not_present( MEILI_HTTP_PAYLOAD_SIZE_LIMIT, http_payload_size_limit.to_string(), diff --git a/meilisearch/tests/common/server.rs b/meilisearch/tests/common/server.rs index 6d331ebbc..92f181398 100644 --- a/meilisearch/tests/common/server.rs +++ b/meilisearch/tests/common/server.rs @@ -381,7 +381,6 @@ pub fn default_settings(dir: impl AsRef) -> Opt { db_path: dir.as_ref().join("db"), dump_dir: dir.as_ref().join("dumps"), env: "development".to_owned(), - #[cfg(feature = "analytics")] no_analytics: true, max_index_size: Byte::from_u64_with_unit(100, Unit::MiB).unwrap(), max_task_db_size: Byte::from_u64_with_unit(1, Unit::GiB).unwrap(), From 89e2d2b2b9b83a44e2a2af8e2d13020be72c1260 Mon Sep 17 00:00:00 2001 From: Tamo Date: Thu, 17 Oct 2024 13:55:49 +0200 Subject: [PATCH 081/111] fix the doctest --- meilisearch/src/analytics/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index 67b830204..48ac13fc0 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -128,6 +128,8 @@ pub trait AggregateMethod: 'static + Default + Send { /// A macro used to quickly define multiple aggregate method with their name /// Usage: /// ```rust +/// use meilisearch::aggregate_methods; +/// /// aggregate_methods!( /// SearchGET => "Documents Searched GET", /// SearchPOST => "Documents Searched POST", From 
e51e6f902a13525610c4d0a81125c7292da3de36 Mon Sep 17 00:00:00 2001 From: "F. Levi" <55688616+flevi29@users.noreply.github.com> Date: Sat, 19 Oct 2024 13:42:02 +0300 Subject: [PATCH 082/111] Highlight partially cropped matches too --- milli/src/search/new/matches/match.rs | 2 +- .../src/search/new/matches/matching_words.rs | 25 +++-- milli/src/search/new/matches/mod.rs | 94 ++++++++++--------- 3 files changed, 67 insertions(+), 54 deletions(-) diff --git a/milli/src/search/new/matches/match.rs b/milli/src/search/new/matches/match.rs index cc08b006c..2eef4d5a6 100644 --- a/milli/src/search/new/matches/match.rs +++ b/milli/src/search/new/matches/match.rs @@ -18,7 +18,7 @@ pub enum MatchPosition { #[derive(Clone, Debug)] pub struct Match { - pub match_len: usize, + pub char_count: usize, // ids of the query words that matches. pub ids: Vec, pub position: MatchPosition, diff --git a/milli/src/search/new/matches/matching_words.rs b/milli/src/search/new/matches/matching_words.rs index e4d2785ca..1f30a17ad 100644 --- a/milli/src/search/new/matches/matching_words.rs +++ b/milli/src/search/new/matches/matching_words.rs @@ -86,14 +86,17 @@ impl MatchingWords { continue; }; let prefix_length = char_index + c.len_utf8(); - let char_len = token.original_lengths(prefix_length).0; + let (char_count, byte_len) = token.original_lengths(prefix_length); let ids = &located_words.positions; - return Some(MatchType::Full { char_len, ids }); + return Some(MatchType::Full { ids, char_count, byte_len }); // else we exact match the token. } else if token.lemma() == word { - let char_len = token.char_end - token.char_start; let ids = &located_words.positions; - return Some(MatchType::Full { char_len, ids }); + return Some(MatchType::Full { + char_count: token.char_end - token.char_start, + byte_len: token.byte_end - token.byte_start, + ids, + }); } } } @@ -149,7 +152,7 @@ pub type WordId = u16; /// In these cases we need to match consecutively several tokens to consider that the match is full. 
#[derive(Debug, PartialEq)] pub enum MatchType<'a> { - Full { char_len: usize, ids: &'a RangeInclusive }, + Full { char_count: usize, byte_len: usize, ids: &'a RangeInclusive }, Partial(PartialMatch<'a>), } @@ -183,7 +186,11 @@ impl<'a> PartialMatch<'a> { // if there is no remaining word to match in the phrase and the current token is matching, // return a Full match. } else if is_matching { - Some(MatchType::Full { char_len: token.char_end - token.char_start, ids }) + Some(MatchType::Full { + char_count: token.char_end - token.char_start, + byte_len: token.byte_end - token.byte_start, + ids, + }) // if the current token doesn't match, return None to break the match sequence. } else { None @@ -270,7 +277,7 @@ pub(crate) mod tests { ..Default::default() }) .next(), - Some(MatchType::Full { char_len: 5, ids: &(0..=0) }) + Some(MatchType::Full { char_count: 5, byte_len: 5, ids: &(0..=0) }) ); assert_eq!( matching_words @@ -294,7 +301,7 @@ pub(crate) mod tests { ..Default::default() }) .next(), - Some(MatchType::Full { char_len: 5, ids: &(2..=2) }) + Some(MatchType::Full { char_count: 5, byte_len: 5, ids: &(2..=2) }) ); assert_eq!( matching_words @@ -306,7 +313,7 @@ pub(crate) mod tests { ..Default::default() }) .next(), - Some(MatchType::Full { char_len: 5, ids: &(2..=2) }) + Some(MatchType::Full { char_count: 5, byte_len: 5, ids: &(2..=2) }) ); assert_eq!( matching_words diff --git a/milli/src/search/new/matches/mod.rs b/milli/src/search/new/matches/mod.rs index ac0fb7e7b..80e3ec7b2 100644 --- a/milli/src/search/new/matches/mod.rs +++ b/milli/src/search/new/matches/mod.rs @@ -10,7 +10,10 @@ use matching_words::{MatchType, PartialMatch}; use r#match::{Match, MatchPosition}; use serde::Serialize; use simple_token_kind::SimpleTokenKind; -use std::borrow::Cow; +use std::{ + borrow::Cow, + cmp::{max, min}, +}; const DEFAULT_CROP_MARKER: &str = "…"; const DEFAULT_HIGHLIGHT_PREFIX: &str = ""; @@ -139,7 +142,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { 
Some(MatchType::Full { ids, .. }) => { // save the token that closes the partial match as a match. matches.push(Match { - match_len: word.char_end - *first_word_char_start, + char_count: word.char_end - *first_word_char_start, ids: ids.clone().collect(), position: MatchPosition::Phrase { word_positions: [first_word_position, word_position], @@ -182,10 +185,10 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { match match_type { // we match, we save the current token as a match, // then we continue the rest of the tokens. - MatchType::Full { char_len, ids } => { + MatchType::Full { ids, char_count, .. } => { let ids: Vec<_> = ids.clone().collect(); matches.push(Match { - match_len: char_len, + char_count, ids, position: MatchPosition::Word { word_position, token_position }, }); @@ -224,19 +227,15 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { .iter() .map(|m| MatchBounds { start: tokens[m.get_first_token_pos()].byte_start, - length: m.match_len, + // TODO: Why is this in chars, while start is in bytes? + length: m.char_count, }) .collect(), } } /// Returns the bounds in byte index of the crop window. - fn crop_bounds( - &self, - tokens: &[Token<'_>], - matches: &[Match], - crop_size: usize, - ) -> (usize, usize) { + fn crop_bounds(&self, tokens: &[Token<'_>], matches: &[Match], crop_size: usize) -> [usize; 2] { let ( mut remaining_words, is_iterating_forward, @@ -371,7 +370,7 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { let crop_byte_start = before_tokens.next().map_or(0, |t| t.byte_end); let crop_byte_end = after_tokens.next().map_or(self.text.len(), |t| t.byte_start); - (crop_byte_start, crop_byte_end) + [crop_byte_start, crop_byte_end] } // Returns the formatted version of the original text. @@ -382,78 +381,87 @@ impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> { } else { match &self.matches { Some((tokens, matches)) => { - // If the text has to be cropped, - // crop around the best interval. 
- let (byte_start, byte_end) = match format_options.crop { + // If the text has to be cropped, crop around the best interval. + let [crop_byte_start, crop_byte_end] = match format_options.crop { Some(crop_size) if crop_size > 0 => { self.crop_bounds(tokens, matches, crop_size) } - _ => (0, self.text.len()), + _ => [0, self.text.len()], }; let mut formatted = Vec::new(); // push crop marker if it's not the start of the text. - if byte_start > 0 && !self.crop_marker.is_empty() { + if crop_byte_start > 0 && !self.crop_marker.is_empty() { formatted.push(self.crop_marker); } - let mut byte_index = byte_start; + let mut byte_index = crop_byte_start; if format_options.highlight { // insert highlight markers around matches. for m in matches { - let (current_byte_start, current_byte_end) = match m.position { + let [m_byte_start, m_byte_end] = match m.position { MatchPosition::Word { token_position, .. } => { let token = &tokens[token_position]; - (&token.byte_start, &token.byte_end) + [&token.byte_start, &token.byte_end] } MatchPosition::Phrase { token_positions: [ftp, ltp], .. } => { - (&tokens[ftp].byte_start, &tokens[ltp].byte_end) + [&tokens[ftp].byte_start, &tokens[ltp].byte_end] } }; - // skip matches out of the crop window. - if *current_byte_start < byte_start || *current_byte_end > byte_end { + // skip matches out of the crop window + if *m_byte_end < crop_byte_start || *m_byte_start > crop_byte_end { continue; } - if byte_index < *current_byte_start { - formatted.push(&self.text[byte_index..*current_byte_start]); + // adjust start and end to the crop window size + let [m_byte_start, m_byte_end] = [ + max(m_byte_start, &crop_byte_start), + min(m_byte_end, &crop_byte_end), + ]; + + // push text that is positioned before our matches + if byte_index < *m_byte_start { + formatted.push(&self.text[byte_index..*m_byte_start]); } - let highlight_byte_index = self.text[*current_byte_start..] 
- .char_indices() - .enumerate() - .find(|(i, _)| *i == m.match_len) - .map_or(*current_byte_end, |(_, (i, _))| i + *current_byte_start); - formatted.push(self.highlight_prefix); - formatted.push(&self.text[*current_byte_start..highlight_byte_index]); + + // TODO: This is additional work done, charabia::token::Token byte_len + // should already get us the original byte length, however, that doesn't work as + // it's supposed to, investigate why + let highlight_byte_index = self.text[*m_byte_start..] + .char_indices() + .nth(m.char_count) + .map_or(*m_byte_end, |(i, _)| min(i + *m_byte_start, *m_byte_end)); + formatted.push(&self.text[*m_byte_start..highlight_byte_index]); + formatted.push(self.highlight_suffix); // if it's a prefix highlight, we put the end of the word after the highlight marker. - if highlight_byte_index < *current_byte_end { - formatted.push(&self.text[highlight_byte_index..*current_byte_end]); + if highlight_byte_index < *m_byte_end { + formatted.push(&self.text[highlight_byte_index..*m_byte_end]); } - byte_index = *current_byte_end; + byte_index = *m_byte_end; } } // push the rest of the text between last match and the end of crop. - if byte_index < byte_end { - formatted.push(&self.text[byte_index..byte_end]); + if byte_index < crop_byte_end { + formatted.push(&self.text[byte_index..crop_byte_end]); } // push crop marker if it's not the end of the text. - if byte_end < self.text.len() && !self.crop_marker.is_empty() { + if crop_byte_end < self.text.len() && !self.crop_marker.is_empty() { formatted.push(self.crop_marker); } if formatted.len() == 1 { // avoid concatenating if there is already 1 slice. 
- Cow::Borrowed(&self.text[byte_start..byte_end]) + Cow::Borrowed(&self.text[crop_byte_start..crop_byte_end]) } else { Cow::Owned(formatted.concat()) } @@ -825,8 +833,7 @@ mod tests { let mut matcher = builder.build(text, None); insta::assert_snapshot!( matcher.format(format_options), - // @TODO: Should probably highlight it all, even if it didn't fit the whole phrase - @"The groundbreaking invention had the power to split the world…" + @"The groundbreaking invention had the power to split the world…" ); let builder = MatcherBuilder::new_test( @@ -837,7 +844,7 @@ mod tests { let mut matcher = builder.build(text, None); insta::assert_snapshot!( matcher.format(format_options), - // @TODO: Should probably include end of string in this case? + // TODO: Should include exclamation mark without crop markers @"…between those who embraced progress and those who resisted change…" ); @@ -860,8 +867,7 @@ mod tests { let mut matcher = builder.build(text, None); insta::assert_snapshot!( matcher.format(format_options), - // @TODO: "invention" should be highlighted as well - @"…invention had the power to split the world between those…" + @"…invention had the power to split the world between those…" ); } From c94679bde6993f91418e4113852ce9c667a198f8 Mon Sep 17 00:00:00 2001 From: Tamo Date: Sun, 20 Oct 2024 17:24:12 +0200 Subject: [PATCH 083/111] apply review comments --- meilisearch/src/routes/indexes/documents.rs | 56 +++++++++++++-------- 1 file changed, 34 insertions(+), 22 deletions(-) diff --git a/meilisearch/src/routes/indexes/documents.rs b/meilisearch/src/routes/indexes/documents.rs index 854fa5b69..60014bae4 100644 --- a/meilisearch/src/routes/indexes/documents.rs +++ b/meilisearch/src/routes/indexes/documents.rs @@ -107,11 +107,8 @@ aggregate_methods!( DocumentsPOST => "Documents Fetched POST", ); -#[derive(Default, Serialize)] +#[derive(Serialize)] pub struct DocumentsFetchAggregator { - #[serde(rename = "requests.total_received")] - total_received: usize, - // a call 
on ../documents/:doc_id per_document_id: bool, // if a filter was used @@ -145,7 +142,6 @@ impl DocumentsFetchAggregator { }; Self { - total_received: 1, per_document_id: matches!(query, DocumentFetchKind::PerDocumentId { .. }), per_filter: matches!(query, DocumentFetchKind::Normal { with_filter, .. } if *with_filter), max_limit: limit, @@ -164,7 +160,6 @@ impl Aggregate for DocumentsFetchAggregator { fn aggregate(self: Box, other: Box) -> Box { Box::new(Self { - total_received: self.total_received.saturating_add(other.total_received), per_document_id: self.per_document_id | other.per_document_id, per_filter: self.per_filter | other.per_filter, retrieve_vectors: self.retrieve_vectors | other.retrieve_vectors, @@ -199,7 +194,11 @@ pub async fn get_document( analytics.publish( DocumentsFetchAggregator:: { retrieve_vectors: param_retrieve_vectors.0, - ..Default::default() + per_document_id: true, + per_filter: false, + max_limit: 0, + max_offset: 0, + marker: PhantomData, }, &req, ); @@ -211,10 +210,8 @@ pub async fn get_document( Ok(HttpResponse::Ok().json(document)) } -#[derive(Default, Serialize)] +#[derive(Serialize)] pub struct DocumentsDeletionAggregator { - #[serde(rename = "requests.total_received")] - total_received: usize, per_document_id: bool, clear_all: bool, per_batch: bool, @@ -228,7 +225,6 @@ impl Aggregate for DocumentsDeletionAggregator { fn aggregate(self: Box, other: Box) -> Box { Box::new(Self { - total_received: self.total_received.saturating_add(other.total_received), per_document_id: self.per_document_id | other.per_document_id, clear_all: self.clear_all | other.clear_all, per_batch: self.per_batch | other.per_batch, @@ -253,9 +249,10 @@ pub async fn delete_document( analytics.publish( DocumentsDeletionAggregator { - total_received: 1, per_document_id: true, - ..Default::default() + clear_all: false, + per_batch: false, + per_filter: false, }, &req, ); @@ -316,12 +313,12 @@ pub async fn documents_by_query_post( analytics.publish( 
DocumentsFetchAggregator:: { - total_received: 1, per_filter: body.filter.is_some(), retrieve_vectors: body.retrieve_vectors, max_limit: body.limit, max_offset: body.offset, - ..Default::default() + per_document_id: false, + marker: PhantomData, }, &req, ); @@ -358,12 +355,12 @@ pub async fn get_documents( analytics.publish( DocumentsFetchAggregator:: { - total_received: 1, per_filter: query.filter.is_some(), retrieve_vectors: query.retrieve_vectors, max_limit: query.limit, max_offset: query.offset, - ..Default::default() + per_document_id: false, + marker: PhantomData, }, &req, ); @@ -426,7 +423,7 @@ aggregate_methods!( Updated => "Documents Updated", ); -#[derive(Default, Serialize)] +#[derive(Serialize)] pub struct DocumentsAggregator { payload_types: HashSet, primary_key: HashSet, @@ -718,7 +715,12 @@ pub async fn delete_documents_batch( let index_uid = IndexUid::try_from(index_uid.into_inner())?; analytics.publish( - DocumentsDeletionAggregator { total_received: 1, per_batch: true, ..Default::default() }, + DocumentsDeletionAggregator { + per_batch: true, + per_document_id: false, + clear_all: false, + per_filter: false, + }, &req, ); @@ -761,7 +763,12 @@ pub async fn delete_documents_by_filter( let filter = body.into_inner().filter; analytics.publish( - DocumentsDeletionAggregator { total_received: 1, per_filter: true, ..Default::default() }, + DocumentsDeletionAggregator { + per_filter: true, + per_document_id: false, + clear_all: false, + per_batch: false, + }, &req, ); @@ -793,7 +800,7 @@ pub struct DocumentEditionByFunction { pub function: String, } -#[derive(Default, Serialize)] +#[derive(Serialize)] struct EditDocumentsByFunctionAggregator { // Set to true if at least one request was filtered filtered: bool, @@ -899,7 +906,12 @@ pub async fn clear_all_documents( ) -> Result { let index_uid = IndexUid::try_from(index_uid.into_inner())?; analytics.publish( - DocumentsDeletionAggregator { total_received: 1, clear_all: true, ..Default::default() }, + 
DocumentsDeletionAggregator { + clear_all: true, + per_document_id: false, + per_batch: false, + per_filter: false, + }, &req, ); From 73b57228967dffe4a3da7214f2f6bc3ebb15cf5c Mon Sep 17 00:00:00 2001 From: Tamo Date: Sun, 20 Oct 2024 17:31:21 +0200 Subject: [PATCH 084/111] rename the other parameter of the aggregate method to new to avoid confusion --- meilisearch/src/analytics/mod.rs | 12 +-- .../src/analytics/segment_analytics.rs | 26 +++--- meilisearch/src/routes/features.rs | 12 +-- meilisearch/src/routes/indexes/documents.rs | 38 ++++---- .../src/routes/indexes/facet_search.rs | 12 +-- meilisearch/src/routes/indexes/mod.rs | 12 +-- .../src/routes/indexes/settings_analytics.rs | 86 +++++++++---------- meilisearch/src/routes/swap_indexes.rs | 4 +- meilisearch/src/routes/tasks.rs | 24 +++--- 9 files changed, 108 insertions(+), 118 deletions(-) diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index 48ac13fc0..27203ea71 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -89,7 +89,7 @@ pub trait Aggregate: 'static + mopa::Any + Send { fn event_name(&self) -> &'static str; /// Will be called every time an event has been used twice before segment flushed its buffer. - fn aggregate(self: Box, other: Box) -> Box + fn aggregate(self: Box, new: Box) -> Box where Self: Sized; @@ -97,16 +97,16 @@ pub trait Aggregate: 'static + mopa::Any + Send { /// This function should always be called on the same type. If `this` and `other` /// aren't the same type behind the function will do nothing and return `None`. 
fn downcast_aggregate( - this: Box, - other: Box, + old: Box, + new: Box, ) -> Option> where Self: Sized, { - if this.is::() && other.is::() { + if old.is::() && new.is::() { // Both the two following lines cannot fail, but just to be sure we don't crash, we're still avoiding unwrapping - let this = this.downcast::().ok()?; - let other = other.downcast::().ok()?; + let this = old.downcast::().ok()?; + let other = new.downcast::().ok()?; Some(Self::aggregate(this, other)) } else { None diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 10927f49b..328a3a048 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -702,7 +702,7 @@ impl Aggregate for SearchAggregator { Method::event_name() } - fn aggregate(mut self: Box, other: Box) -> Box { + fn aggregate(mut self: Box, new: Box) -> Box { let Self { total_received, total_succeeded, @@ -743,7 +743,7 @@ impl Aggregate for SearchAggregator { ranking_score_threshold, mut locales, marker: _, - } = *other; + } = *new; // request self.total_received = self.total_received.saturating_add(total_received); @@ -1038,22 +1038,22 @@ impl Aggregate for MultiSearchAggregator { } /// Aggregate one [MultiSearchAggregator] into another. - fn aggregate(self: Box, other: Box) -> Box { + fn aggregate(self: Box, new: Box) -> Box { // write the aggregate in a way that will cause a compilation error if a field is added. // get ownership of self, replacing it by a default value. 
let this = *self; - let total_received = this.total_received.saturating_add(other.total_received); - let total_succeeded = this.total_succeeded.saturating_add(other.total_succeeded); + let total_received = this.total_received.saturating_add(new.total_received); + let total_succeeded = this.total_succeeded.saturating_add(new.total_succeeded); let total_distinct_index_count = - this.total_distinct_index_count.saturating_add(other.total_distinct_index_count); - let total_single_index = this.total_single_index.saturating_add(other.total_single_index); - let total_search_count = this.total_search_count.saturating_add(other.total_search_count); - let show_ranking_score = this.show_ranking_score || other.show_ranking_score; + this.total_distinct_index_count.saturating_add(new.total_distinct_index_count); + let total_single_index = this.total_single_index.saturating_add(new.total_single_index); + let total_search_count = this.total_search_count.saturating_add(new.total_search_count); + let show_ranking_score = this.show_ranking_score || new.show_ranking_score; let show_ranking_score_details = - this.show_ranking_score_details || other.show_ranking_score_details; - let use_federation = this.use_federation || other.use_federation; + this.show_ranking_score_details || new.show_ranking_score_details; + let use_federation = this.use_federation || new.use_federation; Box::new(Self { total_received, @@ -1215,7 +1215,7 @@ impl Aggregate for SimilarAggregator { } /// Aggregate one [SimilarAggregator] into another. 
- fn aggregate(mut self: Box, other: Box) -> Box { + fn aggregate(mut self: Box, new: Box) -> Box { let Self { total_received, total_succeeded, @@ -1233,7 +1233,7 @@ impl Aggregate for SimilarAggregator { ranking_score_threshold, retrieve_vectors, marker: _, - } = *other; + } = *new; // request self.total_received = self.total_received.saturating_add(total_received); diff --git a/meilisearch/src/routes/features.rs b/meilisearch/src/routes/features.rs index 8bdb3ffb3..5d93adc02 100644 --- a/meilisearch/src/routes/features.rs +++ b/meilisearch/src/routes/features.rs @@ -64,13 +64,13 @@ impl Aggregate for PatchExperimentalFeatureAnalytics { "Experimental features Updated" } - fn aggregate(self: Box, other: Box) -> Box { + fn aggregate(self: Box, new: Box) -> Box { Box::new(Self { - vector_store: other.vector_store, - metrics: other.metrics, - logs_route: other.logs_route, - edit_documents_by_function: other.edit_documents_by_function, - contains_filter: other.contains_filter, + vector_store: new.vector_store, + metrics: new.metrics, + logs_route: new.logs_route, + edit_documents_by_function: new.edit_documents_by_function, + contains_filter: new.contains_filter, }) } diff --git a/meilisearch/src/routes/indexes/documents.rs b/meilisearch/src/routes/indexes/documents.rs index 60014bae4..47f73ef42 100644 --- a/meilisearch/src/routes/indexes/documents.rs +++ b/meilisearch/src/routes/indexes/documents.rs @@ -158,13 +158,13 @@ impl Aggregate for DocumentsFetchAggregator { Method::event_name() } - fn aggregate(self: Box, other: Box) -> Box { + fn aggregate(self: Box, new: Box) -> Box { Box::new(Self { - per_document_id: self.per_document_id | other.per_document_id, - per_filter: self.per_filter | other.per_filter, - retrieve_vectors: self.retrieve_vectors | other.retrieve_vectors, - max_limit: self.max_limit.max(other.max_limit), - max_offset: self.max_offset.max(other.max_offset), + per_document_id: self.per_document_id | new.per_document_id, + per_filter: self.per_filter | 
new.per_filter, + retrieve_vectors: self.retrieve_vectors | new.retrieve_vectors, + max_limit: self.max_limit.max(new.max_limit), + max_offset: self.max_offset.max(new.max_offset), marker: PhantomData, }) } @@ -223,12 +223,12 @@ impl Aggregate for DocumentsDeletionAggregator { "Documents Deleted" } - fn aggregate(self: Box, other: Box) -> Box { + fn aggregate(self: Box, new: Box) -> Box { Box::new(Self { - per_document_id: self.per_document_id | other.per_document_id, - clear_all: self.clear_all | other.clear_all, - per_batch: self.per_batch | other.per_batch, - per_filter: self.per_filter | other.per_filter, + per_document_id: self.per_document_id | new.per_document_id, + clear_all: self.clear_all | new.clear_all, + per_batch: self.per_batch | new.per_batch, + per_filter: self.per_filter | new.per_filter, }) } @@ -437,11 +437,11 @@ impl Aggregate for DocumentsAggregator { Method::event_name() } - fn aggregate(self: Box, other: Box) -> Box { + fn aggregate(self: Box, new: Box) -> Box { Box::new(Self { - payload_types: self.payload_types.union(&other.payload_types).cloned().collect(), - primary_key: self.primary_key.union(&other.primary_key).cloned().collect(), - index_creation: self.index_creation | other.index_creation, + payload_types: self.payload_types.union(&new.payload_types).cloned().collect(), + primary_key: self.primary_key.union(&new.primary_key).cloned().collect(), + index_creation: self.index_creation | new.index_creation, method: PhantomData, }) } @@ -815,11 +815,11 @@ impl Aggregate for EditDocumentsByFunctionAggregator { "Documents Edited By Function" } - fn aggregate(self: Box, other: Box) -> Box { + fn aggregate(self: Box, new: Box) -> Box { Box::new(Self { - filtered: self.filtered | other.filtered, - with_context: self.with_context | other.with_context, - index_creation: self.index_creation | other.index_creation, + filtered: self.filtered | new.filtered, + with_context: self.with_context | new.with_context, + index_creation: self.index_creation 
| new.index_creation, }) } diff --git a/meilisearch/src/routes/indexes/facet_search.rs b/meilisearch/src/routes/indexes/facet_search.rs index 8e40397c7..99a4a4f28 100644 --- a/meilisearch/src/routes/indexes/facet_search.rs +++ b/meilisearch/src/routes/indexes/facet_search.rs @@ -113,18 +113,18 @@ impl Aggregate for FacetSearchAggregator { "Facet Searched POST" } - fn aggregate(mut self: Box, other: Box) -> Box { - for time in other.time_spent { + fn aggregate(mut self: Box, new: Box) -> Box { + for time in new.time_spent { self.time_spent.push(time); } Box::new(Self { - total_received: self.total_received.saturating_add(other.total_received), - total_succeeded: self.total_succeeded.saturating_add(other.total_succeeded), + total_received: self.total_received.saturating_add(new.total_received), + total_succeeded: self.total_succeeded.saturating_add(new.total_succeeded), time_spent: self.time_spent, - facet_names: self.facet_names.union(&other.facet_names).cloned().collect(), + facet_names: self.facet_names.union(&new.facet_names).cloned().collect(), additional_search_parameters_provided: self.additional_search_parameters_provided - | other.additional_search_parameters_provided, + | new.additional_search_parameters_provided, }) } diff --git a/meilisearch/src/routes/indexes/mod.rs b/meilisearch/src/routes/indexes/mod.rs index 65c81a57e..c8183186d 100644 --- a/meilisearch/src/routes/indexes/mod.rs +++ b/meilisearch/src/routes/indexes/mod.rs @@ -134,10 +134,8 @@ impl Aggregate for IndexCreatedAggregate { "Index Created" } - fn aggregate(self: Box, other: Box) -> Box { - Box::new(Self { - primary_key: self.primary_key.union(&other.primary_key).cloned().collect(), - }) + fn aggregate(self: Box, new: Box) -> Box { + Box::new(Self { primary_key: self.primary_key.union(&new.primary_key).cloned().collect() }) } fn into_event(self: Box) -> serde_json::Value { @@ -225,10 +223,8 @@ impl Aggregate for IndexUpdatedAggregate { "Index Updated" } - fn aggregate(self: Box, other: Box) 
-> Box { - Box::new(Self { - primary_key: self.primary_key.union(&other.primary_key).cloned().collect(), - }) + fn aggregate(self: Box, new: Box) -> Box { + Box::new(Self { primary_key: self.primary_key.union(&new.primary_key).cloned().collect() }) } fn into_event(self: Box) -> serde_json::Value { diff --git a/meilisearch/src/routes/indexes/settings_analytics.rs b/meilisearch/src/routes/indexes/settings_analytics.rs index 636ef3c57..e7d44fa20 100644 --- a/meilisearch/src/routes/indexes/settings_analytics.rs +++ b/meilisearch/src/routes/indexes/settings_analytics.rs @@ -42,114 +42,108 @@ impl Aggregate for SettingsAnalytics { "Settings Updated" } - fn aggregate(self: Box, other: Box) -> Box { + fn aggregate(self: Box, new: Box) -> Box { Box::new(Self { ranking_rules: RankingRulesAnalytics { words_position: self .ranking_rules .words_position - .or(other.ranking_rules.words_position), - typo_position: self - .ranking_rules - .typo_position - .or(other.ranking_rules.typo_position), + .or(new.ranking_rules.words_position), + typo_position: self.ranking_rules.typo_position.or(new.ranking_rules.typo_position), proximity_position: self .ranking_rules .proximity_position - .or(other.ranking_rules.proximity_position), + .or(new.ranking_rules.proximity_position), attribute_position: self .ranking_rules .attribute_position - .or(other.ranking_rules.attribute_position), - sort_position: self - .ranking_rules - .sort_position - .or(other.ranking_rules.sort_position), + .or(new.ranking_rules.attribute_position), + sort_position: self.ranking_rules.sort_position.or(new.ranking_rules.sort_position), exactness_position: self .ranking_rules .exactness_position - .or(other.ranking_rules.exactness_position), - values: self.ranking_rules.values.or(other.ranking_rules.values), + .or(new.ranking_rules.exactness_position), + values: self.ranking_rules.values.or(new.ranking_rules.values), }, searchable_attributes: SearchableAttributesAnalytics { - total: 
self.searchable_attributes.total.or(other.searchable_attributes.total), + total: self.searchable_attributes.total.or(new.searchable_attributes.total), with_wildcard: self .searchable_attributes .with_wildcard - .or(other.searchable_attributes.with_wildcard), + .or(new.searchable_attributes.with_wildcard), }, displayed_attributes: DisplayedAttributesAnalytics { - total: self.displayed_attributes.total.or(other.displayed_attributes.total), + total: self.displayed_attributes.total.or(new.displayed_attributes.total), with_wildcard: self .displayed_attributes .with_wildcard - .or(other.displayed_attributes.with_wildcard), + .or(new.displayed_attributes.with_wildcard), }, sortable_attributes: SortableAttributesAnalytics { - total: self.sortable_attributes.total.or(other.sortable_attributes.total), - has_geo: self.sortable_attributes.has_geo.or(other.sortable_attributes.has_geo), + total: self.sortable_attributes.total.or(new.sortable_attributes.total), + has_geo: self.sortable_attributes.has_geo.or(new.sortable_attributes.has_geo), }, filterable_attributes: FilterableAttributesAnalytics { - total: self.filterable_attributes.total.or(other.filterable_attributes.total), - has_geo: self.filterable_attributes.has_geo.or(other.filterable_attributes.has_geo), + total: self.filterable_attributes.total.or(new.filterable_attributes.total), + has_geo: self.filterable_attributes.has_geo.or(new.filterable_attributes.has_geo), }, distinct_attribute: DistinctAttributeAnalytics { - set: self.distinct_attribute.set | other.distinct_attribute.set, + set: self.distinct_attribute.set | new.distinct_attribute.set, }, proximity_precision: ProximityPrecisionAnalytics { - set: self.proximity_precision.set | other.proximity_precision.set, - value: self.proximity_precision.value.or(other.proximity_precision.value), + set: self.proximity_precision.set | new.proximity_precision.set, + value: self.proximity_precision.value.or(new.proximity_precision.value), }, typo_tolerance: TypoToleranceAnalytics 
{ - enabled: self.typo_tolerance.enabled.or(other.typo_tolerance.enabled), + enabled: self.typo_tolerance.enabled.or(new.typo_tolerance.enabled), disable_on_attributes: self .typo_tolerance .disable_on_attributes - .or(other.typo_tolerance.disable_on_attributes), + .or(new.typo_tolerance.disable_on_attributes), disable_on_words: self .typo_tolerance .disable_on_words - .or(other.typo_tolerance.disable_on_words), + .or(new.typo_tolerance.disable_on_words), min_word_size_for_one_typo: self .typo_tolerance .min_word_size_for_one_typo - .or(other.typo_tolerance.min_word_size_for_one_typo), + .or(new.typo_tolerance.min_word_size_for_one_typo), min_word_size_for_two_typos: self .typo_tolerance .min_word_size_for_two_typos - .or(other.typo_tolerance.min_word_size_for_two_typos), + .or(new.typo_tolerance.min_word_size_for_two_typos), }, faceting: FacetingAnalytics { max_values_per_facet: self .faceting .max_values_per_facet - .or(other.faceting.max_values_per_facet), + .or(new.faceting.max_values_per_facet), sort_facet_values_by_star_count: self .faceting .sort_facet_values_by_star_count - .or(other.faceting.sort_facet_values_by_star_count), + .or(new.faceting.sort_facet_values_by_star_count), sort_facet_values_by_total: self .faceting .sort_facet_values_by_total - .or(other.faceting.sort_facet_values_by_total), + .or(new.faceting.sort_facet_values_by_total), }, pagination: PaginationAnalytics { - max_total_hits: self.pagination.max_total_hits.or(other.pagination.max_total_hits), + max_total_hits: self.pagination.max_total_hits.or(new.pagination.max_total_hits), }, stop_words: StopWordsAnalytics { - total: self.stop_words.total.or(other.stop_words.total), + total: self.stop_words.total.or(new.stop_words.total), }, - synonyms: SynonymsAnalytics { total: self.synonyms.total.or(other.synonyms.total) }, + synonyms: SynonymsAnalytics { total: self.synonyms.total.or(new.synonyms.total) }, embedders: EmbeddersAnalytics { - total: self.embedders.total.or(other.embedders.total), - 
sources: match (self.embedders.sources, other.embedders.sources) { + total: self.embedders.total.or(new.embedders.total), + sources: match (self.embedders.sources, new.embedders.sources) { (None, None) => None, (Some(sources), None) | (None, Some(sources)) => Some(sources), (Some(this), Some(other)) => Some(this.union(&other).cloned().collect()), }, document_template_used: match ( self.embedders.document_template_used, - other.embedders.document_template_used, + new.embedders.document_template_used, ) { (None, None) => None, (Some(used), None) | (None, Some(used)) => Some(used), @@ -157,7 +151,7 @@ impl Aggregate for SettingsAnalytics { }, document_template_max_bytes: match ( self.embedders.document_template_max_bytes, - other.embedders.document_template_max_bytes, + new.embedders.document_template_max_bytes, ) { (None, None) => None, (Some(bytes), None) | (None, Some(bytes)) => Some(bytes), @@ -165,7 +159,7 @@ impl Aggregate for SettingsAnalytics { }, binary_quantization_used: match ( self.embedders.binary_quantization_used, - other.embedders.binary_quantization_used, + new.embedders.binary_quantization_used, ) { (None, None) => None, (Some(bq), None) | (None, Some(bq)) => Some(bq), @@ -176,17 +170,17 @@ impl Aggregate for SettingsAnalytics { search_cutoff_ms: self .search_cutoff_ms .search_cutoff_ms - .or(other.search_cutoff_ms.search_cutoff_ms), + .or(new.search_cutoff_ms.search_cutoff_ms), }, - locales: LocalesAnalytics { locales: self.locales.locales.or(other.locales.locales) }, + locales: LocalesAnalytics { locales: self.locales.locales.or(new.locales.locales) }, dictionary: DictionaryAnalytics { - total: self.dictionary.total.or(other.dictionary.total), + total: self.dictionary.total.or(new.dictionary.total), }, separator_tokens: SeparatorTokensAnalytics { - total: self.separator_tokens.total.or(other.non_separator_tokens.total), + total: self.separator_tokens.total.or(new.non_separator_tokens.total), }, non_separator_tokens: NonSeparatorTokensAnalytics { - 
total: self.non_separator_tokens.total.or(other.non_separator_tokens.total), + total: self.non_separator_tokens.total.or(new.non_separator_tokens.total), }, }) } diff --git a/meilisearch/src/routes/swap_indexes.rs b/meilisearch/src/routes/swap_indexes.rs index f7d8f4eff..9b8b67e63 100644 --- a/meilisearch/src/routes/swap_indexes.rs +++ b/meilisearch/src/routes/swap_indexes.rs @@ -39,9 +39,9 @@ impl Aggregate for IndexSwappedAnalytics { "Indexes Swapped" } - fn aggregate(self: Box, other: Box) -> Box { + fn aggregate(self: Box, new: Box) -> Box { Box::new(Self { - swap_operation_number: self.swap_operation_number.max(other.swap_operation_number), + swap_operation_number: self.swap_operation_number.max(new.swap_operation_number), }) } diff --git a/meilisearch/src/routes/tasks.rs b/meilisearch/src/routes/tasks.rs index ff4aee998..712b8ecde 100644 --- a/meilisearch/src/routes/tasks.rs +++ b/meilisearch/src/routes/tasks.rs @@ -185,25 +185,25 @@ impl Aggregate for TaskFilterAnalytics, other: Box) -> Box { + fn aggregate(self: Box, new: Box) -> Box { Box::new(Self { - filtered_by_uid: self.filtered_by_uid | other.filtered_by_uid, - filtered_by_index_uid: self.filtered_by_index_uid | other.filtered_by_index_uid, - filtered_by_type: self.filtered_by_type | other.filtered_by_type, - filtered_by_status: self.filtered_by_status | other.filtered_by_status, - filtered_by_canceled_by: self.filtered_by_canceled_by | other.filtered_by_canceled_by, + filtered_by_uid: self.filtered_by_uid | new.filtered_by_uid, + filtered_by_index_uid: self.filtered_by_index_uid | new.filtered_by_index_uid, + filtered_by_type: self.filtered_by_type | new.filtered_by_type, + filtered_by_status: self.filtered_by_status | new.filtered_by_status, + filtered_by_canceled_by: self.filtered_by_canceled_by | new.filtered_by_canceled_by, filtered_by_before_enqueued_at: self.filtered_by_before_enqueued_at - | other.filtered_by_before_enqueued_at, + | new.filtered_by_before_enqueued_at, 
filtered_by_after_enqueued_at: self.filtered_by_after_enqueued_at - | other.filtered_by_after_enqueued_at, + | new.filtered_by_after_enqueued_at, filtered_by_before_started_at: self.filtered_by_before_started_at - | other.filtered_by_before_started_at, + | new.filtered_by_before_started_at, filtered_by_after_started_at: self.filtered_by_after_started_at - | other.filtered_by_after_started_at, + | new.filtered_by_after_started_at, filtered_by_before_finished_at: self.filtered_by_before_finished_at - | other.filtered_by_before_finished_at, + | new.filtered_by_before_finished_at, filtered_by_after_finished_at: self.filtered_by_after_finished_at - | other.filtered_by_after_finished_at, + | new.filtered_by_after_finished_at, marker: std::marker::PhantomData, }) From ac919df37dff4dda34ae2687517bb4b1a6b2b4cf Mon Sep 17 00:00:00 2001 From: Tamo Date: Sun, 20 Oct 2024 17:36:29 +0200 Subject: [PATCH 085/111] simplify the trait a bit more by getting rids of the downcast_aggregate method --- meilisearch/src/analytics/mod.rs | 20 ------------------- .../src/analytics/segment_analytics.rs | 18 ++++++++++++++++- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index 27203ea71..d72ab9d01 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -93,26 +93,6 @@ pub trait Aggregate: 'static + mopa::Any + Send { where Self: Sized; - /// An internal helper function, you shouldn't implement it yourself. - /// This function should always be called on the same type. If `this` and `other` - /// aren't the same type behind the function will do nothing and return `None`. 
- fn downcast_aggregate( - old: Box, - new: Box, - ) -> Option> - where - Self: Sized, - { - if old.is::() && new.is::() { - // Both the two following lines cannot fail, but just to be sure we don't crash, we're still avoiding unwrapping - let this = old.downcast::().ok()?; - let other = new.downcast::().ok()?; - Some(Self::aggregate(this, other)) - } else { - None - } - } - /// Converts your structure to the final event that'll be sent to segment. fn into_event(self: Box) -> serde_json::Value; } diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 328a3a048..96a0a676c 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -82,6 +82,22 @@ pub struct Event { total: usize, } +/// This function should always be called on the same type. If `this` and `other` +/// aren't the same type the function will do nothing and return `None`. +fn downcast_aggregate( + old: Box, + new: Box, +) -> Option> { + if old.is::() && new.is::() { + // Both the two following lines cannot fail, but just to be sure we don't crash, we're still avoiding unwrapping + let this = old.downcast::().ok()?; + let other = new.downcast::().ok()?; + Some(ConcreteType::aggregate(this, other)) + } else { + None + } +} + impl Message { pub fn new(event: T, request: &HttpRequest) -> Self { Self { @@ -92,7 +108,7 @@ impl Message { user_agents: extract_user_agents(request), total: 1, }, - aggregator_function: T::downcast_aggregate, + aggregator_function: downcast_aggregate::, } } } From af589c85ec4746ef38a38420e0b6d433b1dc86d2 Mon Sep 17 00:00:00 2001 From: Tamo Date: Sun, 20 Oct 2024 17:40:31 +0200 Subject: [PATCH 086/111] reverse all the settings to keep the last one received instead of the first one received in case we receive the same setting multiple times --- .../src/routes/indexes/settings_analytics.rs | 94 +++++++++---------- 1 file changed, 47 insertions(+), 47 deletions(-) diff 
--git a/meilisearch/src/routes/indexes/settings_analytics.rs b/meilisearch/src/routes/indexes/settings_analytics.rs index e7d44fa20..de01b72e8 100644 --- a/meilisearch/src/routes/indexes/settings_analytics.rs +++ b/meilisearch/src/routes/indexes/settings_analytics.rs @@ -45,97 +45,97 @@ impl Aggregate for SettingsAnalytics { fn aggregate(self: Box, new: Box) -> Box { Box::new(Self { ranking_rules: RankingRulesAnalytics { - words_position: self + words_position: new .ranking_rules .words_position - .or(new.ranking_rules.words_position), - typo_position: self.ranking_rules.typo_position.or(new.ranking_rules.typo_position), - proximity_position: self + .or(self.ranking_rules.words_position), + typo_position: new.ranking_rules.typo_position.or(self.ranking_rules.typo_position), + proximity_position: new .ranking_rules .proximity_position - .or(new.ranking_rules.proximity_position), - attribute_position: self + .or(self.ranking_rules.proximity_position), + attribute_position: new .ranking_rules .attribute_position - .or(new.ranking_rules.attribute_position), - sort_position: self.ranking_rules.sort_position.or(new.ranking_rules.sort_position), - exactness_position: self + .or(self.ranking_rules.attribute_position), + sort_position: new.ranking_rules.sort_position.or(self.ranking_rules.sort_position), + exactness_position: new .ranking_rules .exactness_position - .or(new.ranking_rules.exactness_position), - values: self.ranking_rules.values.or(new.ranking_rules.values), + .or(self.ranking_rules.exactness_position), + values: new.ranking_rules.values.or(self.ranking_rules.values), }, searchable_attributes: SearchableAttributesAnalytics { - total: self.searchable_attributes.total.or(new.searchable_attributes.total), - with_wildcard: self + total: new.searchable_attributes.total.or(self.searchable_attributes.total), + with_wildcard: new .searchable_attributes .with_wildcard - .or(new.searchable_attributes.with_wildcard), + .or(self.searchable_attributes.with_wildcard), }, 
displayed_attributes: DisplayedAttributesAnalytics { - total: self.displayed_attributes.total.or(new.displayed_attributes.total), - with_wildcard: self + total: new.displayed_attributes.total.or(self.displayed_attributes.total), + with_wildcard: new .displayed_attributes .with_wildcard - .or(new.displayed_attributes.with_wildcard), + .or(self.displayed_attributes.with_wildcard), }, sortable_attributes: SortableAttributesAnalytics { - total: self.sortable_attributes.total.or(new.sortable_attributes.total), - has_geo: self.sortable_attributes.has_geo.or(new.sortable_attributes.has_geo), + total: new.sortable_attributes.total.or(self.sortable_attributes.total), + has_geo: new.sortable_attributes.has_geo.or(self.sortable_attributes.has_geo), }, filterable_attributes: FilterableAttributesAnalytics { - total: self.filterable_attributes.total.or(new.filterable_attributes.total), - has_geo: self.filterable_attributes.has_geo.or(new.filterable_attributes.has_geo), + total: new.filterable_attributes.total.or(self.filterable_attributes.total), + has_geo: new.filterable_attributes.has_geo.or(self.filterable_attributes.has_geo), }, distinct_attribute: DistinctAttributeAnalytics { set: self.distinct_attribute.set | new.distinct_attribute.set, }, proximity_precision: ProximityPrecisionAnalytics { set: self.proximity_precision.set | new.proximity_precision.set, - value: self.proximity_precision.value.or(new.proximity_precision.value), + value: new.proximity_precision.value.or(self.proximity_precision.value), }, typo_tolerance: TypoToleranceAnalytics { - enabled: self.typo_tolerance.enabled.or(new.typo_tolerance.enabled), - disable_on_attributes: self + enabled: new.typo_tolerance.enabled.or(self.typo_tolerance.enabled), + disable_on_attributes: new .typo_tolerance .disable_on_attributes - .or(new.typo_tolerance.disable_on_attributes), - disable_on_words: self + .or(self.typo_tolerance.disable_on_attributes), + disable_on_words: new .typo_tolerance .disable_on_words - 
.or(new.typo_tolerance.disable_on_words), - min_word_size_for_one_typo: self + .or(self.typo_tolerance.disable_on_words), + min_word_size_for_one_typo: new .typo_tolerance .min_word_size_for_one_typo - .or(new.typo_tolerance.min_word_size_for_one_typo), - min_word_size_for_two_typos: self + .or(self.typo_tolerance.min_word_size_for_one_typo), + min_word_size_for_two_typos: new .typo_tolerance .min_word_size_for_two_typos - .or(new.typo_tolerance.min_word_size_for_two_typos), + .or(self.typo_tolerance.min_word_size_for_two_typos), }, faceting: FacetingAnalytics { - max_values_per_facet: self + max_values_per_facet: new .faceting .max_values_per_facet - .or(new.faceting.max_values_per_facet), - sort_facet_values_by_star_count: self + .or(self.faceting.max_values_per_facet), + sort_facet_values_by_star_count: new .faceting .sort_facet_values_by_star_count - .or(new.faceting.sort_facet_values_by_star_count), - sort_facet_values_by_total: self + .or(self.faceting.sort_facet_values_by_star_count), + sort_facet_values_by_total: new .faceting .sort_facet_values_by_total - .or(new.faceting.sort_facet_values_by_total), + .or(self.faceting.sort_facet_values_by_total), }, pagination: PaginationAnalytics { - max_total_hits: self.pagination.max_total_hits.or(new.pagination.max_total_hits), + max_total_hits: new.pagination.max_total_hits.or(self.pagination.max_total_hits), }, stop_words: StopWordsAnalytics { - total: self.stop_words.total.or(new.stop_words.total), + total: new.stop_words.total.or(self.stop_words.total), }, - synonyms: SynonymsAnalytics { total: self.synonyms.total.or(new.synonyms.total) }, + synonyms: SynonymsAnalytics { total: new.synonyms.total.or(self.synonyms.total) }, embedders: EmbeddersAnalytics { - total: self.embedders.total.or(new.embedders.total), + total: new.embedders.total.or(self.embedders.total), sources: match (self.embedders.sources, new.embedders.sources) { (None, None) => None, (Some(sources), None) | (None, Some(sources)) => Some(sources), @@ 
-167,20 +167,20 @@ impl Aggregate for SettingsAnalytics { }, }, search_cutoff_ms: SearchCutoffMsAnalytics { - search_cutoff_ms: self + search_cutoff_ms: new .search_cutoff_ms .search_cutoff_ms - .or(new.search_cutoff_ms.search_cutoff_ms), + .or(self.search_cutoff_ms.search_cutoff_ms), }, - locales: LocalesAnalytics { locales: self.locales.locales.or(new.locales.locales) }, + locales: LocalesAnalytics { locales: new.locales.locales.or(self.locales.locales) }, dictionary: DictionaryAnalytics { - total: self.dictionary.total.or(new.dictionary.total), + total: new.dictionary.total.or(self.dictionary.total), }, separator_tokens: SeparatorTokensAnalytics { - total: self.separator_tokens.total.or(new.non_separator_tokens.total), + total: new.non_separator_tokens.total.or(self.separator_tokens.total), }, non_separator_tokens: NonSeparatorTokensAnalytics { - total: self.non_separator_tokens.total.or(new.non_separator_tokens.total), + total: new.non_separator_tokens.total.or(self.non_separator_tokens.total), }, }) } From 5675585fe8b4f51eed7b08bb30e1fed0f711e340 Mon Sep 17 00:00:00 2001 From: Tamo Date: Sun, 20 Oct 2024 17:54:43 +0200 Subject: [PATCH 087/111] move all the searches structures to new modules --- meilisearch/src/analytics/mod.rs | 4 - .../src/analytics/segment_analytics.rs | 868 +----------------- meilisearch/src/routes/indexes/mod.rs | 2 + meilisearch/src/routes/indexes/search.rs | 4 +- .../src/routes/indexes/search_analytics.rs | 485 ++++++++++ meilisearch/src/routes/indexes/similar.rs | 4 +- .../src/routes/indexes/similar_analytics.rs | 235 +++++ meilisearch/src/routes/mod.rs | 1 + meilisearch/src/routes/multi_search.rs | 4 +- .../src/routes/multi_search_analytics.rs | 170 ++++ 10 files changed, 903 insertions(+), 874 deletions(-) create mode 100644 meilisearch/src/routes/indexes/search_analytics.rs create mode 100644 meilisearch/src/routes/indexes/similar_analytics.rs create mode 100644 meilisearch/src/routes/multi_search_analytics.rs diff --git 
a/meilisearch/src/analytics/mod.rs b/meilisearch/src/analytics/mod.rs index d72ab9d01..bd14b0bfa 100644 --- a/meilisearch/src/analytics/mod.rs +++ b/meilisearch/src/analytics/mod.rs @@ -15,13 +15,9 @@ use platform_dirs::AppDirs; // if the feature analytics is enabled we use the real analytics pub type SegmentAnalytics = segment_analytics::SegmentAnalytics; -pub use segment_analytics::SearchAggregator; -pub use segment_analytics::SimilarAggregator; use crate::Opt; -pub use self::segment_analytics::MultiSearchAggregator; - /// A macro used to quickly define events that don't aggregate or send anything besides an empty event with its name. #[macro_export] macro_rules! empty_analytics { diff --git a/meilisearch/src/analytics/segment_analytics.rs b/meilisearch/src/analytics/segment_analytics.rs index 96a0a676c..7dc746b14 100644 --- a/meilisearch/src/analytics/segment_analytics.rs +++ b/meilisearch/src/analytics/segment_analytics.rs @@ -1,5 +1,5 @@ use std::any::TypeId; -use std::collections::{BTreeSet, BinaryHeap, HashMap, HashSet}; +use std::collections::{HashMap, HashSet}; use std::fs; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -11,10 +11,8 @@ use byte_unit::Byte; use index_scheduler::IndexScheduler; use meilisearch_auth::{AuthController, AuthFilter}; use meilisearch_types::features::RuntimeTogglableFeatures; -use meilisearch_types::locales::Locale; use meilisearch_types::InstanceUid; use once_cell::sync::Lazy; -use regex::Regex; use segment::message::{Identify, Track, User}; use segment::{AutoBatcher, Batcher, HttpClient}; use serde::Serialize; @@ -25,17 +23,12 @@ use tokio::select; use tokio::sync::mpsc::{self, Receiver, Sender}; use uuid::Uuid; -use super::{config_user_id_path, Aggregate, AggregateMethod, MEILISEARCH_CONFIG_PATH}; +use super::{config_user_id_path, Aggregate, MEILISEARCH_CONFIG_PATH}; use crate::option::{ default_http_addr, IndexerOpts, LogMode, MaxMemory, MaxThreads, ScheduleSnapshot, }; use crate::routes::{create_all_stats, Stats}; 
-use crate::search::{ - FederatedSearch, SearchQuery, SearchQueryWithIndex, SearchResult, SimilarQuery, SimilarResult, - DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG, - DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, DEFAULT_SEMANTIC_RATIO, -}; -use crate::{aggregate_methods, Opt}; +use crate::Opt; const ANALYTICS_HEADER: &str = "X-Meilisearch-Client"; @@ -489,858 +482,3 @@ impl Segment { let _ = self.batcher.flush().await; } } - -#[derive(Default)] -pub struct SearchAggregator { - // requests - total_received: usize, - total_succeeded: usize, - total_degraded: usize, - total_used_negative_operator: usize, - time_spent: BinaryHeap, - - // sort - sort_with_geo_point: bool, - // every time a request has a filter, this field must be incremented by the number of terms it contains - sort_sum_of_criteria_terms: usize, - // every time a request has a filter, this field must be incremented by one - sort_total_number_of_criteria: usize, - - // distinct - distinct: bool, - - // filter - filter_with_geo_radius: bool, - filter_with_geo_bounding_box: bool, - // every time a request has a filter, this field must be incremented by the number of terms it contains - filter_sum_of_criteria_terms: usize, - // every time a request has a filter, this field must be incremented by one - filter_total_number_of_criteria: usize, - used_syntax: HashMap, - - // attributes_to_search_on - // every time a search is done using attributes_to_search_on - attributes_to_search_on_total_number_of_uses: usize, - - // q - // The maximum number of terms in a q request - max_terms_number: usize, - - // vector - // The maximum number of floats in a vector request - max_vector_size: usize, - // Whether the semantic ratio passed to a hybrid search equals the default ratio. 
- semantic_ratio: bool, - hybrid: bool, - retrieve_vectors: bool, - - // every time a search is done, we increment the counter linked to the used settings - matching_strategy: HashMap, - - // List of the unique Locales passed as parameter - locales: BTreeSet, - - // pagination - max_limit: usize, - max_offset: usize, - finite_pagination: usize, - - // formatting - max_attributes_to_retrieve: usize, - max_attributes_to_highlight: usize, - highlight_pre_tag: bool, - highlight_post_tag: bool, - max_attributes_to_crop: usize, - crop_marker: bool, - show_matches_position: bool, - crop_length: bool, - - // facets - facets_sum_of_terms: usize, - facets_total_number_of_facets: usize, - - // scoring - show_ranking_score: bool, - show_ranking_score_details: bool, - ranking_score_threshold: bool, - - marker: std::marker::PhantomData, -} - -impl SearchAggregator { - #[allow(clippy::field_reassign_with_default)] - pub fn from_query(query: &SearchQuery) -> Self { - let SearchQuery { - q, - vector, - offset, - limit, - page, - hits_per_page, - attributes_to_retrieve: _, - retrieve_vectors, - attributes_to_crop: _, - crop_length, - attributes_to_highlight: _, - show_matches_position, - show_ranking_score, - show_ranking_score_details, - filter, - sort, - distinct, - facets: _, - highlight_pre_tag, - highlight_post_tag, - crop_marker, - matching_strategy, - attributes_to_search_on, - hybrid, - ranking_score_threshold, - locales, - } = query; - - let mut ret = Self::default(); - - ret.total_received = 1; - - if let Some(ref sort) = sort { - ret.sort_total_number_of_criteria = 1; - ret.sort_with_geo_point = sort.iter().any(|s| s.contains("_geoPoint(")); - ret.sort_sum_of_criteria_terms = sort.len(); - } - - ret.distinct = distinct.is_some(); - - if let Some(ref filter) = filter { - static RE: Lazy = Lazy::new(|| Regex::new("AND | OR").unwrap()); - ret.filter_total_number_of_criteria = 1; - - let syntax = match filter { - Value::String(_) => "string".to_string(), - 
Value::Array(values) => { - if values.iter().map(|v| v.to_string()).any(|s| RE.is_match(&s)) { - "mixed".to_string() - } else { - "array".to_string() - } - } - _ => "none".to_string(), - }; - // convert the string to a HashMap - ret.used_syntax.insert(syntax, 1); - - let stringified_filters = filter.to_string(); - ret.filter_with_geo_radius = stringified_filters.contains("_geoRadius("); - ret.filter_with_geo_bounding_box = stringified_filters.contains("_geoBoundingBox("); - ret.filter_sum_of_criteria_terms = RE.split(&stringified_filters).count(); - } - - // attributes_to_search_on - if attributes_to_search_on.is_some() { - ret.attributes_to_search_on_total_number_of_uses = 1; - } - - if let Some(ref q) = q { - ret.max_terms_number = q.split_whitespace().count(); - } - - if let Some(ref vector) = vector { - ret.max_vector_size = vector.len(); - } - ret.retrieve_vectors |= retrieve_vectors; - - if query.is_finite_pagination() { - let limit = hits_per_page.unwrap_or_else(DEFAULT_SEARCH_LIMIT); - ret.max_limit = limit; - ret.max_offset = page.unwrap_or(1).saturating_sub(1) * limit; - ret.finite_pagination = 1; - } else { - ret.max_limit = *limit; - ret.max_offset = *offset; - ret.finite_pagination = 0; - } - - ret.matching_strategy.insert(format!("{:?}", matching_strategy), 1); - - if let Some(locales) = locales { - ret.locales = locales.iter().copied().collect(); - } - - ret.highlight_pre_tag = *highlight_pre_tag != DEFAULT_HIGHLIGHT_PRE_TAG(); - ret.highlight_post_tag = *highlight_post_tag != DEFAULT_HIGHLIGHT_POST_TAG(); - ret.crop_marker = *crop_marker != DEFAULT_CROP_MARKER(); - ret.crop_length = *crop_length != DEFAULT_CROP_LENGTH(); - ret.show_matches_position = *show_matches_position; - - ret.show_ranking_score = *show_ranking_score; - ret.show_ranking_score_details = *show_ranking_score_details; - ret.ranking_score_threshold = ranking_score_threshold.is_some(); - - if let Some(hybrid) = hybrid { - ret.semantic_ratio = hybrid.semantic_ratio != 
DEFAULT_SEMANTIC_RATIO(); - ret.hybrid = true; - } - - ret - } - - pub fn succeed(&mut self, result: &SearchResult) { - let SearchResult { - hits: _, - query: _, - processing_time_ms, - hits_info: _, - semantic_hit_count: _, - facet_distribution: _, - facet_stats: _, - degraded, - used_negative_operator, - } = result; - - self.total_succeeded = self.total_succeeded.saturating_add(1); - if *degraded { - self.total_degraded = self.total_degraded.saturating_add(1); - } - if *used_negative_operator { - self.total_used_negative_operator = self.total_used_negative_operator.saturating_add(1); - } - self.time_spent.push(*processing_time_ms as usize); - } -} - -aggregate_methods!( - SearchGET => "Documents Searched GET", - SearchPOST => "Documents Searched POST", -); - -impl Aggregate for SearchAggregator { - fn event_name(&self) -> &'static str { - Method::event_name() - } - - fn aggregate(mut self: Box, new: Box) -> Box { - let Self { - total_received, - total_succeeded, - mut time_spent, - sort_with_geo_point, - sort_sum_of_criteria_terms, - sort_total_number_of_criteria, - distinct, - filter_with_geo_radius, - filter_with_geo_bounding_box, - filter_sum_of_criteria_terms, - filter_total_number_of_criteria, - used_syntax, - attributes_to_search_on_total_number_of_uses, - max_terms_number, - max_vector_size, - retrieve_vectors, - matching_strategy, - max_limit, - max_offset, - finite_pagination, - max_attributes_to_retrieve, - max_attributes_to_highlight, - highlight_pre_tag, - highlight_post_tag, - max_attributes_to_crop, - crop_marker, - show_matches_position, - crop_length, - facets_sum_of_terms, - facets_total_number_of_facets, - show_ranking_score, - show_ranking_score_details, - semantic_ratio, - hybrid, - total_degraded, - total_used_negative_operator, - ranking_score_threshold, - mut locales, - marker: _, - } = *new; - - // request - self.total_received = self.total_received.saturating_add(total_received); - self.total_succeeded = 
self.total_succeeded.saturating_add(total_succeeded); - self.total_degraded = self.total_degraded.saturating_add(total_degraded); - self.total_used_negative_operator = - self.total_used_negative_operator.saturating_add(total_used_negative_operator); - self.time_spent.append(&mut time_spent); - - // sort - self.sort_with_geo_point |= sort_with_geo_point; - self.sort_sum_of_criteria_terms = - self.sort_sum_of_criteria_terms.saturating_add(sort_sum_of_criteria_terms); - self.sort_total_number_of_criteria = - self.sort_total_number_of_criteria.saturating_add(sort_total_number_of_criteria); - - // distinct - self.distinct |= distinct; - - // filter - self.filter_with_geo_radius |= filter_with_geo_radius; - self.filter_with_geo_bounding_box |= filter_with_geo_bounding_box; - self.filter_sum_of_criteria_terms = - self.filter_sum_of_criteria_terms.saturating_add(filter_sum_of_criteria_terms); - self.filter_total_number_of_criteria = - self.filter_total_number_of_criteria.saturating_add(filter_total_number_of_criteria); - for (key, value) in used_syntax.into_iter() { - let used_syntax = self.used_syntax.entry(key).or_insert(0); - *used_syntax = used_syntax.saturating_add(value); - } - - // attributes_to_search_on - self.attributes_to_search_on_total_number_of_uses = self - .attributes_to_search_on_total_number_of_uses - .saturating_add(attributes_to_search_on_total_number_of_uses); - - // q - self.max_terms_number = self.max_terms_number.max(max_terms_number); - - // vector - self.max_vector_size = self.max_vector_size.max(max_vector_size); - self.retrieve_vectors |= retrieve_vectors; - self.semantic_ratio |= semantic_ratio; - self.hybrid |= hybrid; - - // pagination - self.max_limit = self.max_limit.max(max_limit); - self.max_offset = self.max_offset.max(max_offset); - self.finite_pagination += finite_pagination; - - // formatting - self.max_attributes_to_retrieve = - self.max_attributes_to_retrieve.max(max_attributes_to_retrieve); - self.max_attributes_to_highlight = - 
self.max_attributes_to_highlight.max(max_attributes_to_highlight); - self.highlight_pre_tag |= highlight_pre_tag; - self.highlight_post_tag |= highlight_post_tag; - self.max_attributes_to_crop = self.max_attributes_to_crop.max(max_attributes_to_crop); - self.crop_marker |= crop_marker; - self.show_matches_position |= show_matches_position; - self.crop_length |= crop_length; - - // facets - self.facets_sum_of_terms = self.facets_sum_of_terms.saturating_add(facets_sum_of_terms); - self.facets_total_number_of_facets = - self.facets_total_number_of_facets.saturating_add(facets_total_number_of_facets); - - // matching strategy - for (key, value) in matching_strategy.into_iter() { - let matching_strategy = self.matching_strategy.entry(key).or_insert(0); - *matching_strategy = matching_strategy.saturating_add(value); - } - - // scoring - self.show_ranking_score |= show_ranking_score; - self.show_ranking_score_details |= show_ranking_score_details; - self.ranking_score_threshold |= ranking_score_threshold; - - // locales - self.locales.append(&mut locales); - - self - } - - fn into_event(self: Box) -> serde_json::Value { - let Self { - total_received, - total_succeeded, - time_spent, - sort_with_geo_point, - sort_sum_of_criteria_terms, - sort_total_number_of_criteria, - distinct, - filter_with_geo_radius, - filter_with_geo_bounding_box, - filter_sum_of_criteria_terms, - filter_total_number_of_criteria, - used_syntax, - attributes_to_search_on_total_number_of_uses, - max_terms_number, - max_vector_size, - retrieve_vectors, - matching_strategy, - max_limit, - max_offset, - finite_pagination, - max_attributes_to_retrieve, - max_attributes_to_highlight, - highlight_pre_tag, - highlight_post_tag, - max_attributes_to_crop, - crop_marker, - show_matches_position, - crop_length, - facets_sum_of_terms, - facets_total_number_of_facets, - show_ranking_score, - show_ranking_score_details, - semantic_ratio, - hybrid, - total_degraded, - total_used_negative_operator, - 
ranking_score_threshold, - locales, - marker: _, - } = *self; - - // we get all the values in a sorted manner - let time_spent = time_spent.into_sorted_vec(); - // the index of the 99th percentage of value - let percentile_99th = time_spent.len() * 99 / 100; - // We are only interested by the slowest value of the 99th fastest results - let time_spent = time_spent.get(percentile_99th); - - json!({ - "requests": { - "99th_response_time": time_spent.map(|t| format!("{:.2}", t)), - "total_succeeded": total_succeeded, - "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics - "total_received": total_received, - "total_degraded": total_degraded, - "total_used_negative_operator": total_used_negative_operator, - }, - "sort": { - "with_geoPoint": sort_with_geo_point, - "avg_criteria_number": format!("{:.2}", sort_sum_of_criteria_terms as f64 / sort_total_number_of_criteria as f64), - }, - "distinct": distinct, - "filter": { - "with_geoRadius": filter_with_geo_radius, - "with_geoBoundingBox": filter_with_geo_bounding_box, - "avg_criteria_number": format!("{:.2}", filter_sum_of_criteria_terms as f64 / filter_total_number_of_criteria as f64), - "most_used_syntax": used_syntax.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), - }, - "attributes_to_search_on": { - "total_number_of_uses": attributes_to_search_on_total_number_of_uses, - }, - "q": { - "max_terms_number": max_terms_number, - }, - "vector": { - "max_vector_size": max_vector_size, - "retrieve_vectors": retrieve_vectors, - }, - "hybrid": { - "enabled": hybrid, - "semantic_ratio": semantic_ratio, - }, - "pagination": { - "max_limit": max_limit, - "max_offset": max_offset, - "most_used_navigation": if finite_pagination > (total_received / 2) { "exhaustive" } else { "estimated" }, - }, - "formatting": { - "max_attributes_to_retrieve": max_attributes_to_retrieve, - "max_attributes_to_highlight": max_attributes_to_highlight, - 
"highlight_pre_tag": highlight_pre_tag, - "highlight_post_tag": highlight_post_tag, - "max_attributes_to_crop": max_attributes_to_crop, - "crop_marker": crop_marker, - "show_matches_position": show_matches_position, - "crop_length": crop_length, - }, - "facets": { - "avg_facets_number": format!("{:.2}", facets_sum_of_terms as f64 / facets_total_number_of_facets as f64), - }, - "matching_strategy": { - "most_used_strategy": matching_strategy.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), - }, - "locales": locales, - "scoring": { - "show_ranking_score": show_ranking_score, - "show_ranking_score_details": show_ranking_score_details, - "ranking_score_threshold": ranking_score_threshold, - }, - }) - } -} - -#[derive(Default)] -pub struct MultiSearchAggregator { - // requests - total_received: usize, - total_succeeded: usize, - - // sum of the number of distinct indexes in each single request, use with total_received to compute an avg - total_distinct_index_count: usize, - // number of queries with a single index, use with total_received to compute a proportion - total_single_index: usize, - - // sum of the number of search queries in the requests, use with total_received to compute an average - total_search_count: usize, - - // scoring - show_ranking_score: bool, - show_ranking_score_details: bool, - - // federation - use_federation: bool, -} - -impl MultiSearchAggregator { - pub fn from_federated_search(federated_search: &FederatedSearch) -> Self { - let use_federation = federated_search.federation.is_some(); - - let distinct_indexes: HashSet<_> = federated_search - .queries - .iter() - .map(|query| { - let query = &query; - // make sure we get a compilation error if a field gets added to / removed from SearchQueryWithIndex - let SearchQueryWithIndex { - index_uid, - federation_options: _, - q: _, - vector: _, - offset: _, - limit: _, - page: _, - hits_per_page: _, - attributes_to_retrieve: _, - retrieve_vectors: _, - 
attributes_to_crop: _, - crop_length: _, - attributes_to_highlight: _, - show_ranking_score: _, - show_ranking_score_details: _, - show_matches_position: _, - filter: _, - sort: _, - distinct: _, - facets: _, - highlight_pre_tag: _, - highlight_post_tag: _, - crop_marker: _, - matching_strategy: _, - attributes_to_search_on: _, - hybrid: _, - ranking_score_threshold: _, - locales: _, - } = query; - - index_uid.as_str() - }) - .collect(); - - let show_ranking_score = - federated_search.queries.iter().any(|query| query.show_ranking_score); - let show_ranking_score_details = - federated_search.queries.iter().any(|query| query.show_ranking_score_details); - - Self { - total_received: 1, - total_succeeded: 0, - total_distinct_index_count: distinct_indexes.len(), - total_single_index: if distinct_indexes.len() == 1 { 1 } else { 0 }, - total_search_count: federated_search.queries.len(), - show_ranking_score, - show_ranking_score_details, - use_federation, - } - } - - pub fn succeed(&mut self) { - self.total_succeeded = self.total_succeeded.saturating_add(1); - } -} - -impl Aggregate for MultiSearchAggregator { - fn event_name(&self) -> &'static str { - "Documents Searched by Multi-Search POST" - } - - /// Aggregate one [MultiSearchAggregator] into another. - fn aggregate(self: Box, new: Box) -> Box { - // write the aggregate in a way that will cause a compilation error if a field is added. - - // get ownership of self, replacing it by a default value. 
- let this = *self; - - let total_received = this.total_received.saturating_add(new.total_received); - let total_succeeded = this.total_succeeded.saturating_add(new.total_succeeded); - let total_distinct_index_count = - this.total_distinct_index_count.saturating_add(new.total_distinct_index_count); - let total_single_index = this.total_single_index.saturating_add(new.total_single_index); - let total_search_count = this.total_search_count.saturating_add(new.total_search_count); - let show_ranking_score = this.show_ranking_score || new.show_ranking_score; - let show_ranking_score_details = - this.show_ranking_score_details || new.show_ranking_score_details; - let use_federation = this.use_federation || new.use_federation; - - Box::new(Self { - total_received, - total_succeeded, - total_distinct_index_count, - total_single_index, - total_search_count, - show_ranking_score, - show_ranking_score_details, - use_federation, - }) - } - - fn into_event(self: Box) -> serde_json::Value { - let Self { - total_received, - total_succeeded, - total_distinct_index_count, - total_single_index, - total_search_count, - show_ranking_score, - show_ranking_score_details, - use_federation, - } = *self; - - json!({ - "requests": { - "total_succeeded": total_succeeded, - "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics - "total_received": total_received, - }, - "indexes": { - "total_single_index": total_single_index, - "total_distinct_index_count": total_distinct_index_count, - "avg_distinct_index_count": (total_distinct_index_count as f64) / (total_received as f64), // not 0 else returned early - }, - "searches": { - "total_search_count": total_search_count, - "avg_search_count": (total_search_count as f64) / (total_received as f64), - }, - "scoring": { - "show_ranking_score": show_ranking_score, - "show_ranking_score_details": show_ranking_score_details, - }, - "federation": { - "use_federation": use_federation, - } - }) - } -} - 
-aggregate_methods!( - SimilarPOST => "Similar POST", - SimilarGET => "Similar GET", -); - -#[derive(Default)] -pub struct SimilarAggregator { - // requests - total_received: usize, - total_succeeded: usize, - time_spent: BinaryHeap, - - // filter - filter_with_geo_radius: bool, - filter_with_geo_bounding_box: bool, - // every time a request has a filter, this field must be incremented by the number of terms it contains - filter_sum_of_criteria_terms: usize, - // every time a request has a filter, this field must be incremented by one - filter_total_number_of_criteria: usize, - used_syntax: HashMap, - - // Whether a non-default embedder was specified - retrieve_vectors: bool, - - // pagination - max_limit: usize, - max_offset: usize, - - // formatting - max_attributes_to_retrieve: usize, - - // scoring - show_ranking_score: bool, - show_ranking_score_details: bool, - ranking_score_threshold: bool, - - marker: std::marker::PhantomData, -} - -impl SimilarAggregator { - #[allow(clippy::field_reassign_with_default)] - pub fn from_query(query: &SimilarQuery) -> Self { - let SimilarQuery { - id: _, - embedder: _, - offset, - limit, - attributes_to_retrieve: _, - retrieve_vectors, - show_ranking_score, - show_ranking_score_details, - filter, - ranking_score_threshold, - } = query; - - let mut ret = Self::default(); - - ret.total_received = 1; - - if let Some(ref filter) = filter { - static RE: Lazy = Lazy::new(|| Regex::new("AND | OR").unwrap()); - ret.filter_total_number_of_criteria = 1; - - let syntax = match filter { - Value::String(_) => "string".to_string(), - Value::Array(values) => { - if values.iter().map(|v| v.to_string()).any(|s| RE.is_match(&s)) { - "mixed".to_string() - } else { - "array".to_string() - } - } - _ => "none".to_string(), - }; - // convert the string to a HashMap - ret.used_syntax.insert(syntax, 1); - - let stringified_filters = filter.to_string(); - ret.filter_with_geo_radius = stringified_filters.contains("_geoRadius("); - 
ret.filter_with_geo_bounding_box = stringified_filters.contains("_geoBoundingBox("); - ret.filter_sum_of_criteria_terms = RE.split(&stringified_filters).count(); - } - - ret.max_limit = *limit; - ret.max_offset = *offset; - - ret.show_ranking_score = *show_ranking_score; - ret.show_ranking_score_details = *show_ranking_score_details; - ret.ranking_score_threshold = ranking_score_threshold.is_some(); - - ret.retrieve_vectors = *retrieve_vectors; - - ret - } - - pub fn succeed(&mut self, result: &SimilarResult) { - let SimilarResult { id: _, hits: _, processing_time_ms, hits_info: _ } = result; - - self.total_succeeded = self.total_succeeded.saturating_add(1); - - self.time_spent.push(*processing_time_ms as usize); - } -} - -impl Aggregate for SimilarAggregator { - fn event_name(&self) -> &'static str { - Method::event_name() - } - - /// Aggregate one [SimilarAggregator] into another. - fn aggregate(mut self: Box, new: Box) -> Box { - let Self { - total_received, - total_succeeded, - mut time_spent, - filter_with_geo_radius, - filter_with_geo_bounding_box, - filter_sum_of_criteria_terms, - filter_total_number_of_criteria, - used_syntax, - max_limit, - max_offset, - max_attributes_to_retrieve, - show_ranking_score, - show_ranking_score_details, - ranking_score_threshold, - retrieve_vectors, - marker: _, - } = *new; - - // request - self.total_received = self.total_received.saturating_add(total_received); - self.total_succeeded = self.total_succeeded.saturating_add(total_succeeded); - self.time_spent.append(&mut time_spent); - - // filter - self.filter_with_geo_radius |= filter_with_geo_radius; - self.filter_with_geo_bounding_box |= filter_with_geo_bounding_box; - self.filter_sum_of_criteria_terms = - self.filter_sum_of_criteria_terms.saturating_add(filter_sum_of_criteria_terms); - self.filter_total_number_of_criteria = - self.filter_total_number_of_criteria.saturating_add(filter_total_number_of_criteria); - for (key, value) in used_syntax.into_iter() { - let 
used_syntax = self.used_syntax.entry(key).or_insert(0); - *used_syntax = used_syntax.saturating_add(value); - } - - self.retrieve_vectors |= retrieve_vectors; - - // pagination - self.max_limit = self.max_limit.max(max_limit); - self.max_offset = self.max_offset.max(max_offset); - - // formatting - self.max_attributes_to_retrieve = - self.max_attributes_to_retrieve.max(max_attributes_to_retrieve); - - // scoring - self.show_ranking_score |= show_ranking_score; - self.show_ranking_score_details |= show_ranking_score_details; - self.ranking_score_threshold |= ranking_score_threshold; - - self - } - - fn into_event(self: Box) -> serde_json::Value { - let Self { - total_received, - total_succeeded, - time_spent, - filter_with_geo_radius, - filter_with_geo_bounding_box, - filter_sum_of_criteria_terms, - filter_total_number_of_criteria, - used_syntax, - max_limit, - max_offset, - max_attributes_to_retrieve, - show_ranking_score, - show_ranking_score_details, - ranking_score_threshold, - retrieve_vectors, - marker: _, - } = *self; - - // we get all the values in a sorted manner - let time_spent = time_spent.into_sorted_vec(); - // the index of the 99th percentage of value - let percentile_99th = time_spent.len() * 99 / 100; - // We are only interested by the slowest value of the 99th fastest results - let time_spent = time_spent.get(percentile_99th); - - json!({ - "requests": { - "99th_response_time": time_spent.map(|t| format!("{:.2}", t)), - "total_succeeded": total_succeeded, - "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics - "total_received": total_received, - }, - "filter": { - "with_geoRadius": filter_with_geo_radius, - "with_geoBoundingBox": filter_with_geo_bounding_box, - "avg_criteria_number": format!("{:.2}", filter_sum_of_criteria_terms as f64 / filter_total_number_of_criteria as f64), - "most_used_syntax": used_syntax.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), - }, - 
"vector": { - "retrieve_vectors": retrieve_vectors, - }, - "pagination": { - "max_limit": max_limit, - "max_offset": max_offset, - }, - "formatting": { - "max_attributes_to_retrieve": max_attributes_to_retrieve, - }, - "scoring": { - "show_ranking_score": show_ranking_score, - "show_ranking_score_details": show_ranking_score_details, - "ranking_score_threshold": ranking_score_threshold, - } - }) - } -} diff --git a/meilisearch/src/routes/indexes/mod.rs b/meilisearch/src/routes/indexes/mod.rs index c8183186d..7d073ec5f 100644 --- a/meilisearch/src/routes/indexes/mod.rs +++ b/meilisearch/src/routes/indexes/mod.rs @@ -28,9 +28,11 @@ use crate::Opt; pub mod documents; pub mod facet_search; pub mod search; +mod search_analytics; pub mod settings; mod settings_analytics; pub mod similar; +mod similar_analytics; pub fn configure(cfg: &mut web::ServiceConfig) { cfg.service( diff --git a/meilisearch/src/routes/indexes/search.rs b/meilisearch/src/routes/indexes/search.rs index ac6e23c8f..2f5cb4a36 100644 --- a/meilisearch/src/routes/indexes/search.rs +++ b/meilisearch/src/routes/indexes/search.rs @@ -13,13 +13,13 @@ use meilisearch_types::serde_cs::vec::CS; use serde_json::Value; use tracing::debug; -use crate::analytics::segment_analytics::{SearchGET, SearchPOST}; -use crate::analytics::{Analytics, SearchAggregator}; +use crate::analytics::Analytics; use crate::error::MeilisearchHttpError; use crate::extractors::authentication::policies::*; use crate::extractors::authentication::GuardedData; use crate::extractors::sequential_extractor::SeqHandler; use crate::metrics::MEILISEARCH_DEGRADED_SEARCH_REQUESTS; +use crate::routes::indexes::search_analytics::{SearchAggregator, SearchGET, SearchPOST}; use crate::search::{ add_search_rules, perform_search, HybridQuery, MatchingStrategy, RankingScoreThreshold, RetrieveVectors, SearchKind, SearchQuery, SemanticRatio, DEFAULT_CROP_LENGTH, diff --git a/meilisearch/src/routes/indexes/search_analytics.rs 
b/meilisearch/src/routes/indexes/search_analytics.rs new file mode 100644 index 000000000..8bbb1781f --- /dev/null +++ b/meilisearch/src/routes/indexes/search_analytics.rs @@ -0,0 +1,485 @@ +use once_cell::sync::Lazy; +use regex::Regex; +use serde_json::{json, Value}; +use std::collections::{BTreeSet, BinaryHeap, HashMap}; + +use meilisearch_types::locales::Locale; + +use crate::{ + aggregate_methods, + analytics::{Aggregate, AggregateMethod}, + search::{ + SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, + DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, + DEFAULT_SEMANTIC_RATIO, + }, +}; + +aggregate_methods!( + SearchGET => "Documents Searched GET", + SearchPOST => "Documents Searched POST", +); + +#[derive(Default)] +pub struct SearchAggregator { + // requests + total_received: usize, + total_succeeded: usize, + total_degraded: usize, + total_used_negative_operator: usize, + time_spent: BinaryHeap, + + // sort + sort_with_geo_point: bool, + // every time a request has a filter, this field must be incremented by the number of terms it contains + sort_sum_of_criteria_terms: usize, + // every time a request has a filter, this field must be incremented by one + sort_total_number_of_criteria: usize, + + // distinct + distinct: bool, + + // filter + filter_with_geo_radius: bool, + filter_with_geo_bounding_box: bool, + // every time a request has a filter, this field must be incremented by the number of terms it contains + filter_sum_of_criteria_terms: usize, + // every time a request has a filter, this field must be incremented by one + filter_total_number_of_criteria: usize, + used_syntax: HashMap, + + // attributes_to_search_on + // every time a search is done using attributes_to_search_on + attributes_to_search_on_total_number_of_uses: usize, + + // q + // The maximum number of terms in a q request + max_terms_number: usize, + + // vector + // The maximum number of floats in a vector request + max_vector_size: usize, 
+ // Whether the semantic ratio passed to a hybrid search equals the default ratio. + semantic_ratio: bool, + hybrid: bool, + retrieve_vectors: bool, + + // every time a search is done, we increment the counter linked to the used settings + matching_strategy: HashMap, + + // List of the unique Locales passed as parameter + locales: BTreeSet, + + // pagination + max_limit: usize, + max_offset: usize, + finite_pagination: usize, + + // formatting + max_attributes_to_retrieve: usize, + max_attributes_to_highlight: usize, + highlight_pre_tag: bool, + highlight_post_tag: bool, + max_attributes_to_crop: usize, + crop_marker: bool, + show_matches_position: bool, + crop_length: bool, + + // facets + facets_sum_of_terms: usize, + facets_total_number_of_facets: usize, + + // scoring + show_ranking_score: bool, + show_ranking_score_details: bool, + ranking_score_threshold: bool, + + marker: std::marker::PhantomData, +} + +impl SearchAggregator { + #[allow(clippy::field_reassign_with_default)] + pub fn from_query(query: &SearchQuery) -> Self { + let SearchQuery { + q, + vector, + offset, + limit, + page, + hits_per_page, + attributes_to_retrieve: _, + retrieve_vectors, + attributes_to_crop: _, + crop_length, + attributes_to_highlight: _, + show_matches_position, + show_ranking_score, + show_ranking_score_details, + filter, + sort, + distinct, + facets: _, + highlight_pre_tag, + highlight_post_tag, + crop_marker, + matching_strategy, + attributes_to_search_on, + hybrid, + ranking_score_threshold, + locales, + } = query; + + let mut ret = Self::default(); + + ret.total_received = 1; + + if let Some(ref sort) = sort { + ret.sort_total_number_of_criteria = 1; + ret.sort_with_geo_point = sort.iter().any(|s| s.contains("_geoPoint(")); + ret.sort_sum_of_criteria_terms = sort.len(); + } + + ret.distinct = distinct.is_some(); + + if let Some(ref filter) = filter { + static RE: Lazy = Lazy::new(|| Regex::new("AND | OR").unwrap()); + ret.filter_total_number_of_criteria = 1; + + let 
syntax = match filter { + Value::String(_) => "string".to_string(), + Value::Array(values) => { + if values.iter().map(|v| v.to_string()).any(|s| RE.is_match(&s)) { + "mixed".to_string() + } else { + "array".to_string() + } + } + _ => "none".to_string(), + }; + // convert the string to a HashMap + ret.used_syntax.insert(syntax, 1); + + let stringified_filters = filter.to_string(); + ret.filter_with_geo_radius = stringified_filters.contains("_geoRadius("); + ret.filter_with_geo_bounding_box = stringified_filters.contains("_geoBoundingBox("); + ret.filter_sum_of_criteria_terms = RE.split(&stringified_filters).count(); + } + + // attributes_to_search_on + if attributes_to_search_on.is_some() { + ret.attributes_to_search_on_total_number_of_uses = 1; + } + + if let Some(ref q) = q { + ret.max_terms_number = q.split_whitespace().count(); + } + + if let Some(ref vector) = vector { + ret.max_vector_size = vector.len(); + } + ret.retrieve_vectors |= retrieve_vectors; + + if query.is_finite_pagination() { + let limit = hits_per_page.unwrap_or_else(DEFAULT_SEARCH_LIMIT); + ret.max_limit = limit; + ret.max_offset = page.unwrap_or(1).saturating_sub(1) * limit; + ret.finite_pagination = 1; + } else { + ret.max_limit = *limit; + ret.max_offset = *offset; + ret.finite_pagination = 0; + } + + ret.matching_strategy.insert(format!("{:?}", matching_strategy), 1); + + if let Some(locales) = locales { + ret.locales = locales.iter().copied().collect(); + } + + ret.highlight_pre_tag = *highlight_pre_tag != DEFAULT_HIGHLIGHT_PRE_TAG(); + ret.highlight_post_tag = *highlight_post_tag != DEFAULT_HIGHLIGHT_POST_TAG(); + ret.crop_marker = *crop_marker != DEFAULT_CROP_MARKER(); + ret.crop_length = *crop_length != DEFAULT_CROP_LENGTH(); + ret.show_matches_position = *show_matches_position; + + ret.show_ranking_score = *show_ranking_score; + ret.show_ranking_score_details = *show_ranking_score_details; + ret.ranking_score_threshold = ranking_score_threshold.is_some(); + + if let Some(hybrid) = 
hybrid { + ret.semantic_ratio = hybrid.semantic_ratio != DEFAULT_SEMANTIC_RATIO(); + ret.hybrid = true; + } + + ret + } + + pub fn succeed(&mut self, result: &SearchResult) { + let SearchResult { + hits: _, + query: _, + processing_time_ms, + hits_info: _, + semantic_hit_count: _, + facet_distribution: _, + facet_stats: _, + degraded, + used_negative_operator, + } = result; + + self.total_succeeded = self.total_succeeded.saturating_add(1); + if *degraded { + self.total_degraded = self.total_degraded.saturating_add(1); + } + if *used_negative_operator { + self.total_used_negative_operator = self.total_used_negative_operator.saturating_add(1); + } + self.time_spent.push(*processing_time_ms as usize); + } +} + +impl Aggregate for SearchAggregator { + fn event_name(&self) -> &'static str { + Method::event_name() + } + + fn aggregate(mut self: Box, new: Box) -> Box { + let Self { + total_received, + total_succeeded, + mut time_spent, + sort_with_geo_point, + sort_sum_of_criteria_terms, + sort_total_number_of_criteria, + distinct, + filter_with_geo_radius, + filter_with_geo_bounding_box, + filter_sum_of_criteria_terms, + filter_total_number_of_criteria, + used_syntax, + attributes_to_search_on_total_number_of_uses, + max_terms_number, + max_vector_size, + retrieve_vectors, + matching_strategy, + max_limit, + max_offset, + finite_pagination, + max_attributes_to_retrieve, + max_attributes_to_highlight, + highlight_pre_tag, + highlight_post_tag, + max_attributes_to_crop, + crop_marker, + show_matches_position, + crop_length, + facets_sum_of_terms, + facets_total_number_of_facets, + show_ranking_score, + show_ranking_score_details, + semantic_ratio, + hybrid, + total_degraded, + total_used_negative_operator, + ranking_score_threshold, + mut locales, + marker: _, + } = *new; + + // request + self.total_received = self.total_received.saturating_add(total_received); + self.total_succeeded = self.total_succeeded.saturating_add(total_succeeded); + self.total_degraded = 
self.total_degraded.saturating_add(total_degraded); + self.total_used_negative_operator = + self.total_used_negative_operator.saturating_add(total_used_negative_operator); + self.time_spent.append(&mut time_spent); + + // sort + self.sort_with_geo_point |= sort_with_geo_point; + self.sort_sum_of_criteria_terms = + self.sort_sum_of_criteria_terms.saturating_add(sort_sum_of_criteria_terms); + self.sort_total_number_of_criteria = + self.sort_total_number_of_criteria.saturating_add(sort_total_number_of_criteria); + + // distinct + self.distinct |= distinct; + + // filter + self.filter_with_geo_radius |= filter_with_geo_radius; + self.filter_with_geo_bounding_box |= filter_with_geo_bounding_box; + self.filter_sum_of_criteria_terms = + self.filter_sum_of_criteria_terms.saturating_add(filter_sum_of_criteria_terms); + self.filter_total_number_of_criteria = + self.filter_total_number_of_criteria.saturating_add(filter_total_number_of_criteria); + for (key, value) in used_syntax.into_iter() { + let used_syntax = self.used_syntax.entry(key).or_insert(0); + *used_syntax = used_syntax.saturating_add(value); + } + + // attributes_to_search_on + self.attributes_to_search_on_total_number_of_uses = self + .attributes_to_search_on_total_number_of_uses + .saturating_add(attributes_to_search_on_total_number_of_uses); + + // q + self.max_terms_number = self.max_terms_number.max(max_terms_number); + + // vector + self.max_vector_size = self.max_vector_size.max(max_vector_size); + self.retrieve_vectors |= retrieve_vectors; + self.semantic_ratio |= semantic_ratio; + self.hybrid |= hybrid; + + // pagination + self.max_limit = self.max_limit.max(max_limit); + self.max_offset = self.max_offset.max(max_offset); + self.finite_pagination += finite_pagination; + + // formatting + self.max_attributes_to_retrieve = + self.max_attributes_to_retrieve.max(max_attributes_to_retrieve); + self.max_attributes_to_highlight = + self.max_attributes_to_highlight.max(max_attributes_to_highlight); + 
self.highlight_pre_tag |= highlight_pre_tag; + self.highlight_post_tag |= highlight_post_tag; + self.max_attributes_to_crop = self.max_attributes_to_crop.max(max_attributes_to_crop); + self.crop_marker |= crop_marker; + self.show_matches_position |= show_matches_position; + self.crop_length |= crop_length; + + // facets + self.facets_sum_of_terms = self.facets_sum_of_terms.saturating_add(facets_sum_of_terms); + self.facets_total_number_of_facets = + self.facets_total_number_of_facets.saturating_add(facets_total_number_of_facets); + + // matching strategy + for (key, value) in matching_strategy.into_iter() { + let matching_strategy = self.matching_strategy.entry(key).or_insert(0); + *matching_strategy = matching_strategy.saturating_add(value); + } + + // scoring + self.show_ranking_score |= show_ranking_score; + self.show_ranking_score_details |= show_ranking_score_details; + self.ranking_score_threshold |= ranking_score_threshold; + + // locales + self.locales.append(&mut locales); + + self + } + + fn into_event(self: Box) -> serde_json::Value { + let Self { + total_received, + total_succeeded, + time_spent, + sort_with_geo_point, + sort_sum_of_criteria_terms, + sort_total_number_of_criteria, + distinct, + filter_with_geo_radius, + filter_with_geo_bounding_box, + filter_sum_of_criteria_terms, + filter_total_number_of_criteria, + used_syntax, + attributes_to_search_on_total_number_of_uses, + max_terms_number, + max_vector_size, + retrieve_vectors, + matching_strategy, + max_limit, + max_offset, + finite_pagination, + max_attributes_to_retrieve, + max_attributes_to_highlight, + highlight_pre_tag, + highlight_post_tag, + max_attributes_to_crop, + crop_marker, + show_matches_position, + crop_length, + facets_sum_of_terms, + facets_total_number_of_facets, + show_ranking_score, + show_ranking_score_details, + semantic_ratio, + hybrid, + total_degraded, + total_used_negative_operator, + ranking_score_threshold, + locales, + marker: _, + } = *self; + + // we get all the 
values in a sorted manner + let time_spent = time_spent.into_sorted_vec(); + // the index of the 99th percentage of value + let percentile_99th = time_spent.len() * 99 / 100; + // We are only interested by the slowest value of the 99th fastest results + let time_spent = time_spent.get(percentile_99th); + + json!({ + "requests": { + "99th_response_time": time_spent.map(|t| format!("{:.2}", t)), + "total_succeeded": total_succeeded, + "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics + "total_received": total_received, + "total_degraded": total_degraded, + "total_used_negative_operator": total_used_negative_operator, + }, + "sort": { + "with_geoPoint": sort_with_geo_point, + "avg_criteria_number": format!("{:.2}", sort_sum_of_criteria_terms as f64 / sort_total_number_of_criteria as f64), + }, + "distinct": distinct, + "filter": { + "with_geoRadius": filter_with_geo_radius, + "with_geoBoundingBox": filter_with_geo_bounding_box, + "avg_criteria_number": format!("{:.2}", filter_sum_of_criteria_terms as f64 / filter_total_number_of_criteria as f64), + "most_used_syntax": used_syntax.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), + }, + "attributes_to_search_on": { + "total_number_of_uses": attributes_to_search_on_total_number_of_uses, + }, + "q": { + "max_terms_number": max_terms_number, + }, + "vector": { + "max_vector_size": max_vector_size, + "retrieve_vectors": retrieve_vectors, + }, + "hybrid": { + "enabled": hybrid, + "semantic_ratio": semantic_ratio, + }, + "pagination": { + "max_limit": max_limit, + "max_offset": max_offset, + "most_used_navigation": if finite_pagination > (total_received / 2) { "exhaustive" } else { "estimated" }, + }, + "formatting": { + "max_attributes_to_retrieve": max_attributes_to_retrieve, + "max_attributes_to_highlight": max_attributes_to_highlight, + "highlight_pre_tag": highlight_pre_tag, + "highlight_post_tag": highlight_post_tag, + 
"max_attributes_to_crop": max_attributes_to_crop, + "crop_marker": crop_marker, + "show_matches_position": show_matches_position, + "crop_length": crop_length, + }, + "facets": { + "avg_facets_number": format!("{:.2}", facets_sum_of_terms as f64 / facets_total_number_of_facets as f64), + }, + "matching_strategy": { + "most_used_strategy": matching_strategy.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)), + }, + "locales": locales, + "scoring": { + "show_ranking_score": show_ranking_score, + "show_ranking_score_details": show_ranking_score_details, + "ranking_score_threshold": ranking_score_threshold, + }, + }) + } +} diff --git a/meilisearch/src/routes/indexes/similar.rs b/meilisearch/src/routes/indexes/similar.rs index 33df6bdad..79f42f0aa 100644 --- a/meilisearch/src/routes/indexes/similar.rs +++ b/meilisearch/src/routes/indexes/similar.rs @@ -13,10 +13,10 @@ use serde_json::Value; use tracing::debug; use super::ActionPolicy; -use crate::analytics::segment_analytics::{SimilarGET, SimilarPOST}; -use crate::analytics::{Analytics, SimilarAggregator}; +use crate::analytics::Analytics; use crate::extractors::authentication::GuardedData; use crate::extractors::sequential_extractor::SeqHandler; +use crate::routes::indexes::similar_analytics::{SimilarAggregator, SimilarGET, SimilarPOST}; use crate::search::{ add_search_rules, perform_similar, RankingScoreThresholdSimilar, RetrieveVectors, SearchKind, SimilarQuery, SimilarResult, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET, diff --git a/meilisearch/src/routes/indexes/similar_analytics.rs b/meilisearch/src/routes/indexes/similar_analytics.rs new file mode 100644 index 000000000..69685a56c --- /dev/null +++ b/meilisearch/src/routes/indexes/similar_analytics.rs @@ -0,0 +1,235 @@ +use std::collections::{BinaryHeap, HashMap}; + +use once_cell::sync::Lazy; +use regex::Regex; +use serde_json::{json, Value}; + +use crate::{ + aggregate_methods, + analytics::{Aggregate, AggregateMethod}, + 
search::{SimilarQuery, SimilarResult}, +}; + +aggregate_methods!( + SimilarPOST => "Similar POST", + SimilarGET => "Similar GET", +); + +#[derive(Default)] +pub struct SimilarAggregator { + // requests + total_received: usize, + total_succeeded: usize, + time_spent: BinaryHeap, + + // filter + filter_with_geo_radius: bool, + filter_with_geo_bounding_box: bool, + // every time a request has a filter, this field must be incremented by the number of terms it contains + filter_sum_of_criteria_terms: usize, + // every time a request has a filter, this field must be incremented by one + filter_total_number_of_criteria: usize, + used_syntax: HashMap, + + // Whether a non-default embedder was specified + retrieve_vectors: bool, + + // pagination + max_limit: usize, + max_offset: usize, + + // formatting + max_attributes_to_retrieve: usize, + + // scoring + show_ranking_score: bool, + show_ranking_score_details: bool, + ranking_score_threshold: bool, + + marker: std::marker::PhantomData, +} + +impl SimilarAggregator { + #[allow(clippy::field_reassign_with_default)] + pub fn from_query(query: &SimilarQuery) -> Self { + let SimilarQuery { + id: _, + embedder: _, + offset, + limit, + attributes_to_retrieve: _, + retrieve_vectors, + show_ranking_score, + show_ranking_score_details, + filter, + ranking_score_threshold, + } = query; + + let mut ret = Self::default(); + + ret.total_received = 1; + + if let Some(ref filter) = filter { + static RE: Lazy = Lazy::new(|| Regex::new("AND | OR").unwrap()); + ret.filter_total_number_of_criteria = 1; + + let syntax = match filter { + Value::String(_) => "string".to_string(), + Value::Array(values) => { + if values.iter().map(|v| v.to_string()).any(|s| RE.is_match(&s)) { + "mixed".to_string() + } else { + "array".to_string() + } + } + _ => "none".to_string(), + }; + // convert the string to a HashMap + ret.used_syntax.insert(syntax, 1); + + let stringified_filters = filter.to_string(); + ret.filter_with_geo_radius = 
stringified_filters.contains("_geoRadius("); + ret.filter_with_geo_bounding_box = stringified_filters.contains("_geoBoundingBox("); + ret.filter_sum_of_criteria_terms = RE.split(&stringified_filters).count(); + } + + ret.max_limit = *limit; + ret.max_offset = *offset; + + ret.show_ranking_score = *show_ranking_score; + ret.show_ranking_score_details = *show_ranking_score_details; + ret.ranking_score_threshold = ranking_score_threshold.is_some(); + + ret.retrieve_vectors = *retrieve_vectors; + + ret + } + + pub fn succeed(&mut self, result: &SimilarResult) { + let SimilarResult { id: _, hits: _, processing_time_ms, hits_info: _ } = result; + + self.total_succeeded = self.total_succeeded.saturating_add(1); + + self.time_spent.push(*processing_time_ms as usize); + } +} + +impl Aggregate for SimilarAggregator { + fn event_name(&self) -> &'static str { + Method::event_name() + } + + /// Aggregate one [SimilarAggregator] into another. + fn aggregate(mut self: Box, new: Box) -> Box { + let Self { + total_received, + total_succeeded, + mut time_spent, + filter_with_geo_radius, + filter_with_geo_bounding_box, + filter_sum_of_criteria_terms, + filter_total_number_of_criteria, + used_syntax, + max_limit, + max_offset, + max_attributes_to_retrieve, + show_ranking_score, + show_ranking_score_details, + ranking_score_threshold, + retrieve_vectors, + marker: _, + } = *new; + + // request + self.total_received = self.total_received.saturating_add(total_received); + self.total_succeeded = self.total_succeeded.saturating_add(total_succeeded); + self.time_spent.append(&mut time_spent); + + // filter + self.filter_with_geo_radius |= filter_with_geo_radius; + self.filter_with_geo_bounding_box |= filter_with_geo_bounding_box; + self.filter_sum_of_criteria_terms = + self.filter_sum_of_criteria_terms.saturating_add(filter_sum_of_criteria_terms); + self.filter_total_number_of_criteria = + self.filter_total_number_of_criteria.saturating_add(filter_total_number_of_criteria); + for (key, 
value) in used_syntax.into_iter() { + let used_syntax = self.used_syntax.entry(key).or_insert(0); + *used_syntax = used_syntax.saturating_add(value); + } + + self.retrieve_vectors |= retrieve_vectors; + + // pagination + self.max_limit = self.max_limit.max(max_limit); + self.max_offset = self.max_offset.max(max_offset); + + // formatting + self.max_attributes_to_retrieve = + self.max_attributes_to_retrieve.max(max_attributes_to_retrieve); + + // scoring + self.show_ranking_score |= show_ranking_score; + self.show_ranking_score_details |= show_ranking_score_details; + self.ranking_score_threshold |= ranking_score_threshold; + + self + } + + fn into_event(self: Box) -> serde_json::Value { + let Self { + total_received, + total_succeeded, + time_spent, + filter_with_geo_radius, + filter_with_geo_bounding_box, + filter_sum_of_criteria_terms, + filter_total_number_of_criteria, + used_syntax, + max_limit, + max_offset, + max_attributes_to_retrieve, + show_ranking_score, + show_ranking_score_details, + ranking_score_threshold, + retrieve_vectors, + marker: _, + } = *self; + + // we get all the values in a sorted manner + let time_spent = time_spent.into_sorted_vec(); + // the index of the 99th percentage of value + let percentile_99th = time_spent.len() * 99 / 100; + // We are only interested by the slowest value of the 99th fastest results + let time_spent = time_spent.get(percentile_99th); + + json!({ + "requests": { + "99th_response_time": time_spent.map(|t| format!("{:.2}", t)), + "total_succeeded": total_succeeded, + "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics + "total_received": total_received, + }, + "filter": { + "with_geoRadius": filter_with_geo_radius, + "with_geoBoundingBox": filter_with_geo_bounding_box, + "avg_criteria_number": format!("{:.2}", filter_sum_of_criteria_terms as f64 / filter_total_number_of_criteria as f64), + "most_used_syntax": used_syntax.iter().max_by_key(|(_, v)| *v).map(|(k, _)| 
json!(k)).unwrap_or_else(|| json!(null)), + }, + "vector": { + "retrieve_vectors": retrieve_vectors, + }, + "pagination": { + "max_limit": max_limit, + "max_offset": max_offset, + }, + "formatting": { + "max_attributes_to_retrieve": max_attributes_to_retrieve, + }, + "scoring": { + "show_ranking_score": show_ranking_score, + "show_ranking_score_details": show_ranking_score_details, + "ranking_score_threshold": ranking_score_threshold, + } + }) + } +} diff --git a/meilisearch/src/routes/mod.rs b/meilisearch/src/routes/mod.rs index c25aeee70..b7260ea08 100644 --- a/meilisearch/src/routes/mod.rs +++ b/meilisearch/src/routes/mod.rs @@ -25,6 +25,7 @@ pub mod indexes; mod logs; mod metrics; mod multi_search; +mod multi_search_analytics; mod snapshot; mod swap_indexes; pub mod tasks; diff --git a/meilisearch/src/routes/multi_search.rs b/meilisearch/src/routes/multi_search.rs index 13a39cb44..b7bd31716 100644 --- a/meilisearch/src/routes/multi_search.rs +++ b/meilisearch/src/routes/multi_search.rs @@ -9,7 +9,7 @@ use meilisearch_types::keys::actions; use serde::Serialize; use tracing::debug; -use crate::analytics::{Analytics, MultiSearchAggregator}; +use crate::analytics::Analytics; use crate::error::MeilisearchHttpError; use crate::extractors::authentication::policies::ActionPolicy; use crate::extractors::authentication::{AuthenticationError, GuardedData}; @@ -21,6 +21,8 @@ use crate::search::{ }; use crate::search_queue::SearchQueue; +use super::multi_search_analytics::MultiSearchAggregator; + pub fn configure(cfg: &mut web::ServiceConfig) { cfg.service(web::resource("").route(web::post().to(SeqHandler(multi_search_with_post)))); } diff --git a/meilisearch/src/routes/multi_search_analytics.rs b/meilisearch/src/routes/multi_search_analytics.rs new file mode 100644 index 000000000..be1218399 --- /dev/null +++ b/meilisearch/src/routes/multi_search_analytics.rs @@ -0,0 +1,170 @@ +use std::collections::HashSet; + +use serde_json::json; + +use crate::{ + analytics::Aggregate, 
+ search::{FederatedSearch, SearchQueryWithIndex}, +}; + +#[derive(Default)] +pub struct MultiSearchAggregator { + // requests + total_received: usize, + total_succeeded: usize, + + // sum of the number of distinct indexes in each single request, use with total_received to compute an avg + total_distinct_index_count: usize, + // number of queries with a single index, use with total_received to compute a proportion + total_single_index: usize, + + // sum of the number of search queries in the requests, use with total_received to compute an average + total_search_count: usize, + + // scoring + show_ranking_score: bool, + show_ranking_score_details: bool, + + // federation + use_federation: bool, +} + +impl MultiSearchAggregator { + pub fn from_federated_search(federated_search: &FederatedSearch) -> Self { + let use_federation = federated_search.federation.is_some(); + + let distinct_indexes: HashSet<_> = federated_search + .queries + .iter() + .map(|query| { + let query = &query; + // make sure we get a compilation error if a field gets added to / removed from SearchQueryWithIndex + let SearchQueryWithIndex { + index_uid, + federation_options: _, + q: _, + vector: _, + offset: _, + limit: _, + page: _, + hits_per_page: _, + attributes_to_retrieve: _, + retrieve_vectors: _, + attributes_to_crop: _, + crop_length: _, + attributes_to_highlight: _, + show_ranking_score: _, + show_ranking_score_details: _, + show_matches_position: _, + filter: _, + sort: _, + distinct: _, + facets: _, + highlight_pre_tag: _, + highlight_post_tag: _, + crop_marker: _, + matching_strategy: _, + attributes_to_search_on: _, + hybrid: _, + ranking_score_threshold: _, + locales: _, + } = query; + + index_uid.as_str() + }) + .collect(); + + let show_ranking_score = + federated_search.queries.iter().any(|query| query.show_ranking_score); + let show_ranking_score_details = + federated_search.queries.iter().any(|query| query.show_ranking_score_details); + + Self { + total_received: 1, + 
total_succeeded: 0, + total_distinct_index_count: distinct_indexes.len(), + total_single_index: if distinct_indexes.len() == 1 { 1 } else { 0 }, + total_search_count: federated_search.queries.len(), + show_ranking_score, + show_ranking_score_details, + use_federation, + } + } + + pub fn succeed(&mut self) { + self.total_succeeded = self.total_succeeded.saturating_add(1); + } +} + +impl Aggregate for MultiSearchAggregator { + fn event_name(&self) -> &'static str { + "Documents Searched by Multi-Search POST" + } + + /// Aggregate one [MultiSearchAggregator] into another. + fn aggregate(self: Box, new: Box) -> Box { + // write the aggregate in a way that will cause a compilation error if a field is added. + + // get ownership of self, replacing it by a default value. + let this = *self; + + let total_received = this.total_received.saturating_add(new.total_received); + let total_succeeded = this.total_succeeded.saturating_add(new.total_succeeded); + let total_distinct_index_count = + this.total_distinct_index_count.saturating_add(new.total_distinct_index_count); + let total_single_index = this.total_single_index.saturating_add(new.total_single_index); + let total_search_count = this.total_search_count.saturating_add(new.total_search_count); + let show_ranking_score = this.show_ranking_score || new.show_ranking_score; + let show_ranking_score_details = + this.show_ranking_score_details || new.show_ranking_score_details; + let use_federation = this.use_federation || new.use_federation; + + Box::new(Self { + total_received, + total_succeeded, + total_distinct_index_count, + total_single_index, + total_search_count, + show_ranking_score, + show_ranking_score_details, + use_federation, + }) + } + + fn into_event(self: Box) -> serde_json::Value { + let Self { + total_received, + total_succeeded, + total_distinct_index_count, + total_single_index, + total_search_count, + show_ranking_score, + show_ranking_score_details, + use_federation, + } = *self; + + json!({ + "requests": 
{ + "total_succeeded": total_succeeded, + "total_failed": total_received.saturating_sub(total_succeeded), // just to be sure we never panics + "total_received": total_received, + }, + "indexes": { + "total_single_index": total_single_index, + "total_distinct_index_count": total_distinct_index_count, + "avg_distinct_index_count": (total_distinct_index_count as f64) / (total_received as f64), // not 0 else returned early + }, + "searches": { + "total_search_count": total_search_count, + "avg_search_count": (total_search_count as f64) / (total_received as f64), + }, + "scoring": { + "show_ranking_score": show_ranking_score, + "show_ranking_score_details": show_ranking_score_details, + }, + "federation": { + "use_federation": use_federation, + } + }) + } +} From 9c1e54a2c86b1d39db975fc90bace2affaa5fa8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Renault?= Date: Mon, 21 Oct 2024 08:18:43 +0200 Subject: [PATCH 088/111] Move crates under a sub folder to clean up the code --- .gitignore | 3 +- Cargo.toml | 36 +++++++++--------- {benchmarks => crates/benchmarks}/.gitignore | 0 {benchmarks => crates/benchmarks}/Cargo.toml | 0 {benchmarks => crates/benchmarks}/README.md | 0 .../benchmarks}/benches/indexing.rs | 0 .../benchmarks}/benches/search_geo.rs | 0 .../benchmarks}/benches/search_songs.rs | 0 .../benchmarks}/benches/search_wiki.rs | 0 .../benchmarks}/benches/utils.rs | 0 {benchmarks => crates/benchmarks}/build.rs | 0 .../benchmarks}/scripts/compare.sh | 0 .../benchmarks}/scripts/list.sh | 0 {benchmarks => crates/benchmarks}/src/lib.rs | 0 {build-info => crates/build-info}/Cargo.toml | 0 {build-info => crates/build-info}/build.rs | 0 {build-info => crates/build-info}/src/lib.rs | 0 {dump => crates/dump}/Cargo.toml | 0 {dump => crates/dump}/README.md | 0 {dump => crates/dump}/src/error.rs | 0 {dump => crates/dump}/src/lib.rs | 0 .../dump}/src/reader/compat/mod.rs | 0 ...ompat__v1_to_v2__test__compat_v1_v2-3.snap | 0 ...ompat__v1_to_v2__test__compat_v1_v2-6.snap 
| 0 ...ompat__v1_to_v2__test__compat_v1_v2-9.snap | 0 ...mpat__v2_to_v3__test__compat_v2_v3-11.snap | 0 ...mpat__v2_to_v3__test__compat_v2_v3-14.snap | 0 ...ompat__v2_to_v3__test__compat_v2_v3-5.snap | 0 ...ompat__v2_to_v3__test__compat_v2_v3-8.snap | 0 ...mpat__v3_to_v4__test__compat_v3_v4-12.snap | 0 ...mpat__v3_to_v4__test__compat_v3_v4-15.snap | 0 ...ompat__v3_to_v4__test__compat_v3_v4-6.snap | 0 ...ompat__v3_to_v4__test__compat_v3_v4-9.snap | 0 ...mpat__v4_to_v5__test__compat_v4_v5-12.snap | 0 ...ompat__v4_to_v5__test__compat_v4_v5-6.snap | 0 ...ompat__v4_to_v5__test__compat_v4_v5-9.snap | 0 ...mpat__v5_to_v6__test__compat_v5_v6-12.snap | 0 ...ompat__v5_to_v6__test__compat_v5_v6-6.snap | 0 ...ompat__v5_to_v6__test__compat_v5_v6-9.snap | 0 .../dump}/src/reader/compat/v1_to_v2.rs | 0 .../dump}/src/reader/compat/v2_to_v3.rs | 0 .../dump}/src/reader/compat/v3_to_v4.rs | 0 .../dump}/src/reader/compat/v4_to_v5.rs | 0 .../dump}/src/reader/compat/v5_to_v6.rs | 0 {dump => crates/dump}/src/reader/mod.rs | 0 ...dump__reader__test__import_dump_v1-10.snap | 0 .../dump__reader__test__import_dump_v1-4.snap | 0 .../dump__reader__test__import_dump_v1-7.snap | 0 ...dump__reader__test__import_dump_v2-11.snap | 0 ...dump__reader__test__import_dump_v2-14.snap | 0 .../dump__reader__test__import_dump_v2-5.snap | 0 .../dump__reader__test__import_dump_v2-8.snap | 0 ...rom_meilisearch_v0_22_0_issue_3435-11.snap | 0 ...from_meilisearch_v0_22_0_issue_3435-5.snap | 0 ...from_meilisearch_v0_22_0_issue_3435-8.snap | 0 ...dump__reader__test__import_dump_v3-11.snap | 0 ...dump__reader__test__import_dump_v3-14.snap | 0 .../dump__reader__test__import_dump_v3-5.snap | 0 .../dump__reader__test__import_dump_v3-8.snap | 0 ...dump__reader__test__import_dump_v4-12.snap | 0 .../dump__reader__test__import_dump_v4-6.snap | 0 .../dump__reader__test__import_dump_v4-9.snap | 0 ...dump__reader__test__import_dump_v5-12.snap | 0 .../dump__reader__test__import_dump_v5-6.snap | 0 
.../dump__reader__test__import_dump_v5-9.snap | 0 ...__test__import_dump_v6_with_vectors-5.snap | 0 ...__test__import_dump_v6_with_vectors-6.snap | 0 ...__test__import_dump_v6_with_vectors-7.snap | 0 ...__test__import_dump_v6_with_vectors-8.snap | 0 ...__test__import_dump_v6_with_vectors-9.snap | 0 {dump => crates/dump}/src/reader/v1/mod.rs | 0 .../dump}/src/reader/v1/settings.rs | 0 ...mp__reader__v1__test__read_dump_v1-10.snap | 0 ...ump__reader__v1__test__read_dump_v1-2.snap | 0 ...ump__reader__v1__test__read_dump_v1-6.snap | 0 {dump => crates/dump}/src/reader/v1/update.rs | 0 {dump => crates/dump}/src/reader/v2/errors.rs | 0 {dump => crates/dump}/src/reader/v2/meta.rs | 0 {dump => crates/dump}/src/reader/v2/mod.rs | 0 .../dump}/src/reader/v2/settings.rs | 0 ...mp__reader__v2__test__read_dump_v2-11.snap | 0 ...mp__reader__v2__test__read_dump_v2-14.snap | 0 ...ump__reader__v2__test__read_dump_v2-5.snap | 0 ...ump__reader__v2__test__read_dump_v2-8.snap | 0 ...rom_meilisearch_v0_22_0_issue_3435-10.snap | 0 ...from_meilisearch_v0_22_0_issue_3435-4.snap | 0 ...from_meilisearch_v0_22_0_issue_3435-7.snap | 0 .../dump}/src/reader/v2/updates.rs | 0 {dump => crates/dump}/src/reader/v3/errors.rs | 0 {dump => crates/dump}/src/reader/v3/meta.rs | 0 {dump => crates/dump}/src/reader/v3/mod.rs | 0 .../dump}/src/reader/v3/settings.rs | 0 ...mp__reader__v3__test__read_dump_v3-11.snap | 0 ...mp__reader__v3__test__read_dump_v3-14.snap | 0 ...ump__reader__v3__test__read_dump_v3-5.snap | 0 ...ump__reader__v3__test__read_dump_v3-8.snap | 0 .../dump}/src/reader/v3/updates.rs | 0 {dump => crates/dump}/src/reader/v4/errors.rs | 0 {dump => crates/dump}/src/reader/v4/keys.rs | 0 {dump => crates/dump}/src/reader/v4/meta.rs | 0 {dump => crates/dump}/src/reader/v4/mod.rs | 0 .../dump}/src/reader/v4/settings.rs | 0 ...mp__reader__v4__test__read_dump_v4-10.snap | 0 ...mp__reader__v4__test__read_dump_v4-13.snap | 0 ...ump__reader__v4__test__read_dump_v4-7.snap | 0 {dump => 
crates/dump}/src/reader/v4/tasks.rs | 0 {dump => crates/dump}/src/reader/v5/errors.rs | 0 {dump => crates/dump}/src/reader/v5/keys.rs | 0 {dump => crates/dump}/src/reader/v5/meta.rs | 0 {dump => crates/dump}/src/reader/v5/mod.rs | 0 .../dump}/src/reader/v5/settings.rs | 0 ...mp__reader__v5__test__read_dump_v5-10.snap | 0 ...mp__reader__v5__test__read_dump_v5-13.snap | 0 ...ump__reader__v5__test__read_dump_v5-7.snap | 0 {dump => crates/dump}/src/reader/v5/tasks.rs | 0 {dump => crates/dump}/src/reader/v6/mod.rs | 0 {dump => crates/dump}/src/writer.rs | 0 {dump => crates/dump}/tests/assets/v1.dump | Bin .../dump}/tests/assets/v2-v0.22.0.dump | Bin {dump => crates/dump}/tests/assets/v2.dump | Bin {dump => crates/dump}/tests/assets/v3.dump | Bin {dump => crates/dump}/tests/assets/v4.dump | Bin {dump => crates/dump}/tests/assets/v5.dump | Bin .../tests/assets/v6-with-experimental.dump | Bin .../dump}/tests/assets/v6-with-vectors.dump | Bin {file-store => crates/file-store}/Cargo.toml | 0 {file-store => crates/file-store}/src/lib.rs | 0 .../filter-parser}/Cargo.toml | 0 .../filter-parser}/README.md | 0 .../filter-parser}/fuzz/.gitignore | 0 .../filter-parser}/fuzz/Cargo.toml | 0 .../fuzz}/fuzz/corpus/parse/test_1 | 0 .../fuzz}/fuzz/corpus/parse/test_10 | 0 .../fuzz}/fuzz/corpus/parse/test_11 | 0 .../fuzz}/fuzz/corpus/parse/test_12 | 0 .../fuzz}/fuzz/corpus/parse/test_13 | 0 .../fuzz}/fuzz/corpus/parse/test_14 | 0 .../fuzz}/fuzz/corpus/parse/test_15 | 0 .../fuzz}/fuzz/corpus/parse/test_16 | 0 .../fuzz}/fuzz/corpus/parse/test_17 | 0 .../fuzz}/fuzz/corpus/parse/test_18 | 0 .../fuzz}/fuzz/corpus/parse/test_19 | 0 .../fuzz}/fuzz/corpus/parse/test_2 | 0 .../fuzz}/fuzz/corpus/parse/test_20 | 0 .../fuzz}/fuzz/corpus/parse/test_21 | 0 .../fuzz}/fuzz/corpus/parse/test_22 | 0 .../fuzz}/fuzz/corpus/parse/test_23 | 0 .../fuzz}/fuzz/corpus/parse/test_24 | 0 .../fuzz}/fuzz/corpus/parse/test_25 | 0 .../fuzz}/fuzz/corpus/parse/test_26 | 0 .../fuzz}/fuzz/corpus/parse/test_27 | 0 
.../fuzz}/fuzz/corpus/parse/test_28 | 0 .../fuzz}/fuzz/corpus/parse/test_29 | 0 .../fuzz}/fuzz/corpus/parse/test_3 | 0 .../fuzz}/fuzz/corpus/parse/test_30 | 0 .../fuzz}/fuzz/corpus/parse/test_31 | 0 .../fuzz}/fuzz/corpus/parse/test_32 | 0 .../fuzz}/fuzz/corpus/parse/test_33 | 0 .../fuzz}/fuzz/corpus/parse/test_34 | 0 .../fuzz}/fuzz/corpus/parse/test_35 | 0 .../fuzz}/fuzz/corpus/parse/test_36 | 0 .../fuzz}/fuzz/corpus/parse/test_37 | 0 .../fuzz}/fuzz/corpus/parse/test_38 | 0 .../fuzz}/fuzz/corpus/parse/test_39 | 0 .../fuzz}/fuzz/corpus/parse/test_4 | 0 .../fuzz}/fuzz/corpus/parse/test_40 | 0 .../fuzz}/fuzz/corpus/parse/test_41 | 0 .../fuzz}/fuzz/corpus/parse/test_42 | 0 .../fuzz}/fuzz/corpus/parse/test_43 | 0 .../fuzz}/fuzz/corpus/parse/test_5 | 0 .../fuzz}/fuzz/corpus/parse/test_6 | 0 .../fuzz}/fuzz/corpus/parse/test_7 | 0 .../fuzz}/fuzz/corpus/parse/test_8 | 0 .../fuzz}/fuzz/corpus/parse/test_9 | 0 .../filter-parser}/fuzz/fuzz_targets/parse.rs | 0 .../filter-parser}/src/condition.rs | 0 .../filter-parser}/src/error.rs | 0 .../filter-parser}/src/lib.rs | 0 .../filter-parser}/src/main.rs | 0 .../filter-parser}/src/value.rs | 0 .../flatten-serde-json}/Cargo.toml | 0 .../flatten-serde-json}/README.md | 0 .../flatten-serde-json}/benches/benchmarks.rs | 0 .../flatten-serde-json}/fuzz/Cargo.toml | 0 .../fuzz/fuzz_targets/flatten.rs | 0 .../flatten-serde-json}/src/lib.rs | 0 .../flatten-serde-json}/src/main.rs | 0 {fuzzers => crates/fuzzers}/Cargo.toml | 0 {fuzzers => crates/fuzzers}/README.md | 0 .../fuzzers}/src/bin/fuzz-indexing.rs | 0 {fuzzers => crates/fuzzers}/src/lib.rs | 0 .../index-scheduler}/Cargo.toml | 0 .../index-scheduler}/src/autobatcher.rs | 0 .../index-scheduler}/src/batch.rs | 0 .../index-scheduler}/src/error.rs | 0 .../index-scheduler}/src/features.rs | 0 .../src/index_mapper/index_map.rs | 0 .../index-scheduler}/src/index_mapper/mod.rs | 0 .../index-scheduler}/src/insta_snapshot.rs | 0 .../index-scheduler}/src/lib.rs | 0 .../index-scheduler}/src/lru.rs 
| 0 ...x_scheduler__tests__import_vectors-15.snap | 0 ...ex_scheduler__tests__import_vectors-2.snap | 0 ...x_scheduler__tests__import_vectors-22.snap | 0 ...ex_scheduler__tests__import_vectors-5.snap | 0 ...ex_scheduler__tests__import_vectors-8.snap | 0 ...ndex_scheduler__tests__import_vectors.snap | 0 ...x_scheduler__tests__settings_update-2.snap | 0 ...x_scheduler__tests__settings_update-5.snap | 0 ...dex_scheduler__tests__settings_update.snap | 0 .../cancel_processed.snap | 0 .../initial_tasks_enqueued.snap | 0 .../aborted_indexation.snap | 0 .../cancel_mix_of_tasks/cancel_processed.snap | 0 .../first_task_processed.snap | 0 ...rocessing_second_task_cancel_enqueued.snap | 0 .../after_dump_register.snap | 0 .../cancel_processed.snap | 0 .../cancel_registered.snap | 0 .../aborted_indexation.snap | 0 .../cancel_processed.snap | 0 .../cancel_task_registered.snap | 0 .../initial_task_processing.snap | 0 .../registered_the_first_task.snap | 0 .../cancel_processed.snap | 0 .../initial_task_processed.snap | 0 .../registered_the_first_task.snap | 0 .../all_tasks_processed.snap | 0 .../document_addition/after_register.snap | 0 .../after_the_batch_creation.snap | 0 .../once_everything_is_processed.snap | 0 .../after_processing_the_batch.snap | 0 .../documents.snap | 0 .../registered_the_first_task.snap | 0 .../registered_the_second_task.snap | 0 .../before_index_creation.snap | 0 .../both_task_succeeded.snap | 0 .../registered_the_first_task.snap | 0 .../registered_the_second_task.snap | 0 .../registered_the_third_task.snap | 0 .../1.snap | 0 .../2.snap | 0 .../after_failing_the_deletion.snap | 0 .../after_last_successful_addition.snap | 0 .../documents.snap | 0 .../registered_the_first_task.snap | 0 .../registered_the_second_task.snap | 0 .../document_addition_batch_created.snap | 0 .../document_addition_failed.snap | 0 .../registered_the_first_task.snap | 0 .../after_adding_the_documents.snap | 0 .../after_adding_the_settings.snap | 0 
...ter_adding_the_settings_and_documents.snap | 0 .../after_removing_the_documents.snap | 0 ...cuments_remaining_should_only_be_bork.snap | 0 .../registered_the_document_deletions.snap | 0 ...red_the_setting_and_document_addition.snap | 0 .../after_register.snap | 0 .../index_creation_failed.snap | 0 .../after_batch_succeeded.snap | 0 .../after_failing_to_commit.snap | 0 ...eeded_but_index_scheduler_not_updated.snap | 0 .../registered_the_first_task.snap | 0 .../task_successfully_processed.snap | 0 .../Intel to kefir succeeds.snap | 0 .../lib.rs/import_vectors/Intel to kefir.snap | 0 .../import_vectors/adding Intel succeeds.snap | 0 .../import_vectors/after adding Intel.snap | 0 ...ter_registering_settings_task_vectors.snap | 0 .../settings_update_processed_vectors.snap | 0 .../documents after initial push.snap | 0 .../after_batch_creation.snap | 0 .../registered_the_first_task.snap | 0 .../registered_the_second_task.snap | 0 .../registered_the_third_task.snap | 0 .../index_creation_failed.snap | 0 .../registered_the_first_task.snap | 0 .../processed_the_first_task.snap | 0 .../processed_the_second_task.snap | 0 .../processed_the_third_task.snap | 0 .../registered_the_first_task.snap | 0 .../registered_the_second_task.snap | 0 .../registered_the_third_task.snap | 0 .../first.snap | 0 .../fourth.snap | 0 .../registered_the_first_task.snap | 0 .../registered_the_fourth_task.snap | 0 .../registered_the_second_task.snap | 0 .../registered_the_third_task.snap | 0 .../second.snap | 0 .../third.snap | 0 .../lib.rs/query_tasks_canceled_by/start.snap | 0 .../processed_all_tasks.snap | 0 .../registered_the_first_task.snap | 0 .../registered_the_second_task.snap | 0 .../registered_the_third_task.snap | 0 .../lib.rs/query_tasks_simple/end.snap | 0 .../lib.rs/query_tasks_simple/start.snap | 0 .../query_tasks_special_rules/start.snap | 0 ...everything_is_successfully_registered.snap | 0 .../lib.rs/swap_indexes/create_a.snap | 0 .../lib.rs/swap_indexes/create_b.snap | 0 
.../lib.rs/swap_indexes/create_c.snap | 0 .../lib.rs/swap_indexes/create_d.snap | 0 .../swap_indexes/first_swap_processed.snap | 0 .../swap_indexes/first_swap_registered.snap | 0 .../swap_indexes/second_swap_processed.snap | 0 .../third_empty_swap_processed.snap | 0 .../swap_indexes/two_swaps_registered.snap | 0 .../after_the_index_creation.snap | 0 .../first_swap_failed.snap | 0 .../initial_tasks_processed.snap | 0 .../initial_tasks_enqueued.snap | 0 .../initial_tasks_processed.snap | 0 .../task_deletion_processed.snap | 0 .../after_registering_the_task_deletion.snap | 0 .../initial_tasks_enqueued.snap | 0 .../initial_tasks_processed.snap | 0 .../task_deletion_processed.snap | 0 .../initial_tasks_enqueued.snap | 0 .../task_deletion_done.snap | 0 .../task_deletion_enqueued.snap | 0 .../task_deletion_processing.snap | 0 .../after_the_second_task_deletion.snap | 0 .../everything_has_been_processed.snap | 0 .../task_deletion_have_been_enqueued.snap | 0 .../task_deletion_have_been_processed.snap | 0 .../task_queue_is_full.snap | 0 .../task_deletion_have_not_been_enqueued.snap | 0 .../task_queue_is_full.snap | 0 .../after_processing_the_10_tasks.snap | 0 .../after_registering_the_10_tasks.snap | 0 .../documents.snap | 0 .../processed_the_first_task.snap | 0 .../registered_the_first_task.snap | 0 .../after_registering_the_10_tasks.snap | 0 .../all_tasks_processed.snap | 0 .../documents.snap | 0 .../five_tasks_processed.snap | 0 .../processed_the_first_task.snap | 0 .../registered_the_first_task.snap | 0 .../after_processing_the_10_tasks.snap | 0 .../after_registering_the_10_tasks.snap | 0 .../after_registering_the_10_tasks.snap | 0 .../all_tasks_processed.snap | 0 .../five_tasks_processed.snap | 0 .../after_registering_the_10_tasks.snap | 0 .../all_tasks_processed.snap | 0 .../documents.snap | 0 .../only_first_task_failed.snap | 0 .../after_registering_the_10_tasks.snap | 0 .../all_tasks_processed.snap | 0 .../documents.snap | 0 .../processed_the_first_task.snap | 0 
.../registered_the_first_task.snap | 0 .../after_registering_the_5_tasks.snap | 0 .../documents.snap | 0 .../fifth_task_succeeds.snap | 0 .../first_and_second_task_fails.snap | 0 .../fourth_task_fails.snap | 0 .../third_task_succeeds.snap | 0 .../after_registering_the_3_tasks.snap | 0 .../documents.snap | 0 .../only_first_task_succeed.snap | 0 .../second_task_fails.snap | 0 .../third_task_fails.snap | 0 .../after_registering_the_3_tasks.snap | 0 .../documents.snap | 0 .../only_first_task_succeed.snap | 0 .../second_and_third_tasks_fails.snap | 0 .../after_registering_the_6_tasks.snap | 0 .../all_other_tasks_succeeds.snap | 0 .../documents.snap | 0 .../first_task_fails.snap | 0 .../second_task_fails.snap | 0 .../third_task_succeeds.snap | 0 .../after_registering_the_6_tasks.snap | 0 .../all_other_tasks_succeeds.snap | 0 .../documents.snap | 0 .../first_task_succeed.snap | 0 .../second_task_fails.snap | 0 .../third_task_succeeds.snap | 0 .../lib.rs/test_document_replace/1.snap | 0 .../lib.rs/test_document_replace/2.snap | 0 .../test_document_replace/documents.snap | 0 .../after_registering_the_10_tasks.snap | 0 .../all_tasks_processed.snap | 0 .../documents.snap | 0 .../five_tasks_processed.snap | 0 .../lib.rs/test_document_update/1.snap | 0 .../lib.rs/test_document_update/2.snap | 0 .../test_document_update/documents.snap | 0 .../after_registering_the_10_tasks.snap | 0 .../all_tasks_processed.snap | 0 .../documents.snap | 0 .../five_tasks_processed.snap | 0 .../after_registering_the_10_tasks.snap | 0 .../all_tasks_processed.snap | 0 .../documents.snap | 0 .../five_tasks_processed.snap | 0 .../after_registering_settings_task.snap | 0 .../settings_update_processed.snap | 0 .../registered_a_task.snap | 0 .../index-scheduler}/src/utils.rs | 0 .../index-scheduler}/src/uuid_codec.rs | 0 .../json-depth-checker}/Cargo.toml | 0 .../json-depth-checker}/benches/depth.rs | 0 .../json-depth-checker}/fuzz/Cargo.toml | 0 .../fuzz/fuzz_targets/depth.rs | 0 
.../json-depth-checker}/src/lib.rs | 0 {meili-snap => crates/meili-snap}/Cargo.toml | 0 {meili-snap => crates/meili-snap}/src/lib.rs | 0 .../src/snapshots/lib.rs/snap/4.snap | 0 .../src/snapshots/lib.rs/snap/5.snap | 0 .../src/snapshots/lib.rs/snap/6.snap | 0 .../src/snapshots/lib.rs/snap/7.snap | 0 .../snapshots/lib.rs/snap/snap_name_1.snap | 0 .../src/snapshots/lib.rs/some_test/4.snap | 0 .../src/snapshots/lib.rs/some_test/5.snap | 0 .../src/snapshots/lib.rs/some_test/6.snap | 0 .../src/snapshots/lib.rs/some_test/7.snap | 0 .../lib.rs/some_test/snap_name_1.snap | 0 .../meilisearch-auth}/Cargo.toml | 0 .../meilisearch-auth}/src/dump.rs | 0 .../meilisearch-auth}/src/error.rs | 0 .../meilisearch-auth}/src/lib.rs | 0 .../meilisearch-auth}/src/store.rs | 0 .../meilisearch-types}/Cargo.toml | 0 .../meilisearch-types}/src/compression.rs | 0 .../meilisearch-types}/src/deserr/mod.rs | 0 .../src/deserr/query_params.rs | 0 .../src/document_formats.rs | 0 .../meilisearch-types}/src/error.rs | 0 .../src/facet_values_sort.rs | 0 .../meilisearch-types}/src/features.rs | 0 .../meilisearch-types}/src/index_uid.rs | 0 .../src/index_uid_pattern.rs | 0 .../meilisearch-types}/src/keys.rs | 0 .../meilisearch-types}/src/lib.rs | 0 .../meilisearch-types}/src/locales.rs | 0 .../meilisearch-types}/src/settings.rs | 0 .../meilisearch-types}/src/star_or.rs | 0 .../meilisearch-types}/src/task_view.rs | 0 .../meilisearch-types}/src/tasks.rs | 0 .../meilisearch-types}/src/versioning.rs | 0 .../meilisearch}/Cargo.toml | 0 {meilisearch => crates/meilisearch}/build.rs | 0 .../src/analytics/mock_analytics.rs | 0 .../meilisearch}/src/analytics/mod.rs | 0 .../src/analytics/segment_analytics.rs | 0 .../meilisearch}/src/error.rs | 0 .../src/extractors/authentication/error.rs | 0 .../src/extractors/authentication/mod.rs | 0 .../meilisearch}/src/extractors/mod.rs | 0 .../meilisearch}/src/extractors/payload.rs | 0 .../src/extractors/sequential_extractor.rs | 0 .../meilisearch}/src/lib.rs | 0 
.../meilisearch}/src/main.rs | 0 .../meilisearch}/src/metrics.rs | 0 .../meilisearch}/src/middleware.rs | 0 .../meilisearch}/src/option.rs | 0 .../meilisearch}/src/routes/api_key.rs | 0 .../meilisearch}/src/routes/dump.rs | 0 .../meilisearch}/src/routes/features.rs | 0 .../src/routes/indexes/documents.rs | 0 .../src/routes/indexes/facet_search.rs | 0 .../meilisearch}/src/routes/indexes/mod.rs | 0 .../meilisearch}/src/routes/indexes/search.rs | 0 .../src/routes/indexes/settings.rs | 0 .../src/routes/indexes/similar.rs | 0 .../meilisearch}/src/routes/logs.rs | 0 .../meilisearch}/src/routes/metrics.rs | 0 .../meilisearch}/src/routes/mod.rs | 0 .../meilisearch}/src/routes/multi_search.rs | 0 .../meilisearch}/src/routes/snapshot.rs | 0 .../meilisearch}/src/routes/swap_indexes.rs | 0 .../meilisearch}/src/routes/tasks.rs | 0 .../meilisearch}/src/search/federated.rs | 0 .../meilisearch}/src/search/mod.rs | 0 .../meilisearch}/src/search/ranking_rules.rs | 0 .../meilisearch}/src/search_queue.rs | 0 .../tests/assets/dumps/v1/metadata.json | 0 .../assets/dumps/v1/test/documents.jsonl | 0 .../tests/assets/dumps/v1/test/settings.json | 0 .../tests/assets/dumps/v1/test/updates.jsonl | 0 .../meilisearch}/tests/assets/test_set.json | 0 .../meilisearch}/tests/assets/test_set.ndjson | 0 .../tests/assets/v1_v0.20.0_movies.dump | Bin .../v1_v0.20.0_movies_with_settings.dump | Bin .../v1_v0.20.0_rubygems_with_settings.dump | Bin .../tests/assets/v2_v0.21.1_movies.dump | Bin .../v2_v0.21.1_movies_with_settings.dump | Bin .../v2_v0.21.1_rubygems_with_settings.dump | Bin .../tests/assets/v3_v0.24.0_movies.dump | Bin .../v3_v0.24.0_movies_with_settings.dump | Bin .../v3_v0.24.0_rubygems_with_settings.dump | Bin .../tests/assets/v4_v0.25.2_movies.dump | Bin .../v4_v0.25.2_movies_with_settings.dump | Bin .../v4_v0.25.2_rubygems_with_settings.dump | Bin .../tests/assets/v5_v0.28.0_test_dump.dump | Bin ..._use_deactivated_experimental_setting.dump | Bin .../meilisearch}/tests/auth/api_keys.rs | 
0 .../meilisearch}/tests/auth/authorization.rs | 0 .../meilisearch}/tests/auth/errors.rs | 0 .../meilisearch}/tests/auth/mod.rs | 0 .../meilisearch}/tests/auth/payload.rs | 0 .../meilisearch}/tests/auth/tenant_token.rs | 0 .../tests/auth/tenant_token_multi_search.rs | 0 .../meilisearch}/tests/common/encoder.rs | 0 .../meilisearch}/tests/common/index.rs | 0 .../meilisearch}/tests/common/mod.rs | 0 .../meilisearch}/tests/common/server.rs | 0 .../meilisearch}/tests/common/service.rs | 0 .../meilisearch}/tests/content_type.rs | 0 .../meilisearch}/tests/dashboard/mod.rs | 0 .../tests/documents/add_documents.rs | 0 .../tests/documents/delete_documents.rs | 0 .../meilisearch}/tests/documents/errors.rs | 0 .../tests/documents/get_documents.rs | 0 .../meilisearch}/tests/documents/mod.rs | 0 .../tests/documents/update_documents.rs | 0 .../meilisearch}/tests/dumps/data.rs | 0 .../meilisearch}/tests/dumps/mod.rs | 0 .../1.snap | 0 .../2.snap | 0 .../mod.rs/import_dump_v1_movie_raw/1.snap | 0 .../mod.rs/import_dump_v1_movie_raw/2.snap | 0 .../mod.rs/import_dump_v1_movie_raw/3.snap | 0 .../mod.rs/import_dump_v1_movie_raw/4.snap | 0 .../mod.rs/import_dump_v1_movie_raw/5.snap | 0 .../mod.rs/import_dump_v1_movie_raw/6.snap | 0 .../mod.rs/import_dump_v1_movie_raw/7.snap | 0 .../import_dump_v1_movie_with_settings/1.snap | 0 .../import_dump_v1_movie_with_settings/2.snap | 0 .../import_dump_v1_movie_with_settings/3.snap | 0 .../import_dump_v1_movie_with_settings/4.snap | 0 .../import_dump_v1_movie_with_settings/5.snap | 0 .../import_dump_v1_movie_with_settings/6.snap | 0 .../import_dump_v1_movie_with_settings/7.snap | 0 .../1.snap | 0 .../2.snap | 0 .../3.snap | 0 .../4.snap | 0 .../5.snap | 0 .../6.snap | 0 .../7.snap | 0 .../mod.rs/import_dump_v2_movie_raw/1.snap | 0 .../mod.rs/import_dump_v2_movie_raw/2.snap | 0 .../mod.rs/import_dump_v2_movie_raw/3.snap | 0 .../mod.rs/import_dump_v2_movie_raw/4.snap | 0 .../mod.rs/import_dump_v2_movie_raw/5.snap | 0 
.../mod.rs/import_dump_v2_movie_raw/6.snap | 0 .../mod.rs/import_dump_v2_movie_raw/7.snap | 0 .../import_dump_v2_movie_with_settings/1.snap | 0 .../import_dump_v2_movie_with_settings/2.snap | 0 .../import_dump_v2_movie_with_settings/3.snap | 0 .../import_dump_v2_movie_with_settings/4.snap | 0 .../import_dump_v2_movie_with_settings/5.snap | 0 .../import_dump_v2_movie_with_settings/6.snap | 0 .../import_dump_v2_movie_with_settings/7.snap | 0 .../1.snap | 0 .../2.snap | 0 .../3.snap | 0 .../4.snap | 0 .../5.snap | 0 .../6.snap | 0 .../7.snap | 0 .../mod.rs/import_dump_v3_movie_raw/1.snap | 0 .../mod.rs/import_dump_v3_movie_raw/2.snap | 0 .../mod.rs/import_dump_v3_movie_raw/3.snap | 0 .../mod.rs/import_dump_v3_movie_raw/4.snap | 0 .../mod.rs/import_dump_v3_movie_raw/5.snap | 0 .../mod.rs/import_dump_v3_movie_raw/6.snap | 0 .../mod.rs/import_dump_v3_movie_raw/7.snap | 0 .../import_dump_v3_movie_with_settings/1.snap | 0 .../import_dump_v3_movie_with_settings/2.snap | 0 .../import_dump_v3_movie_with_settings/3.snap | 0 .../import_dump_v3_movie_with_settings/4.snap | 0 .../import_dump_v3_movie_with_settings/5.snap | 0 .../import_dump_v3_movie_with_settings/6.snap | 0 .../import_dump_v3_movie_with_settings/7.snap | 0 .../1.snap | 0 .../2.snap | 0 .../3.snap | 0 .../4.snap | 0 .../5.snap | 0 .../6.snap | 0 .../7.snap | 0 .../mod.rs/import_dump_v4_movie_raw/1.snap | 0 .../mod.rs/import_dump_v4_movie_raw/2.snap | 0 .../mod.rs/import_dump_v4_movie_raw/3.snap | 0 .../mod.rs/import_dump_v4_movie_raw/4.snap | 0 .../mod.rs/import_dump_v4_movie_raw/5.snap | 0 .../mod.rs/import_dump_v4_movie_raw/6.snap | 0 .../mod.rs/import_dump_v4_movie_raw/7.snap | 0 .../import_dump_v4_movie_with_settings/1.snap | 0 .../import_dump_v4_movie_with_settings/2.snap | 0 .../import_dump_v4_movie_with_settings/3.snap | 0 .../import_dump_v4_movie_with_settings/4.snap | 0 .../import_dump_v4_movie_with_settings/5.snap | 0 .../import_dump_v4_movie_with_settings/6.snap | 0 
.../import_dump_v4_movie_with_settings/7.snap | 0 .../1.snap | 0 .../2.snap | 0 .../3.snap | 0 .../4.snap | 0 .../5.snap | 0 .../6.snap | 0 .../7.snap | 0 .../snapshots/mod.rs/import_dump_v5/1.snap | 0 .../snapshots/mod.rs/import_dump_v5/2.snap | 0 .../snapshots/mod.rs/import_dump_v5/3.snap | 0 .../snapshots/mod.rs/import_dump_v5/4.snap | 0 .../snapshots/mod.rs/import_dump_v5/5.snap | 0 .../snapshots/mod.rs/import_dump_v5/6.snap | 0 .../snapshots/mod.rs/import_dump_v5/7.snap | 0 .../meilisearch}/tests/features/mod.rs | 0 .../meilisearch}/tests/index/create_index.rs | 0 .../meilisearch}/tests/index/delete_index.rs | 0 .../meilisearch}/tests/index/errors.rs | 0 .../meilisearch}/tests/index/get_index.rs | 0 .../meilisearch}/tests/index/mod.rs | 0 .../meilisearch}/tests/index/stats.rs | 0 .../meilisearch}/tests/index/update_index.rs | 0 .../meilisearch}/tests/integration.rs | 0 .../meilisearch}/tests/logs/error.rs | 0 .../meilisearch}/tests/logs/mod.rs | 0 .../meilisearch}/tests/search/distinct.rs | 0 .../meilisearch}/tests/search/errors.rs | 0 .../meilisearch}/tests/search/facet_search.rs | 0 .../meilisearch}/tests/search/formatted.rs | 0 .../meilisearch}/tests/search/geo.rs | 0 .../meilisearch}/tests/search/hybrid.rs | 0 .../meilisearch}/tests/search/locales.rs | 0 .../tests/search/matching_strategy.rs | 0 .../meilisearch}/tests/search/mod.rs | 0 .../meilisearch}/tests/search/multi.rs | 0 .../meilisearch}/tests/search/pagination.rs | 0 .../tests/search/restrict_searchable.rs | 0 .../meilisearch}/tests/search/search_queue.rs | 0 .../distinct_at_search_time/succeed.snap | 0 .../distinct_at_search_time/task-succeed.snap | 0 .../meilisearch}/tests/settings/distinct.rs | 0 .../meilisearch}/tests/settings/errors.rs | 0 .../tests/settings/get_settings.rs | 0 .../meilisearch}/tests/settings/mod.rs | 0 .../tests/settings/proximity_settings.rs | 0 .../tests/settings/tokenizer_customization.rs | 0 .../meilisearch}/tests/similar/errors.rs | 0 
.../meilisearch}/tests/similar/mod.rs | 0 .../meilisearch}/tests/snapshot/mod.rs | 0 .../meilisearch}/tests/stats/mod.rs | 0 .../meilisearch}/tests/swap_indexes/errors.rs | 0 .../meilisearch}/tests/swap_indexes/mod.rs | 0 .../meilisearch}/tests/tasks/errors.rs | 0 .../meilisearch}/tests/tasks/mod.rs | 0 .../meilisearch}/tests/tasks/webhook.rs | 0 .../tests/vector/binary_quantized.rs | 0 .../tests/vector/intel_gen.txt.gz | Bin .../meilisearch}/tests/vector/mod.rs | 0 .../meilisearch}/tests/vector/openai.rs | 0 .../tests/vector/openai_responses.json.gz | Bin .../vector/openai_tokenized_responses.json.gz | Bin .../meilisearch}/tests/vector/rest.rs | 0 .../meilisearch}/tests/vector/settings.rs | 0 .../document-added.snap | 0 .../document-deleted.snap | 0 .../settings-processed.snap | 0 {meilitool => crates/meilitool}/Cargo.toml | 0 {meilitool => crates/meilitool}/src/main.rs | 0 .../meilitool}/src/uuid_codec.rs | 0 {milli => crates/milli}/Cargo.toml | 0 {milli => crates/milli}/README.md | 0 {milli => crates/milli}/examples/index.rs | 0 {milli => crates/milli}/examples/search.rs | 0 {milli => crates/milli}/examples/settings.rs | 0 {milli => crates/milli}/fuzz/.gitignore | 0 {milli => crates/milli}/src/asc_desc.rs | 0 {milli => crates/milli}/src/criterion.rs | 0 .../milli}/src/documents/builder.rs | 0 .../milli}/src/documents/enriched.rs | 0 {milli => crates/milli}/src/documents/mod.rs | 0 .../milli}/src/documents/primary_key.rs | 0 .../milli}/src/documents/reader.rs | 0 .../milli}/src/documents/serde_impl.rs | 0 {milli => crates/milli}/src/error.rs | 0 .../milli}/src/external_documents_ids.rs | 0 .../milli}/src/facet/facet_type.rs | 0 .../milli}/src/facet/facet_value.rs | 0 {milli => crates/milli}/src/facet/mod.rs | 0 .../milli}/src/facet/value_encoding.rs | 0 .../milli}/src/fieldids_weights_map.rs | 0 {milli => crates/milli}/src/fields_ids_map.rs | 0 .../milli}/src/heed_codec/beu16_str_codec.rs | 0 .../milli}/src/heed_codec/beu32_str_codec.rs | 0 
.../milli}/src/heed_codec/byte_slice_ref.rs | 0 .../facet/field_doc_id_facet_codec.rs | 0 .../milli}/src/heed_codec/facet/mod.rs | 0 .../src/heed_codec/facet/ordered_f64_codec.rs | 0 .../heed_codec/field_id_word_count_codec.rs | 0 .../milli}/src/heed_codec/fst_set_codec.rs | 0 {milli => crates/milli}/src/heed_codec/mod.rs | 0 .../milli}/src/heed_codec/obkv_codec.rs | 0 .../roaring_bitmap/bo_roaring_bitmap_codec.rs | 0 .../cbo_roaring_bitmap_codec.rs | 0 .../src/heed_codec/roaring_bitmap/mod.rs | 0 .../roaring_bitmap/roaring_bitmap_codec.rs | 0 .../bo_roaring_bitmap_len_codec.rs | 0 .../cbo_roaring_bitmap_len_codec.rs | 0 .../heed_codec/roaring_bitmap_length/mod.rs | 0 .../roaring_bitmap_len_codec.rs | 0 .../milli}/src/heed_codec/str_beu32_codec.rs | 0 .../milli}/src/heed_codec/str_ref.rs | 0 .../milli}/src/heed_codec/str_str_u8_codec.rs | 0 {milli => crates/milli}/src/index.rs | 0 {milli => crates/milli}/src/lib.rs | 0 .../milli}/src/localized_attributes_rules.rs | 0 {milli => crates/milli}/src/order_by_map.rs | 0 {milli => crates/milli}/src/prompt/context.rs | 0 .../milli}/src/prompt/document.rs | 0 {milli => crates/milli}/src/prompt/error.rs | 0 {milli => crates/milli}/src/prompt/fields.rs | 0 {milli => crates/milli}/src/prompt/mod.rs | 0 .../milli}/src/prompt/template_checker.rs | 0 {milli => crates/milli}/src/proximity.rs | 0 {milli => crates/milli}/src/score_details.rs | 0 .../src/search/facet/facet_distribution.rs | 0 .../search/facet/facet_distribution_iter.rs | 0 .../src/search/facet/facet_range_search.rs | 0 .../src/search/facet/facet_sort_ascending.rs | 0 .../src/search/facet/facet_sort_descending.rs | 0 .../milli}/src/search/facet/filter.rs | 0 .../milli}/src/search/facet/mod.rs | 0 .../milli}/src/search/facet/search.rs | 0 .../filter_distribution_all/0.snap | 0 .../filter_distribution_all/1.snap | 0 .../filter_distribution_all_stop_early/0.snap | 0 .../filter_distribution_all_stop_early/1.snap | 0 .../excluded_0.hash.snap | 0 .../excluded_1.hash.snap | 
0 .../excluded_2.hash.snap | 0 .../excluded_3.hash.snap | 0 .../included_0.hash.snap | 0 .../included_1.hash.snap | 0 .../included_2.hash.snap | 0 .../included_3.hash.snap | 0 .../field_id_0_exact_0.hash.snap | 0 .../field_id_0_exact_1.hash.snap | 0 .../field_id_0_exact_2.hash.snap | 0 .../field_id_0_exact_3.hash.snap | 0 .../field_id_1_exact_0.snap | 0 .../field_id_1_exact_1.snap | 0 .../field_id_1_exact_2.hash.snap | 0 .../field_id_1_exact_3.hash.snap | 0 .../excluded_0.hash.snap | 0 .../excluded_1.hash.snap | 0 .../excluded_2.hash.snap | 0 .../excluded_3.hash.snap | 0 .../included_0.hash.snap | 0 .../included_1.hash.snap | 0 .../included_2.hash.snap | 0 .../included_3.hash.snap | 0 .../filter_range_pinch/excluded_0.hash.snap | 0 .../filter_range_pinch/excluded_1.hash.snap | 0 .../filter_range_pinch/excluded_2.hash.snap | 0 .../filter_range_pinch/excluded_3.hash.snap | 0 .../filter_range_pinch/included_0.hash.snap | 0 .../filter_range_pinch/included_1.hash.snap | 0 .../filter_range_pinch/included_2.hash.snap | 0 .../filter_range_pinch/included_3.hash.snap | 0 .../end_at_included_0.hash.snap | 0 .../end_at_included_1.hash.snap | 0 .../end_at_included_2.hash.snap | 0 .../end_at_included_3.hash.snap | 0 .../start_from_included_0.hash.snap | 0 .../start_from_included_1.hash.snap | 0 .../start_from_included_2.hash.snap | 0 .../start_from_included_3.hash.snap | 0 .../unbounded_field_id_0_0.snap | 0 .../unbounded_field_id_0_1.snap | 0 .../unbounded_field_id_0_2.snap | 0 .../unbounded_field_id_0_3.snap | 0 .../unbounded_field_id_1_0.snap | 0 .../unbounded_field_id_1_1.snap | 0 .../unbounded_field_id_1_2.snap | 0 .../unbounded_field_id_1_3.snap | 0 .../filter_sort_ascending/0.snap | 0 .../filter_sort_ascending/1.snap | 0 .../0-0.snap | 0 .../0-1.snap | 0 .../1-0.snap | 0 .../1-1.snap | 0 .../filter_sort_descending/0.snap | 0 .../filter_sort_descending/1.snap | 0 .../filter_sort_descending/2.snap | 0 .../0-0.snap | 0 .../0-1.snap | 0 .../1-0.snap | 0 .../1-1.snap | 0 
.../milli}/src/search/fst_utils.rs | 0 {milli => crates/milli}/src/search/hybrid.rs | 0 {milli => crates/milli}/src/search/mod.rs | 0 .../milli}/src/search/new/bucket_sort.rs | 0 .../milli}/src/search/new/db_cache.rs | 0 .../milli}/src/search/new/distinct.rs | 0 .../milli}/src/search/new/exact_attribute.rs | 0 .../milli}/src/search/new/geo_sort.rs | 0 .../search/new/graph_based_ranking_rule.rs | 0 .../milli}/src/search/new/interner.rs | 0 .../milli}/src/search/new/limits.rs | 0 .../milli}/src/search/new/logger/mod.rs | 0 .../milli}/src/search/new/logger/visual.rs | 0 .../src/search/new/matches/matching_words.rs | 0 .../milli}/src/search/new/matches/mod.rs | 0 {milli => crates/milli}/src/search/new/mod.rs | 0 .../milli}/src/search/new/query_graph.rs | 0 .../new/query_term/compute_derivations.rs | 0 .../milli}/src/search/new/query_term/mod.rs | 0 .../src/search/new/query_term/ntypo_subset.rs | 0 .../src/search/new/query_term/parse_query.rs | 0 .../src/search/new/query_term/phrase.rs | 0 .../search/new/ranking_rule_graph/build.rs | 0 .../new/ranking_rule_graph/cheapest_paths.rs | 0 .../condition_docids_cache.rs | 0 .../new/ranking_rule_graph/dead_ends_cache.rs | 0 .../new/ranking_rule_graph/exactness/mod.rs | 0 .../search/new/ranking_rule_graph/fid/mod.rs | 0 .../src/search/new/ranking_rule_graph/mod.rs | 0 .../new/ranking_rule_graph/position/mod.rs | 0 .../new/ranking_rule_graph/proximity/build.rs | 0 .../proximity/compute_docids.rs | 0 .../new/ranking_rule_graph/proximity/mod.rs | 0 .../search/new/ranking_rule_graph/typo/mod.rs | 0 .../new/ranking_rule_graph/words/mod.rs | 0 .../milli}/src/search/new/ranking_rules.rs | 0 .../src/search/new/resolve_query_graph.rs | 0 .../milli}/src/search/new/small_bitmap.rs | 0 .../milli}/src/search/new/sort.rs | 0 .../src/search/new/tests/attribute_fid.rs | 0 .../search/new/tests/attribute_position.rs | 0 .../milli}/src/search/new/tests/cutoff.rs | 0 .../milli}/src/search/new/tests/distinct.rs | 0 
.../milli}/src/search/new/tests/exactness.rs | 0 .../milli}/src/search/new/tests/geo_sort.rs | 0 .../src/search/new/tests/integration.rs | 0 .../milli}/src/search/new/tests/language.rs | 0 .../milli}/src/search/new/tests/mod.rs | 0 .../src/search/new/tests/ngram_split_words.rs | 0 .../milli}/src/search/new/tests/proximity.rs | 0 .../src/search/new/tests/proximity_typo.rs | 0 ...attribute_fid__attribute_fid_ngrams-4.snap | 0 ...__attribute_fid__attribute_fid_simple.snap | 0 ...__attribute_position_different_fields.snap | 0 ...e_position__attribute_position_ngrams.snap | 0 ...position__attribute_position_repeated.snap | 0 ...position__attribute_position_simple-2.snap | 0 ...sts__exactness__exactness_after_words.snap | 0 ...s__exactness_all_candidates_with_typo.snap | 0 ...ctness_attribute_starts_with_phrase-3.snap | 0 ...xactness_attribute_starts_with_phrase.snap | 0 ...xactness_attribute_starts_with_simple.snap | 0 ...ollowed_by_typo_prefer_no_typo_prefix.snap | 0 ...__exactness__exactness_simple_ordered.snap | 0 ...s__exactness__exactness_simple_random.snap | 0 ...xactness__exactness_simple_reversed-3.snap | 0 ..._exactness__exactness_simple_reversed.snap | 0 ...xactness__proximity_after_exactness-4.snap | 0 ..._exactness__proximity_after_exactness.snap | 0 ...exactness__typo_followed_by_exactness.snap | 0 ...sts__exactness__words_after_exactness.snap | 0 ...rch__new__tests__geo_sort__geo_sort-2.snap | 0 ...rch__new__tests__geo_sort__geo_sort-4.snap | 0 ..._around_the_edge_of_the_flat_earth-10.snap | 0 ..._around_the_edge_of_the_flat_earth-12.snap | 0 ..._around_the_edge_of_the_flat_earth-14.snap | 0 ..._around_the_edge_of_the_flat_earth-16.snap | 0 ..._around_the_edge_of_the_flat_earth-18.snap | 0 ...t_around_the_edge_of_the_flat_earth-2.snap | 0 ..._around_the_edge_of_the_flat_earth-20.snap | 0 ...t_around_the_edge_of_the_flat_earth-4.snap | 0 ...t_around_the_edge_of_the_flat_earth-6.snap | 0 ...t_around_the_edge_of_the_flat_earth-8.snap | 0 
...geo_sort__geo_sort_mixed_with_words-2.snap | 0 ...t_without_any_geo_faceted_documents-2.snap | 0 ...ts__proximity__proximity_prefix_db-11.snap | 0 ...ts__proximity__proximity_prefix_db-14.snap | 0 ...sts__proximity__proximity_prefix_db-2.snap | 0 ...sts__proximity__proximity_prefix_db-5.snap | 0 ...sts__proximity__proximity_prefix_db-8.snap | 0 ...ts__proximity__proximity_split_word-2.snap | 0 ...ts__proximity__proximity_split_word-5.snap | 0 ...ts__proximity__proximity_split_word-8.snap | 0 ..._search__new__tests__sort__redacted-2.snap | 0 ...li__search__new__tests__sort__sort-11.snap | 0 ...lli__search__new__tests__sort__sort-2.snap | 0 ...lli__search__new__tests__sort__sort-5.snap | 0 ...lli__search__new__tests__sort__sort-8.snap | 0 ...s__stop_words__stop_words_in_phrase-6.snap | 0 ...s__stop_words__stop_words_in_phrase-8.snap | 0 ...h__new__tests__typo__typo_bucketing-2.snap | 0 ...h__new__tests__typo__typo_bucketing-5.snap | 0 ...h__new__tests__typo__typo_bucketing-8.snap | 0 ...__tests__typo__typo_exact_attribute-4.snap | 0 ..._new__tests__typo__typo_exact_word-12.snap | 0 ..._not_preceded_by_words_ranking_rule-2.snap | 0 ..._not_preceded_by_words_ranking_rule-5.snap | 0 ...ch__new__tests__typo__typo_synonyms-2.snap | 0 ...ch__new__tests__typo__typo_synonyms-5.snap | 0 ..._proximity__trap_basic_and_complex1-2.snap | 0 ...ests__typo_proximity__trap_complex2-2.snap | 0 ...ms__words_proximity_tms_last_phrase-2.snap | 0 ...ms__words_proximity_tms_last_phrase-5.snap | 0 ...ms__words_proximity_tms_last_simple-2.snap | 0 ...ms__words_proximity_tms_last_simple-5.snap | 0 ...ew__tests__words_tms__words_tms_all-2.snap | 0 ...s__words_tms__words_tms_last_phrase-2.snap | 0 ...s__words_tms__words_tms_last_phrase-5.snap | 0 ...s__words_tms__words_tms_last_simple-2.snap | 0 .../milli}/src/search/new/tests/sort.rs | 0 .../milli}/src/search/new/tests/stop_words.rs | 0 .../milli}/src/search/new/tests/typo.rs | 0 .../src/search/new/tests/typo_proximity.rs | 0 
.../milli}/src/search/new/tests/words_tms.rs | 0 .../milli}/src/search/new/vector_sort.rs | 0 {milli => crates/milli}/src/search/similar.rs | 0 {milli => crates/milli}/src/snapshot_tests.rs | 0 .../bug_3007/geo_faceted_documents_ids.snap | 0 .../geo_faceted_documents_ids.snap | 0 .../milli}/src/thread_pool_no_abort.rs | 0 .../src/update/available_documents_ids.rs | 0 .../milli}/src/update/clear_documents.rs | 0 {milli => crates/milli}/src/update/del_add.rs | 0 .../milli}/src/update/facet/bulk.rs | 0 .../milli}/src/update/facet/incremental.rs | 0 .../milli}/src/update/facet/mod.rs | 0 .../bulk.rs/insert/default.hash.snap | 0 .../large_group_small_min_level.hash.snap | 0 .../insert/odd_group_odd_min_level.hash.snap | 0 .../small_group_large_min_level.hash.snap | 0 .../small_group_small_min_level.hash.snap | 0 .../default.hash.snap | 0 .../large_group_small_min_level.hash.snap | 0 .../odd_group_odd_min_level.hash.snap | 0 .../small_group_large_min_level.hash.snap | 0 .../small_group_small_min_level.hash.snap | 0 .../bulk.rs/insert_string/default.hash.snap | 0 .../large_group_small_min_level.hash.snap | 0 .../odd_group_odd_min_level.hash.snap | 0 .../small_group_large_min_level.hash.snap | 0 .../small_group_small_min_level.hash.snap | 0 .../incremental.rs/append/append.hash.snap | 0 .../incremental.rs/delete_from_end/0.snap | 0 .../delete_from_end/100.hash.snap | 0 .../incremental.rs/delete_from_end/15.snap | 0 .../delete_from_end/150.hash.snap | 0 .../incremental.rs/delete_from_end/17.snap | 0 .../delete_from_end/200.hash.snap | 0 .../delete_from_start/127.hash.snap | 0 .../incremental.rs/delete_from_start/215.snap | 0 .../incremental.rs/delete_from_start/255.snap | 0 .../delete_shuffled/127.hash.snap | 0 .../delete_shuffled/215.hash.snap | 0 .../incremental.rs/delete_shuffled/255.snap | 0 .../after_delete.hash.snap | 0 .../before_delete.hash.snap | 0 .../in_place_level0_insert.snap | 0 .../many_field_ids_append.hash.snap | 0 .../many_field_ids_prepend.hash.snap | 0 
.../merge_values/merge_values.hash.snap | 0 .../incremental.rs/prepend/prepend.hash.snap | 0 .../after_delete.hash.snap | 0 .../before_delete.hash.snap | 0 .../shuffled/shuffled.hash.snap | 0 .../src/update/index_documents/enrich.rs | 0 .../extract/extract_docid_word_positions.rs | 0 .../extract/extract_facet_number_docids.rs | 0 .../extract/extract_facet_string_docids.rs | 0 .../extract/extract_fid_docid_facet_values.rs | 0 .../extract/extract_fid_word_count_docids.rs | 0 .../extract/extract_geo_points.rs | 0 .../extract/extract_vector_points.rs | 0 .../extract/extract_word_docids.rs | 0 .../extract_word_pair_proximity_docids.rs | 0 .../extract/extract_word_position_docids.rs | 0 .../src/update/index_documents/extract/mod.rs | 0 .../index_documents/helpers/clonable_mmap.rs | 0 .../index_documents/helpers/grenad_helpers.rs | 0 .../helpers/merge_functions.rs | 0 .../src/update/index_documents/helpers/mod.rs | 0 .../milli}/src/update/index_documents/mod.rs | 0 .../src/update/index_documents/parallel.rs | 0 .../documents_ids.snap | 0 .../facet_id_exists_docids.snap | 0 .../word_docids.snap | 0 .../word_pair_proximity_docids.snap | 0 .../documents_ids.snap | 0 .../word_docids.snap | 0 .../word_pair_proximity_docids.snap | 0 .../facet_id_exists_docids.snap | 0 .../facet_id_f64_docids.snap | 0 .../facet_id_string_docids.snap | 0 .../word_docids.snap | 0 .../word_pair_proximity_docids.snap | 0 .../facet_id_f64_docids.snap | 0 .../facet_id_string_docids.snap | 0 .../initial/word_docids.snap | 0 .../updated/word_docids.snap | 0 .../src/update/index_documents/transform.rs | 0 .../src/update/index_documents/typed_chunk.rs | 0 .../milli}/src/update/indexer_config.rs | 0 {milli => crates/milli}/src/update/mod.rs | 0 .../milli}/src/update/settings.rs | 0 .../milli}/src/update/update_step.rs | 0 .../milli}/src/update/word_prefix_docids.rs | 0 .../src/update/words_prefix_integer_docids.rs | 0 .../milli}/src/update/words_prefixes_fst.rs | 0 {milli => 
crates/milli}/src/vector/error.rs | 0 {milli => crates/milli}/src/vector/hf.rs | 0 .../milli}/src/vector/json_template.rs | 0 {milli => crates/milli}/src/vector/manual.rs | 0 {milli => crates/milli}/src/vector/mod.rs | 0 {milli => crates/milli}/src/vector/ollama.rs | 0 {milli => crates/milli}/src/vector/openai.rs | 0 .../milli}/src/vector/parsed_vectors.rs | 0 {milli => crates/milli}/src/vector/rest.rs | 0 .../milli}/src/vector/settings.rs | 0 .../milli}/tests/assets/test_set.ndjson | 0 {milli => crates/milli}/tests/mod.rs | 0 .../milli}/tests/search/distinct.rs | 0 .../milli}/tests/search/facet_distribution.rs | 0 .../milli}/tests/search/filters.rs | 0 {milli => crates/milli}/tests/search/mod.rs | 0 .../milli}/tests/search/phrase_search.rs | 0 .../milli}/tests/search/query_criteria.rs | 0 {milli => crates/milli}/tests/search/sort.rs | 0 .../milli}/tests/search/typo_tolerance.rs | 0 .../permissive-json-pointer}/Cargo.toml | 0 .../permissive-json-pointer}/README.md | 0 .../permissive-json-pointer}/src/lib.rs | 0 .../tracing-trace}/Cargo.toml | 0 .../src/bin/trace-to-callstats.rs | 0 .../src/bin/trace-to-firefox.rs | 0 .../tracing-trace}/src/entry.rs | 0 .../tracing-trace}/src/error.rs | 0 .../tracing-trace}/src/layer.rs | 0 .../tracing-trace}/src/lib.rs | 0 .../tracing-trace}/src/main.rs | 0 .../src/processor/firefox_profiler.rs | 0 .../tracing-trace}/src/processor/fmt.rs | 0 .../tracing-trace}/src/processor/mod.rs | 0 .../src/processor/span_stats.rs | 0 {xtask => crates/xtask}/Cargo.toml | 0 {xtask => crates/xtask}/src/bench/assets.rs | 0 {xtask => crates/xtask}/src/bench/client.rs | 0 {xtask => crates/xtask}/src/bench/command.rs | 0 .../xtask}/src/bench/dashboard.rs | 0 {xtask => crates/xtask}/src/bench/env_info.rs | 0 .../xtask}/src/bench/meili_process.rs | 0 {xtask => crates/xtask}/src/bench/mod.rs | 0 {xtask => crates/xtask}/src/bench/workload.rs | 0 {xtask => crates/xtask}/src/lib.rs | 0 {xtask => crates/xtask}/src/main.rs | 0 1062 files changed, 19 
insertions(+), 20 deletions(-) rename {benchmarks => crates/benchmarks}/.gitignore (100%) rename {benchmarks => crates/benchmarks}/Cargo.toml (100%) rename {benchmarks => crates/benchmarks}/README.md (100%) rename {benchmarks => crates/benchmarks}/benches/indexing.rs (100%) rename {benchmarks => crates/benchmarks}/benches/search_geo.rs (100%) rename {benchmarks => crates/benchmarks}/benches/search_songs.rs (100%) rename {benchmarks => crates/benchmarks}/benches/search_wiki.rs (100%) rename {benchmarks => crates/benchmarks}/benches/utils.rs (100%) rename {benchmarks => crates/benchmarks}/build.rs (100%) rename {benchmarks => crates/benchmarks}/scripts/compare.sh (100%) rename {benchmarks => crates/benchmarks}/scripts/list.sh (100%) rename {benchmarks => crates/benchmarks}/src/lib.rs (100%) rename {build-info => crates/build-info}/Cargo.toml (100%) rename {build-info => crates/build-info}/build.rs (100%) rename {build-info => crates/build-info}/src/lib.rs (100%) rename {dump => crates/dump}/Cargo.toml (100%) rename {dump => crates/dump}/README.md (100%) rename {dump => crates/dump}/src/error.rs (100%) rename {dump => crates/dump}/src/lib.rs (100%) rename {dump => crates/dump}/src/reader/compat/mod.rs (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-3.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-6.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-9.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-11.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-14.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-5.snap (100%) rename {dump => 
crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-8.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-12.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-15.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-6.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-9.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-12.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-6.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-9.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-12.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-6.snap (100%) rename {dump => crates/dump}/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-9.snap (100%) rename {dump => crates/dump}/src/reader/compat/v1_to_v2.rs (100%) rename {dump => crates/dump}/src/reader/compat/v2_to_v3.rs (100%) rename {dump => crates/dump}/src/reader/compat/v3_to_v4.rs (100%) rename {dump => crates/dump}/src/reader/compat/v4_to_v5.rs (100%) rename {dump => crates/dump}/src/reader/compat/v5_to_v6.rs (100%) rename {dump => crates/dump}/src/reader/mod.rs (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v1-10.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v1-4.snap (100%) rename {dump => 
crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v1-7.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v2-11.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v2-14.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v2-5.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v2-8.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-11.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-5.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-8.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v3-11.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v3-14.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v3-5.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v3-8.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v4-12.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v4-6.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v4-9.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v5-12.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v5-6.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v5-9.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-5.snap (100%) rename 
{dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-6.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-7.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-8.snap (100%) rename {dump => crates/dump}/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-9.snap (100%) rename {dump => crates/dump}/src/reader/v1/mod.rs (100%) rename {dump => crates/dump}/src/reader/v1/settings.rs (100%) rename {dump => crates/dump}/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-10.snap (100%) rename {dump => crates/dump}/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-2.snap (100%) rename {dump => crates/dump}/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-6.snap (100%) rename {dump => crates/dump}/src/reader/v1/update.rs (100%) rename {dump => crates/dump}/src/reader/v2/errors.rs (100%) rename {dump => crates/dump}/src/reader/v2/meta.rs (100%) rename {dump => crates/dump}/src/reader/v2/mod.rs (100%) rename {dump => crates/dump}/src/reader/v2/settings.rs (100%) rename {dump => crates/dump}/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-11.snap (100%) rename {dump => crates/dump}/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-14.snap (100%) rename {dump => crates/dump}/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-5.snap (100%) rename {dump => crates/dump}/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-8.snap (100%) rename {dump => crates/dump}/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-10.snap (100%) rename {dump => crates/dump}/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-4.snap (100%) rename {dump => 
crates/dump}/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-7.snap (100%) rename {dump => crates/dump}/src/reader/v2/updates.rs (100%) rename {dump => crates/dump}/src/reader/v3/errors.rs (100%) rename {dump => crates/dump}/src/reader/v3/meta.rs (100%) rename {dump => crates/dump}/src/reader/v3/mod.rs (100%) rename {dump => crates/dump}/src/reader/v3/settings.rs (100%) rename {dump => crates/dump}/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-11.snap (100%) rename {dump => crates/dump}/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-14.snap (100%) rename {dump => crates/dump}/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-5.snap (100%) rename {dump => crates/dump}/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-8.snap (100%) rename {dump => crates/dump}/src/reader/v3/updates.rs (100%) rename {dump => crates/dump}/src/reader/v4/errors.rs (100%) rename {dump => crates/dump}/src/reader/v4/keys.rs (100%) rename {dump => crates/dump}/src/reader/v4/meta.rs (100%) rename {dump => crates/dump}/src/reader/v4/mod.rs (100%) rename {dump => crates/dump}/src/reader/v4/settings.rs (100%) rename {dump => crates/dump}/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-10.snap (100%) rename {dump => crates/dump}/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-13.snap (100%) rename {dump => crates/dump}/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-7.snap (100%) rename {dump => crates/dump}/src/reader/v4/tasks.rs (100%) rename {dump => crates/dump}/src/reader/v5/errors.rs (100%) rename {dump => crates/dump}/src/reader/v5/keys.rs (100%) rename {dump => crates/dump}/src/reader/v5/meta.rs (100%) rename {dump => crates/dump}/src/reader/v5/mod.rs (100%) rename {dump => crates/dump}/src/reader/v5/settings.rs (100%) rename {dump => crates/dump}/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-10.snap (100%) rename {dump => 
crates/dump}/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-13.snap (100%) rename {dump => crates/dump}/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-7.snap (100%) rename {dump => crates/dump}/src/reader/v5/tasks.rs (100%) rename {dump => crates/dump}/src/reader/v6/mod.rs (100%) rename {dump => crates/dump}/src/writer.rs (100%) rename {dump => crates/dump}/tests/assets/v1.dump (100%) rename {dump => crates/dump}/tests/assets/v2-v0.22.0.dump (100%) rename {dump => crates/dump}/tests/assets/v2.dump (100%) rename {dump => crates/dump}/tests/assets/v3.dump (100%) rename {dump => crates/dump}/tests/assets/v4.dump (100%) rename {dump => crates/dump}/tests/assets/v5.dump (100%) rename {dump => crates/dump}/tests/assets/v6-with-experimental.dump (100%) rename {dump => crates/dump}/tests/assets/v6-with-vectors.dump (100%) rename {file-store => crates/file-store}/Cargo.toml (100%) rename {file-store => crates/file-store}/src/lib.rs (100%) rename {filter-parser => crates/filter-parser}/Cargo.toml (100%) rename {filter-parser => crates/filter-parser}/README.md (100%) rename {filter-parser => crates/filter-parser}/fuzz/.gitignore (100%) rename {filter-parser => crates/filter-parser}/fuzz/Cargo.toml (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_1 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_10 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_11 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_12 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_13 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_14 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_15 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_16 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_17 
(100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_18 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_19 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_2 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_20 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_21 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_22 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_23 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_24 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_25 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_26 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_27 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_28 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_29 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_3 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_30 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_31 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_32 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_33 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_34 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_35 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_36 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_37 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_38 (100%) rename {filter-parser => 
crates/filter-parser/fuzz}/fuzz/corpus/parse/test_39 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_4 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_40 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_41 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_42 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_43 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_5 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_6 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_7 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_8 (100%) rename {filter-parser => crates/filter-parser/fuzz}/fuzz/corpus/parse/test_9 (100%) rename {filter-parser => crates/filter-parser}/fuzz/fuzz_targets/parse.rs (100%) rename {filter-parser => crates/filter-parser}/src/condition.rs (100%) rename {filter-parser => crates/filter-parser}/src/error.rs (100%) rename {filter-parser => crates/filter-parser}/src/lib.rs (100%) rename {filter-parser => crates/filter-parser}/src/main.rs (100%) rename {filter-parser => crates/filter-parser}/src/value.rs (100%) rename {flatten-serde-json => crates/flatten-serde-json}/Cargo.toml (100%) rename {flatten-serde-json => crates/flatten-serde-json}/README.md (100%) rename {flatten-serde-json => crates/flatten-serde-json}/benches/benchmarks.rs (100%) rename {flatten-serde-json => crates/flatten-serde-json}/fuzz/Cargo.toml (100%) rename {flatten-serde-json => crates/flatten-serde-json}/fuzz/fuzz_targets/flatten.rs (100%) rename {flatten-serde-json => crates/flatten-serde-json}/src/lib.rs (100%) rename {flatten-serde-json => crates/flatten-serde-json}/src/main.rs (100%) rename {fuzzers => crates/fuzzers}/Cargo.toml (100%) rename {fuzzers => crates/fuzzers}/README.md (100%) rename 
{fuzzers => crates/fuzzers}/src/bin/fuzz-indexing.rs (100%) rename {fuzzers => crates/fuzzers}/src/lib.rs (100%) rename {index-scheduler => crates/index-scheduler}/Cargo.toml (100%) rename {index-scheduler => crates/index-scheduler}/src/autobatcher.rs (100%) rename {index-scheduler => crates/index-scheduler}/src/batch.rs (100%) rename {index-scheduler => crates/index-scheduler}/src/error.rs (100%) rename {index-scheduler => crates/index-scheduler}/src/features.rs (100%) rename {index-scheduler => crates/index-scheduler}/src/index_mapper/index_map.rs (100%) rename {index-scheduler => crates/index-scheduler}/src/index_mapper/mod.rs (100%) rename {index-scheduler => crates/index-scheduler}/src/insta_snapshot.rs (100%) rename {index-scheduler => crates/index-scheduler}/src/lib.rs (100%) rename {index-scheduler => crates/index-scheduler}/src/lru.rs (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/index_scheduler__tests__import_vectors-15.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/index_scheduler__tests__import_vectors-2.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/index_scheduler__tests__import_vectors-22.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/index_scheduler__tests__import_vectors-5.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/index_scheduler__tests__import_vectors-8.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/index_scheduler__tests__import_vectors.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/index_scheduler__tests__settings_update-2.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/index_scheduler__tests__settings_update-5.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/index_scheduler__tests__settings_update.snap (100%) rename {index-scheduler => 
crates/index-scheduler}/src/snapshots/lib.rs/cancel_enqueued_task/cancel_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_enqueued_task/initial_tasks_enqueued.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_mix_of_tasks/aborted_indexation.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_mix_of_tasks/cancel_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_mix_of_tasks/first_task_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_mix_of_tasks/processing_second_task_cancel_enqueued.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_processing_dump/after_dump_register.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_processing_dump/cancel_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_processing_dump/cancel_registered.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_processing_task/aborted_indexation.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_processing_task/cancel_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_processing_task/cancel_task_registered.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_processing_task/initial_task_processing.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_processing_task/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_succeeded_task/cancel_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_succeeded_task/initial_task_processed.snap 
(100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/cancel_succeeded_task/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/do_not_batch_task_of_different_indexes/all_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition/after_register.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition/after_the_batch_creation.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition/once_everything_is_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_document_deletion/after_processing_the_batch.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_document_deletion/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_document_deletion/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_document_deletion/registered_the_second_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_index_deletion/before_index_creation.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_index_deletion/both_task_succeeded.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_second_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_third_task.snap (100%) rename {index-scheduler => 
crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_index_deletion_on_unexisting_index/1.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_addition_and_index_deletion_on_unexisting_index/2.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_deletion_and_document_addition/after_failing_the_deletion.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_deletion_and_document_addition/after_last_successful_addition.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_deletion_and_document_addition/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_deletion_and_document_addition/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/document_deletion_and_document_addition/registered_the_second_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/document_addition_batch_created.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/document_addition_failed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_settings.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_settings_and_documents.snap (100%) rename {index-scheduler => 
crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_removing_the_documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/documents_remaining_should_only_be_bork.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/registered_the_document_deletions.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/registered_the_setting_and_document_addition.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_index_creation/after_register.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_process_batch_for_index_creation/index_creation_failed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/after_batch_succeeded.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/after_failing_to_commit.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/document_addition_succeeded_but_index_scheduler_not_updated.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/task_successfully_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/import_vectors/Intel to kefir succeeds.snap (100%) rename {index-scheduler => 
crates/index-scheduler}/src/snapshots/lib.rs/import_vectors/Intel to kefir.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/import_vectors/adding Intel succeeds.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/import_vectors/after adding Intel.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/import_vectors/after_registering_settings_task_vectors.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/import_vectors/settings_update_processed_vectors.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/import_vectors_first_and_embedder_later/documents after initial push.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/after_batch_creation.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_second_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_third_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/panic_in_process_batch_for_index_creation/index_creation_failed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/panic_in_process_batch_for_index_creation/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_second_task.snap (100%) rename {index-scheduler => 
crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_third_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_second_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_third_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_without_autobatching/first.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_without_autobatching/fourth.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_fourth_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_second_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_third_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_without_autobatching/second.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/process_tasks_without_autobatching/third.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/query_tasks_canceled_by/start.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/query_tasks_from_and_limit/processed_all_tasks.snap (100%) rename {index-scheduler => 
crates/index-scheduler}/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_second_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_third_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/query_tasks_simple/end.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/query_tasks_simple/start.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/query_tasks_special_rules/start.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/register/everything_is_successfully_registered.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes/create_a.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes/create_b.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes/create_c.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes/create_d.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes/first_swap_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes/first_swap_registered.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes/second_swap_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes/third_empty_swap_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes/two_swaps_registered.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes_errors/after_the_index_creation.snap (100%) 
rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes_errors/first_swap_failed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/swap_indexes_errors/initial_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/initial_tasks_enqueued.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/initial_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/task_deletion_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_deleteable/after_registering_the_task_deletion.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_deleteable/initial_tasks_enqueued.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_deleteable/initial_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_deleteable/task_deletion_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_undeleteable/initial_tasks_enqueued.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_done.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_enqueued.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_processing.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_auto_deletion_of_tasks/after_the_second_task_deletion.snap (100%) rename {index-scheduler => 
crates/index-scheduler}/src/snapshots/lib.rs/test_auto_deletion_of_tasks/everything_has_been_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_deletion_have_been_enqueued.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_deletion_have_been_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_queue_is_full.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_disable_auto_deletion_of_tasks/task_deletion_have_not_been_enqueued.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_disable_auto_deletion_of_tasks/task_queue_is_full.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/after_processing_the_10_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/after_registering_the_10_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/processed_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/after_registering_the_10_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/all_tasks_processed.snap (100%) 
rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/five_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/processed_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index/after_processing_the_10_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index/after_registering_the_10_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/after_registering_the_10_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/all_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/five_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/after_registering_the_10_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/all_tasks_processed.snap (100%) rename {index-scheduler => 
crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/only_first_task_failed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/after_registering_the_10_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/all_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/processed_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/registered_the_first_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/after_registering_the_5_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/fifth_task_succeeds.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/first_and_second_task_fails.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/fourth_task_fails.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/third_task_succeeds.snap (100%) rename 
{index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/after_registering_the_3_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/only_first_task_succeed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/second_task_fails.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/third_task_fails.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/after_registering_the_3_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/only_first_task_succeed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/second_and_third_tasks_fails.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/after_registering_the_6_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/all_other_tasks_succeeds.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/documents.snap (100%) rename {index-scheduler => 
crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/first_task_fails.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/second_task_fails.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/third_task_succeeds.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/after_registering_the_6_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/all_other_tasks_succeeds.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/first_task_succeed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/second_task_fails.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/third_task_succeeds.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_replace/1.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_replace/2.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_replace/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_replace_without_autobatching/after_registering_the_10_tasks.snap (100%) rename {index-scheduler => 
crates/index-scheduler}/src/snapshots/lib.rs/test_document_replace_without_autobatching/all_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_replace_without_autobatching/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_replace_without_autobatching/five_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_update/1.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_update/2.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_update/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_update_without_autobatching/after_registering_the_10_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_update_without_autobatching/all_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_update_without_autobatching/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_document_update_without_autobatching/five_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_mixed_document_addition/after_registering_the_10_tasks.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_mixed_document_addition/all_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_mixed_document_addition/documents.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_mixed_document_addition/five_tasks_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_settings_update/after_registering_settings_task.snap (100%) rename 
{index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_settings_update/settings_update_processed.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/snapshots/lib.rs/test_task_is_processing/registered_a_task.snap (100%) rename {index-scheduler => crates/index-scheduler}/src/utils.rs (100%) rename {index-scheduler => crates/index-scheduler}/src/uuid_codec.rs (100%) rename {json-depth-checker => crates/json-depth-checker}/Cargo.toml (100%) rename {json-depth-checker => crates/json-depth-checker}/benches/depth.rs (100%) rename {json-depth-checker => crates/json-depth-checker}/fuzz/Cargo.toml (100%) rename {json-depth-checker => crates/json-depth-checker}/fuzz/fuzz_targets/depth.rs (100%) rename {json-depth-checker => crates/json-depth-checker}/src/lib.rs (100%) rename {meili-snap => crates/meili-snap}/Cargo.toml (100%) rename {meili-snap => crates/meili-snap}/src/lib.rs (100%) rename {meili-snap => crates/meili-snap}/src/snapshots/lib.rs/snap/4.snap (100%) rename {meili-snap => crates/meili-snap}/src/snapshots/lib.rs/snap/5.snap (100%) rename {meili-snap => crates/meili-snap}/src/snapshots/lib.rs/snap/6.snap (100%) rename {meili-snap => crates/meili-snap}/src/snapshots/lib.rs/snap/7.snap (100%) rename {meili-snap => crates/meili-snap}/src/snapshots/lib.rs/snap/snap_name_1.snap (100%) rename {meili-snap => crates/meili-snap}/src/snapshots/lib.rs/some_test/4.snap (100%) rename {meili-snap => crates/meili-snap}/src/snapshots/lib.rs/some_test/5.snap (100%) rename {meili-snap => crates/meili-snap}/src/snapshots/lib.rs/some_test/6.snap (100%) rename {meili-snap => crates/meili-snap}/src/snapshots/lib.rs/some_test/7.snap (100%) rename {meili-snap => crates/meili-snap}/src/snapshots/lib.rs/some_test/snap_name_1.snap (100%) rename {meilisearch-auth => crates/meilisearch-auth}/Cargo.toml (100%) rename {meilisearch-auth => crates/meilisearch-auth}/src/dump.rs (100%) rename {meilisearch-auth => crates/meilisearch-auth}/src/error.rs (100%) rename 
{meilisearch-auth => crates/meilisearch-auth}/src/lib.rs (100%) rename {meilisearch-auth => crates/meilisearch-auth}/src/store.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/Cargo.toml (100%) rename {meilisearch-types => crates/meilisearch-types}/src/compression.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/deserr/mod.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/deserr/query_params.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/document_formats.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/error.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/facet_values_sort.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/features.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/index_uid.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/index_uid_pattern.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/keys.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/lib.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/locales.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/settings.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/star_or.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/task_view.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/tasks.rs (100%) rename {meilisearch-types => crates/meilisearch-types}/src/versioning.rs (100%) rename {meilisearch => crates/meilisearch}/Cargo.toml (100%) rename {meilisearch => crates/meilisearch}/build.rs (100%) rename {meilisearch => crates/meilisearch}/src/analytics/mock_analytics.rs (100%) rename {meilisearch => crates/meilisearch}/src/analytics/mod.rs (100%) rename {meilisearch => crates/meilisearch}/src/analytics/segment_analytics.rs (100%) rename {meilisearch => crates/meilisearch}/src/error.rs (100%) rename 
{meilisearch => crates/meilisearch}/src/extractors/authentication/error.rs (100%) rename {meilisearch => crates/meilisearch}/src/extractors/authentication/mod.rs (100%) rename {meilisearch => crates/meilisearch}/src/extractors/mod.rs (100%) rename {meilisearch => crates/meilisearch}/src/extractors/payload.rs (100%) rename {meilisearch => crates/meilisearch}/src/extractors/sequential_extractor.rs (100%) rename {meilisearch => crates/meilisearch}/src/lib.rs (100%) rename {meilisearch => crates/meilisearch}/src/main.rs (100%) rename {meilisearch => crates/meilisearch}/src/metrics.rs (100%) rename {meilisearch => crates/meilisearch}/src/middleware.rs (100%) rename {meilisearch => crates/meilisearch}/src/option.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/api_key.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/dump.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/features.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/indexes/documents.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/indexes/facet_search.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/indexes/mod.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/indexes/search.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/indexes/settings.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/indexes/similar.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/logs.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/metrics.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/mod.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/multi_search.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/snapshot.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/swap_indexes.rs (100%) rename {meilisearch => crates/meilisearch}/src/routes/tasks.rs (100%) rename {meilisearch => crates/meilisearch}/src/search/federated.rs 
(100%) rename {meilisearch => crates/meilisearch}/src/search/mod.rs (100%) rename {meilisearch => crates/meilisearch}/src/search/ranking_rules.rs (100%) rename {meilisearch => crates/meilisearch}/src/search_queue.rs (100%) rename {meilisearch => crates/meilisearch}/tests/assets/dumps/v1/metadata.json (100%) rename {meilisearch => crates/meilisearch}/tests/assets/dumps/v1/test/documents.jsonl (100%) rename {meilisearch => crates/meilisearch}/tests/assets/dumps/v1/test/settings.json (100%) rename {meilisearch => crates/meilisearch}/tests/assets/dumps/v1/test/updates.jsonl (100%) rename {meilisearch => crates/meilisearch}/tests/assets/test_set.json (100%) rename {meilisearch => crates/meilisearch}/tests/assets/test_set.ndjson (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v1_v0.20.0_movies.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v1_v0.20.0_movies_with_settings.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v1_v0.20.0_rubygems_with_settings.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v2_v0.21.1_movies.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v2_v0.21.1_movies_with_settings.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v2_v0.21.1_rubygems_with_settings.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v3_v0.24.0_movies.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v3_v0.24.0_movies_with_settings.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v3_v0.24.0_rubygems_with_settings.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v4_v0.25.2_movies.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v4_v0.25.2_movies_with_settings.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v4_v0.25.2_rubygems_with_settings.dump (100%) rename {meilisearch => crates/meilisearch}/tests/assets/v5_v0.28.0_test_dump.dump (100%) rename 
{meilisearch => crates/meilisearch}/tests/assets/v6_v1.6.0_use_deactivated_experimental_setting.dump (100%) rename {meilisearch => crates/meilisearch}/tests/auth/api_keys.rs (100%) rename {meilisearch => crates/meilisearch}/tests/auth/authorization.rs (100%) rename {meilisearch => crates/meilisearch}/tests/auth/errors.rs (100%) rename {meilisearch => crates/meilisearch}/tests/auth/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/auth/payload.rs (100%) rename {meilisearch => crates/meilisearch}/tests/auth/tenant_token.rs (100%) rename {meilisearch => crates/meilisearch}/tests/auth/tenant_token_multi_search.rs (100%) rename {meilisearch => crates/meilisearch}/tests/common/encoder.rs (100%) rename {meilisearch => crates/meilisearch}/tests/common/index.rs (100%) rename {meilisearch => crates/meilisearch}/tests/common/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/common/server.rs (100%) rename {meilisearch => crates/meilisearch}/tests/common/service.rs (100%) rename {meilisearch => crates/meilisearch}/tests/content_type.rs (100%) rename {meilisearch => crates/meilisearch}/tests/dashboard/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/documents/add_documents.rs (100%) rename {meilisearch => crates/meilisearch}/tests/documents/delete_documents.rs (100%) rename {meilisearch => crates/meilisearch}/tests/documents/errors.rs (100%) rename {meilisearch => crates/meilisearch}/tests/documents/get_documents.rs (100%) rename {meilisearch => crates/meilisearch}/tests/documents/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/documents/update_documents.rs (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/data.rs (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/generate_and_import_dump_containing_vectors/1.snap (100%) rename {meilisearch => 
crates/meilisearch}/tests/dumps/snapshots/mod.rs/generate_and_import_dump_containing_vectors/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/2.snap (100%) rename {meilisearch 
=> crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/5.snap (100%) rename 
{meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/1.snap 
(100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/3.snap (100%) rename {meilisearch => 
crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/5.snap (100%) rename {meilisearch => 
crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v5/1.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v5/2.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v5/3.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v5/4.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v5/5.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v5/6.snap (100%) rename {meilisearch => crates/meilisearch}/tests/dumps/snapshots/mod.rs/import_dump_v5/7.snap (100%) rename {meilisearch => crates/meilisearch}/tests/features/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/index/create_index.rs (100%) rename {meilisearch => crates/meilisearch}/tests/index/delete_index.rs (100%) rename {meilisearch => crates/meilisearch}/tests/index/errors.rs (100%) rename {meilisearch => crates/meilisearch}/tests/index/get_index.rs (100%) rename {meilisearch => crates/meilisearch}/tests/index/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/index/stats.rs (100%) rename {meilisearch => crates/meilisearch}/tests/index/update_index.rs (100%) rename {meilisearch => crates/meilisearch}/tests/integration.rs (100%) rename {meilisearch => crates/meilisearch}/tests/logs/error.rs (100%) rename {meilisearch => crates/meilisearch}/tests/logs/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/distinct.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/errors.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/facet_search.rs (100%) rename {meilisearch => 
crates/meilisearch}/tests/search/formatted.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/geo.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/hybrid.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/locales.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/matching_strategy.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/multi.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/pagination.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/restrict_searchable.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/search_queue.rs (100%) rename {meilisearch => crates/meilisearch}/tests/search/snapshots/distinct.rs/distinct_at_search_time/succeed.snap (100%) rename {meilisearch => crates/meilisearch}/tests/search/snapshots/errors.rs/distinct_at_search_time/task-succeed.snap (100%) rename {meilisearch => crates/meilisearch}/tests/settings/distinct.rs (100%) rename {meilisearch => crates/meilisearch}/tests/settings/errors.rs (100%) rename {meilisearch => crates/meilisearch}/tests/settings/get_settings.rs (100%) rename {meilisearch => crates/meilisearch}/tests/settings/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/settings/proximity_settings.rs (100%) rename {meilisearch => crates/meilisearch}/tests/settings/tokenizer_customization.rs (100%) rename {meilisearch => crates/meilisearch}/tests/similar/errors.rs (100%) rename {meilisearch => crates/meilisearch}/tests/similar/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/snapshot/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/stats/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/swap_indexes/errors.rs (100%) rename {meilisearch => crates/meilisearch}/tests/swap_indexes/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/tasks/errors.rs (100%) rename 
{meilisearch => crates/meilisearch}/tests/tasks/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/tasks/webhook.rs (100%) rename {meilisearch => crates/meilisearch}/tests/vector/binary_quantized.rs (100%) rename {meilisearch => crates/meilisearch}/tests/vector/intel_gen.txt.gz (100%) rename {meilisearch => crates/meilisearch}/tests/vector/mod.rs (100%) rename {meilisearch => crates/meilisearch}/tests/vector/openai.rs (100%) rename {meilisearch => crates/meilisearch}/tests/vector/openai_responses.json.gz (100%) rename {meilisearch => crates/meilisearch}/tests/vector/openai_tokenized_responses.json.gz (100%) rename {meilisearch => crates/meilisearch}/tests/vector/rest.rs (100%) rename {meilisearch => crates/meilisearch}/tests/vector/settings.rs (100%) rename {meilisearch => crates/meilisearch}/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/document-added.snap (100%) rename {meilisearch => crates/meilisearch}/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/document-deleted.snap (100%) rename {meilisearch => crates/meilisearch}/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/settings-processed.snap (100%) rename {meilitool => crates/meilitool}/Cargo.toml (100%) rename {meilitool => crates/meilitool}/src/main.rs (100%) rename {meilitool => crates/meilitool}/src/uuid_codec.rs (100%) rename {milli => crates/milli}/Cargo.toml (100%) rename {milli => crates/milli}/README.md (100%) rename {milli => crates/milli}/examples/index.rs (100%) rename {milli => crates/milli}/examples/search.rs (100%) rename {milli => crates/milli}/examples/settings.rs (100%) rename {milli => crates/milli}/fuzz/.gitignore (100%) rename {milli => crates/milli}/src/asc_desc.rs (100%) rename {milli => crates/milli}/src/criterion.rs (100%) rename {milli => crates/milli}/src/documents/builder.rs (100%) rename {milli => crates/milli}/src/documents/enriched.rs (100%) rename {milli => crates/milli}/src/documents/mod.rs (100%) rename {milli => 
crates/milli}/src/documents/primary_key.rs (100%) rename {milli => crates/milli}/src/documents/reader.rs (100%) rename {milli => crates/milli}/src/documents/serde_impl.rs (100%) rename {milli => crates/milli}/src/error.rs (100%) rename {milli => crates/milli}/src/external_documents_ids.rs (100%) rename {milli => crates/milli}/src/facet/facet_type.rs (100%) rename {milli => crates/milli}/src/facet/facet_value.rs (100%) rename {milli => crates/milli}/src/facet/mod.rs (100%) rename {milli => crates/milli}/src/facet/value_encoding.rs (100%) rename {milli => crates/milli}/src/fieldids_weights_map.rs (100%) rename {milli => crates/milli}/src/fields_ids_map.rs (100%) rename {milli => crates/milli}/src/heed_codec/beu16_str_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/beu32_str_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/byte_slice_ref.rs (100%) rename {milli => crates/milli}/src/heed_codec/facet/field_doc_id_facet_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/facet/mod.rs (100%) rename {milli => crates/milli}/src/heed_codec/facet/ordered_f64_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/field_id_word_count_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/fst_set_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/mod.rs (100%) rename {milli => crates/milli}/src/heed_codec/obkv_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/roaring_bitmap/bo_roaring_bitmap_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/roaring_bitmap/cbo_roaring_bitmap_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/roaring_bitmap/mod.rs (100%) rename {milli => crates/milli}/src/heed_codec/roaring_bitmap/roaring_bitmap_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/roaring_bitmap_length/bo_roaring_bitmap_len_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/roaring_bitmap_length/cbo_roaring_bitmap_len_codec.rs (100%) rename {milli => 
crates/milli}/src/heed_codec/roaring_bitmap_length/mod.rs (100%) rename {milli => crates/milli}/src/heed_codec/roaring_bitmap_length/roaring_bitmap_len_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/str_beu32_codec.rs (100%) rename {milli => crates/milli}/src/heed_codec/str_ref.rs (100%) rename {milli => crates/milli}/src/heed_codec/str_str_u8_codec.rs (100%) rename {milli => crates/milli}/src/index.rs (100%) rename {milli => crates/milli}/src/lib.rs (100%) rename {milli => crates/milli}/src/localized_attributes_rules.rs (100%) rename {milli => crates/milli}/src/order_by_map.rs (100%) rename {milli => crates/milli}/src/prompt/context.rs (100%) rename {milli => crates/milli}/src/prompt/document.rs (100%) rename {milli => crates/milli}/src/prompt/error.rs (100%) rename {milli => crates/milli}/src/prompt/fields.rs (100%) rename {milli => crates/milli}/src/prompt/mod.rs (100%) rename {milli => crates/milli}/src/prompt/template_checker.rs (100%) rename {milli => crates/milli}/src/proximity.rs (100%) rename {milli => crates/milli}/src/score_details.rs (100%) rename {milli => crates/milli}/src/search/facet/facet_distribution.rs (100%) rename {milli => crates/milli}/src/search/facet/facet_distribution_iter.rs (100%) rename {milli => crates/milli}/src/search/facet/facet_range_search.rs (100%) rename {milli => crates/milli}/src/search/facet/facet_sort_ascending.rs (100%) rename {milli => crates/milli}/src/search/facet/facet_sort_descending.rs (100%) rename {milli => crates/milli}/src/search/facet/filter.rs (100%) rename {milli => crates/milli}/src/search/facet/mod.rs (100%) rename {milli => crates/milli}/src/search/facet/search.rs (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all/0.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all/1.snap (100%) rename {milli => 
crates/milli}/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all_stop_early/0.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all_stop_early/1.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_0.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_1.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_2.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_3.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_0.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_1.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_2.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_3.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_0.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_1.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_2.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_3.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_0.snap (100%) rename {milli => 
crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_1.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_2.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_3.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_0.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_1.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_2.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_3.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_0.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_1.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_2.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_3.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_0.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_1.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_2.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_3.hash.snap (100%) rename {milli => 
crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_0.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_1.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_2.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_3.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_0.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_1.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_2.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_3.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_0.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_1.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_2.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_3.hash.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_0.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_1.snap (100%) rename {milli => 
crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_2.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_3.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_0.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_1.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_2.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_3.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending/0.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending/1.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/0-0.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/0-1.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/1-0.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/1-1.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/0.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/1.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/2.snap (100%) rename {milli => 
crates/milli}/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/0-0.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/0-1.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/1-0.snap (100%) rename {milli => crates/milli}/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/1-1.snap (100%) rename {milli => crates/milli}/src/search/fst_utils.rs (100%) rename {milli => crates/milli}/src/search/hybrid.rs (100%) rename {milli => crates/milli}/src/search/mod.rs (100%) rename {milli => crates/milli}/src/search/new/bucket_sort.rs (100%) rename {milli => crates/milli}/src/search/new/db_cache.rs (100%) rename {milli => crates/milli}/src/search/new/distinct.rs (100%) rename {milli => crates/milli}/src/search/new/exact_attribute.rs (100%) rename {milli => crates/milli}/src/search/new/geo_sort.rs (100%) rename {milli => crates/milli}/src/search/new/graph_based_ranking_rule.rs (100%) rename {milli => crates/milli}/src/search/new/interner.rs (100%) rename {milli => crates/milli}/src/search/new/limits.rs (100%) rename {milli => crates/milli}/src/search/new/logger/mod.rs (100%) rename {milli => crates/milli}/src/search/new/logger/visual.rs (100%) rename {milli => crates/milli}/src/search/new/matches/matching_words.rs (100%) rename {milli => crates/milli}/src/search/new/matches/mod.rs (100%) rename {milli => crates/milli}/src/search/new/mod.rs (100%) rename {milli => crates/milli}/src/search/new/query_graph.rs (100%) rename {milli => crates/milli}/src/search/new/query_term/compute_derivations.rs (100%) rename {milli => crates/milli}/src/search/new/query_term/mod.rs (100%) rename {milli => crates/milli}/src/search/new/query_term/ntypo_subset.rs (100%) rename {milli => crates/milli}/src/search/new/query_term/parse_query.rs (100%) 
rename {milli => crates/milli}/src/search/new/query_term/phrase.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/build.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/cheapest_paths.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/condition_docids_cache.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/dead_ends_cache.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/exactness/mod.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/fid/mod.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/mod.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/position/mod.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/proximity/build.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/proximity/compute_docids.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/proximity/mod.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/typo/mod.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rule_graph/words/mod.rs (100%) rename {milli => crates/milli}/src/search/new/ranking_rules.rs (100%) rename {milli => crates/milli}/src/search/new/resolve_query_graph.rs (100%) rename {milli => crates/milli}/src/search/new/small_bitmap.rs (100%) rename {milli => crates/milli}/src/search/new/sort.rs (100%) rename {milli => crates/milli}/src/search/new/tests/attribute_fid.rs (100%) rename {milli => crates/milli}/src/search/new/tests/attribute_position.rs (100%) rename {milli => crates/milli}/src/search/new/tests/cutoff.rs (100%) rename {milli => crates/milli}/src/search/new/tests/distinct.rs (100%) rename {milli => crates/milli}/src/search/new/tests/exactness.rs (100%) rename {milli => crates/milli}/src/search/new/tests/geo_sort.rs (100%) rename {milli => 
crates/milli}/src/search/new/tests/integration.rs (100%) rename {milli => crates/milli}/src/search/new/tests/language.rs (100%) rename {milli => crates/milli}/src/search/new/tests/mod.rs (100%) rename {milli => crates/milli}/src/search/new/tests/ngram_split_words.rs (100%) rename {milli => crates/milli}/src/search/new/tests/proximity.rs (100%) rename {milli => crates/milli}/src/search/new/tests/proximity_typo.rs (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__attribute_fid__attribute_fid_ngrams-4.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__attribute_fid__attribute_fid_simple.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_different_fields.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_ngrams.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_repeated.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_simple-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_after_words.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_all_candidates_with_typo.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_phrase-3.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_phrase.snap (100%) rename {milli => 
crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_simple.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_followed_by_typo_prefer_no_typo_prefix.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_ordered.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_random.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_reversed-3.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_reversed.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__proximity_after_exactness-4.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__proximity_after_exactness.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__typo_followed_by_exactness.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__exactness__words_after_exactness.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort-4.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-10.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-12.snap (100%) rename {milli => 
crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-14.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-16.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-18.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-20.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-4.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-6.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-8.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_mixed_with_words-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_without_any_geo_faceted_documents-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-11.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-14.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-2.snap (100%) rename {milli => 
crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-5.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-8.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-5.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-8.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__sort__redacted-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-11.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-5.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-8.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__stop_words__stop_words_in_phrase-6.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__stop_words__stop_words_in_phrase-8.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-5.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-8.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_exact_attribute-4.snap 
(100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_exact_word-12.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_ranking_rule_not_preceded_by_words_ranking_rule-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_ranking_rule_not_preceded_by_words_ranking_rule-5.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_synonyms-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_synonyms-5.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo_proximity__trap_basic_and_complex1-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__typo_proximity__trap_complex2-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_phrase-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_phrase-5.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_simple-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_simple-5.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_all-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_phrase-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_phrase-5.snap (100%) rename {milli => 
crates/milli}/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_simple-2.snap (100%) rename {milli => crates/milli}/src/search/new/tests/sort.rs (100%) rename {milli => crates/milli}/src/search/new/tests/stop_words.rs (100%) rename {milli => crates/milli}/src/search/new/tests/typo.rs (100%) rename {milli => crates/milli}/src/search/new/tests/typo_proximity.rs (100%) rename {milli => crates/milli}/src/search/new/tests/words_tms.rs (100%) rename {milli => crates/milli}/src/search/new/vector_sort.rs (100%) rename {milli => crates/milli}/src/search/similar.rs (100%) rename {milli => crates/milli}/src/snapshot_tests.rs (100%) rename {milli => crates/milli}/src/snapshots/index.rs/bug_3007/geo_faceted_documents_ids.snap (100%) rename {milli => crates/milli}/src/snapshots/index.rs/unexpected_extra_fields_in_geo_field/geo_faceted_documents_ids.snap (100%) rename {milli => crates/milli}/src/thread_pool_no_abort.rs (100%) rename {milli => crates/milli}/src/update/available_documents_ids.rs (100%) rename {milli => crates/milli}/src/update/clear_documents.rs (100%) rename {milli => crates/milli}/src/update/del_add.rs (100%) rename {milli => crates/milli}/src/update/facet/bulk.rs (100%) rename {milli => crates/milli}/src/update/facet/incremental.rs (100%) rename {milli => crates/milli}/src/update/facet/mod.rs (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert/default.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert/large_group_small_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert/odd_group_odd_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert/small_group_large_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert/small_group_small_min_level.hash.snap (100%) rename {milli => 
crates/milli}/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/default.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/large_group_small_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/odd_group_odd_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/small_group_large_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/small_group_small_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert_string/default.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert_string/large_group_small_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert_string/odd_group_odd_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert_string/small_group_large_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/bulk.rs/insert_string/small_group_small_min_level.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/append/append.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_from_end/0.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_from_end/100.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_from_end/15.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_from_end/150.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_from_end/17.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_from_end/200.hash.snap (100%) 
rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_from_start/127.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_from_start/215.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_from_start/255.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_shuffled/127.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_shuffled/215.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/delete_shuffled/255.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/in_place_level0_delete/after_delete.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/in_place_level0_delete/before_delete.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/in_place_level0_insert/in_place_level0_insert.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/many_field_ids_append/many_field_ids_append.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/many_field_ids_prepend/many_field_ids_prepend.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/merge_values/merge_values.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/prepend/prepend.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/shuffle_merge_string_and_delete/after_delete.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/shuffle_merge_string_and_delete/before_delete.hash.snap (100%) rename {milli => crates/milli}/src/update/facet/snapshots/incremental.rs/shuffled/shuffled.hash.snap (100%) rename {milli => crates/milli}/src/update/index_documents/enrich.rs (100%) rename {milli => 
crates/milli}/src/update/index_documents/extract/extract_docid_word_positions.rs (100%) rename {milli => crates/milli}/src/update/index_documents/extract/extract_facet_number_docids.rs (100%) rename {milli => crates/milli}/src/update/index_documents/extract/extract_facet_string_docids.rs (100%) rename {milli => crates/milli}/src/update/index_documents/extract/extract_fid_docid_facet_values.rs (100%) rename {milli => crates/milli}/src/update/index_documents/extract/extract_fid_word_count_docids.rs (100%) rename {milli => crates/milli}/src/update/index_documents/extract/extract_geo_points.rs (100%) rename {milli => crates/milli}/src/update/index_documents/extract/extract_vector_points.rs (100%) rename {milli => crates/milli}/src/update/index_documents/extract/extract_word_docids.rs (100%) rename {milli => crates/milli}/src/update/index_documents/extract/extract_word_pair_proximity_docids.rs (100%) rename {milli => crates/milli}/src/update/index_documents/extract/extract_word_position_docids.rs (100%) rename {milli => crates/milli}/src/update/index_documents/extract/mod.rs (100%) rename {milli => crates/milli}/src/update/index_documents/helpers/clonable_mmap.rs (100%) rename {milli => crates/milli}/src/update/index_documents/helpers/grenad_helpers.rs (100%) rename {milli => crates/milli}/src/update/index_documents/helpers/merge_functions.rs (100%) rename {milli => crates/milli}/src/update/index_documents/helpers/mod.rs (100%) rename {milli => crates/milli}/src/update/index_documents/mod.rs (100%) rename {milli => crates/milli}/src/update/index_documents/parallel.rs (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/documents_ids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/facet_id_exists_docids.snap (100%) rename {milli => 
crates/milli}/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/word_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/word_pair_proximity_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/documents_ids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/word_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/word_pair_proximity_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_exists_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_f64_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_string_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/word_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/word_pair_proximity_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/geo_filtered_placeholder_search_should_not_return_deleted_documents/facet_id_f64_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/geo_filtered_placeholder_search_should_not_return_deleted_documents/facet_id_string_docids.snap (100%) rename {milli => 
crates/milli}/src/update/index_documents/snapshots/mod.rs/simple_documents_replace/initial/word_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/snapshots/mod.rs/simple_documents_replace/updated/word_docids.snap (100%) rename {milli => crates/milli}/src/update/index_documents/transform.rs (100%) rename {milli => crates/milli}/src/update/index_documents/typed_chunk.rs (100%) rename {milli => crates/milli}/src/update/indexer_config.rs (100%) rename {milli => crates/milli}/src/update/mod.rs (100%) rename {milli => crates/milli}/src/update/settings.rs (100%) rename {milli => crates/milli}/src/update/update_step.rs (100%) rename {milli => crates/milli}/src/update/word_prefix_docids.rs (100%) rename {milli => crates/milli}/src/update/words_prefix_integer_docids.rs (100%) rename {milli => crates/milli}/src/update/words_prefixes_fst.rs (100%) rename {milli => crates/milli}/src/vector/error.rs (100%) rename {milli => crates/milli}/src/vector/hf.rs (100%) rename {milli => crates/milli}/src/vector/json_template.rs (100%) rename {milli => crates/milli}/src/vector/manual.rs (100%) rename {milli => crates/milli}/src/vector/mod.rs (100%) rename {milli => crates/milli}/src/vector/ollama.rs (100%) rename {milli => crates/milli}/src/vector/openai.rs (100%) rename {milli => crates/milli}/src/vector/parsed_vectors.rs (100%) rename {milli => crates/milli}/src/vector/rest.rs (100%) rename {milli => crates/milli}/src/vector/settings.rs (100%) rename {milli => crates/milli}/tests/assets/test_set.ndjson (100%) rename {milli => crates/milli}/tests/mod.rs (100%) rename {milli => crates/milli}/tests/search/distinct.rs (100%) rename {milli => crates/milli}/tests/search/facet_distribution.rs (100%) rename {milli => crates/milli}/tests/search/filters.rs (100%) rename {milli => crates/milli}/tests/search/mod.rs (100%) rename {milli => crates/milli}/tests/search/phrase_search.rs (100%) rename {milli => crates/milli}/tests/search/query_criteria.rs (100%) rename {milli => 
crates/milli}/tests/search/sort.rs (100%) rename {milli => crates/milli}/tests/search/typo_tolerance.rs (100%) rename {permissive-json-pointer => crates/permissive-json-pointer}/Cargo.toml (100%) rename {permissive-json-pointer => crates/permissive-json-pointer}/README.md (100%) rename {permissive-json-pointer => crates/permissive-json-pointer}/src/lib.rs (100%) rename {tracing-trace => crates/tracing-trace}/Cargo.toml (100%) rename {tracing-trace => crates/tracing-trace}/src/bin/trace-to-callstats.rs (100%) rename {tracing-trace => crates/tracing-trace}/src/bin/trace-to-firefox.rs (100%) rename {tracing-trace => crates/tracing-trace}/src/entry.rs (100%) rename {tracing-trace => crates/tracing-trace}/src/error.rs (100%) rename {tracing-trace => crates/tracing-trace}/src/layer.rs (100%) rename {tracing-trace => crates/tracing-trace}/src/lib.rs (100%) rename {tracing-trace => crates/tracing-trace}/src/main.rs (100%) rename {tracing-trace => crates/tracing-trace}/src/processor/firefox_profiler.rs (100%) rename {tracing-trace => crates/tracing-trace}/src/processor/fmt.rs (100%) rename {tracing-trace => crates/tracing-trace}/src/processor/mod.rs (100%) rename {tracing-trace => crates/tracing-trace}/src/processor/span_stats.rs (100%) rename {xtask => crates/xtask}/Cargo.toml (100%) rename {xtask => crates/xtask}/src/bench/assets.rs (100%) rename {xtask => crates/xtask}/src/bench/client.rs (100%) rename {xtask => crates/xtask}/src/bench/command.rs (100%) rename {xtask => crates/xtask}/src/bench/dashboard.rs (100%) rename {xtask => crates/xtask}/src/bench/env_info.rs (100%) rename {xtask => crates/xtask}/src/bench/meili_process.rs (100%) rename {xtask => crates/xtask}/src/bench/mod.rs (100%) rename {xtask => crates/xtask}/src/bench/workload.rs (100%) rename {xtask => crates/xtask}/src/lib.rs (100%) rename {xtask => crates/xtask}/src/main.rs (100%) diff --git a/.gitignore b/.gitignore index e00f45c1e..0d6750008 100644 --- a/.gitignore +++ b/.gitignore @@ -5,7 +5,6 @@ 
**/*.json_lines **/*.rs.bk /*.mdb -/query-history.txt /data.ms /snapshots /dumps @@ -19,4 +18,4 @@ *.snap.new # Fuzzcheck data for the facet indexing fuzz test -milli/fuzz/update::facet::incremental::fuzz::fuzz/ +crates/milli/fuzz/update::facet::incremental::fuzz::fuzz/ diff --git a/Cargo.toml b/Cargo.toml index 5d9e1bd82..4e65ae83d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,24 +1,24 @@ [workspace] resolver = "2" members = [ - "meilisearch", - "meilitool", - "meilisearch-types", - "meilisearch-auth", - "meili-snap", - "index-scheduler", - "dump", - "file-store", - "permissive-json-pointer", - "milli", - "filter-parser", - "flatten-serde-json", - "json-depth-checker", - "benchmarks", - "fuzzers", - "tracing-trace", - "xtask", - "build-info", + "crates/meilisearch", + "crates/meilitool", + "crates/meilisearch-types", + "crates/meilisearch-auth", + "crates/meili-snap", + "crates/index-scheduler", + "crates/dump", + "crates/file-store", + "crates/permissive-json-pointer", + "crates/milli", + "crates/filter-parser", + "crates/flatten-serde-json", + "crates/json-depth-checker", + "crates/benchmarks", + "crates/fuzzers", + "crates/tracing-trace", + "crates/xtask", + "crates/build-info", ] [workspace.package] diff --git a/benchmarks/.gitignore b/crates/benchmarks/.gitignore similarity index 100% rename from benchmarks/.gitignore rename to crates/benchmarks/.gitignore diff --git a/benchmarks/Cargo.toml b/crates/benchmarks/Cargo.toml similarity index 100% rename from benchmarks/Cargo.toml rename to crates/benchmarks/Cargo.toml diff --git a/benchmarks/README.md b/crates/benchmarks/README.md similarity index 100% rename from benchmarks/README.md rename to crates/benchmarks/README.md diff --git a/benchmarks/benches/indexing.rs b/crates/benchmarks/benches/indexing.rs similarity index 100% rename from benchmarks/benches/indexing.rs rename to crates/benchmarks/benches/indexing.rs diff --git a/benchmarks/benches/search_geo.rs b/crates/benchmarks/benches/search_geo.rs similarity 
index 100% rename from benchmarks/benches/search_geo.rs rename to crates/benchmarks/benches/search_geo.rs diff --git a/benchmarks/benches/search_songs.rs b/crates/benchmarks/benches/search_songs.rs similarity index 100% rename from benchmarks/benches/search_songs.rs rename to crates/benchmarks/benches/search_songs.rs diff --git a/benchmarks/benches/search_wiki.rs b/crates/benchmarks/benches/search_wiki.rs similarity index 100% rename from benchmarks/benches/search_wiki.rs rename to crates/benchmarks/benches/search_wiki.rs diff --git a/benchmarks/benches/utils.rs b/crates/benchmarks/benches/utils.rs similarity index 100% rename from benchmarks/benches/utils.rs rename to crates/benchmarks/benches/utils.rs diff --git a/benchmarks/build.rs b/crates/benchmarks/build.rs similarity index 100% rename from benchmarks/build.rs rename to crates/benchmarks/build.rs diff --git a/benchmarks/scripts/compare.sh b/crates/benchmarks/scripts/compare.sh similarity index 100% rename from benchmarks/scripts/compare.sh rename to crates/benchmarks/scripts/compare.sh diff --git a/benchmarks/scripts/list.sh b/crates/benchmarks/scripts/list.sh similarity index 100% rename from benchmarks/scripts/list.sh rename to crates/benchmarks/scripts/list.sh diff --git a/benchmarks/src/lib.rs b/crates/benchmarks/src/lib.rs similarity index 100% rename from benchmarks/src/lib.rs rename to crates/benchmarks/src/lib.rs diff --git a/build-info/Cargo.toml b/crates/build-info/Cargo.toml similarity index 100% rename from build-info/Cargo.toml rename to crates/build-info/Cargo.toml diff --git a/build-info/build.rs b/crates/build-info/build.rs similarity index 100% rename from build-info/build.rs rename to crates/build-info/build.rs diff --git a/build-info/src/lib.rs b/crates/build-info/src/lib.rs similarity index 100% rename from build-info/src/lib.rs rename to crates/build-info/src/lib.rs diff --git a/dump/Cargo.toml b/crates/dump/Cargo.toml similarity index 100% rename from dump/Cargo.toml rename to 
crates/dump/Cargo.toml diff --git a/dump/README.md b/crates/dump/README.md similarity index 100% rename from dump/README.md rename to crates/dump/README.md diff --git a/dump/src/error.rs b/crates/dump/src/error.rs similarity index 100% rename from dump/src/error.rs rename to crates/dump/src/error.rs diff --git a/dump/src/lib.rs b/crates/dump/src/lib.rs similarity index 100% rename from dump/src/lib.rs rename to crates/dump/src/lib.rs diff --git a/dump/src/reader/compat/mod.rs b/crates/dump/src/reader/compat/mod.rs similarity index 100% rename from dump/src/reader/compat/mod.rs rename to crates/dump/src/reader/compat/mod.rs diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-3.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-3.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-3.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-3.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-6.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-6.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-6.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-6.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-9.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-9.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-9.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v1_to_v2__test__compat_v1_v2-9.snap diff --git 
a/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-11.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-11.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-11.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-11.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-14.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-14.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-14.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-14.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-5.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-5.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-5.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-5.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-8.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-8.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-8.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v2_to_v3__test__compat_v2_v3-8.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-12.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-12.snap similarity index 100% rename from 
dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-12.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-12.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-15.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-15.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-15.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-15.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-6.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-6.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-6.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-6.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-9.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-9.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-9.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v3_to_v4__test__compat_v3_v4-9.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-12.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-12.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-12.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-12.snap diff --git 
a/dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-6.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-6.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-6.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-6.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-9.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-9.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-9.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v4_to_v5__test__compat_v4_v5-9.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-12.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-12.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-12.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-12.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-6.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-6.snap similarity index 100% rename from dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-6.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-6.snap diff --git a/dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-9.snap b/crates/dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-9.snap similarity index 100% rename from 
dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-9.snap rename to crates/dump/src/reader/compat/snapshots/dump__reader__compat__v5_to_v6__test__compat_v5_v6-9.snap diff --git a/dump/src/reader/compat/v1_to_v2.rs b/crates/dump/src/reader/compat/v1_to_v2.rs similarity index 100% rename from dump/src/reader/compat/v1_to_v2.rs rename to crates/dump/src/reader/compat/v1_to_v2.rs diff --git a/dump/src/reader/compat/v2_to_v3.rs b/crates/dump/src/reader/compat/v2_to_v3.rs similarity index 100% rename from dump/src/reader/compat/v2_to_v3.rs rename to crates/dump/src/reader/compat/v2_to_v3.rs diff --git a/dump/src/reader/compat/v3_to_v4.rs b/crates/dump/src/reader/compat/v3_to_v4.rs similarity index 100% rename from dump/src/reader/compat/v3_to_v4.rs rename to crates/dump/src/reader/compat/v3_to_v4.rs diff --git a/dump/src/reader/compat/v4_to_v5.rs b/crates/dump/src/reader/compat/v4_to_v5.rs similarity index 100% rename from dump/src/reader/compat/v4_to_v5.rs rename to crates/dump/src/reader/compat/v4_to_v5.rs diff --git a/dump/src/reader/compat/v5_to_v6.rs b/crates/dump/src/reader/compat/v5_to_v6.rs similarity index 100% rename from dump/src/reader/compat/v5_to_v6.rs rename to crates/dump/src/reader/compat/v5_to_v6.rs diff --git a/dump/src/reader/mod.rs b/crates/dump/src/reader/mod.rs similarity index 100% rename from dump/src/reader/mod.rs rename to crates/dump/src/reader/mod.rs diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v1-10.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v1-10.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v1-10.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v1-10.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v1-4.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v1-4.snap similarity index 100% rename from 
dump/src/reader/snapshots/dump__reader__test__import_dump_v1-4.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v1-4.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v1-7.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v1-7.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v1-7.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v1-7.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-11.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-11.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v2-11.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-11.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-14.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-14.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v2-14.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-14.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-5.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-5.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v2-5.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-5.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-8.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-8.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v2-8.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2-8.snap diff --git 
a/dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-11.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-11.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-11.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-11.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-5.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-5.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-5.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-5.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-8.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-8.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-8.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v2_from_meilisearch_v0_22_0_issue_3435-8.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-11.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-11.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v3-11.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-11.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-14.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-14.snap similarity index 100% rename 
from dump/src/reader/snapshots/dump__reader__test__import_dump_v3-14.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-14.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-5.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-5.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v3-5.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-5.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-8.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-8.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v3-8.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v3-8.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v4-12.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v4-12.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v4-12.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v4-12.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v4-6.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v4-6.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v4-6.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v4-6.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v4-9.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v4-9.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v4-9.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v4-9.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v5-12.snap 
b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v5-12.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v5-12.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v5-12.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v5-6.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v5-6.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v5-6.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v5-6.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v5-9.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v5-9.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v5-9.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v5-9.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-5.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-5.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-5.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-5.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-6.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-6.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-6.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-6.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-7.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-7.snap similarity index 100% rename from 
dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-7.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-7.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-8.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-8.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-8.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-8.snap diff --git a/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-9.snap b/crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-9.snap similarity index 100% rename from dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-9.snap rename to crates/dump/src/reader/snapshots/dump__reader__test__import_dump_v6_with_vectors-9.snap diff --git a/dump/src/reader/v1/mod.rs b/crates/dump/src/reader/v1/mod.rs similarity index 100% rename from dump/src/reader/v1/mod.rs rename to crates/dump/src/reader/v1/mod.rs diff --git a/dump/src/reader/v1/settings.rs b/crates/dump/src/reader/v1/settings.rs similarity index 100% rename from dump/src/reader/v1/settings.rs rename to crates/dump/src/reader/v1/settings.rs diff --git a/dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-10.snap b/crates/dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-10.snap similarity index 100% rename from dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-10.snap rename to crates/dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-10.snap diff --git a/dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-2.snap b/crates/dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-2.snap similarity index 100% rename from dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-2.snap rename 
to crates/dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-2.snap diff --git a/dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-6.snap b/crates/dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-6.snap similarity index 100% rename from dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-6.snap rename to crates/dump/src/reader/v1/snapshots/dump__reader__v1__test__read_dump_v1-6.snap diff --git a/dump/src/reader/v1/update.rs b/crates/dump/src/reader/v1/update.rs similarity index 100% rename from dump/src/reader/v1/update.rs rename to crates/dump/src/reader/v1/update.rs diff --git a/dump/src/reader/v2/errors.rs b/crates/dump/src/reader/v2/errors.rs similarity index 100% rename from dump/src/reader/v2/errors.rs rename to crates/dump/src/reader/v2/errors.rs diff --git a/dump/src/reader/v2/meta.rs b/crates/dump/src/reader/v2/meta.rs similarity index 100% rename from dump/src/reader/v2/meta.rs rename to crates/dump/src/reader/v2/meta.rs diff --git a/dump/src/reader/v2/mod.rs b/crates/dump/src/reader/v2/mod.rs similarity index 100% rename from dump/src/reader/v2/mod.rs rename to crates/dump/src/reader/v2/mod.rs diff --git a/dump/src/reader/v2/settings.rs b/crates/dump/src/reader/v2/settings.rs similarity index 100% rename from dump/src/reader/v2/settings.rs rename to crates/dump/src/reader/v2/settings.rs diff --git a/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-11.snap b/crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-11.snap similarity index 100% rename from dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-11.snap rename to crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-11.snap diff --git a/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-14.snap b/crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-14.snap similarity index 100% rename from 
dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-14.snap rename to crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-14.snap diff --git a/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-5.snap b/crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-5.snap similarity index 100% rename from dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-5.snap rename to crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-5.snap diff --git a/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-8.snap b/crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-8.snap similarity index 100% rename from dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-8.snap rename to crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2-8.snap diff --git a/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-10.snap b/crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-10.snap similarity index 100% rename from dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-10.snap rename to crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-10.snap diff --git a/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-4.snap b/crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-4.snap similarity index 100% rename from dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-4.snap rename to crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-4.snap diff --git 
a/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-7.snap b/crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-7.snap similarity index 100% rename from dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-7.snap rename to crates/dump/src/reader/v2/snapshots/dump__reader__v2__test__read_dump_v2_from_meilisearch_v0_22_0_issue_3435-7.snap diff --git a/dump/src/reader/v2/updates.rs b/crates/dump/src/reader/v2/updates.rs similarity index 100% rename from dump/src/reader/v2/updates.rs rename to crates/dump/src/reader/v2/updates.rs diff --git a/dump/src/reader/v3/errors.rs b/crates/dump/src/reader/v3/errors.rs similarity index 100% rename from dump/src/reader/v3/errors.rs rename to crates/dump/src/reader/v3/errors.rs diff --git a/dump/src/reader/v3/meta.rs b/crates/dump/src/reader/v3/meta.rs similarity index 100% rename from dump/src/reader/v3/meta.rs rename to crates/dump/src/reader/v3/meta.rs diff --git a/dump/src/reader/v3/mod.rs b/crates/dump/src/reader/v3/mod.rs similarity index 100% rename from dump/src/reader/v3/mod.rs rename to crates/dump/src/reader/v3/mod.rs diff --git a/dump/src/reader/v3/settings.rs b/crates/dump/src/reader/v3/settings.rs similarity index 100% rename from dump/src/reader/v3/settings.rs rename to crates/dump/src/reader/v3/settings.rs diff --git a/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-11.snap b/crates/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-11.snap similarity index 100% rename from dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-11.snap rename to crates/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-11.snap diff --git a/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-14.snap b/crates/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-14.snap similarity index 100% 
rename from dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-14.snap rename to crates/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-14.snap diff --git a/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-5.snap b/crates/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-5.snap similarity index 100% rename from dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-5.snap rename to crates/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-5.snap diff --git a/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-8.snap b/crates/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-8.snap similarity index 100% rename from dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-8.snap rename to crates/dump/src/reader/v3/snapshots/dump__reader__v3__test__read_dump_v3-8.snap diff --git a/dump/src/reader/v3/updates.rs b/crates/dump/src/reader/v3/updates.rs similarity index 100% rename from dump/src/reader/v3/updates.rs rename to crates/dump/src/reader/v3/updates.rs diff --git a/dump/src/reader/v4/errors.rs b/crates/dump/src/reader/v4/errors.rs similarity index 100% rename from dump/src/reader/v4/errors.rs rename to crates/dump/src/reader/v4/errors.rs diff --git a/dump/src/reader/v4/keys.rs b/crates/dump/src/reader/v4/keys.rs similarity index 100% rename from dump/src/reader/v4/keys.rs rename to crates/dump/src/reader/v4/keys.rs diff --git a/dump/src/reader/v4/meta.rs b/crates/dump/src/reader/v4/meta.rs similarity index 100% rename from dump/src/reader/v4/meta.rs rename to crates/dump/src/reader/v4/meta.rs diff --git a/dump/src/reader/v4/mod.rs b/crates/dump/src/reader/v4/mod.rs similarity index 100% rename from dump/src/reader/v4/mod.rs rename to crates/dump/src/reader/v4/mod.rs diff --git a/dump/src/reader/v4/settings.rs b/crates/dump/src/reader/v4/settings.rs similarity index 100% rename from dump/src/reader/v4/settings.rs rename to 
crates/dump/src/reader/v4/settings.rs diff --git a/dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-10.snap b/crates/dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-10.snap similarity index 100% rename from dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-10.snap rename to crates/dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-10.snap diff --git a/dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-13.snap b/crates/dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-13.snap similarity index 100% rename from dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-13.snap rename to crates/dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-13.snap diff --git a/dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-7.snap b/crates/dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-7.snap similarity index 100% rename from dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-7.snap rename to crates/dump/src/reader/v4/snapshots/dump__reader__v4__test__read_dump_v4-7.snap diff --git a/dump/src/reader/v4/tasks.rs b/crates/dump/src/reader/v4/tasks.rs similarity index 100% rename from dump/src/reader/v4/tasks.rs rename to crates/dump/src/reader/v4/tasks.rs diff --git a/dump/src/reader/v5/errors.rs b/crates/dump/src/reader/v5/errors.rs similarity index 100% rename from dump/src/reader/v5/errors.rs rename to crates/dump/src/reader/v5/errors.rs diff --git a/dump/src/reader/v5/keys.rs b/crates/dump/src/reader/v5/keys.rs similarity index 100% rename from dump/src/reader/v5/keys.rs rename to crates/dump/src/reader/v5/keys.rs diff --git a/dump/src/reader/v5/meta.rs b/crates/dump/src/reader/v5/meta.rs similarity index 100% rename from dump/src/reader/v5/meta.rs rename to crates/dump/src/reader/v5/meta.rs diff --git a/dump/src/reader/v5/mod.rs b/crates/dump/src/reader/v5/mod.rs similarity index 100% rename from 
dump/src/reader/v5/mod.rs rename to crates/dump/src/reader/v5/mod.rs diff --git a/dump/src/reader/v5/settings.rs b/crates/dump/src/reader/v5/settings.rs similarity index 100% rename from dump/src/reader/v5/settings.rs rename to crates/dump/src/reader/v5/settings.rs diff --git a/dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-10.snap b/crates/dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-10.snap similarity index 100% rename from dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-10.snap rename to crates/dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-10.snap diff --git a/dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-13.snap b/crates/dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-13.snap similarity index 100% rename from dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-13.snap rename to crates/dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-13.snap diff --git a/dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-7.snap b/crates/dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-7.snap similarity index 100% rename from dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-7.snap rename to crates/dump/src/reader/v5/snapshots/dump__reader__v5__test__read_dump_v5-7.snap diff --git a/dump/src/reader/v5/tasks.rs b/crates/dump/src/reader/v5/tasks.rs similarity index 100% rename from dump/src/reader/v5/tasks.rs rename to crates/dump/src/reader/v5/tasks.rs diff --git a/dump/src/reader/v6/mod.rs b/crates/dump/src/reader/v6/mod.rs similarity index 100% rename from dump/src/reader/v6/mod.rs rename to crates/dump/src/reader/v6/mod.rs diff --git a/dump/src/writer.rs b/crates/dump/src/writer.rs similarity index 100% rename from dump/src/writer.rs rename to crates/dump/src/writer.rs diff --git a/dump/tests/assets/v1.dump b/crates/dump/tests/assets/v1.dump similarity index 100% rename from 
dump/tests/assets/v1.dump rename to crates/dump/tests/assets/v1.dump diff --git a/dump/tests/assets/v2-v0.22.0.dump b/crates/dump/tests/assets/v2-v0.22.0.dump similarity index 100% rename from dump/tests/assets/v2-v0.22.0.dump rename to crates/dump/tests/assets/v2-v0.22.0.dump diff --git a/dump/tests/assets/v2.dump b/crates/dump/tests/assets/v2.dump similarity index 100% rename from dump/tests/assets/v2.dump rename to crates/dump/tests/assets/v2.dump diff --git a/dump/tests/assets/v3.dump b/crates/dump/tests/assets/v3.dump similarity index 100% rename from dump/tests/assets/v3.dump rename to crates/dump/tests/assets/v3.dump diff --git a/dump/tests/assets/v4.dump b/crates/dump/tests/assets/v4.dump similarity index 100% rename from dump/tests/assets/v4.dump rename to crates/dump/tests/assets/v4.dump diff --git a/dump/tests/assets/v5.dump b/crates/dump/tests/assets/v5.dump similarity index 100% rename from dump/tests/assets/v5.dump rename to crates/dump/tests/assets/v5.dump diff --git a/dump/tests/assets/v6-with-experimental.dump b/crates/dump/tests/assets/v6-with-experimental.dump similarity index 100% rename from dump/tests/assets/v6-with-experimental.dump rename to crates/dump/tests/assets/v6-with-experimental.dump diff --git a/dump/tests/assets/v6-with-vectors.dump b/crates/dump/tests/assets/v6-with-vectors.dump similarity index 100% rename from dump/tests/assets/v6-with-vectors.dump rename to crates/dump/tests/assets/v6-with-vectors.dump diff --git a/file-store/Cargo.toml b/crates/file-store/Cargo.toml similarity index 100% rename from file-store/Cargo.toml rename to crates/file-store/Cargo.toml diff --git a/file-store/src/lib.rs b/crates/file-store/src/lib.rs similarity index 100% rename from file-store/src/lib.rs rename to crates/file-store/src/lib.rs diff --git a/filter-parser/Cargo.toml b/crates/filter-parser/Cargo.toml similarity index 100% rename from filter-parser/Cargo.toml rename to crates/filter-parser/Cargo.toml diff --git a/filter-parser/README.md 
b/crates/filter-parser/README.md similarity index 100% rename from filter-parser/README.md rename to crates/filter-parser/README.md diff --git a/filter-parser/fuzz/.gitignore b/crates/filter-parser/fuzz/.gitignore similarity index 100% rename from filter-parser/fuzz/.gitignore rename to crates/filter-parser/fuzz/.gitignore diff --git a/filter-parser/fuzz/Cargo.toml b/crates/filter-parser/fuzz/Cargo.toml similarity index 100% rename from filter-parser/fuzz/Cargo.toml rename to crates/filter-parser/fuzz/Cargo.toml diff --git a/filter-parser/fuzz/corpus/parse/test_1 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_1 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_1 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_1 diff --git a/filter-parser/fuzz/corpus/parse/test_10 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_10 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_10 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_10 diff --git a/filter-parser/fuzz/corpus/parse/test_11 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_11 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_11 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_11 diff --git a/filter-parser/fuzz/corpus/parse/test_12 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_12 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_12 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_12 diff --git a/filter-parser/fuzz/corpus/parse/test_13 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_13 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_13 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_13 diff --git a/filter-parser/fuzz/corpus/parse/test_14 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_14 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_14 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_14 diff --git 
a/filter-parser/fuzz/corpus/parse/test_15 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_15 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_15 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_15 diff --git a/filter-parser/fuzz/corpus/parse/test_16 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_16 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_16 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_16 diff --git a/filter-parser/fuzz/corpus/parse/test_17 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_17 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_17 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_17 diff --git a/filter-parser/fuzz/corpus/parse/test_18 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_18 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_18 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_18 diff --git a/filter-parser/fuzz/corpus/parse/test_19 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_19 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_19 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_19 diff --git a/filter-parser/fuzz/corpus/parse/test_2 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_2 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_2 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_2 diff --git a/filter-parser/fuzz/corpus/parse/test_20 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_20 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_20 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_20 diff --git a/filter-parser/fuzz/corpus/parse/test_21 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_21 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_21 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_21 diff --git a/filter-parser/fuzz/corpus/parse/test_22 
b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_22 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_22 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_22 diff --git a/filter-parser/fuzz/corpus/parse/test_23 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_23 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_23 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_23 diff --git a/filter-parser/fuzz/corpus/parse/test_24 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_24 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_24 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_24 diff --git a/filter-parser/fuzz/corpus/parse/test_25 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_25 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_25 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_25 diff --git a/filter-parser/fuzz/corpus/parse/test_26 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_26 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_26 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_26 diff --git a/filter-parser/fuzz/corpus/parse/test_27 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_27 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_27 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_27 diff --git a/filter-parser/fuzz/corpus/parse/test_28 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_28 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_28 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_28 diff --git a/filter-parser/fuzz/corpus/parse/test_29 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_29 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_29 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_29 diff --git a/filter-parser/fuzz/corpus/parse/test_3 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_3 
similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_3 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_3 diff --git a/filter-parser/fuzz/corpus/parse/test_30 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_30 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_30 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_30 diff --git a/filter-parser/fuzz/corpus/parse/test_31 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_31 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_31 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_31 diff --git a/filter-parser/fuzz/corpus/parse/test_32 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_32 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_32 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_32 diff --git a/filter-parser/fuzz/corpus/parse/test_33 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_33 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_33 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_33 diff --git a/filter-parser/fuzz/corpus/parse/test_34 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_34 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_34 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_34 diff --git a/filter-parser/fuzz/corpus/parse/test_35 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_35 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_35 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_35 diff --git a/filter-parser/fuzz/corpus/parse/test_36 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_36 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_36 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_36 diff --git a/filter-parser/fuzz/corpus/parse/test_37 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_37 similarity index 100% rename from 
filter-parser/fuzz/corpus/parse/test_37 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_37 diff --git a/filter-parser/fuzz/corpus/parse/test_38 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_38 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_38 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_38 diff --git a/filter-parser/fuzz/corpus/parse/test_39 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_39 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_39 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_39 diff --git a/filter-parser/fuzz/corpus/parse/test_4 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_4 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_4 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_4 diff --git a/filter-parser/fuzz/corpus/parse/test_40 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_40 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_40 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_40 diff --git a/filter-parser/fuzz/corpus/parse/test_41 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_41 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_41 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_41 diff --git a/filter-parser/fuzz/corpus/parse/test_42 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_42 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_42 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_42 diff --git a/filter-parser/fuzz/corpus/parse/test_43 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_43 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_43 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_43 diff --git a/filter-parser/fuzz/corpus/parse/test_5 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_5 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_5 rename to 
crates/filter-parser/fuzz/fuzz/corpus/parse/test_5 diff --git a/filter-parser/fuzz/corpus/parse/test_6 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_6 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_6 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_6 diff --git a/filter-parser/fuzz/corpus/parse/test_7 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_7 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_7 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_7 diff --git a/filter-parser/fuzz/corpus/parse/test_8 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_8 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_8 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_8 diff --git a/filter-parser/fuzz/corpus/parse/test_9 b/crates/filter-parser/fuzz/fuzz/corpus/parse/test_9 similarity index 100% rename from filter-parser/fuzz/corpus/parse/test_9 rename to crates/filter-parser/fuzz/fuzz/corpus/parse/test_9 diff --git a/filter-parser/fuzz/fuzz_targets/parse.rs b/crates/filter-parser/fuzz/fuzz_targets/parse.rs similarity index 100% rename from filter-parser/fuzz/fuzz_targets/parse.rs rename to crates/filter-parser/fuzz/fuzz_targets/parse.rs diff --git a/filter-parser/src/condition.rs b/crates/filter-parser/src/condition.rs similarity index 100% rename from filter-parser/src/condition.rs rename to crates/filter-parser/src/condition.rs diff --git a/filter-parser/src/error.rs b/crates/filter-parser/src/error.rs similarity index 100% rename from filter-parser/src/error.rs rename to crates/filter-parser/src/error.rs diff --git a/filter-parser/src/lib.rs b/crates/filter-parser/src/lib.rs similarity index 100% rename from filter-parser/src/lib.rs rename to crates/filter-parser/src/lib.rs diff --git a/filter-parser/src/main.rs b/crates/filter-parser/src/main.rs similarity index 100% rename from filter-parser/src/main.rs rename to crates/filter-parser/src/main.rs diff --git 
a/filter-parser/src/value.rs b/crates/filter-parser/src/value.rs similarity index 100% rename from filter-parser/src/value.rs rename to crates/filter-parser/src/value.rs diff --git a/flatten-serde-json/Cargo.toml b/crates/flatten-serde-json/Cargo.toml similarity index 100% rename from flatten-serde-json/Cargo.toml rename to crates/flatten-serde-json/Cargo.toml diff --git a/flatten-serde-json/README.md b/crates/flatten-serde-json/README.md similarity index 100% rename from flatten-serde-json/README.md rename to crates/flatten-serde-json/README.md diff --git a/flatten-serde-json/benches/benchmarks.rs b/crates/flatten-serde-json/benches/benchmarks.rs similarity index 100% rename from flatten-serde-json/benches/benchmarks.rs rename to crates/flatten-serde-json/benches/benchmarks.rs diff --git a/flatten-serde-json/fuzz/Cargo.toml b/crates/flatten-serde-json/fuzz/Cargo.toml similarity index 100% rename from flatten-serde-json/fuzz/Cargo.toml rename to crates/flatten-serde-json/fuzz/Cargo.toml diff --git a/flatten-serde-json/fuzz/fuzz_targets/flatten.rs b/crates/flatten-serde-json/fuzz/fuzz_targets/flatten.rs similarity index 100% rename from flatten-serde-json/fuzz/fuzz_targets/flatten.rs rename to crates/flatten-serde-json/fuzz/fuzz_targets/flatten.rs diff --git a/flatten-serde-json/src/lib.rs b/crates/flatten-serde-json/src/lib.rs similarity index 100% rename from flatten-serde-json/src/lib.rs rename to crates/flatten-serde-json/src/lib.rs diff --git a/flatten-serde-json/src/main.rs b/crates/flatten-serde-json/src/main.rs similarity index 100% rename from flatten-serde-json/src/main.rs rename to crates/flatten-serde-json/src/main.rs diff --git a/fuzzers/Cargo.toml b/crates/fuzzers/Cargo.toml similarity index 100% rename from fuzzers/Cargo.toml rename to crates/fuzzers/Cargo.toml diff --git a/fuzzers/README.md b/crates/fuzzers/README.md similarity index 100% rename from fuzzers/README.md rename to crates/fuzzers/README.md diff --git a/fuzzers/src/bin/fuzz-indexing.rs 
b/crates/fuzzers/src/bin/fuzz-indexing.rs similarity index 100% rename from fuzzers/src/bin/fuzz-indexing.rs rename to crates/fuzzers/src/bin/fuzz-indexing.rs diff --git a/fuzzers/src/lib.rs b/crates/fuzzers/src/lib.rs similarity index 100% rename from fuzzers/src/lib.rs rename to crates/fuzzers/src/lib.rs diff --git a/index-scheduler/Cargo.toml b/crates/index-scheduler/Cargo.toml similarity index 100% rename from index-scheduler/Cargo.toml rename to crates/index-scheduler/Cargo.toml diff --git a/index-scheduler/src/autobatcher.rs b/crates/index-scheduler/src/autobatcher.rs similarity index 100% rename from index-scheduler/src/autobatcher.rs rename to crates/index-scheduler/src/autobatcher.rs diff --git a/index-scheduler/src/batch.rs b/crates/index-scheduler/src/batch.rs similarity index 100% rename from index-scheduler/src/batch.rs rename to crates/index-scheduler/src/batch.rs diff --git a/index-scheduler/src/error.rs b/crates/index-scheduler/src/error.rs similarity index 100% rename from index-scheduler/src/error.rs rename to crates/index-scheduler/src/error.rs diff --git a/index-scheduler/src/features.rs b/crates/index-scheduler/src/features.rs similarity index 100% rename from index-scheduler/src/features.rs rename to crates/index-scheduler/src/features.rs diff --git a/index-scheduler/src/index_mapper/index_map.rs b/crates/index-scheduler/src/index_mapper/index_map.rs similarity index 100% rename from index-scheduler/src/index_mapper/index_map.rs rename to crates/index-scheduler/src/index_mapper/index_map.rs diff --git a/index-scheduler/src/index_mapper/mod.rs b/crates/index-scheduler/src/index_mapper/mod.rs similarity index 100% rename from index-scheduler/src/index_mapper/mod.rs rename to crates/index-scheduler/src/index_mapper/mod.rs diff --git a/index-scheduler/src/insta_snapshot.rs b/crates/index-scheduler/src/insta_snapshot.rs similarity index 100% rename from index-scheduler/src/insta_snapshot.rs rename to crates/index-scheduler/src/insta_snapshot.rs 
diff --git a/index-scheduler/src/lib.rs b/crates/index-scheduler/src/lib.rs similarity index 100% rename from index-scheduler/src/lib.rs rename to crates/index-scheduler/src/lib.rs diff --git a/index-scheduler/src/lru.rs b/crates/index-scheduler/src/lru.rs similarity index 100% rename from index-scheduler/src/lru.rs rename to crates/index-scheduler/src/lru.rs diff --git a/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-15.snap b/crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-15.snap similarity index 100% rename from index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-15.snap rename to crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-15.snap diff --git a/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-2.snap b/crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-2.snap similarity index 100% rename from index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-2.snap rename to crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-2.snap diff --git a/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-22.snap b/crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-22.snap similarity index 100% rename from index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-22.snap rename to crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-22.snap diff --git a/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-5.snap b/crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-5.snap similarity index 100% rename from index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-5.snap rename to crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-5.snap diff --git a/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-8.snap 
b/crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-8.snap similarity index 100% rename from index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-8.snap rename to crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors-8.snap diff --git a/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors.snap b/crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors.snap similarity index 100% rename from index-scheduler/src/snapshots/index_scheduler__tests__import_vectors.snap rename to crates/index-scheduler/src/snapshots/index_scheduler__tests__import_vectors.snap diff --git a/index-scheduler/src/snapshots/index_scheduler__tests__settings_update-2.snap b/crates/index-scheduler/src/snapshots/index_scheduler__tests__settings_update-2.snap similarity index 100% rename from index-scheduler/src/snapshots/index_scheduler__tests__settings_update-2.snap rename to crates/index-scheduler/src/snapshots/index_scheduler__tests__settings_update-2.snap diff --git a/index-scheduler/src/snapshots/index_scheduler__tests__settings_update-5.snap b/crates/index-scheduler/src/snapshots/index_scheduler__tests__settings_update-5.snap similarity index 100% rename from index-scheduler/src/snapshots/index_scheduler__tests__settings_update-5.snap rename to crates/index-scheduler/src/snapshots/index_scheduler__tests__settings_update-5.snap diff --git a/index-scheduler/src/snapshots/index_scheduler__tests__settings_update.snap b/crates/index-scheduler/src/snapshots/index_scheduler__tests__settings_update.snap similarity index 100% rename from index-scheduler/src/snapshots/index_scheduler__tests__settings_update.snap rename to crates/index-scheduler/src/snapshots/index_scheduler__tests__settings_update.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_enqueued_task/cancel_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_enqueued_task/cancel_processed.snap similarity index 100% 
rename from index-scheduler/src/snapshots/lib.rs/cancel_enqueued_task/cancel_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_enqueued_task/cancel_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_enqueued_task/initial_tasks_enqueued.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_enqueued_task/initial_tasks_enqueued.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_enqueued_task/initial_tasks_enqueued.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_enqueued_task/initial_tasks_enqueued.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/aborted_indexation.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/aborted_indexation.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/aborted_indexation.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/aborted_indexation.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/cancel_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/cancel_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/cancel_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/cancel_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/first_task_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/first_task_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/first_task_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/first_task_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/processing_second_task_cancel_enqueued.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/processing_second_task_cancel_enqueued.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/processing_second_task_cancel_enqueued.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_mix_of_tasks/processing_second_task_cancel_enqueued.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/after_dump_register.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/after_dump_register.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/after_dump_register.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/after_dump_register.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/cancel_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/cancel_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/cancel_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/cancel_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/cancel_registered.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/cancel_registered.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/cancel_registered.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_dump/cancel_registered.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/aborted_indexation.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/aborted_indexation.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_processing_task/aborted_indexation.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/aborted_indexation.snap diff 
--git a/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/cancel_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/cancel_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_processing_task/cancel_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/cancel_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/cancel_task_registered.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/cancel_task_registered.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_processing_task/cancel_task_registered.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/cancel_task_registered.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/initial_task_processing.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/initial_task_processing.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_processing_task/initial_task_processing.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/initial_task_processing.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_processing_task/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_processing_task/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/cancel_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/cancel_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/cancel_processed.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/cancel_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/initial_task_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/initial_task_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/initial_task_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/initial_task_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/cancel_succeeded_task/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/do_not_batch_task_of_different_indexes/all_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/do_not_batch_task_of_different_indexes/all_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/do_not_batch_task_of_different_indexes/all_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/do_not_batch_task_of_different_indexes/all_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition/after_register.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition/after_register.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition/after_register.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition/after_register.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition/after_the_batch_creation.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition/after_the_batch_creation.snap similarity 
index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition/after_the_batch_creation.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition/after_the_batch_creation.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition/once_everything_is_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition/once_everything_is_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition/once_everything_is_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition/once_everything_is_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/after_processing_the_batch.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/after_processing_the_batch.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/after_processing_the_batch.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/after_processing_the_batch.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/registered_the_first_task.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/registered_the_second_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/registered_the_second_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/registered_the_second_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_document_deletion/registered_the_second_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/before_index_creation.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/before_index_creation.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/before_index_creation.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/before_index_creation.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/both_task_succeeded.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/both_task_succeeded.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/both_task_succeeded.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/both_task_succeeded.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_first_task.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_second_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_second_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_second_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_second_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_third_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_third_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_third_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion/registered_the_third_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion_on_unexisting_index/1.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion_on_unexisting_index/1.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion_on_unexisting_index/1.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion_on_unexisting_index/1.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion_on_unexisting_index/2.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion_on_unexisting_index/2.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion_on_unexisting_index/2.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/document_addition_and_index_deletion_on_unexisting_index/2.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/after_failing_the_deletion.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/after_failing_the_deletion.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/after_failing_the_deletion.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/after_failing_the_deletion.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/after_last_successful_addition.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/after_last_successful_addition.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/after_last_successful_addition.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/after_last_successful_addition.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/registered_the_first_task.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/registered_the_second_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/registered_the_second_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/registered_the_second_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/document_deletion_and_document_addition/registered_the_second_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/document_addition_batch_created.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/document_addition_batch_created.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/document_addition_batch_created.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/document_addition_batch_created.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/document_addition_failed.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/document_addition_failed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/document_addition_failed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/document_addition_failed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/registered_the_first_task.snap similarity index 100% rename from 
index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_addition/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_settings.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_settings.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_settings.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_settings.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_settings_and_documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_settings_and_documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_settings_and_documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_adding_the_settings_and_documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_removing_the_documents.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_removing_the_documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_removing_the_documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/after_removing_the_documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/documents_remaining_should_only_be_bork.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/documents_remaining_should_only_be_bork.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/documents_remaining_should_only_be_bork.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/documents_remaining_should_only_be_bork.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/registered_the_document_deletions.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/registered_the_document_deletions.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/registered_the_document_deletions.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/registered_the_document_deletions.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/registered_the_setting_and_document_addition.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/registered_the_setting_and_document_addition.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/registered_the_setting_and_document_addition.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_document_deletion/registered_the_setting_and_document_addition.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_index_creation/after_register.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_index_creation/after_register.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_index_creation/after_register.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_index_creation/after_register.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_index_creation/index_creation_failed.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_index_creation/index_creation_failed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_index_creation/index_creation_failed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_process_batch_for_index_creation/index_creation_failed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/after_batch_succeeded.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/after_batch_succeeded.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/after_batch_succeeded.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/after_batch_succeeded.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/after_failing_to_commit.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/after_failing_to_commit.snap 
similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/after_failing_to_commit.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/after_failing_to_commit.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/document_addition_succeeded_but_index_scheduler_not_updated.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/document_addition_succeeded_but_index_scheduler_not_updated.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/document_addition_succeeded_but_index_scheduler_not_updated.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/document_addition_succeeded_but_index_scheduler_not_updated.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/task_successfully_processed.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/task_successfully_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/task_successfully_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/fail_in_update_task_after_process_batch_success_for_document_addition/task_successfully_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/import_vectors/Intel to kefir succeeds.snap b/crates/index-scheduler/src/snapshots/lib.rs/import_vectors/Intel to kefir succeeds.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/import_vectors/Intel to kefir succeeds.snap rename to crates/index-scheduler/src/snapshots/lib.rs/import_vectors/Intel to kefir succeeds.snap diff --git a/index-scheduler/src/snapshots/lib.rs/import_vectors/Intel to kefir.snap b/crates/index-scheduler/src/snapshots/lib.rs/import_vectors/Intel to kefir.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/import_vectors/Intel to kefir.snap rename to crates/index-scheduler/src/snapshots/lib.rs/import_vectors/Intel to kefir.snap diff --git a/index-scheduler/src/snapshots/lib.rs/import_vectors/adding Intel succeeds.snap b/crates/index-scheduler/src/snapshots/lib.rs/import_vectors/adding Intel succeeds.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/import_vectors/adding Intel succeeds.snap rename to crates/index-scheduler/src/snapshots/lib.rs/import_vectors/adding Intel succeeds.snap diff --git a/index-scheduler/src/snapshots/lib.rs/import_vectors/after adding Intel.snap b/crates/index-scheduler/src/snapshots/lib.rs/import_vectors/after adding Intel.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/import_vectors/after adding Intel.snap rename to crates/index-scheduler/src/snapshots/lib.rs/import_vectors/after adding 
Intel.snap diff --git a/index-scheduler/src/snapshots/lib.rs/import_vectors/after_registering_settings_task_vectors.snap b/crates/index-scheduler/src/snapshots/lib.rs/import_vectors/after_registering_settings_task_vectors.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/import_vectors/after_registering_settings_task_vectors.snap rename to crates/index-scheduler/src/snapshots/lib.rs/import_vectors/after_registering_settings_task_vectors.snap diff --git a/index-scheduler/src/snapshots/lib.rs/import_vectors/settings_update_processed_vectors.snap b/crates/index-scheduler/src/snapshots/lib.rs/import_vectors/settings_update_processed_vectors.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/import_vectors/settings_update_processed_vectors.snap rename to crates/index-scheduler/src/snapshots/lib.rs/import_vectors/settings_update_processed_vectors.snap diff --git a/index-scheduler/src/snapshots/lib.rs/import_vectors_first_and_embedder_later/documents after initial push.snap b/crates/index-scheduler/src/snapshots/lib.rs/import_vectors_first_and_embedder_later/documents after initial push.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/import_vectors_first_and_embedder_later/documents after initial push.snap rename to crates/index-scheduler/src/snapshots/lib.rs/import_vectors_first_and_embedder_later/documents after initial push.snap diff --git a/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/after_batch_creation.snap b/crates/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/after_batch_creation.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/after_batch_creation.snap rename to crates/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/after_batch_creation.snap diff --git 
a/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_second_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_second_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_second_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_second_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_third_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_third_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_third_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/insert_task_while_another_task_is_processing/registered_the_third_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/panic_in_process_batch_for_index_creation/index_creation_failed.snap b/crates/index-scheduler/src/snapshots/lib.rs/panic_in_process_batch_for_index_creation/index_creation_failed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/panic_in_process_batch_for_index_creation/index_creation_failed.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/panic_in_process_batch_for_index_creation/index_creation_failed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/panic_in_process_batch_for_index_creation/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/panic_in_process_batch_for_index_creation/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/panic_in_process_batch_for_index_creation/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/panic_in_process_batch_for_index_creation/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_second_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_second_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_second_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_second_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_third_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_third_task.snap similarity index 100% rename from 
index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_third_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/processed_the_third_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_second_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_second_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_second_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_second_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_third_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_third_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_third_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_inserted_without_new_signal/registered_the_third_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/first.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/first.snap similarity index 100% rename from 
index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/first.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/first.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/fourth.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/fourth.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/fourth.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/fourth.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_fourth_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_fourth_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_fourth_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_fourth_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_second_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_second_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_second_task.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_second_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_third_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_third_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_third_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/registered_the_third_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/second.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/second.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/second.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/second.snap diff --git a/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/third.snap b/crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/third.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/third.snap rename to crates/index-scheduler/src/snapshots/lib.rs/process_tasks_without_autobatching/third.snap diff --git a/index-scheduler/src/snapshots/lib.rs/query_tasks_canceled_by/start.snap b/crates/index-scheduler/src/snapshots/lib.rs/query_tasks_canceled_by/start.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/query_tasks_canceled_by/start.snap rename to crates/index-scheduler/src/snapshots/lib.rs/query_tasks_canceled_by/start.snap diff --git a/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/processed_all_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/processed_all_tasks.snap similarity index 100% rename 
from index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/processed_all_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/processed_all_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_second_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_second_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_second_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_second_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_third_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_third_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_third_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/query_tasks_from_and_limit/registered_the_third_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/query_tasks_simple/end.snap b/crates/index-scheduler/src/snapshots/lib.rs/query_tasks_simple/end.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/query_tasks_simple/end.snap rename to crates/index-scheduler/src/snapshots/lib.rs/query_tasks_simple/end.snap diff --git a/index-scheduler/src/snapshots/lib.rs/query_tasks_simple/start.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/query_tasks_simple/start.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/query_tasks_simple/start.snap rename to crates/index-scheduler/src/snapshots/lib.rs/query_tasks_simple/start.snap diff --git a/index-scheduler/src/snapshots/lib.rs/query_tasks_special_rules/start.snap b/crates/index-scheduler/src/snapshots/lib.rs/query_tasks_special_rules/start.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/query_tasks_special_rules/start.snap rename to crates/index-scheduler/src/snapshots/lib.rs/query_tasks_special_rules/start.snap diff --git a/index-scheduler/src/snapshots/lib.rs/register/everything_is_successfully_registered.snap b/crates/index-scheduler/src/snapshots/lib.rs/register/everything_is_successfully_registered.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/register/everything_is_successfully_registered.snap rename to crates/index-scheduler/src/snapshots/lib.rs/register/everything_is_successfully_registered.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_a.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_a.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes/create_a.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_a.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_b.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_b.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes/create_b.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_b.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_c.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_c.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes/create_c.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_c.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_d.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_d.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes/create_d.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/create_d.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes/first_swap_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/first_swap_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes/first_swap_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/first_swap_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes/first_swap_registered.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/first_swap_registered.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes/first_swap_registered.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/first_swap_registered.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes/second_swap_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/second_swap_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes/second_swap_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/second_swap_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes/third_empty_swap_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/third_empty_swap_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes/third_empty_swap_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/third_empty_swap_processed.snap diff --git 
a/index-scheduler/src/snapshots/lib.rs/swap_indexes/two_swaps_registered.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/two_swaps_registered.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes/two_swaps_registered.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes/two_swaps_registered.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/after_the_index_creation.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/after_the_index_creation.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/after_the_index_creation.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/after_the_index_creation.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/first_swap_failed.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/first_swap_failed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/first_swap_failed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/first_swap_failed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/initial_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/initial_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/initial_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/swap_indexes_errors/initial_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/initial_tasks_enqueued.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/initial_tasks_enqueued.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/initial_tasks_enqueued.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/initial_tasks_enqueued.snap diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/initial_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/initial_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/initial_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/initial_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/task_deletion_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/task_deletion_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/task_deletion_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/task_deletion_delete_same_task_twice/task_deletion_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/after_registering_the_task_deletion.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/after_registering_the_task_deletion.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/after_registering_the_task_deletion.snap rename to crates/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/after_registering_the_task_deletion.snap diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/initial_tasks_enqueued.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/initial_tasks_enqueued.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/initial_tasks_enqueued.snap rename to crates/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/initial_tasks_enqueued.snap 
diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/initial_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/initial_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/initial_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/initial_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/task_deletion_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/task_deletion_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/task_deletion_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/task_deletion_deleteable/task_deletion_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/initial_tasks_enqueued.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/initial_tasks_enqueued.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/initial_tasks_enqueued.snap rename to crates/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/initial_tasks_enqueued.snap diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_done.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_done.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_done.snap rename to crates/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_done.snap diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_enqueued.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_enqueued.snap similarity index 100% rename from 
index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_enqueued.snap rename to crates/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_enqueued.snap diff --git a/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_processing.snap b/crates/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_processing.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_processing.snap rename to crates/index-scheduler/src/snapshots/lib.rs/task_deletion_undeleteable/task_deletion_processing.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/after_the_second_task_deletion.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/after_the_second_task_deletion.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/after_the_second_task_deletion.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/after_the_second_task_deletion.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/everything_has_been_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/everything_has_been_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/everything_has_been_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/everything_has_been_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_deletion_have_been_enqueued.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_deletion_have_been_enqueued.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_deletion_have_been_enqueued.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_deletion_have_been_enqueued.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_deletion_have_been_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_deletion_have_been_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_deletion_have_been_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_deletion_have_been_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_queue_is_full.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_queue_is_full.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_queue_is_full.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_auto_deletion_of_tasks/task_queue_is_full.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_disable_auto_deletion_of_tasks/task_deletion_have_not_been_enqueued.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_disable_auto_deletion_of_tasks/task_deletion_have_not_been_enqueued.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_disable_auto_deletion_of_tasks/task_deletion_have_not_been_enqueued.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_disable_auto_deletion_of_tasks/task_deletion_have_not_been_enqueued.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_disable_auto_deletion_of_tasks/task_queue_is_full.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_disable_auto_deletion_of_tasks/task_queue_is_full.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_disable_auto_deletion_of_tasks/task_queue_is_full.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/test_disable_auto_deletion_of_tasks/task_queue_is_full.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/after_processing_the_10_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/after_processing_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/after_processing_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/after_processing_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/after_registering_the_10_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/after_registering_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/after_registering_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/after_registering_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/processed_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/processed_the_first_task.snap 
similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/processed_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/processed_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/after_registering_the_10_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/after_registering_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/after_registering_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/after_registering_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/all_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/all_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/all_tasks_processed.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/all_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/five_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/five_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/five_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/five_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/processed_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/processed_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/processed_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/processed_the_first_task.snap diff --git 
a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_with_index_without_autobatching/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index/after_processing_the_10_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index/after_processing_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index/after_processing_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index/after_processing_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index/after_registering_the_10_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index/after_registering_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index/after_registering_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index/after_registering_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/after_registering_the_10_tasks.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/after_registering_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/after_registering_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/after_registering_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/all_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/all_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/all_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/all_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/five_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/five_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/five_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_cant_create_index_without_index_without_autobatching/five_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/after_registering_the_10_tasks.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/after_registering_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/after_registering_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/after_registering_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/all_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/all_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/all_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/all_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/only_first_task_failed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/only_first_task_failed.snap similarity index 
100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/only_first_task_failed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_right_without_index_starts_with_cant_create/only_first_task_failed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/after_registering_the_10_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/after_registering_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/after_registering_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/after_registering_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/all_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/all_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/all_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/all_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/processed_the_first_task.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/processed_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/processed_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/processed_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/registered_the_first_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/registered_the_first_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/registered_the_first_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_mixed_rights_with_index/registered_the_first_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/after_registering_the_5_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/after_registering_the_5_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/after_registering_the_5_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/after_registering_the_5_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/documents.snap diff --git 
a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/fifth_task_succeeds.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/fifth_task_succeeds.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/fifth_task_succeeds.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/fifth_task_succeeds.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/first_and_second_task_fails.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/first_and_second_task_fails.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/first_and_second_task_fails.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/first_and_second_task_fails.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/fourth_task_fails.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/fourth_task_fails.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/fourth_task_fails.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/fourth_task_fails.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/third_task_succeeds.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/third_task_succeeds.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/third_task_succeeds.snap rename to 
crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_bad_primary_key/third_task_succeeds.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/after_registering_the_3_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/after_registering_the_3_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/after_registering_the_3_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/after_registering_the_3_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/only_first_task_succeed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/only_first_task_succeed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/only_first_task_succeed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/only_first_task_succeed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/second_task_fails.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/second_task_fails.snap similarity index 100% rename from 
index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/second_task_fails.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/second_task_fails.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/third_task_fails.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/third_task_fails.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/third_task_fails.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key/third_task_fails.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/after_registering_the_3_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/after_registering_the_3_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/after_registering_the_3_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/after_registering_the_3_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/documents.snap diff --git 
a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/only_first_task_succeed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/only_first_task_succeed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/only_first_task_succeed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/only_first_task_succeed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/second_and_third_tasks_fails.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/second_and_third_tasks_fails.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/second_and_third_tasks_fails.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_multiple_primary_key_batch_wrong_key/second_and_third_tasks_fails.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/after_registering_the_6_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/after_registering_the_6_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/after_registering_the_6_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/after_registering_the_6_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/all_other_tasks_succeeds.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/all_other_tasks_succeeds.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/all_other_tasks_succeeds.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/all_other_tasks_succeeds.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/first_task_fails.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/first_task_fails.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/first_task_fails.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/first_task_fails.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/second_task_fails.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/second_task_fails.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/second_task_fails.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/second_task_fails.snap diff --git 
a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/third_task_succeeds.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/third_task_succeeds.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/third_task_succeeds.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key/third_task_succeeds.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/after_registering_the_6_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/after_registering_the_6_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/after_registering_the_6_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/after_registering_the_6_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/all_other_tasks_succeeds.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/all_other_tasks_succeeds.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/all_other_tasks_succeeds.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/all_other_tasks_succeeds.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/documents.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/first_task_succeed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/first_task_succeed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/first_task_succeed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/first_task_succeed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/second_task_fails.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/second_task_fails.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/second_task_fails.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/second_task_fails.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/third_task_succeeds.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/third_task_succeeds.snap similarity index 100% rename from 
index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/third_task_succeeds.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_addition_with_set_and_null_primary_key_inference_works/third_task_succeeds.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_replace/1.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_replace/1.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_replace/1.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_replace/1.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_replace/2.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_replace/2.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_replace/2.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_replace/2.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_replace/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_replace/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_replace/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_replace/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/after_registering_the_10_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/after_registering_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/after_registering_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/after_registering_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/all_tasks_processed.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/all_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/all_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/all_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/five_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/five_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/five_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_replace_without_autobatching/five_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_update/1.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_update/1.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_update/1.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_update/1.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_update/2.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_update/2.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_update/2.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_update/2.snap diff 
--git a/index-scheduler/src/snapshots/lib.rs/test_document_update/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_update/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_update/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_update/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/after_registering_the_10_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/after_registering_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/after_registering_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/after_registering_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/all_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/all_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/all_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/all_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/five_tasks_processed.snap 
b/crates/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/five_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/five_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_document_update_without_autobatching/five_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/after_registering_the_10_tasks.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/after_registering_the_10_tasks.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/after_registering_the_10_tasks.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/after_registering_the_10_tasks.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/all_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/all_tasks_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/all_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/all_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/documents.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/documents.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/documents.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/documents.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/five_tasks_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/five_tasks_processed.snap similarity index 100% rename from 
index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/five_tasks_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_mixed_document_addition/five_tasks_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_settings_update/after_registering_settings_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_settings_update/after_registering_settings_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_settings_update/after_registering_settings_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_settings_update/after_registering_settings_task.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_settings_update/settings_update_processed.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_settings_update/settings_update_processed.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_settings_update/settings_update_processed.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_settings_update/settings_update_processed.snap diff --git a/index-scheduler/src/snapshots/lib.rs/test_task_is_processing/registered_a_task.snap b/crates/index-scheduler/src/snapshots/lib.rs/test_task_is_processing/registered_a_task.snap similarity index 100% rename from index-scheduler/src/snapshots/lib.rs/test_task_is_processing/registered_a_task.snap rename to crates/index-scheduler/src/snapshots/lib.rs/test_task_is_processing/registered_a_task.snap diff --git a/index-scheduler/src/utils.rs b/crates/index-scheduler/src/utils.rs similarity index 100% rename from index-scheduler/src/utils.rs rename to crates/index-scheduler/src/utils.rs diff --git a/index-scheduler/src/uuid_codec.rs b/crates/index-scheduler/src/uuid_codec.rs similarity index 100% rename from index-scheduler/src/uuid_codec.rs rename to crates/index-scheduler/src/uuid_codec.rs diff --git a/json-depth-checker/Cargo.toml b/crates/json-depth-checker/Cargo.toml similarity index 100% 
rename from json-depth-checker/Cargo.toml rename to crates/json-depth-checker/Cargo.toml diff --git a/json-depth-checker/benches/depth.rs b/crates/json-depth-checker/benches/depth.rs similarity index 100% rename from json-depth-checker/benches/depth.rs rename to crates/json-depth-checker/benches/depth.rs diff --git a/json-depth-checker/fuzz/Cargo.toml b/crates/json-depth-checker/fuzz/Cargo.toml similarity index 100% rename from json-depth-checker/fuzz/Cargo.toml rename to crates/json-depth-checker/fuzz/Cargo.toml diff --git a/json-depth-checker/fuzz/fuzz_targets/depth.rs b/crates/json-depth-checker/fuzz/fuzz_targets/depth.rs similarity index 100% rename from json-depth-checker/fuzz/fuzz_targets/depth.rs rename to crates/json-depth-checker/fuzz/fuzz_targets/depth.rs diff --git a/json-depth-checker/src/lib.rs b/crates/json-depth-checker/src/lib.rs similarity index 100% rename from json-depth-checker/src/lib.rs rename to crates/json-depth-checker/src/lib.rs diff --git a/meili-snap/Cargo.toml b/crates/meili-snap/Cargo.toml similarity index 100% rename from meili-snap/Cargo.toml rename to crates/meili-snap/Cargo.toml diff --git a/meili-snap/src/lib.rs b/crates/meili-snap/src/lib.rs similarity index 100% rename from meili-snap/src/lib.rs rename to crates/meili-snap/src/lib.rs diff --git a/meili-snap/src/snapshots/lib.rs/snap/4.snap b/crates/meili-snap/src/snapshots/lib.rs/snap/4.snap similarity index 100% rename from meili-snap/src/snapshots/lib.rs/snap/4.snap rename to crates/meili-snap/src/snapshots/lib.rs/snap/4.snap diff --git a/meili-snap/src/snapshots/lib.rs/snap/5.snap b/crates/meili-snap/src/snapshots/lib.rs/snap/5.snap similarity index 100% rename from meili-snap/src/snapshots/lib.rs/snap/5.snap rename to crates/meili-snap/src/snapshots/lib.rs/snap/5.snap diff --git a/meili-snap/src/snapshots/lib.rs/snap/6.snap b/crates/meili-snap/src/snapshots/lib.rs/snap/6.snap similarity index 100% rename from meili-snap/src/snapshots/lib.rs/snap/6.snap rename to 
crates/meili-snap/src/snapshots/lib.rs/snap/6.snap diff --git a/meili-snap/src/snapshots/lib.rs/snap/7.snap b/crates/meili-snap/src/snapshots/lib.rs/snap/7.snap similarity index 100% rename from meili-snap/src/snapshots/lib.rs/snap/7.snap rename to crates/meili-snap/src/snapshots/lib.rs/snap/7.snap diff --git a/meili-snap/src/snapshots/lib.rs/snap/snap_name_1.snap b/crates/meili-snap/src/snapshots/lib.rs/snap/snap_name_1.snap similarity index 100% rename from meili-snap/src/snapshots/lib.rs/snap/snap_name_1.snap rename to crates/meili-snap/src/snapshots/lib.rs/snap/snap_name_1.snap diff --git a/meili-snap/src/snapshots/lib.rs/some_test/4.snap b/crates/meili-snap/src/snapshots/lib.rs/some_test/4.snap similarity index 100% rename from meili-snap/src/snapshots/lib.rs/some_test/4.snap rename to crates/meili-snap/src/snapshots/lib.rs/some_test/4.snap diff --git a/meili-snap/src/snapshots/lib.rs/some_test/5.snap b/crates/meili-snap/src/snapshots/lib.rs/some_test/5.snap similarity index 100% rename from meili-snap/src/snapshots/lib.rs/some_test/5.snap rename to crates/meili-snap/src/snapshots/lib.rs/some_test/5.snap diff --git a/meili-snap/src/snapshots/lib.rs/some_test/6.snap b/crates/meili-snap/src/snapshots/lib.rs/some_test/6.snap similarity index 100% rename from meili-snap/src/snapshots/lib.rs/some_test/6.snap rename to crates/meili-snap/src/snapshots/lib.rs/some_test/6.snap diff --git a/meili-snap/src/snapshots/lib.rs/some_test/7.snap b/crates/meili-snap/src/snapshots/lib.rs/some_test/7.snap similarity index 100% rename from meili-snap/src/snapshots/lib.rs/some_test/7.snap rename to crates/meili-snap/src/snapshots/lib.rs/some_test/7.snap diff --git a/meili-snap/src/snapshots/lib.rs/some_test/snap_name_1.snap b/crates/meili-snap/src/snapshots/lib.rs/some_test/snap_name_1.snap similarity index 100% rename from meili-snap/src/snapshots/lib.rs/some_test/snap_name_1.snap rename to crates/meili-snap/src/snapshots/lib.rs/some_test/snap_name_1.snap diff --git 
a/meilisearch-auth/Cargo.toml b/crates/meilisearch-auth/Cargo.toml similarity index 100% rename from meilisearch-auth/Cargo.toml rename to crates/meilisearch-auth/Cargo.toml diff --git a/meilisearch-auth/src/dump.rs b/crates/meilisearch-auth/src/dump.rs similarity index 100% rename from meilisearch-auth/src/dump.rs rename to crates/meilisearch-auth/src/dump.rs diff --git a/meilisearch-auth/src/error.rs b/crates/meilisearch-auth/src/error.rs similarity index 100% rename from meilisearch-auth/src/error.rs rename to crates/meilisearch-auth/src/error.rs diff --git a/meilisearch-auth/src/lib.rs b/crates/meilisearch-auth/src/lib.rs similarity index 100% rename from meilisearch-auth/src/lib.rs rename to crates/meilisearch-auth/src/lib.rs diff --git a/meilisearch-auth/src/store.rs b/crates/meilisearch-auth/src/store.rs similarity index 100% rename from meilisearch-auth/src/store.rs rename to crates/meilisearch-auth/src/store.rs diff --git a/meilisearch-types/Cargo.toml b/crates/meilisearch-types/Cargo.toml similarity index 100% rename from meilisearch-types/Cargo.toml rename to crates/meilisearch-types/Cargo.toml diff --git a/meilisearch-types/src/compression.rs b/crates/meilisearch-types/src/compression.rs similarity index 100% rename from meilisearch-types/src/compression.rs rename to crates/meilisearch-types/src/compression.rs diff --git a/meilisearch-types/src/deserr/mod.rs b/crates/meilisearch-types/src/deserr/mod.rs similarity index 100% rename from meilisearch-types/src/deserr/mod.rs rename to crates/meilisearch-types/src/deserr/mod.rs diff --git a/meilisearch-types/src/deserr/query_params.rs b/crates/meilisearch-types/src/deserr/query_params.rs similarity index 100% rename from meilisearch-types/src/deserr/query_params.rs rename to crates/meilisearch-types/src/deserr/query_params.rs diff --git a/meilisearch-types/src/document_formats.rs b/crates/meilisearch-types/src/document_formats.rs similarity index 100% rename from meilisearch-types/src/document_formats.rs 
rename to crates/meilisearch-types/src/document_formats.rs diff --git a/meilisearch-types/src/error.rs b/crates/meilisearch-types/src/error.rs similarity index 100% rename from meilisearch-types/src/error.rs rename to crates/meilisearch-types/src/error.rs diff --git a/meilisearch-types/src/facet_values_sort.rs b/crates/meilisearch-types/src/facet_values_sort.rs similarity index 100% rename from meilisearch-types/src/facet_values_sort.rs rename to crates/meilisearch-types/src/facet_values_sort.rs diff --git a/meilisearch-types/src/features.rs b/crates/meilisearch-types/src/features.rs similarity index 100% rename from meilisearch-types/src/features.rs rename to crates/meilisearch-types/src/features.rs diff --git a/meilisearch-types/src/index_uid.rs b/crates/meilisearch-types/src/index_uid.rs similarity index 100% rename from meilisearch-types/src/index_uid.rs rename to crates/meilisearch-types/src/index_uid.rs diff --git a/meilisearch-types/src/index_uid_pattern.rs b/crates/meilisearch-types/src/index_uid_pattern.rs similarity index 100% rename from meilisearch-types/src/index_uid_pattern.rs rename to crates/meilisearch-types/src/index_uid_pattern.rs diff --git a/meilisearch-types/src/keys.rs b/crates/meilisearch-types/src/keys.rs similarity index 100% rename from meilisearch-types/src/keys.rs rename to crates/meilisearch-types/src/keys.rs diff --git a/meilisearch-types/src/lib.rs b/crates/meilisearch-types/src/lib.rs similarity index 100% rename from meilisearch-types/src/lib.rs rename to crates/meilisearch-types/src/lib.rs diff --git a/meilisearch-types/src/locales.rs b/crates/meilisearch-types/src/locales.rs similarity index 100% rename from meilisearch-types/src/locales.rs rename to crates/meilisearch-types/src/locales.rs diff --git a/meilisearch-types/src/settings.rs b/crates/meilisearch-types/src/settings.rs similarity index 100% rename from meilisearch-types/src/settings.rs rename to crates/meilisearch-types/src/settings.rs diff --git 
a/meilisearch-types/src/star_or.rs b/crates/meilisearch-types/src/star_or.rs similarity index 100% rename from meilisearch-types/src/star_or.rs rename to crates/meilisearch-types/src/star_or.rs diff --git a/meilisearch-types/src/task_view.rs b/crates/meilisearch-types/src/task_view.rs similarity index 100% rename from meilisearch-types/src/task_view.rs rename to crates/meilisearch-types/src/task_view.rs diff --git a/meilisearch-types/src/tasks.rs b/crates/meilisearch-types/src/tasks.rs similarity index 100% rename from meilisearch-types/src/tasks.rs rename to crates/meilisearch-types/src/tasks.rs diff --git a/meilisearch-types/src/versioning.rs b/crates/meilisearch-types/src/versioning.rs similarity index 100% rename from meilisearch-types/src/versioning.rs rename to crates/meilisearch-types/src/versioning.rs diff --git a/meilisearch/Cargo.toml b/crates/meilisearch/Cargo.toml similarity index 100% rename from meilisearch/Cargo.toml rename to crates/meilisearch/Cargo.toml diff --git a/meilisearch/build.rs b/crates/meilisearch/build.rs similarity index 100% rename from meilisearch/build.rs rename to crates/meilisearch/build.rs diff --git a/meilisearch/src/analytics/mock_analytics.rs b/crates/meilisearch/src/analytics/mock_analytics.rs similarity index 100% rename from meilisearch/src/analytics/mock_analytics.rs rename to crates/meilisearch/src/analytics/mock_analytics.rs diff --git a/meilisearch/src/analytics/mod.rs b/crates/meilisearch/src/analytics/mod.rs similarity index 100% rename from meilisearch/src/analytics/mod.rs rename to crates/meilisearch/src/analytics/mod.rs diff --git a/meilisearch/src/analytics/segment_analytics.rs b/crates/meilisearch/src/analytics/segment_analytics.rs similarity index 100% rename from meilisearch/src/analytics/segment_analytics.rs rename to crates/meilisearch/src/analytics/segment_analytics.rs diff --git a/meilisearch/src/error.rs b/crates/meilisearch/src/error.rs similarity index 100% rename from meilisearch/src/error.rs rename to 
crates/meilisearch/src/error.rs diff --git a/meilisearch/src/extractors/authentication/error.rs b/crates/meilisearch/src/extractors/authentication/error.rs similarity index 100% rename from meilisearch/src/extractors/authentication/error.rs rename to crates/meilisearch/src/extractors/authentication/error.rs diff --git a/meilisearch/src/extractors/authentication/mod.rs b/crates/meilisearch/src/extractors/authentication/mod.rs similarity index 100% rename from meilisearch/src/extractors/authentication/mod.rs rename to crates/meilisearch/src/extractors/authentication/mod.rs diff --git a/meilisearch/src/extractors/mod.rs b/crates/meilisearch/src/extractors/mod.rs similarity index 100% rename from meilisearch/src/extractors/mod.rs rename to crates/meilisearch/src/extractors/mod.rs diff --git a/meilisearch/src/extractors/payload.rs b/crates/meilisearch/src/extractors/payload.rs similarity index 100% rename from meilisearch/src/extractors/payload.rs rename to crates/meilisearch/src/extractors/payload.rs diff --git a/meilisearch/src/extractors/sequential_extractor.rs b/crates/meilisearch/src/extractors/sequential_extractor.rs similarity index 100% rename from meilisearch/src/extractors/sequential_extractor.rs rename to crates/meilisearch/src/extractors/sequential_extractor.rs diff --git a/meilisearch/src/lib.rs b/crates/meilisearch/src/lib.rs similarity index 100% rename from meilisearch/src/lib.rs rename to crates/meilisearch/src/lib.rs diff --git a/meilisearch/src/main.rs b/crates/meilisearch/src/main.rs similarity index 100% rename from meilisearch/src/main.rs rename to crates/meilisearch/src/main.rs diff --git a/meilisearch/src/metrics.rs b/crates/meilisearch/src/metrics.rs similarity index 100% rename from meilisearch/src/metrics.rs rename to crates/meilisearch/src/metrics.rs diff --git a/meilisearch/src/middleware.rs b/crates/meilisearch/src/middleware.rs similarity index 100% rename from meilisearch/src/middleware.rs rename to crates/meilisearch/src/middleware.rs 
diff --git a/meilisearch/src/option.rs b/crates/meilisearch/src/option.rs similarity index 100% rename from meilisearch/src/option.rs rename to crates/meilisearch/src/option.rs diff --git a/meilisearch/src/routes/api_key.rs b/crates/meilisearch/src/routes/api_key.rs similarity index 100% rename from meilisearch/src/routes/api_key.rs rename to crates/meilisearch/src/routes/api_key.rs diff --git a/meilisearch/src/routes/dump.rs b/crates/meilisearch/src/routes/dump.rs similarity index 100% rename from meilisearch/src/routes/dump.rs rename to crates/meilisearch/src/routes/dump.rs diff --git a/meilisearch/src/routes/features.rs b/crates/meilisearch/src/routes/features.rs similarity index 100% rename from meilisearch/src/routes/features.rs rename to crates/meilisearch/src/routes/features.rs diff --git a/meilisearch/src/routes/indexes/documents.rs b/crates/meilisearch/src/routes/indexes/documents.rs similarity index 100% rename from meilisearch/src/routes/indexes/documents.rs rename to crates/meilisearch/src/routes/indexes/documents.rs diff --git a/meilisearch/src/routes/indexes/facet_search.rs b/crates/meilisearch/src/routes/indexes/facet_search.rs similarity index 100% rename from meilisearch/src/routes/indexes/facet_search.rs rename to crates/meilisearch/src/routes/indexes/facet_search.rs diff --git a/meilisearch/src/routes/indexes/mod.rs b/crates/meilisearch/src/routes/indexes/mod.rs similarity index 100% rename from meilisearch/src/routes/indexes/mod.rs rename to crates/meilisearch/src/routes/indexes/mod.rs diff --git a/meilisearch/src/routes/indexes/search.rs b/crates/meilisearch/src/routes/indexes/search.rs similarity index 100% rename from meilisearch/src/routes/indexes/search.rs rename to crates/meilisearch/src/routes/indexes/search.rs diff --git a/meilisearch/src/routes/indexes/settings.rs b/crates/meilisearch/src/routes/indexes/settings.rs similarity index 100% rename from meilisearch/src/routes/indexes/settings.rs rename to 
crates/meilisearch/src/routes/indexes/settings.rs diff --git a/meilisearch/src/routes/indexes/similar.rs b/crates/meilisearch/src/routes/indexes/similar.rs similarity index 100% rename from meilisearch/src/routes/indexes/similar.rs rename to crates/meilisearch/src/routes/indexes/similar.rs diff --git a/meilisearch/src/routes/logs.rs b/crates/meilisearch/src/routes/logs.rs similarity index 100% rename from meilisearch/src/routes/logs.rs rename to crates/meilisearch/src/routes/logs.rs diff --git a/meilisearch/src/routes/metrics.rs b/crates/meilisearch/src/routes/metrics.rs similarity index 100% rename from meilisearch/src/routes/metrics.rs rename to crates/meilisearch/src/routes/metrics.rs diff --git a/meilisearch/src/routes/mod.rs b/crates/meilisearch/src/routes/mod.rs similarity index 100% rename from meilisearch/src/routes/mod.rs rename to crates/meilisearch/src/routes/mod.rs diff --git a/meilisearch/src/routes/multi_search.rs b/crates/meilisearch/src/routes/multi_search.rs similarity index 100% rename from meilisearch/src/routes/multi_search.rs rename to crates/meilisearch/src/routes/multi_search.rs diff --git a/meilisearch/src/routes/snapshot.rs b/crates/meilisearch/src/routes/snapshot.rs similarity index 100% rename from meilisearch/src/routes/snapshot.rs rename to crates/meilisearch/src/routes/snapshot.rs diff --git a/meilisearch/src/routes/swap_indexes.rs b/crates/meilisearch/src/routes/swap_indexes.rs similarity index 100% rename from meilisearch/src/routes/swap_indexes.rs rename to crates/meilisearch/src/routes/swap_indexes.rs diff --git a/meilisearch/src/routes/tasks.rs b/crates/meilisearch/src/routes/tasks.rs similarity index 100% rename from meilisearch/src/routes/tasks.rs rename to crates/meilisearch/src/routes/tasks.rs diff --git a/meilisearch/src/search/federated.rs b/crates/meilisearch/src/search/federated.rs similarity index 100% rename from meilisearch/src/search/federated.rs rename to crates/meilisearch/src/search/federated.rs diff --git 
a/meilisearch/src/search/mod.rs b/crates/meilisearch/src/search/mod.rs similarity index 100% rename from meilisearch/src/search/mod.rs rename to crates/meilisearch/src/search/mod.rs diff --git a/meilisearch/src/search/ranking_rules.rs b/crates/meilisearch/src/search/ranking_rules.rs similarity index 100% rename from meilisearch/src/search/ranking_rules.rs rename to crates/meilisearch/src/search/ranking_rules.rs diff --git a/meilisearch/src/search_queue.rs b/crates/meilisearch/src/search_queue.rs similarity index 100% rename from meilisearch/src/search_queue.rs rename to crates/meilisearch/src/search_queue.rs diff --git a/meilisearch/tests/assets/dumps/v1/metadata.json b/crates/meilisearch/tests/assets/dumps/v1/metadata.json similarity index 100% rename from meilisearch/tests/assets/dumps/v1/metadata.json rename to crates/meilisearch/tests/assets/dumps/v1/metadata.json diff --git a/meilisearch/tests/assets/dumps/v1/test/documents.jsonl b/crates/meilisearch/tests/assets/dumps/v1/test/documents.jsonl similarity index 100% rename from meilisearch/tests/assets/dumps/v1/test/documents.jsonl rename to crates/meilisearch/tests/assets/dumps/v1/test/documents.jsonl diff --git a/meilisearch/tests/assets/dumps/v1/test/settings.json b/crates/meilisearch/tests/assets/dumps/v1/test/settings.json similarity index 100% rename from meilisearch/tests/assets/dumps/v1/test/settings.json rename to crates/meilisearch/tests/assets/dumps/v1/test/settings.json diff --git a/meilisearch/tests/assets/dumps/v1/test/updates.jsonl b/crates/meilisearch/tests/assets/dumps/v1/test/updates.jsonl similarity index 100% rename from meilisearch/tests/assets/dumps/v1/test/updates.jsonl rename to crates/meilisearch/tests/assets/dumps/v1/test/updates.jsonl diff --git a/meilisearch/tests/assets/test_set.json b/crates/meilisearch/tests/assets/test_set.json similarity index 100% rename from meilisearch/tests/assets/test_set.json rename to crates/meilisearch/tests/assets/test_set.json diff --git 
a/meilisearch/tests/assets/test_set.ndjson b/crates/meilisearch/tests/assets/test_set.ndjson similarity index 100% rename from meilisearch/tests/assets/test_set.ndjson rename to crates/meilisearch/tests/assets/test_set.ndjson diff --git a/meilisearch/tests/assets/v1_v0.20.0_movies.dump b/crates/meilisearch/tests/assets/v1_v0.20.0_movies.dump similarity index 100% rename from meilisearch/tests/assets/v1_v0.20.0_movies.dump rename to crates/meilisearch/tests/assets/v1_v0.20.0_movies.dump diff --git a/meilisearch/tests/assets/v1_v0.20.0_movies_with_settings.dump b/crates/meilisearch/tests/assets/v1_v0.20.0_movies_with_settings.dump similarity index 100% rename from meilisearch/tests/assets/v1_v0.20.0_movies_with_settings.dump rename to crates/meilisearch/tests/assets/v1_v0.20.0_movies_with_settings.dump diff --git a/meilisearch/tests/assets/v1_v0.20.0_rubygems_with_settings.dump b/crates/meilisearch/tests/assets/v1_v0.20.0_rubygems_with_settings.dump similarity index 100% rename from meilisearch/tests/assets/v1_v0.20.0_rubygems_with_settings.dump rename to crates/meilisearch/tests/assets/v1_v0.20.0_rubygems_with_settings.dump diff --git a/meilisearch/tests/assets/v2_v0.21.1_movies.dump b/crates/meilisearch/tests/assets/v2_v0.21.1_movies.dump similarity index 100% rename from meilisearch/tests/assets/v2_v0.21.1_movies.dump rename to crates/meilisearch/tests/assets/v2_v0.21.1_movies.dump diff --git a/meilisearch/tests/assets/v2_v0.21.1_movies_with_settings.dump b/crates/meilisearch/tests/assets/v2_v0.21.1_movies_with_settings.dump similarity index 100% rename from meilisearch/tests/assets/v2_v0.21.1_movies_with_settings.dump rename to crates/meilisearch/tests/assets/v2_v0.21.1_movies_with_settings.dump diff --git a/meilisearch/tests/assets/v2_v0.21.1_rubygems_with_settings.dump b/crates/meilisearch/tests/assets/v2_v0.21.1_rubygems_with_settings.dump similarity index 100% rename from meilisearch/tests/assets/v2_v0.21.1_rubygems_with_settings.dump rename to 
crates/meilisearch/tests/assets/v2_v0.21.1_rubygems_with_settings.dump diff --git a/meilisearch/tests/assets/v3_v0.24.0_movies.dump b/crates/meilisearch/tests/assets/v3_v0.24.0_movies.dump similarity index 100% rename from meilisearch/tests/assets/v3_v0.24.0_movies.dump rename to crates/meilisearch/tests/assets/v3_v0.24.0_movies.dump diff --git a/meilisearch/tests/assets/v3_v0.24.0_movies_with_settings.dump b/crates/meilisearch/tests/assets/v3_v0.24.0_movies_with_settings.dump similarity index 100% rename from meilisearch/tests/assets/v3_v0.24.0_movies_with_settings.dump rename to crates/meilisearch/tests/assets/v3_v0.24.0_movies_with_settings.dump diff --git a/meilisearch/tests/assets/v3_v0.24.0_rubygems_with_settings.dump b/crates/meilisearch/tests/assets/v3_v0.24.0_rubygems_with_settings.dump similarity index 100% rename from meilisearch/tests/assets/v3_v0.24.0_rubygems_with_settings.dump rename to crates/meilisearch/tests/assets/v3_v0.24.0_rubygems_with_settings.dump diff --git a/meilisearch/tests/assets/v4_v0.25.2_movies.dump b/crates/meilisearch/tests/assets/v4_v0.25.2_movies.dump similarity index 100% rename from meilisearch/tests/assets/v4_v0.25.2_movies.dump rename to crates/meilisearch/tests/assets/v4_v0.25.2_movies.dump diff --git a/meilisearch/tests/assets/v4_v0.25.2_movies_with_settings.dump b/crates/meilisearch/tests/assets/v4_v0.25.2_movies_with_settings.dump similarity index 100% rename from meilisearch/tests/assets/v4_v0.25.2_movies_with_settings.dump rename to crates/meilisearch/tests/assets/v4_v0.25.2_movies_with_settings.dump diff --git a/meilisearch/tests/assets/v4_v0.25.2_rubygems_with_settings.dump b/crates/meilisearch/tests/assets/v4_v0.25.2_rubygems_with_settings.dump similarity index 100% rename from meilisearch/tests/assets/v4_v0.25.2_rubygems_with_settings.dump rename to crates/meilisearch/tests/assets/v4_v0.25.2_rubygems_with_settings.dump diff --git a/meilisearch/tests/assets/v5_v0.28.0_test_dump.dump 
b/crates/meilisearch/tests/assets/v5_v0.28.0_test_dump.dump similarity index 100% rename from meilisearch/tests/assets/v5_v0.28.0_test_dump.dump rename to crates/meilisearch/tests/assets/v5_v0.28.0_test_dump.dump diff --git a/meilisearch/tests/assets/v6_v1.6.0_use_deactivated_experimental_setting.dump b/crates/meilisearch/tests/assets/v6_v1.6.0_use_deactivated_experimental_setting.dump similarity index 100% rename from meilisearch/tests/assets/v6_v1.6.0_use_deactivated_experimental_setting.dump rename to crates/meilisearch/tests/assets/v6_v1.6.0_use_deactivated_experimental_setting.dump diff --git a/meilisearch/tests/auth/api_keys.rs b/crates/meilisearch/tests/auth/api_keys.rs similarity index 100% rename from meilisearch/tests/auth/api_keys.rs rename to crates/meilisearch/tests/auth/api_keys.rs diff --git a/meilisearch/tests/auth/authorization.rs b/crates/meilisearch/tests/auth/authorization.rs similarity index 100% rename from meilisearch/tests/auth/authorization.rs rename to crates/meilisearch/tests/auth/authorization.rs diff --git a/meilisearch/tests/auth/errors.rs b/crates/meilisearch/tests/auth/errors.rs similarity index 100% rename from meilisearch/tests/auth/errors.rs rename to crates/meilisearch/tests/auth/errors.rs diff --git a/meilisearch/tests/auth/mod.rs b/crates/meilisearch/tests/auth/mod.rs similarity index 100% rename from meilisearch/tests/auth/mod.rs rename to crates/meilisearch/tests/auth/mod.rs diff --git a/meilisearch/tests/auth/payload.rs b/crates/meilisearch/tests/auth/payload.rs similarity index 100% rename from meilisearch/tests/auth/payload.rs rename to crates/meilisearch/tests/auth/payload.rs diff --git a/meilisearch/tests/auth/tenant_token.rs b/crates/meilisearch/tests/auth/tenant_token.rs similarity index 100% rename from meilisearch/tests/auth/tenant_token.rs rename to crates/meilisearch/tests/auth/tenant_token.rs diff --git a/meilisearch/tests/auth/tenant_token_multi_search.rs 
b/crates/meilisearch/tests/auth/tenant_token_multi_search.rs similarity index 100% rename from meilisearch/tests/auth/tenant_token_multi_search.rs rename to crates/meilisearch/tests/auth/tenant_token_multi_search.rs diff --git a/meilisearch/tests/common/encoder.rs b/crates/meilisearch/tests/common/encoder.rs similarity index 100% rename from meilisearch/tests/common/encoder.rs rename to crates/meilisearch/tests/common/encoder.rs diff --git a/meilisearch/tests/common/index.rs b/crates/meilisearch/tests/common/index.rs similarity index 100% rename from meilisearch/tests/common/index.rs rename to crates/meilisearch/tests/common/index.rs diff --git a/meilisearch/tests/common/mod.rs b/crates/meilisearch/tests/common/mod.rs similarity index 100% rename from meilisearch/tests/common/mod.rs rename to crates/meilisearch/tests/common/mod.rs diff --git a/meilisearch/tests/common/server.rs b/crates/meilisearch/tests/common/server.rs similarity index 100% rename from meilisearch/tests/common/server.rs rename to crates/meilisearch/tests/common/server.rs diff --git a/meilisearch/tests/common/service.rs b/crates/meilisearch/tests/common/service.rs similarity index 100% rename from meilisearch/tests/common/service.rs rename to crates/meilisearch/tests/common/service.rs diff --git a/meilisearch/tests/content_type.rs b/crates/meilisearch/tests/content_type.rs similarity index 100% rename from meilisearch/tests/content_type.rs rename to crates/meilisearch/tests/content_type.rs diff --git a/meilisearch/tests/dashboard/mod.rs b/crates/meilisearch/tests/dashboard/mod.rs similarity index 100% rename from meilisearch/tests/dashboard/mod.rs rename to crates/meilisearch/tests/dashboard/mod.rs diff --git a/meilisearch/tests/documents/add_documents.rs b/crates/meilisearch/tests/documents/add_documents.rs similarity index 100% rename from meilisearch/tests/documents/add_documents.rs rename to crates/meilisearch/tests/documents/add_documents.rs diff --git 
a/meilisearch/tests/documents/delete_documents.rs b/crates/meilisearch/tests/documents/delete_documents.rs similarity index 100% rename from meilisearch/tests/documents/delete_documents.rs rename to crates/meilisearch/tests/documents/delete_documents.rs diff --git a/meilisearch/tests/documents/errors.rs b/crates/meilisearch/tests/documents/errors.rs similarity index 100% rename from meilisearch/tests/documents/errors.rs rename to crates/meilisearch/tests/documents/errors.rs diff --git a/meilisearch/tests/documents/get_documents.rs b/crates/meilisearch/tests/documents/get_documents.rs similarity index 100% rename from meilisearch/tests/documents/get_documents.rs rename to crates/meilisearch/tests/documents/get_documents.rs diff --git a/meilisearch/tests/documents/mod.rs b/crates/meilisearch/tests/documents/mod.rs similarity index 100% rename from meilisearch/tests/documents/mod.rs rename to crates/meilisearch/tests/documents/mod.rs diff --git a/meilisearch/tests/documents/update_documents.rs b/crates/meilisearch/tests/documents/update_documents.rs similarity index 100% rename from meilisearch/tests/documents/update_documents.rs rename to crates/meilisearch/tests/documents/update_documents.rs diff --git a/meilisearch/tests/dumps/data.rs b/crates/meilisearch/tests/dumps/data.rs similarity index 100% rename from meilisearch/tests/dumps/data.rs rename to crates/meilisearch/tests/dumps/data.rs diff --git a/meilisearch/tests/dumps/mod.rs b/crates/meilisearch/tests/dumps/mod.rs similarity index 100% rename from meilisearch/tests/dumps/mod.rs rename to crates/meilisearch/tests/dumps/mod.rs diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/generate_and_import_dump_containing_vectors/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/generate_and_import_dump_containing_vectors/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/generate_and_import_dump_containing_vectors/1.snap rename to 
crates/meilisearch/tests/dumps/snapshots/mod.rs/generate_and_import_dump_containing_vectors/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/generate_and_import_dump_containing_vectors/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/generate_and_import_dump_containing_vectors/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/generate_and_import_dump_containing_vectors/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/generate_and_import_dump_containing_vectors/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/4.snap 
diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_raw/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/2.snap diff --git 
a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/7.snap rename to 
crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_movie_with_settings/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/5.snap similarity index 100% rename from 
meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v1_rubygems_with_settings/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/3.snap similarity index 100% rename from 
meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_raw/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/1.snap rename to 
crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/6.snap similarity index 100% rename from 
meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_movie_with_settings/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/4.snap 
b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v2_rubygems_with_settings/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/1.snap diff --git 
a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/7.snap similarity index 
100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_raw/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/5.snap similarity 
index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_movie_with_settings/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/3.snap 
b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v3_rubygems_with_settings/7.snap diff --git 
a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/6.snap similarity index 
100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_raw/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/4.snap similarity index 100% rename from 
meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_movie_with_settings/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/2.snap 
b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/5.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/6.snap diff --git 
a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v4_rubygems_with_settings/7.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/1.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/1.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/1.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/1.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/2.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/2.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/2.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/2.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/3.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/3.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/3.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/3.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/4.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/4.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/4.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/4.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/5.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/5.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/5.snap rename to 
crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/5.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/6.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/6.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/6.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/6.snap diff --git a/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/7.snap b/crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/7.snap similarity index 100% rename from meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/7.snap rename to crates/meilisearch/tests/dumps/snapshots/mod.rs/import_dump_v5/7.snap diff --git a/meilisearch/tests/features/mod.rs b/crates/meilisearch/tests/features/mod.rs similarity index 100% rename from meilisearch/tests/features/mod.rs rename to crates/meilisearch/tests/features/mod.rs diff --git a/meilisearch/tests/index/create_index.rs b/crates/meilisearch/tests/index/create_index.rs similarity index 100% rename from meilisearch/tests/index/create_index.rs rename to crates/meilisearch/tests/index/create_index.rs diff --git a/meilisearch/tests/index/delete_index.rs b/crates/meilisearch/tests/index/delete_index.rs similarity index 100% rename from meilisearch/tests/index/delete_index.rs rename to crates/meilisearch/tests/index/delete_index.rs diff --git a/meilisearch/tests/index/errors.rs b/crates/meilisearch/tests/index/errors.rs similarity index 100% rename from meilisearch/tests/index/errors.rs rename to crates/meilisearch/tests/index/errors.rs diff --git a/meilisearch/tests/index/get_index.rs b/crates/meilisearch/tests/index/get_index.rs similarity index 100% rename from meilisearch/tests/index/get_index.rs rename to crates/meilisearch/tests/index/get_index.rs diff --git a/meilisearch/tests/index/mod.rs b/crates/meilisearch/tests/index/mod.rs similarity index 100% rename from meilisearch/tests/index/mod.rs rename to 
crates/meilisearch/tests/index/mod.rs diff --git a/meilisearch/tests/index/stats.rs b/crates/meilisearch/tests/index/stats.rs similarity index 100% rename from meilisearch/tests/index/stats.rs rename to crates/meilisearch/tests/index/stats.rs diff --git a/meilisearch/tests/index/update_index.rs b/crates/meilisearch/tests/index/update_index.rs similarity index 100% rename from meilisearch/tests/index/update_index.rs rename to crates/meilisearch/tests/index/update_index.rs diff --git a/meilisearch/tests/integration.rs b/crates/meilisearch/tests/integration.rs similarity index 100% rename from meilisearch/tests/integration.rs rename to crates/meilisearch/tests/integration.rs diff --git a/meilisearch/tests/logs/error.rs b/crates/meilisearch/tests/logs/error.rs similarity index 100% rename from meilisearch/tests/logs/error.rs rename to crates/meilisearch/tests/logs/error.rs diff --git a/meilisearch/tests/logs/mod.rs b/crates/meilisearch/tests/logs/mod.rs similarity index 100% rename from meilisearch/tests/logs/mod.rs rename to crates/meilisearch/tests/logs/mod.rs diff --git a/meilisearch/tests/search/distinct.rs b/crates/meilisearch/tests/search/distinct.rs similarity index 100% rename from meilisearch/tests/search/distinct.rs rename to crates/meilisearch/tests/search/distinct.rs diff --git a/meilisearch/tests/search/errors.rs b/crates/meilisearch/tests/search/errors.rs similarity index 100% rename from meilisearch/tests/search/errors.rs rename to crates/meilisearch/tests/search/errors.rs diff --git a/meilisearch/tests/search/facet_search.rs b/crates/meilisearch/tests/search/facet_search.rs similarity index 100% rename from meilisearch/tests/search/facet_search.rs rename to crates/meilisearch/tests/search/facet_search.rs diff --git a/meilisearch/tests/search/formatted.rs b/crates/meilisearch/tests/search/formatted.rs similarity index 100% rename from meilisearch/tests/search/formatted.rs rename to crates/meilisearch/tests/search/formatted.rs diff --git 
a/meilisearch/tests/search/geo.rs b/crates/meilisearch/tests/search/geo.rs similarity index 100% rename from meilisearch/tests/search/geo.rs rename to crates/meilisearch/tests/search/geo.rs diff --git a/meilisearch/tests/search/hybrid.rs b/crates/meilisearch/tests/search/hybrid.rs similarity index 100% rename from meilisearch/tests/search/hybrid.rs rename to crates/meilisearch/tests/search/hybrid.rs diff --git a/meilisearch/tests/search/locales.rs b/crates/meilisearch/tests/search/locales.rs similarity index 100% rename from meilisearch/tests/search/locales.rs rename to crates/meilisearch/tests/search/locales.rs diff --git a/meilisearch/tests/search/matching_strategy.rs b/crates/meilisearch/tests/search/matching_strategy.rs similarity index 100% rename from meilisearch/tests/search/matching_strategy.rs rename to crates/meilisearch/tests/search/matching_strategy.rs diff --git a/meilisearch/tests/search/mod.rs b/crates/meilisearch/tests/search/mod.rs similarity index 100% rename from meilisearch/tests/search/mod.rs rename to crates/meilisearch/tests/search/mod.rs diff --git a/meilisearch/tests/search/multi.rs b/crates/meilisearch/tests/search/multi.rs similarity index 100% rename from meilisearch/tests/search/multi.rs rename to crates/meilisearch/tests/search/multi.rs diff --git a/meilisearch/tests/search/pagination.rs b/crates/meilisearch/tests/search/pagination.rs similarity index 100% rename from meilisearch/tests/search/pagination.rs rename to crates/meilisearch/tests/search/pagination.rs diff --git a/meilisearch/tests/search/restrict_searchable.rs b/crates/meilisearch/tests/search/restrict_searchable.rs similarity index 100% rename from meilisearch/tests/search/restrict_searchable.rs rename to crates/meilisearch/tests/search/restrict_searchable.rs diff --git a/meilisearch/tests/search/search_queue.rs b/crates/meilisearch/tests/search/search_queue.rs similarity index 100% rename from meilisearch/tests/search/search_queue.rs rename to 
crates/meilisearch/tests/search/search_queue.rs diff --git a/meilisearch/tests/search/snapshots/distinct.rs/distinct_at_search_time/succeed.snap b/crates/meilisearch/tests/search/snapshots/distinct.rs/distinct_at_search_time/succeed.snap similarity index 100% rename from meilisearch/tests/search/snapshots/distinct.rs/distinct_at_search_time/succeed.snap rename to crates/meilisearch/tests/search/snapshots/distinct.rs/distinct_at_search_time/succeed.snap diff --git a/meilisearch/tests/search/snapshots/errors.rs/distinct_at_search_time/task-succeed.snap b/crates/meilisearch/tests/search/snapshots/errors.rs/distinct_at_search_time/task-succeed.snap similarity index 100% rename from meilisearch/tests/search/snapshots/errors.rs/distinct_at_search_time/task-succeed.snap rename to crates/meilisearch/tests/search/snapshots/errors.rs/distinct_at_search_time/task-succeed.snap diff --git a/meilisearch/tests/settings/distinct.rs b/crates/meilisearch/tests/settings/distinct.rs similarity index 100% rename from meilisearch/tests/settings/distinct.rs rename to crates/meilisearch/tests/settings/distinct.rs diff --git a/meilisearch/tests/settings/errors.rs b/crates/meilisearch/tests/settings/errors.rs similarity index 100% rename from meilisearch/tests/settings/errors.rs rename to crates/meilisearch/tests/settings/errors.rs diff --git a/meilisearch/tests/settings/get_settings.rs b/crates/meilisearch/tests/settings/get_settings.rs similarity index 100% rename from meilisearch/tests/settings/get_settings.rs rename to crates/meilisearch/tests/settings/get_settings.rs diff --git a/meilisearch/tests/settings/mod.rs b/crates/meilisearch/tests/settings/mod.rs similarity index 100% rename from meilisearch/tests/settings/mod.rs rename to crates/meilisearch/tests/settings/mod.rs diff --git a/meilisearch/tests/settings/proximity_settings.rs b/crates/meilisearch/tests/settings/proximity_settings.rs similarity index 100% rename from meilisearch/tests/settings/proximity_settings.rs rename to 
crates/meilisearch/tests/settings/proximity_settings.rs diff --git a/meilisearch/tests/settings/tokenizer_customization.rs b/crates/meilisearch/tests/settings/tokenizer_customization.rs similarity index 100% rename from meilisearch/tests/settings/tokenizer_customization.rs rename to crates/meilisearch/tests/settings/tokenizer_customization.rs diff --git a/meilisearch/tests/similar/errors.rs b/crates/meilisearch/tests/similar/errors.rs similarity index 100% rename from meilisearch/tests/similar/errors.rs rename to crates/meilisearch/tests/similar/errors.rs diff --git a/meilisearch/tests/similar/mod.rs b/crates/meilisearch/tests/similar/mod.rs similarity index 100% rename from meilisearch/tests/similar/mod.rs rename to crates/meilisearch/tests/similar/mod.rs diff --git a/meilisearch/tests/snapshot/mod.rs b/crates/meilisearch/tests/snapshot/mod.rs similarity index 100% rename from meilisearch/tests/snapshot/mod.rs rename to crates/meilisearch/tests/snapshot/mod.rs diff --git a/meilisearch/tests/stats/mod.rs b/crates/meilisearch/tests/stats/mod.rs similarity index 100% rename from meilisearch/tests/stats/mod.rs rename to crates/meilisearch/tests/stats/mod.rs diff --git a/meilisearch/tests/swap_indexes/errors.rs b/crates/meilisearch/tests/swap_indexes/errors.rs similarity index 100% rename from meilisearch/tests/swap_indexes/errors.rs rename to crates/meilisearch/tests/swap_indexes/errors.rs diff --git a/meilisearch/tests/swap_indexes/mod.rs b/crates/meilisearch/tests/swap_indexes/mod.rs similarity index 100% rename from meilisearch/tests/swap_indexes/mod.rs rename to crates/meilisearch/tests/swap_indexes/mod.rs diff --git a/meilisearch/tests/tasks/errors.rs b/crates/meilisearch/tests/tasks/errors.rs similarity index 100% rename from meilisearch/tests/tasks/errors.rs rename to crates/meilisearch/tests/tasks/errors.rs diff --git a/meilisearch/tests/tasks/mod.rs b/crates/meilisearch/tests/tasks/mod.rs similarity index 100% rename from meilisearch/tests/tasks/mod.rs rename 
to crates/meilisearch/tests/tasks/mod.rs diff --git a/meilisearch/tests/tasks/webhook.rs b/crates/meilisearch/tests/tasks/webhook.rs similarity index 100% rename from meilisearch/tests/tasks/webhook.rs rename to crates/meilisearch/tests/tasks/webhook.rs diff --git a/meilisearch/tests/vector/binary_quantized.rs b/crates/meilisearch/tests/vector/binary_quantized.rs similarity index 100% rename from meilisearch/tests/vector/binary_quantized.rs rename to crates/meilisearch/tests/vector/binary_quantized.rs diff --git a/meilisearch/tests/vector/intel_gen.txt.gz b/crates/meilisearch/tests/vector/intel_gen.txt.gz similarity index 100% rename from meilisearch/tests/vector/intel_gen.txt.gz rename to crates/meilisearch/tests/vector/intel_gen.txt.gz diff --git a/meilisearch/tests/vector/mod.rs b/crates/meilisearch/tests/vector/mod.rs similarity index 100% rename from meilisearch/tests/vector/mod.rs rename to crates/meilisearch/tests/vector/mod.rs diff --git a/meilisearch/tests/vector/openai.rs b/crates/meilisearch/tests/vector/openai.rs similarity index 100% rename from meilisearch/tests/vector/openai.rs rename to crates/meilisearch/tests/vector/openai.rs diff --git a/meilisearch/tests/vector/openai_responses.json.gz b/crates/meilisearch/tests/vector/openai_responses.json.gz similarity index 100% rename from meilisearch/tests/vector/openai_responses.json.gz rename to crates/meilisearch/tests/vector/openai_responses.json.gz diff --git a/meilisearch/tests/vector/openai_tokenized_responses.json.gz b/crates/meilisearch/tests/vector/openai_tokenized_responses.json.gz similarity index 100% rename from meilisearch/tests/vector/openai_tokenized_responses.json.gz rename to crates/meilisearch/tests/vector/openai_tokenized_responses.json.gz diff --git a/meilisearch/tests/vector/rest.rs b/crates/meilisearch/tests/vector/rest.rs similarity index 100% rename from meilisearch/tests/vector/rest.rs rename to crates/meilisearch/tests/vector/rest.rs diff --git 
a/meilisearch/tests/vector/settings.rs b/crates/meilisearch/tests/vector/settings.rs similarity index 100% rename from meilisearch/tests/vector/settings.rs rename to crates/meilisearch/tests/vector/settings.rs diff --git a/meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/document-added.snap b/crates/meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/document-added.snap similarity index 100% rename from meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/document-added.snap rename to crates/meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/document-added.snap diff --git a/meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/document-deleted.snap b/crates/meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/document-deleted.snap similarity index 100% rename from meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/document-deleted.snap rename to crates/meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/document-deleted.snap diff --git a/meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/settings-processed.snap b/crates/meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/settings-processed.snap similarity index 100% rename from meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/settings-processed.snap rename to crates/meilisearch/tests/vector/snapshots/mod.rs/add_remove_one_vector_4588/settings-processed.snap diff --git a/meilitool/Cargo.toml b/crates/meilitool/Cargo.toml similarity index 100% rename from meilitool/Cargo.toml rename to crates/meilitool/Cargo.toml diff --git a/meilitool/src/main.rs b/crates/meilitool/src/main.rs similarity index 100% rename from meilitool/src/main.rs rename to crates/meilitool/src/main.rs diff --git a/meilitool/src/uuid_codec.rs b/crates/meilitool/src/uuid_codec.rs similarity index 100% rename from meilitool/src/uuid_codec.rs rename to 
crates/meilitool/src/uuid_codec.rs diff --git a/milli/Cargo.toml b/crates/milli/Cargo.toml similarity index 100% rename from milli/Cargo.toml rename to crates/milli/Cargo.toml diff --git a/milli/README.md b/crates/milli/README.md similarity index 100% rename from milli/README.md rename to crates/milli/README.md diff --git a/milli/examples/index.rs b/crates/milli/examples/index.rs similarity index 100% rename from milli/examples/index.rs rename to crates/milli/examples/index.rs diff --git a/milli/examples/search.rs b/crates/milli/examples/search.rs similarity index 100% rename from milli/examples/search.rs rename to crates/milli/examples/search.rs diff --git a/milli/examples/settings.rs b/crates/milli/examples/settings.rs similarity index 100% rename from milli/examples/settings.rs rename to crates/milli/examples/settings.rs diff --git a/milli/fuzz/.gitignore b/crates/milli/fuzz/.gitignore similarity index 100% rename from milli/fuzz/.gitignore rename to crates/milli/fuzz/.gitignore diff --git a/milli/src/asc_desc.rs b/crates/milli/src/asc_desc.rs similarity index 100% rename from milli/src/asc_desc.rs rename to crates/milli/src/asc_desc.rs diff --git a/milli/src/criterion.rs b/crates/milli/src/criterion.rs similarity index 100% rename from milli/src/criterion.rs rename to crates/milli/src/criterion.rs diff --git a/milli/src/documents/builder.rs b/crates/milli/src/documents/builder.rs similarity index 100% rename from milli/src/documents/builder.rs rename to crates/milli/src/documents/builder.rs diff --git a/milli/src/documents/enriched.rs b/crates/milli/src/documents/enriched.rs similarity index 100% rename from milli/src/documents/enriched.rs rename to crates/milli/src/documents/enriched.rs diff --git a/milli/src/documents/mod.rs b/crates/milli/src/documents/mod.rs similarity index 100% rename from milli/src/documents/mod.rs rename to crates/milli/src/documents/mod.rs diff --git a/milli/src/documents/primary_key.rs b/crates/milli/src/documents/primary_key.rs 
similarity index 100% rename from milli/src/documents/primary_key.rs rename to crates/milli/src/documents/primary_key.rs diff --git a/milli/src/documents/reader.rs b/crates/milli/src/documents/reader.rs similarity index 100% rename from milli/src/documents/reader.rs rename to crates/milli/src/documents/reader.rs diff --git a/milli/src/documents/serde_impl.rs b/crates/milli/src/documents/serde_impl.rs similarity index 100% rename from milli/src/documents/serde_impl.rs rename to crates/milli/src/documents/serde_impl.rs diff --git a/milli/src/error.rs b/crates/milli/src/error.rs similarity index 100% rename from milli/src/error.rs rename to crates/milli/src/error.rs diff --git a/milli/src/external_documents_ids.rs b/crates/milli/src/external_documents_ids.rs similarity index 100% rename from milli/src/external_documents_ids.rs rename to crates/milli/src/external_documents_ids.rs diff --git a/milli/src/facet/facet_type.rs b/crates/milli/src/facet/facet_type.rs similarity index 100% rename from milli/src/facet/facet_type.rs rename to crates/milli/src/facet/facet_type.rs diff --git a/milli/src/facet/facet_value.rs b/crates/milli/src/facet/facet_value.rs similarity index 100% rename from milli/src/facet/facet_value.rs rename to crates/milli/src/facet/facet_value.rs diff --git a/milli/src/facet/mod.rs b/crates/milli/src/facet/mod.rs similarity index 100% rename from milli/src/facet/mod.rs rename to crates/milli/src/facet/mod.rs diff --git a/milli/src/facet/value_encoding.rs b/crates/milli/src/facet/value_encoding.rs similarity index 100% rename from milli/src/facet/value_encoding.rs rename to crates/milli/src/facet/value_encoding.rs diff --git a/milli/src/fieldids_weights_map.rs b/crates/milli/src/fieldids_weights_map.rs similarity index 100% rename from milli/src/fieldids_weights_map.rs rename to crates/milli/src/fieldids_weights_map.rs diff --git a/milli/src/fields_ids_map.rs b/crates/milli/src/fields_ids_map.rs similarity index 100% rename from 
milli/src/fields_ids_map.rs rename to crates/milli/src/fields_ids_map.rs diff --git a/milli/src/heed_codec/beu16_str_codec.rs b/crates/milli/src/heed_codec/beu16_str_codec.rs similarity index 100% rename from milli/src/heed_codec/beu16_str_codec.rs rename to crates/milli/src/heed_codec/beu16_str_codec.rs diff --git a/milli/src/heed_codec/beu32_str_codec.rs b/crates/milli/src/heed_codec/beu32_str_codec.rs similarity index 100% rename from milli/src/heed_codec/beu32_str_codec.rs rename to crates/milli/src/heed_codec/beu32_str_codec.rs diff --git a/milli/src/heed_codec/byte_slice_ref.rs b/crates/milli/src/heed_codec/byte_slice_ref.rs similarity index 100% rename from milli/src/heed_codec/byte_slice_ref.rs rename to crates/milli/src/heed_codec/byte_slice_ref.rs diff --git a/milli/src/heed_codec/facet/field_doc_id_facet_codec.rs b/crates/milli/src/heed_codec/facet/field_doc_id_facet_codec.rs similarity index 100% rename from milli/src/heed_codec/facet/field_doc_id_facet_codec.rs rename to crates/milli/src/heed_codec/facet/field_doc_id_facet_codec.rs diff --git a/milli/src/heed_codec/facet/mod.rs b/crates/milli/src/heed_codec/facet/mod.rs similarity index 100% rename from milli/src/heed_codec/facet/mod.rs rename to crates/milli/src/heed_codec/facet/mod.rs diff --git a/milli/src/heed_codec/facet/ordered_f64_codec.rs b/crates/milli/src/heed_codec/facet/ordered_f64_codec.rs similarity index 100% rename from milli/src/heed_codec/facet/ordered_f64_codec.rs rename to crates/milli/src/heed_codec/facet/ordered_f64_codec.rs diff --git a/milli/src/heed_codec/field_id_word_count_codec.rs b/crates/milli/src/heed_codec/field_id_word_count_codec.rs similarity index 100% rename from milli/src/heed_codec/field_id_word_count_codec.rs rename to crates/milli/src/heed_codec/field_id_word_count_codec.rs diff --git a/milli/src/heed_codec/fst_set_codec.rs b/crates/milli/src/heed_codec/fst_set_codec.rs similarity index 100% rename from milli/src/heed_codec/fst_set_codec.rs rename to 
crates/milli/src/heed_codec/fst_set_codec.rs diff --git a/milli/src/heed_codec/mod.rs b/crates/milli/src/heed_codec/mod.rs similarity index 100% rename from milli/src/heed_codec/mod.rs rename to crates/milli/src/heed_codec/mod.rs diff --git a/milli/src/heed_codec/obkv_codec.rs b/crates/milli/src/heed_codec/obkv_codec.rs similarity index 100% rename from milli/src/heed_codec/obkv_codec.rs rename to crates/milli/src/heed_codec/obkv_codec.rs diff --git a/milli/src/heed_codec/roaring_bitmap/bo_roaring_bitmap_codec.rs b/crates/milli/src/heed_codec/roaring_bitmap/bo_roaring_bitmap_codec.rs similarity index 100% rename from milli/src/heed_codec/roaring_bitmap/bo_roaring_bitmap_codec.rs rename to crates/milli/src/heed_codec/roaring_bitmap/bo_roaring_bitmap_codec.rs diff --git a/milli/src/heed_codec/roaring_bitmap/cbo_roaring_bitmap_codec.rs b/crates/milli/src/heed_codec/roaring_bitmap/cbo_roaring_bitmap_codec.rs similarity index 100% rename from milli/src/heed_codec/roaring_bitmap/cbo_roaring_bitmap_codec.rs rename to crates/milli/src/heed_codec/roaring_bitmap/cbo_roaring_bitmap_codec.rs diff --git a/milli/src/heed_codec/roaring_bitmap/mod.rs b/crates/milli/src/heed_codec/roaring_bitmap/mod.rs similarity index 100% rename from milli/src/heed_codec/roaring_bitmap/mod.rs rename to crates/milli/src/heed_codec/roaring_bitmap/mod.rs diff --git a/milli/src/heed_codec/roaring_bitmap/roaring_bitmap_codec.rs b/crates/milli/src/heed_codec/roaring_bitmap/roaring_bitmap_codec.rs similarity index 100% rename from milli/src/heed_codec/roaring_bitmap/roaring_bitmap_codec.rs rename to crates/milli/src/heed_codec/roaring_bitmap/roaring_bitmap_codec.rs diff --git a/milli/src/heed_codec/roaring_bitmap_length/bo_roaring_bitmap_len_codec.rs b/crates/milli/src/heed_codec/roaring_bitmap_length/bo_roaring_bitmap_len_codec.rs similarity index 100% rename from milli/src/heed_codec/roaring_bitmap_length/bo_roaring_bitmap_len_codec.rs rename to 
crates/milli/src/heed_codec/roaring_bitmap_length/bo_roaring_bitmap_len_codec.rs diff --git a/milli/src/heed_codec/roaring_bitmap_length/cbo_roaring_bitmap_len_codec.rs b/crates/milli/src/heed_codec/roaring_bitmap_length/cbo_roaring_bitmap_len_codec.rs similarity index 100% rename from milli/src/heed_codec/roaring_bitmap_length/cbo_roaring_bitmap_len_codec.rs rename to crates/milli/src/heed_codec/roaring_bitmap_length/cbo_roaring_bitmap_len_codec.rs diff --git a/milli/src/heed_codec/roaring_bitmap_length/mod.rs b/crates/milli/src/heed_codec/roaring_bitmap_length/mod.rs similarity index 100% rename from milli/src/heed_codec/roaring_bitmap_length/mod.rs rename to crates/milli/src/heed_codec/roaring_bitmap_length/mod.rs diff --git a/milli/src/heed_codec/roaring_bitmap_length/roaring_bitmap_len_codec.rs b/crates/milli/src/heed_codec/roaring_bitmap_length/roaring_bitmap_len_codec.rs similarity index 100% rename from milli/src/heed_codec/roaring_bitmap_length/roaring_bitmap_len_codec.rs rename to crates/milli/src/heed_codec/roaring_bitmap_length/roaring_bitmap_len_codec.rs diff --git a/milli/src/heed_codec/str_beu32_codec.rs b/crates/milli/src/heed_codec/str_beu32_codec.rs similarity index 100% rename from milli/src/heed_codec/str_beu32_codec.rs rename to crates/milli/src/heed_codec/str_beu32_codec.rs diff --git a/milli/src/heed_codec/str_ref.rs b/crates/milli/src/heed_codec/str_ref.rs similarity index 100% rename from milli/src/heed_codec/str_ref.rs rename to crates/milli/src/heed_codec/str_ref.rs diff --git a/milli/src/heed_codec/str_str_u8_codec.rs b/crates/milli/src/heed_codec/str_str_u8_codec.rs similarity index 100% rename from milli/src/heed_codec/str_str_u8_codec.rs rename to crates/milli/src/heed_codec/str_str_u8_codec.rs diff --git a/milli/src/index.rs b/crates/milli/src/index.rs similarity index 100% rename from milli/src/index.rs rename to crates/milli/src/index.rs diff --git a/milli/src/lib.rs b/crates/milli/src/lib.rs similarity index 100% rename from 
milli/src/lib.rs rename to crates/milli/src/lib.rs diff --git a/milli/src/localized_attributes_rules.rs b/crates/milli/src/localized_attributes_rules.rs similarity index 100% rename from milli/src/localized_attributes_rules.rs rename to crates/milli/src/localized_attributes_rules.rs diff --git a/milli/src/order_by_map.rs b/crates/milli/src/order_by_map.rs similarity index 100% rename from milli/src/order_by_map.rs rename to crates/milli/src/order_by_map.rs diff --git a/milli/src/prompt/context.rs b/crates/milli/src/prompt/context.rs similarity index 100% rename from milli/src/prompt/context.rs rename to crates/milli/src/prompt/context.rs diff --git a/milli/src/prompt/document.rs b/crates/milli/src/prompt/document.rs similarity index 100% rename from milli/src/prompt/document.rs rename to crates/milli/src/prompt/document.rs diff --git a/milli/src/prompt/error.rs b/crates/milli/src/prompt/error.rs similarity index 100% rename from milli/src/prompt/error.rs rename to crates/milli/src/prompt/error.rs diff --git a/milli/src/prompt/fields.rs b/crates/milli/src/prompt/fields.rs similarity index 100% rename from milli/src/prompt/fields.rs rename to crates/milli/src/prompt/fields.rs diff --git a/milli/src/prompt/mod.rs b/crates/milli/src/prompt/mod.rs similarity index 100% rename from milli/src/prompt/mod.rs rename to crates/milli/src/prompt/mod.rs diff --git a/milli/src/prompt/template_checker.rs b/crates/milli/src/prompt/template_checker.rs similarity index 100% rename from milli/src/prompt/template_checker.rs rename to crates/milli/src/prompt/template_checker.rs diff --git a/milli/src/proximity.rs b/crates/milli/src/proximity.rs similarity index 100% rename from milli/src/proximity.rs rename to crates/milli/src/proximity.rs diff --git a/milli/src/score_details.rs b/crates/milli/src/score_details.rs similarity index 100% rename from milli/src/score_details.rs rename to crates/milli/src/score_details.rs diff --git a/milli/src/search/facet/facet_distribution.rs 
b/crates/milli/src/search/facet/facet_distribution.rs similarity index 100% rename from milli/src/search/facet/facet_distribution.rs rename to crates/milli/src/search/facet/facet_distribution.rs diff --git a/milli/src/search/facet/facet_distribution_iter.rs b/crates/milli/src/search/facet/facet_distribution_iter.rs similarity index 100% rename from milli/src/search/facet/facet_distribution_iter.rs rename to crates/milli/src/search/facet/facet_distribution_iter.rs diff --git a/milli/src/search/facet/facet_range_search.rs b/crates/milli/src/search/facet/facet_range_search.rs similarity index 100% rename from milli/src/search/facet/facet_range_search.rs rename to crates/milli/src/search/facet/facet_range_search.rs diff --git a/milli/src/search/facet/facet_sort_ascending.rs b/crates/milli/src/search/facet/facet_sort_ascending.rs similarity index 100% rename from milli/src/search/facet/facet_sort_ascending.rs rename to crates/milli/src/search/facet/facet_sort_ascending.rs diff --git a/milli/src/search/facet/facet_sort_descending.rs b/crates/milli/src/search/facet/facet_sort_descending.rs similarity index 100% rename from milli/src/search/facet/facet_sort_descending.rs rename to crates/milli/src/search/facet/facet_sort_descending.rs diff --git a/milli/src/search/facet/filter.rs b/crates/milli/src/search/facet/filter.rs similarity index 100% rename from milli/src/search/facet/filter.rs rename to crates/milli/src/search/facet/filter.rs diff --git a/milli/src/search/facet/mod.rs b/crates/milli/src/search/facet/mod.rs similarity index 100% rename from milli/src/search/facet/mod.rs rename to crates/milli/src/search/facet/mod.rs diff --git a/milli/src/search/facet/search.rs b/crates/milli/src/search/facet/search.rs similarity index 100% rename from milli/src/search/facet/search.rs rename to crates/milli/src/search/facet/search.rs diff --git a/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all/0.snap 
b/crates/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all/0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all/0.snap rename to crates/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all/0.snap diff --git a/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all/1.snap b/crates/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all/1.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all/1.snap rename to crates/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all/1.snap diff --git a/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all_stop_early/0.snap b/crates/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all_stop_early/0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all_stop_early/0.snap rename to crates/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all_stop_early/0.snap diff --git a/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all_stop_early/1.snap b/crates/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all_stop_early/1.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all_stop_early/1.snap rename to crates/milli/src/search/facet/snapshots/facet_distribution_iter.rs/filter_distribution_all_stop_early/1.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_0.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_0.hash.snap similarity index 100% rename from 
milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_0.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_0.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_1.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_1.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_1.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_1.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_2.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_2.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_2.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_2.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_3.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_3.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_3.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/excluded_3.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_0.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_0.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_0.hash.snap rename 
to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_0.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_1.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_1.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_1.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_1.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_2.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_2.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_2.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_2.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_3.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_3.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_3.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_decreasing/included_3.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_0.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_0.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_0.hash.snap rename to 
crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_0.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_1.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_1.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_1.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_1.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_2.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_2.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_2.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_2.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_3.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_3.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_3.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_0_exact_3.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_0.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_0.snap rename to 
crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_0.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_1.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_1.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_1.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_1.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_2.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_2.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_2.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_2.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_3.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_3.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_3.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_exact/field_id_1_exact_3.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_0.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_0.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_0.hash.snap rename to 
crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_0.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_1.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_1.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_1.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_1.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_2.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_2.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_2.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_2.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_3.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_3.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_3.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/excluded_3.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_0.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_0.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_0.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_0.hash.snap 
diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_1.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_1.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_1.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_1.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_2.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_2.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_2.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_2.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_3.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_3.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_3.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_increasing/included_3.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_0.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_0.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_0.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_0.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_1.hash.snap 
b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_1.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_1.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_1.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_2.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_2.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_2.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_2.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_3.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_3.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_3.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/excluded_3.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_0.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_0.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_0.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_0.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_1.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_1.hash.snap similarity index 100% rename from 
milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_1.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_1.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_2.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_2.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_2.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_2.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_3.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_3.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_3.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_pinch/included_3.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_0.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_0.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_0.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_0.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_1.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_1.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_1.hash.snap rename to 
crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_1.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_2.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_2.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_2.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_2.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_3.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_3.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_3.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/end_at_included_3.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_0.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_0.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_0.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_0.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_1.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_1.hash.snap similarity index 100% rename from 
milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_1.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_1.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_2.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_2.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_2.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_2.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_3.hash.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_3.hash.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_3.hash.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/start_from_included_3.hash.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_0.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_0.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_0.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_1.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_1.snap 
similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_1.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_1.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_2.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_2.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_2.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_2.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_3.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_3.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_3.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_0_3.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_0.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_0.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_0.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_1.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_1.snap similarity 
index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_1.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_1.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_2.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_2.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_2.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_2.snap diff --git a/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_3.snap b/crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_3.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_3.snap rename to crates/milli/src/search/facet/snapshots/facet_range_search.rs/filter_range_unbounded/unbounded_field_id_1_3.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending/0.snap b/crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending/0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending/0.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending/0.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending/1.snap b/crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending/1.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending/1.snap rename to 
crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending/1.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/0-0.snap b/crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/0-0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/0-0.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/0-0.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/0-1.snap b/crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/0-1.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/0-1.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/0-1.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/1-0.snap b/crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/1-0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/1-0.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/1-0.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/1-1.snap b/crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/1-1.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/1-1.snap rename to 
crates/milli/src/search/facet/snapshots/facet_sort_ascending.rs/filter_sort_ascending_multiple_field_ids/1-1.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/0.snap b/crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/0.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/0.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/1.snap b/crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/1.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/1.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/1.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/2.snap b/crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/2.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/2.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending/2.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/0-0.snap b/crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/0-0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/0-0.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/0-0.snap diff --git 
a/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/0-1.snap b/crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/0-1.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/0-1.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/0-1.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/1-0.snap b/crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/1-0.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/1-0.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/1-0.snap diff --git a/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/1-1.snap b/crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/1-1.snap similarity index 100% rename from milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/1-1.snap rename to crates/milli/src/search/facet/snapshots/facet_sort_descending.rs/filter_sort_descending_multiple_field_ids/1-1.snap diff --git a/milli/src/search/fst_utils.rs b/crates/milli/src/search/fst_utils.rs similarity index 100% rename from milli/src/search/fst_utils.rs rename to crates/milli/src/search/fst_utils.rs diff --git a/milli/src/search/hybrid.rs b/crates/milli/src/search/hybrid.rs similarity index 100% rename from milli/src/search/hybrid.rs rename to crates/milli/src/search/hybrid.rs diff --git a/milli/src/search/mod.rs b/crates/milli/src/search/mod.rs similarity index 100% rename from 
milli/src/search/mod.rs rename to crates/milli/src/search/mod.rs diff --git a/milli/src/search/new/bucket_sort.rs b/crates/milli/src/search/new/bucket_sort.rs similarity index 100% rename from milli/src/search/new/bucket_sort.rs rename to crates/milli/src/search/new/bucket_sort.rs diff --git a/milli/src/search/new/db_cache.rs b/crates/milli/src/search/new/db_cache.rs similarity index 100% rename from milli/src/search/new/db_cache.rs rename to crates/milli/src/search/new/db_cache.rs diff --git a/milli/src/search/new/distinct.rs b/crates/milli/src/search/new/distinct.rs similarity index 100% rename from milli/src/search/new/distinct.rs rename to crates/milli/src/search/new/distinct.rs diff --git a/milli/src/search/new/exact_attribute.rs b/crates/milli/src/search/new/exact_attribute.rs similarity index 100% rename from milli/src/search/new/exact_attribute.rs rename to crates/milli/src/search/new/exact_attribute.rs diff --git a/milli/src/search/new/geo_sort.rs b/crates/milli/src/search/new/geo_sort.rs similarity index 100% rename from milli/src/search/new/geo_sort.rs rename to crates/milli/src/search/new/geo_sort.rs diff --git a/milli/src/search/new/graph_based_ranking_rule.rs b/crates/milli/src/search/new/graph_based_ranking_rule.rs similarity index 100% rename from milli/src/search/new/graph_based_ranking_rule.rs rename to crates/milli/src/search/new/graph_based_ranking_rule.rs diff --git a/milli/src/search/new/interner.rs b/crates/milli/src/search/new/interner.rs similarity index 100% rename from milli/src/search/new/interner.rs rename to crates/milli/src/search/new/interner.rs diff --git a/milli/src/search/new/limits.rs b/crates/milli/src/search/new/limits.rs similarity index 100% rename from milli/src/search/new/limits.rs rename to crates/milli/src/search/new/limits.rs diff --git a/milli/src/search/new/logger/mod.rs b/crates/milli/src/search/new/logger/mod.rs similarity index 100% rename from milli/src/search/new/logger/mod.rs rename to 
crates/milli/src/search/new/logger/mod.rs diff --git a/milli/src/search/new/logger/visual.rs b/crates/milli/src/search/new/logger/visual.rs similarity index 100% rename from milli/src/search/new/logger/visual.rs rename to crates/milli/src/search/new/logger/visual.rs diff --git a/milli/src/search/new/matches/matching_words.rs b/crates/milli/src/search/new/matches/matching_words.rs similarity index 100% rename from milli/src/search/new/matches/matching_words.rs rename to crates/milli/src/search/new/matches/matching_words.rs diff --git a/milli/src/search/new/matches/mod.rs b/crates/milli/src/search/new/matches/mod.rs similarity index 100% rename from milli/src/search/new/matches/mod.rs rename to crates/milli/src/search/new/matches/mod.rs diff --git a/milli/src/search/new/mod.rs b/crates/milli/src/search/new/mod.rs similarity index 100% rename from milli/src/search/new/mod.rs rename to crates/milli/src/search/new/mod.rs diff --git a/milli/src/search/new/query_graph.rs b/crates/milli/src/search/new/query_graph.rs similarity index 100% rename from milli/src/search/new/query_graph.rs rename to crates/milli/src/search/new/query_graph.rs diff --git a/milli/src/search/new/query_term/compute_derivations.rs b/crates/milli/src/search/new/query_term/compute_derivations.rs similarity index 100% rename from milli/src/search/new/query_term/compute_derivations.rs rename to crates/milli/src/search/new/query_term/compute_derivations.rs diff --git a/milli/src/search/new/query_term/mod.rs b/crates/milli/src/search/new/query_term/mod.rs similarity index 100% rename from milli/src/search/new/query_term/mod.rs rename to crates/milli/src/search/new/query_term/mod.rs diff --git a/milli/src/search/new/query_term/ntypo_subset.rs b/crates/milli/src/search/new/query_term/ntypo_subset.rs similarity index 100% rename from milli/src/search/new/query_term/ntypo_subset.rs rename to crates/milli/src/search/new/query_term/ntypo_subset.rs diff --git a/milli/src/search/new/query_term/parse_query.rs 
b/crates/milli/src/search/new/query_term/parse_query.rs similarity index 100% rename from milli/src/search/new/query_term/parse_query.rs rename to crates/milli/src/search/new/query_term/parse_query.rs diff --git a/milli/src/search/new/query_term/phrase.rs b/crates/milli/src/search/new/query_term/phrase.rs similarity index 100% rename from milli/src/search/new/query_term/phrase.rs rename to crates/milli/src/search/new/query_term/phrase.rs diff --git a/milli/src/search/new/ranking_rule_graph/build.rs b/crates/milli/src/search/new/ranking_rule_graph/build.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/build.rs rename to crates/milli/src/search/new/ranking_rule_graph/build.rs diff --git a/milli/src/search/new/ranking_rule_graph/cheapest_paths.rs b/crates/milli/src/search/new/ranking_rule_graph/cheapest_paths.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/cheapest_paths.rs rename to crates/milli/src/search/new/ranking_rule_graph/cheapest_paths.rs diff --git a/milli/src/search/new/ranking_rule_graph/condition_docids_cache.rs b/crates/milli/src/search/new/ranking_rule_graph/condition_docids_cache.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/condition_docids_cache.rs rename to crates/milli/src/search/new/ranking_rule_graph/condition_docids_cache.rs diff --git a/milli/src/search/new/ranking_rule_graph/dead_ends_cache.rs b/crates/milli/src/search/new/ranking_rule_graph/dead_ends_cache.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/dead_ends_cache.rs rename to crates/milli/src/search/new/ranking_rule_graph/dead_ends_cache.rs diff --git a/milli/src/search/new/ranking_rule_graph/exactness/mod.rs b/crates/milli/src/search/new/ranking_rule_graph/exactness/mod.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/exactness/mod.rs rename to crates/milli/src/search/new/ranking_rule_graph/exactness/mod.rs diff --git 
a/milli/src/search/new/ranking_rule_graph/fid/mod.rs b/crates/milli/src/search/new/ranking_rule_graph/fid/mod.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/fid/mod.rs rename to crates/milli/src/search/new/ranking_rule_graph/fid/mod.rs diff --git a/milli/src/search/new/ranking_rule_graph/mod.rs b/crates/milli/src/search/new/ranking_rule_graph/mod.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/mod.rs rename to crates/milli/src/search/new/ranking_rule_graph/mod.rs diff --git a/milli/src/search/new/ranking_rule_graph/position/mod.rs b/crates/milli/src/search/new/ranking_rule_graph/position/mod.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/position/mod.rs rename to crates/milli/src/search/new/ranking_rule_graph/position/mod.rs diff --git a/milli/src/search/new/ranking_rule_graph/proximity/build.rs b/crates/milli/src/search/new/ranking_rule_graph/proximity/build.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/proximity/build.rs rename to crates/milli/src/search/new/ranking_rule_graph/proximity/build.rs diff --git a/milli/src/search/new/ranking_rule_graph/proximity/compute_docids.rs b/crates/milli/src/search/new/ranking_rule_graph/proximity/compute_docids.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/proximity/compute_docids.rs rename to crates/milli/src/search/new/ranking_rule_graph/proximity/compute_docids.rs diff --git a/milli/src/search/new/ranking_rule_graph/proximity/mod.rs b/crates/milli/src/search/new/ranking_rule_graph/proximity/mod.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/proximity/mod.rs rename to crates/milli/src/search/new/ranking_rule_graph/proximity/mod.rs diff --git a/milli/src/search/new/ranking_rule_graph/typo/mod.rs b/crates/milli/src/search/new/ranking_rule_graph/typo/mod.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/typo/mod.rs rename 
to crates/milli/src/search/new/ranking_rule_graph/typo/mod.rs diff --git a/milli/src/search/new/ranking_rule_graph/words/mod.rs b/crates/milli/src/search/new/ranking_rule_graph/words/mod.rs similarity index 100% rename from milli/src/search/new/ranking_rule_graph/words/mod.rs rename to crates/milli/src/search/new/ranking_rule_graph/words/mod.rs diff --git a/milli/src/search/new/ranking_rules.rs b/crates/milli/src/search/new/ranking_rules.rs similarity index 100% rename from milli/src/search/new/ranking_rules.rs rename to crates/milli/src/search/new/ranking_rules.rs diff --git a/milli/src/search/new/resolve_query_graph.rs b/crates/milli/src/search/new/resolve_query_graph.rs similarity index 100% rename from milli/src/search/new/resolve_query_graph.rs rename to crates/milli/src/search/new/resolve_query_graph.rs diff --git a/milli/src/search/new/small_bitmap.rs b/crates/milli/src/search/new/small_bitmap.rs similarity index 100% rename from milli/src/search/new/small_bitmap.rs rename to crates/milli/src/search/new/small_bitmap.rs diff --git a/milli/src/search/new/sort.rs b/crates/milli/src/search/new/sort.rs similarity index 100% rename from milli/src/search/new/sort.rs rename to crates/milli/src/search/new/sort.rs diff --git a/milli/src/search/new/tests/attribute_fid.rs b/crates/milli/src/search/new/tests/attribute_fid.rs similarity index 100% rename from milli/src/search/new/tests/attribute_fid.rs rename to crates/milli/src/search/new/tests/attribute_fid.rs diff --git a/milli/src/search/new/tests/attribute_position.rs b/crates/milli/src/search/new/tests/attribute_position.rs similarity index 100% rename from milli/src/search/new/tests/attribute_position.rs rename to crates/milli/src/search/new/tests/attribute_position.rs diff --git a/milli/src/search/new/tests/cutoff.rs b/crates/milli/src/search/new/tests/cutoff.rs similarity index 100% rename from milli/src/search/new/tests/cutoff.rs rename to crates/milli/src/search/new/tests/cutoff.rs diff --git 
a/milli/src/search/new/tests/distinct.rs b/crates/milli/src/search/new/tests/distinct.rs similarity index 100% rename from milli/src/search/new/tests/distinct.rs rename to crates/milli/src/search/new/tests/distinct.rs diff --git a/milli/src/search/new/tests/exactness.rs b/crates/milli/src/search/new/tests/exactness.rs similarity index 100% rename from milli/src/search/new/tests/exactness.rs rename to crates/milli/src/search/new/tests/exactness.rs diff --git a/milli/src/search/new/tests/geo_sort.rs b/crates/milli/src/search/new/tests/geo_sort.rs similarity index 100% rename from milli/src/search/new/tests/geo_sort.rs rename to crates/milli/src/search/new/tests/geo_sort.rs diff --git a/milli/src/search/new/tests/integration.rs b/crates/milli/src/search/new/tests/integration.rs similarity index 100% rename from milli/src/search/new/tests/integration.rs rename to crates/milli/src/search/new/tests/integration.rs diff --git a/milli/src/search/new/tests/language.rs b/crates/milli/src/search/new/tests/language.rs similarity index 100% rename from milli/src/search/new/tests/language.rs rename to crates/milli/src/search/new/tests/language.rs diff --git a/milli/src/search/new/tests/mod.rs b/crates/milli/src/search/new/tests/mod.rs similarity index 100% rename from milli/src/search/new/tests/mod.rs rename to crates/milli/src/search/new/tests/mod.rs diff --git a/milli/src/search/new/tests/ngram_split_words.rs b/crates/milli/src/search/new/tests/ngram_split_words.rs similarity index 100% rename from milli/src/search/new/tests/ngram_split_words.rs rename to crates/milli/src/search/new/tests/ngram_split_words.rs diff --git a/milli/src/search/new/tests/proximity.rs b/crates/milli/src/search/new/tests/proximity.rs similarity index 100% rename from milli/src/search/new/tests/proximity.rs rename to crates/milli/src/search/new/tests/proximity.rs diff --git a/milli/src/search/new/tests/proximity_typo.rs b/crates/milli/src/search/new/tests/proximity_typo.rs similarity index 100% rename 
from milli/src/search/new/tests/proximity_typo.rs rename to crates/milli/src/search/new/tests/proximity_typo.rs diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_fid__attribute_fid_ngrams-4.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_fid__attribute_fid_ngrams-4.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_fid__attribute_fid_ngrams-4.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_fid__attribute_fid_ngrams-4.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_fid__attribute_fid_simple.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_fid__attribute_fid_simple.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_fid__attribute_fid_simple.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_fid__attribute_fid_simple.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_different_fields.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_different_fields.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_different_fields.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_different_fields.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_ngrams.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_ngrams.snap similarity index 100% rename from 
milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_ngrams.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_ngrams.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_repeated.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_repeated.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_repeated.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_repeated.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_simple-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_simple-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_simple-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__attribute_position__attribute_position_simple-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_after_words.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_after_words.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_after_words.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_after_words.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_all_candidates_with_typo.snap 
b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_all_candidates_with_typo.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_all_candidates_with_typo.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_all_candidates_with_typo.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_phrase-3.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_phrase-3.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_phrase-3.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_phrase-3.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_phrase.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_phrase.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_phrase.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_phrase.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_simple.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_simple.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_simple.snap rename to 
crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_attribute_starts_with_simple.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_followed_by_typo_prefer_no_typo_prefix.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_followed_by_typo_prefer_no_typo_prefix.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_followed_by_typo_prefer_no_typo_prefix.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_followed_by_typo_prefer_no_typo_prefix.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_ordered.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_ordered.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_ordered.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_ordered.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_random.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_random.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_random.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_random.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_reversed-3.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_reversed-3.snap similarity index 100% rename from 
milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_reversed-3.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_reversed-3.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_reversed.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_reversed.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_reversed.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__exactness_simple_reversed.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__proximity_after_exactness-4.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__proximity_after_exactness-4.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__proximity_after_exactness-4.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__proximity_after_exactness-4.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__proximity_after_exactness.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__proximity_after_exactness.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__proximity_after_exactness.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__proximity_after_exactness.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__typo_followed_by_exactness.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__typo_followed_by_exactness.snap similarity index 100% rename 
from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__typo_followed_by_exactness.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__typo_followed_by_exactness.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__words_after_exactness.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__words_after_exactness.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__words_after_exactness.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__exactness__words_after_exactness.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort-4.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort-4.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort-4.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort-4.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-10.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-10.snap similarity index 100% rename from 
milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-10.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-10.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-12.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-12.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-12.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-12.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-14.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-14.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-14.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-14.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-16.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-16.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-16.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-16.snap diff --git 
a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-18.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-18.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-18.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-18.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-20.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-20.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-20.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-20.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-4.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-4.snap similarity index 100% 
rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-4.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-4.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-6.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-6.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-6.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-6.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-8.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-8.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-8.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_around_the_edge_of_the_flat_earth-8.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_mixed_with_words-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_mixed_with_words-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_mixed_with_words-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_mixed_with_words-2.snap diff --git 
a/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_without_any_geo_faceted_documents-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_without_any_geo_faceted_documents-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_without_any_geo_faceted_documents-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__geo_sort__geo_sort_without_any_geo_faceted_documents-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-11.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-11.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-11.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-11.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-14.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-14.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-14.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-14.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-2.snap rename to 
crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-5.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-5.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-5.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-5.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-8.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-8.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-8.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_prefix_db-8.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-5.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-5.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-5.snap rename to 
crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-5.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-8.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-8.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-8.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__proximity__proximity_split_word-8.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__redacted-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__redacted-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__redacted-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__redacted-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-11.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-11.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-11.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-11.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-5.snap 
b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-5.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-5.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-5.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-8.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-8.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-8.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__sort__sort-8.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__stop_words__stop_words_in_phrase-6.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__stop_words__stop_words_in_phrase-6.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__stop_words__stop_words_in_phrase-6.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__stop_words__stop_words_in_phrase-6.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__stop_words__stop_words_in_phrase-8.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__stop_words__stop_words_in_phrase-8.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__stop_words__stop_words_in_phrase-8.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__stop_words__stop_words_in_phrase-8.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-2.snap similarity index 100% rename from 
milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-5.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-5.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-5.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-5.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-8.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-8.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-8.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_bucketing-8.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_exact_attribute-4.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_exact_attribute-4.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_exact_attribute-4.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_exact_attribute-4.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_exact_word-12.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_exact_word-12.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_exact_word-12.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_exact_word-12.snap 
diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_ranking_rule_not_preceded_by_words_ranking_rule-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_ranking_rule_not_preceded_by_words_ranking_rule-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_ranking_rule_not_preceded_by_words_ranking_rule-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_ranking_rule_not_preceded_by_words_ranking_rule-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_ranking_rule_not_preceded_by_words_ranking_rule-5.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_ranking_rule_not_preceded_by_words_ranking_rule-5.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_ranking_rule_not_preceded_by_words_ranking_rule-5.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_ranking_rule_not_preceded_by_words_ranking_rule-5.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_synonyms-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_synonyms-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_synonyms-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_synonyms-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_synonyms-5.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_synonyms-5.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_synonyms-5.snap rename to 
crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo__typo_synonyms-5.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo_proximity__trap_basic_and_complex1-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo_proximity__trap_basic_and_complex1-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__typo_proximity__trap_basic_and_complex1-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo_proximity__trap_basic_and_complex1-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo_proximity__trap_complex2-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo_proximity__trap_complex2-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__typo_proximity__trap_complex2-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__typo_proximity__trap_complex2-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_phrase-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_phrase-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_phrase-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_phrase-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_phrase-5.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_phrase-5.snap similarity index 100% rename from 
milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_phrase-5.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_phrase-5.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_simple-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_simple-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_simple-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_simple-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_simple-5.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_simple-5.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_simple-5.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_proximity_tms_last_simple-5.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_all-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_all-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_all-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_all-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_phrase-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_phrase-2.snap 
similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_phrase-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_phrase-2.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_phrase-5.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_phrase-5.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_phrase-5.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_phrase-5.snap diff --git a/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_simple-2.snap b/crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_simple-2.snap similarity index 100% rename from milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_simple-2.snap rename to crates/milli/src/search/new/tests/snapshots/milli__search__new__tests__words_tms__words_tms_last_simple-2.snap diff --git a/milli/src/search/new/tests/sort.rs b/crates/milli/src/search/new/tests/sort.rs similarity index 100% rename from milli/src/search/new/tests/sort.rs rename to crates/milli/src/search/new/tests/sort.rs diff --git a/milli/src/search/new/tests/stop_words.rs b/crates/milli/src/search/new/tests/stop_words.rs similarity index 100% rename from milli/src/search/new/tests/stop_words.rs rename to crates/milli/src/search/new/tests/stop_words.rs diff --git a/milli/src/search/new/tests/typo.rs b/crates/milli/src/search/new/tests/typo.rs similarity index 100% rename from milli/src/search/new/tests/typo.rs rename to crates/milli/src/search/new/tests/typo.rs diff --git a/milli/src/search/new/tests/typo_proximity.rs 
b/crates/milli/src/search/new/tests/typo_proximity.rs similarity index 100% rename from milli/src/search/new/tests/typo_proximity.rs rename to crates/milli/src/search/new/tests/typo_proximity.rs diff --git a/milli/src/search/new/tests/words_tms.rs b/crates/milli/src/search/new/tests/words_tms.rs similarity index 100% rename from milli/src/search/new/tests/words_tms.rs rename to crates/milli/src/search/new/tests/words_tms.rs diff --git a/milli/src/search/new/vector_sort.rs b/crates/milli/src/search/new/vector_sort.rs similarity index 100% rename from milli/src/search/new/vector_sort.rs rename to crates/milli/src/search/new/vector_sort.rs diff --git a/milli/src/search/similar.rs b/crates/milli/src/search/similar.rs similarity index 100% rename from milli/src/search/similar.rs rename to crates/milli/src/search/similar.rs diff --git a/milli/src/snapshot_tests.rs b/crates/milli/src/snapshot_tests.rs similarity index 100% rename from milli/src/snapshot_tests.rs rename to crates/milli/src/snapshot_tests.rs diff --git a/milli/src/snapshots/index.rs/bug_3007/geo_faceted_documents_ids.snap b/crates/milli/src/snapshots/index.rs/bug_3007/geo_faceted_documents_ids.snap similarity index 100% rename from milli/src/snapshots/index.rs/bug_3007/geo_faceted_documents_ids.snap rename to crates/milli/src/snapshots/index.rs/bug_3007/geo_faceted_documents_ids.snap diff --git a/milli/src/snapshots/index.rs/unexpected_extra_fields_in_geo_field/geo_faceted_documents_ids.snap b/crates/milli/src/snapshots/index.rs/unexpected_extra_fields_in_geo_field/geo_faceted_documents_ids.snap similarity index 100% rename from milli/src/snapshots/index.rs/unexpected_extra_fields_in_geo_field/geo_faceted_documents_ids.snap rename to crates/milli/src/snapshots/index.rs/unexpected_extra_fields_in_geo_field/geo_faceted_documents_ids.snap diff --git a/milli/src/thread_pool_no_abort.rs b/crates/milli/src/thread_pool_no_abort.rs similarity index 100% rename from milli/src/thread_pool_no_abort.rs rename to 
crates/milli/src/thread_pool_no_abort.rs diff --git a/milli/src/update/available_documents_ids.rs b/crates/milli/src/update/available_documents_ids.rs similarity index 100% rename from milli/src/update/available_documents_ids.rs rename to crates/milli/src/update/available_documents_ids.rs diff --git a/milli/src/update/clear_documents.rs b/crates/milli/src/update/clear_documents.rs similarity index 100% rename from milli/src/update/clear_documents.rs rename to crates/milli/src/update/clear_documents.rs diff --git a/milli/src/update/del_add.rs b/crates/milli/src/update/del_add.rs similarity index 100% rename from milli/src/update/del_add.rs rename to crates/milli/src/update/del_add.rs diff --git a/milli/src/update/facet/bulk.rs b/crates/milli/src/update/facet/bulk.rs similarity index 100% rename from milli/src/update/facet/bulk.rs rename to crates/milli/src/update/facet/bulk.rs diff --git a/milli/src/update/facet/incremental.rs b/crates/milli/src/update/facet/incremental.rs similarity index 100% rename from milli/src/update/facet/incremental.rs rename to crates/milli/src/update/facet/incremental.rs diff --git a/milli/src/update/facet/mod.rs b/crates/milli/src/update/facet/mod.rs similarity index 100% rename from milli/src/update/facet/mod.rs rename to crates/milli/src/update/facet/mod.rs diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert/default.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert/default.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert/default.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert/default.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert/large_group_small_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert/large_group_small_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert/large_group_small_min_level.hash.snap rename to 
crates/milli/src/update/facet/snapshots/bulk.rs/insert/large_group_small_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert/odd_group_odd_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert/odd_group_odd_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert/odd_group_odd_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert/odd_group_odd_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert/small_group_large_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert/small_group_large_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert/small_group_large_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert/small_group_large_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert/small_group_small_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert/small_group_small_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert/small_group_small_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert/small_group_small_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/default.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/default.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/default.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/default.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/large_group_small_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/large_group_small_min_level.hash.snap similarity index 
100% rename from milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/large_group_small_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/large_group_small_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/odd_group_odd_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/odd_group_odd_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/odd_group_odd_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/odd_group_odd_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/small_group_large_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/small_group_large_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/small_group_large_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/small_group_large_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/small_group_small_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/small_group_small_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/small_group_small_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert_delete_field_insert/small_group_small_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert_string/default.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert_string/default.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert_string/default.hash.snap 
rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert_string/default.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert_string/large_group_small_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert_string/large_group_small_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert_string/large_group_small_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert_string/large_group_small_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert_string/odd_group_odd_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert_string/odd_group_odd_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert_string/odd_group_odd_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert_string/odd_group_odd_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert_string/small_group_large_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert_string/small_group_large_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert_string/small_group_large_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert_string/small_group_large_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/bulk.rs/insert_string/small_group_small_min_level.hash.snap b/crates/milli/src/update/facet/snapshots/bulk.rs/insert_string/small_group_small_min_level.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/bulk.rs/insert_string/small_group_small_min_level.hash.snap rename to crates/milli/src/update/facet/snapshots/bulk.rs/insert_string/small_group_small_min_level.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/append/append.hash.snap 
b/crates/milli/src/update/facet/snapshots/incremental.rs/append/append.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/append/append.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/append/append.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/0.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/0.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_from_end/0.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/0.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/100.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/100.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_from_end/100.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/100.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/15.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/15.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_from_end/15.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/15.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/150.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/150.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_from_end/150.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/150.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/17.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/17.snap similarity index 100% rename from 
milli/src/update/facet/snapshots/incremental.rs/delete_from_end/17.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/17.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/200.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/200.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_from_end/200.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_end/200.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_from_start/127.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_start/127.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_from_start/127.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_start/127.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_from_start/215.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_start/215.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_from_start/215.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_start/215.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_from_start/255.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_start/255.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_from_start/255.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_from_start/255.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/127.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/127.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/127.hash.snap rename to 
crates/milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/127.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/215.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/215.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/215.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/215.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/255.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/255.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/255.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/delete_shuffled/255.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/in_place_level0_delete/after_delete.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/in_place_level0_delete/after_delete.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/in_place_level0_delete/after_delete.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/in_place_level0_delete/after_delete.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/in_place_level0_delete/before_delete.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/in_place_level0_delete/before_delete.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/in_place_level0_delete/before_delete.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/in_place_level0_delete/before_delete.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/in_place_level0_insert/in_place_level0_insert.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/in_place_level0_insert/in_place_level0_insert.snap similarity index 100% rename from 
milli/src/update/facet/snapshots/incremental.rs/in_place_level0_insert/in_place_level0_insert.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/in_place_level0_insert/in_place_level0_insert.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/many_field_ids_append/many_field_ids_append.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/many_field_ids_append/many_field_ids_append.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/many_field_ids_append/many_field_ids_append.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/many_field_ids_append/many_field_ids_append.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/many_field_ids_prepend/many_field_ids_prepend.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/many_field_ids_prepend/many_field_ids_prepend.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/many_field_ids_prepend/many_field_ids_prepend.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/many_field_ids_prepend/many_field_ids_prepend.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/merge_values/merge_values.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/merge_values/merge_values.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/merge_values/merge_values.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/merge_values/merge_values.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/prepend/prepend.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/prepend/prepend.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/prepend/prepend.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/prepend/prepend.hash.snap diff --git 
a/milli/src/update/facet/snapshots/incremental.rs/shuffle_merge_string_and_delete/after_delete.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/shuffle_merge_string_and_delete/after_delete.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/shuffle_merge_string_and_delete/after_delete.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/shuffle_merge_string_and_delete/after_delete.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/shuffle_merge_string_and_delete/before_delete.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/shuffle_merge_string_and_delete/before_delete.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/shuffle_merge_string_and_delete/before_delete.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/shuffle_merge_string_and_delete/before_delete.hash.snap diff --git a/milli/src/update/facet/snapshots/incremental.rs/shuffled/shuffled.hash.snap b/crates/milli/src/update/facet/snapshots/incremental.rs/shuffled/shuffled.hash.snap similarity index 100% rename from milli/src/update/facet/snapshots/incremental.rs/shuffled/shuffled.hash.snap rename to crates/milli/src/update/facet/snapshots/incremental.rs/shuffled/shuffled.hash.snap diff --git a/milli/src/update/index_documents/enrich.rs b/crates/milli/src/update/index_documents/enrich.rs similarity index 100% rename from milli/src/update/index_documents/enrich.rs rename to crates/milli/src/update/index_documents/enrich.rs diff --git a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs b/crates/milli/src/update/index_documents/extract/extract_docid_word_positions.rs similarity index 100% rename from milli/src/update/index_documents/extract/extract_docid_word_positions.rs rename to crates/milli/src/update/index_documents/extract/extract_docid_word_positions.rs diff --git 
a/milli/src/update/index_documents/extract/extract_facet_number_docids.rs b/crates/milli/src/update/index_documents/extract/extract_facet_number_docids.rs similarity index 100% rename from milli/src/update/index_documents/extract/extract_facet_number_docids.rs rename to crates/milli/src/update/index_documents/extract/extract_facet_number_docids.rs diff --git a/milli/src/update/index_documents/extract/extract_facet_string_docids.rs b/crates/milli/src/update/index_documents/extract/extract_facet_string_docids.rs similarity index 100% rename from milli/src/update/index_documents/extract/extract_facet_string_docids.rs rename to crates/milli/src/update/index_documents/extract/extract_facet_string_docids.rs diff --git a/milli/src/update/index_documents/extract/extract_fid_docid_facet_values.rs b/crates/milli/src/update/index_documents/extract/extract_fid_docid_facet_values.rs similarity index 100% rename from milli/src/update/index_documents/extract/extract_fid_docid_facet_values.rs rename to crates/milli/src/update/index_documents/extract/extract_fid_docid_facet_values.rs diff --git a/milli/src/update/index_documents/extract/extract_fid_word_count_docids.rs b/crates/milli/src/update/index_documents/extract/extract_fid_word_count_docids.rs similarity index 100% rename from milli/src/update/index_documents/extract/extract_fid_word_count_docids.rs rename to crates/milli/src/update/index_documents/extract/extract_fid_word_count_docids.rs diff --git a/milli/src/update/index_documents/extract/extract_geo_points.rs b/crates/milli/src/update/index_documents/extract/extract_geo_points.rs similarity index 100% rename from milli/src/update/index_documents/extract/extract_geo_points.rs rename to crates/milli/src/update/index_documents/extract/extract_geo_points.rs diff --git a/milli/src/update/index_documents/extract/extract_vector_points.rs b/crates/milli/src/update/index_documents/extract/extract_vector_points.rs similarity index 100% rename from 
milli/src/update/index_documents/extract/extract_vector_points.rs rename to crates/milli/src/update/index_documents/extract/extract_vector_points.rs diff --git a/milli/src/update/index_documents/extract/extract_word_docids.rs b/crates/milli/src/update/index_documents/extract/extract_word_docids.rs similarity index 100% rename from milli/src/update/index_documents/extract/extract_word_docids.rs rename to crates/milli/src/update/index_documents/extract/extract_word_docids.rs diff --git a/milli/src/update/index_documents/extract/extract_word_pair_proximity_docids.rs b/crates/milli/src/update/index_documents/extract/extract_word_pair_proximity_docids.rs similarity index 100% rename from milli/src/update/index_documents/extract/extract_word_pair_proximity_docids.rs rename to crates/milli/src/update/index_documents/extract/extract_word_pair_proximity_docids.rs diff --git a/milli/src/update/index_documents/extract/extract_word_position_docids.rs b/crates/milli/src/update/index_documents/extract/extract_word_position_docids.rs similarity index 100% rename from milli/src/update/index_documents/extract/extract_word_position_docids.rs rename to crates/milli/src/update/index_documents/extract/extract_word_position_docids.rs diff --git a/milli/src/update/index_documents/extract/mod.rs b/crates/milli/src/update/index_documents/extract/mod.rs similarity index 100% rename from milli/src/update/index_documents/extract/mod.rs rename to crates/milli/src/update/index_documents/extract/mod.rs diff --git a/milli/src/update/index_documents/helpers/clonable_mmap.rs b/crates/milli/src/update/index_documents/helpers/clonable_mmap.rs similarity index 100% rename from milli/src/update/index_documents/helpers/clonable_mmap.rs rename to crates/milli/src/update/index_documents/helpers/clonable_mmap.rs diff --git a/milli/src/update/index_documents/helpers/grenad_helpers.rs b/crates/milli/src/update/index_documents/helpers/grenad_helpers.rs similarity index 100% rename from 
milli/src/update/index_documents/helpers/grenad_helpers.rs rename to crates/milli/src/update/index_documents/helpers/grenad_helpers.rs diff --git a/milli/src/update/index_documents/helpers/merge_functions.rs b/crates/milli/src/update/index_documents/helpers/merge_functions.rs similarity index 100% rename from milli/src/update/index_documents/helpers/merge_functions.rs rename to crates/milli/src/update/index_documents/helpers/merge_functions.rs diff --git a/milli/src/update/index_documents/helpers/mod.rs b/crates/milli/src/update/index_documents/helpers/mod.rs similarity index 100% rename from milli/src/update/index_documents/helpers/mod.rs rename to crates/milli/src/update/index_documents/helpers/mod.rs diff --git a/milli/src/update/index_documents/mod.rs b/crates/milli/src/update/index_documents/mod.rs similarity index 100% rename from milli/src/update/index_documents/mod.rs rename to crates/milli/src/update/index_documents/mod.rs diff --git a/milli/src/update/index_documents/parallel.rs b/crates/milli/src/update/index_documents/parallel.rs similarity index 100% rename from milli/src/update/index_documents/parallel.rs rename to crates/milli/src/update/index_documents/parallel.rs diff --git a/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/documents_ids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/documents_ids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/documents_ids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/documents_ids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/facet_id_exists_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/facet_id_exists_docids.snap 
similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/facet_id_exists_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/facet_id_exists_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/word_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/word_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/word_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/word_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/word_pair_proximity_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/word_pair_proximity_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/word_pair_proximity_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_numbers_as_primary_key/word_pair_proximity_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/documents_ids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/documents_ids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/documents_ids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/documents_ids.snap diff --git 
a/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/word_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/word_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/word_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/word_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/word_pair_proximity_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/word_pair_proximity_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/word_pair_proximity_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/delete_documents_with_strange_primary_key/word_pair_proximity_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_exists_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_exists_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_exists_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_exists_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_f64_docids.snap 
b/crates/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_f64_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_f64_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_f64_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_string_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_string_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_string_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/facet_id_string_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/word_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/word_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/word_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/word_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/word_pair_proximity_docids.snap 
b/crates/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/word_pair_proximity_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/word_pair_proximity_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/filtered_placeholder_search_should_not_return_deleted_documents/word_pair_proximity_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/geo_filtered_placeholder_search_should_not_return_deleted_documents/facet_id_f64_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/geo_filtered_placeholder_search_should_not_return_deleted_documents/facet_id_f64_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/geo_filtered_placeholder_search_should_not_return_deleted_documents/facet_id_f64_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/geo_filtered_placeholder_search_should_not_return_deleted_documents/facet_id_f64_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/geo_filtered_placeholder_search_should_not_return_deleted_documents/facet_id_string_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/geo_filtered_placeholder_search_should_not_return_deleted_documents/facet_id_string_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/geo_filtered_placeholder_search_should_not_return_deleted_documents/facet_id_string_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/geo_filtered_placeholder_search_should_not_return_deleted_documents/facet_id_string_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/simple_documents_replace/initial/word_docids.snap 
b/crates/milli/src/update/index_documents/snapshots/mod.rs/simple_documents_replace/initial/word_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/simple_documents_replace/initial/word_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/simple_documents_replace/initial/word_docids.snap diff --git a/milli/src/update/index_documents/snapshots/mod.rs/simple_documents_replace/updated/word_docids.snap b/crates/milli/src/update/index_documents/snapshots/mod.rs/simple_documents_replace/updated/word_docids.snap similarity index 100% rename from milli/src/update/index_documents/snapshots/mod.rs/simple_documents_replace/updated/word_docids.snap rename to crates/milli/src/update/index_documents/snapshots/mod.rs/simple_documents_replace/updated/word_docids.snap diff --git a/milli/src/update/index_documents/transform.rs b/crates/milli/src/update/index_documents/transform.rs similarity index 100% rename from milli/src/update/index_documents/transform.rs rename to crates/milli/src/update/index_documents/transform.rs diff --git a/milli/src/update/index_documents/typed_chunk.rs b/crates/milli/src/update/index_documents/typed_chunk.rs similarity index 100% rename from milli/src/update/index_documents/typed_chunk.rs rename to crates/milli/src/update/index_documents/typed_chunk.rs diff --git a/milli/src/update/indexer_config.rs b/crates/milli/src/update/indexer_config.rs similarity index 100% rename from milli/src/update/indexer_config.rs rename to crates/milli/src/update/indexer_config.rs diff --git a/milli/src/update/mod.rs b/crates/milli/src/update/mod.rs similarity index 100% rename from milli/src/update/mod.rs rename to crates/milli/src/update/mod.rs diff --git a/milli/src/update/settings.rs b/crates/milli/src/update/settings.rs similarity index 100% rename from milli/src/update/settings.rs rename to crates/milli/src/update/settings.rs diff --git a/milli/src/update/update_step.rs 
b/crates/milli/src/update/update_step.rs similarity index 100% rename from milli/src/update/update_step.rs rename to crates/milli/src/update/update_step.rs diff --git a/milli/src/update/word_prefix_docids.rs b/crates/milli/src/update/word_prefix_docids.rs similarity index 100% rename from milli/src/update/word_prefix_docids.rs rename to crates/milli/src/update/word_prefix_docids.rs diff --git a/milli/src/update/words_prefix_integer_docids.rs b/crates/milli/src/update/words_prefix_integer_docids.rs similarity index 100% rename from milli/src/update/words_prefix_integer_docids.rs rename to crates/milli/src/update/words_prefix_integer_docids.rs diff --git a/milli/src/update/words_prefixes_fst.rs b/crates/milli/src/update/words_prefixes_fst.rs similarity index 100% rename from milli/src/update/words_prefixes_fst.rs rename to crates/milli/src/update/words_prefixes_fst.rs diff --git a/milli/src/vector/error.rs b/crates/milli/src/vector/error.rs similarity index 100% rename from milli/src/vector/error.rs rename to crates/milli/src/vector/error.rs diff --git a/milli/src/vector/hf.rs b/crates/milli/src/vector/hf.rs similarity index 100% rename from milli/src/vector/hf.rs rename to crates/milli/src/vector/hf.rs diff --git a/milli/src/vector/json_template.rs b/crates/milli/src/vector/json_template.rs similarity index 100% rename from milli/src/vector/json_template.rs rename to crates/milli/src/vector/json_template.rs diff --git a/milli/src/vector/manual.rs b/crates/milli/src/vector/manual.rs similarity index 100% rename from milli/src/vector/manual.rs rename to crates/milli/src/vector/manual.rs diff --git a/milli/src/vector/mod.rs b/crates/milli/src/vector/mod.rs similarity index 100% rename from milli/src/vector/mod.rs rename to crates/milli/src/vector/mod.rs diff --git a/milli/src/vector/ollama.rs b/crates/milli/src/vector/ollama.rs similarity index 100% rename from milli/src/vector/ollama.rs rename to crates/milli/src/vector/ollama.rs diff --git 
a/milli/src/vector/openai.rs b/crates/milli/src/vector/openai.rs similarity index 100% rename from milli/src/vector/openai.rs rename to crates/milli/src/vector/openai.rs diff --git a/milli/src/vector/parsed_vectors.rs b/crates/milli/src/vector/parsed_vectors.rs similarity index 100% rename from milli/src/vector/parsed_vectors.rs rename to crates/milli/src/vector/parsed_vectors.rs diff --git a/milli/src/vector/rest.rs b/crates/milli/src/vector/rest.rs similarity index 100% rename from milli/src/vector/rest.rs rename to crates/milli/src/vector/rest.rs diff --git a/milli/src/vector/settings.rs b/crates/milli/src/vector/settings.rs similarity index 100% rename from milli/src/vector/settings.rs rename to crates/milli/src/vector/settings.rs diff --git a/milli/tests/assets/test_set.ndjson b/crates/milli/tests/assets/test_set.ndjson similarity index 100% rename from milli/tests/assets/test_set.ndjson rename to crates/milli/tests/assets/test_set.ndjson diff --git a/milli/tests/mod.rs b/crates/milli/tests/mod.rs similarity index 100% rename from milli/tests/mod.rs rename to crates/milli/tests/mod.rs diff --git a/milli/tests/search/distinct.rs b/crates/milli/tests/search/distinct.rs similarity index 100% rename from milli/tests/search/distinct.rs rename to crates/milli/tests/search/distinct.rs diff --git a/milli/tests/search/facet_distribution.rs b/crates/milli/tests/search/facet_distribution.rs similarity index 100% rename from milli/tests/search/facet_distribution.rs rename to crates/milli/tests/search/facet_distribution.rs diff --git a/milli/tests/search/filters.rs b/crates/milli/tests/search/filters.rs similarity index 100% rename from milli/tests/search/filters.rs rename to crates/milli/tests/search/filters.rs diff --git a/milli/tests/search/mod.rs b/crates/milli/tests/search/mod.rs similarity index 100% rename from milli/tests/search/mod.rs rename to crates/milli/tests/search/mod.rs diff --git a/milli/tests/search/phrase_search.rs 
b/crates/milli/tests/search/phrase_search.rs similarity index 100% rename from milli/tests/search/phrase_search.rs rename to crates/milli/tests/search/phrase_search.rs diff --git a/milli/tests/search/query_criteria.rs b/crates/milli/tests/search/query_criteria.rs similarity index 100% rename from milli/tests/search/query_criteria.rs rename to crates/milli/tests/search/query_criteria.rs diff --git a/milli/tests/search/sort.rs b/crates/milli/tests/search/sort.rs similarity index 100% rename from milli/tests/search/sort.rs rename to crates/milli/tests/search/sort.rs diff --git a/milli/tests/search/typo_tolerance.rs b/crates/milli/tests/search/typo_tolerance.rs similarity index 100% rename from milli/tests/search/typo_tolerance.rs rename to crates/milli/tests/search/typo_tolerance.rs diff --git a/permissive-json-pointer/Cargo.toml b/crates/permissive-json-pointer/Cargo.toml similarity index 100% rename from permissive-json-pointer/Cargo.toml rename to crates/permissive-json-pointer/Cargo.toml diff --git a/permissive-json-pointer/README.md b/crates/permissive-json-pointer/README.md similarity index 100% rename from permissive-json-pointer/README.md rename to crates/permissive-json-pointer/README.md diff --git a/permissive-json-pointer/src/lib.rs b/crates/permissive-json-pointer/src/lib.rs similarity index 100% rename from permissive-json-pointer/src/lib.rs rename to crates/permissive-json-pointer/src/lib.rs diff --git a/tracing-trace/Cargo.toml b/crates/tracing-trace/Cargo.toml similarity index 100% rename from tracing-trace/Cargo.toml rename to crates/tracing-trace/Cargo.toml diff --git a/tracing-trace/src/bin/trace-to-callstats.rs b/crates/tracing-trace/src/bin/trace-to-callstats.rs similarity index 100% rename from tracing-trace/src/bin/trace-to-callstats.rs rename to crates/tracing-trace/src/bin/trace-to-callstats.rs diff --git a/tracing-trace/src/bin/trace-to-firefox.rs b/crates/tracing-trace/src/bin/trace-to-firefox.rs similarity index 100% rename from 
tracing-trace/src/bin/trace-to-firefox.rs rename to crates/tracing-trace/src/bin/trace-to-firefox.rs diff --git a/tracing-trace/src/entry.rs b/crates/tracing-trace/src/entry.rs similarity index 100% rename from tracing-trace/src/entry.rs rename to crates/tracing-trace/src/entry.rs diff --git a/tracing-trace/src/error.rs b/crates/tracing-trace/src/error.rs similarity index 100% rename from tracing-trace/src/error.rs rename to crates/tracing-trace/src/error.rs diff --git a/tracing-trace/src/layer.rs b/crates/tracing-trace/src/layer.rs similarity index 100% rename from tracing-trace/src/layer.rs rename to crates/tracing-trace/src/layer.rs diff --git a/tracing-trace/src/lib.rs b/crates/tracing-trace/src/lib.rs similarity index 100% rename from tracing-trace/src/lib.rs rename to crates/tracing-trace/src/lib.rs diff --git a/tracing-trace/src/main.rs b/crates/tracing-trace/src/main.rs similarity index 100% rename from tracing-trace/src/main.rs rename to crates/tracing-trace/src/main.rs diff --git a/tracing-trace/src/processor/firefox_profiler.rs b/crates/tracing-trace/src/processor/firefox_profiler.rs similarity index 100% rename from tracing-trace/src/processor/firefox_profiler.rs rename to crates/tracing-trace/src/processor/firefox_profiler.rs diff --git a/tracing-trace/src/processor/fmt.rs b/crates/tracing-trace/src/processor/fmt.rs similarity index 100% rename from tracing-trace/src/processor/fmt.rs rename to crates/tracing-trace/src/processor/fmt.rs diff --git a/tracing-trace/src/processor/mod.rs b/crates/tracing-trace/src/processor/mod.rs similarity index 100% rename from tracing-trace/src/processor/mod.rs rename to crates/tracing-trace/src/processor/mod.rs diff --git a/tracing-trace/src/processor/span_stats.rs b/crates/tracing-trace/src/processor/span_stats.rs similarity index 100% rename from tracing-trace/src/processor/span_stats.rs rename to crates/tracing-trace/src/processor/span_stats.rs diff --git a/xtask/Cargo.toml b/crates/xtask/Cargo.toml similarity index 
100% rename from xtask/Cargo.toml rename to crates/xtask/Cargo.toml diff --git a/xtask/src/bench/assets.rs b/crates/xtask/src/bench/assets.rs similarity index 100% rename from xtask/src/bench/assets.rs rename to crates/xtask/src/bench/assets.rs diff --git a/xtask/src/bench/client.rs b/crates/xtask/src/bench/client.rs similarity index 100% rename from xtask/src/bench/client.rs rename to crates/xtask/src/bench/client.rs diff --git a/xtask/src/bench/command.rs b/crates/xtask/src/bench/command.rs similarity index 100% rename from xtask/src/bench/command.rs rename to crates/xtask/src/bench/command.rs diff --git a/xtask/src/bench/dashboard.rs b/crates/xtask/src/bench/dashboard.rs similarity index 100% rename from xtask/src/bench/dashboard.rs rename to crates/xtask/src/bench/dashboard.rs diff --git a/xtask/src/bench/env_info.rs b/crates/xtask/src/bench/env_info.rs similarity index 100% rename from xtask/src/bench/env_info.rs rename to crates/xtask/src/bench/env_info.rs diff --git a/xtask/src/bench/meili_process.rs b/crates/xtask/src/bench/meili_process.rs similarity index 100% rename from xtask/src/bench/meili_process.rs rename to crates/xtask/src/bench/meili_process.rs diff --git a/xtask/src/bench/mod.rs b/crates/xtask/src/bench/mod.rs similarity index 100% rename from xtask/src/bench/mod.rs rename to crates/xtask/src/bench/mod.rs diff --git a/xtask/src/bench/workload.rs b/crates/xtask/src/bench/workload.rs similarity index 100% rename from xtask/src/bench/workload.rs rename to crates/xtask/src/bench/workload.rs diff --git a/xtask/src/lib.rs b/crates/xtask/src/lib.rs similarity index 100% rename from xtask/src/lib.rs rename to crates/xtask/src/lib.rs diff --git a/xtask/src/main.rs b/crates/xtask/src/main.rs similarity index 100% rename from xtask/src/main.rs rename to crates/xtask/src/main.rs From 3353bcd82d271c8601a6055515075315245c14f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Renault?= Date: Mon, 21 Oct 2024 08:21:56 +0200 Subject: [PATCH 089/111] Revert 
"Change the Meilisearch logo to the kawaii version" This reverts commit 13d1d78a2d9dcea4c9dc56f52d2f2ddf7b4cde44. --- README.md | 7 +++++-- assets/meilisearch-logo-kawaii.png | Bin 99880 -> 0 bytes 2 files changed, 5 insertions(+), 2 deletions(-) delete mode 100644 assets/meilisearch-logo-kawaii.png diff --git a/README.md b/README.md index 59d618ab2..4be92d439 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,9 @@

- - + + + + +

diff --git a/assets/meilisearch-logo-kawaii.png b/assets/meilisearch-logo-kawaii.png deleted file mode 100644 index 40dc0cb0a5020149cae6455c7fb74dcb8bd4e44a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 99880 zcmeFZ1y>wF(=ZA_LU37N3GNbPafje;!QFy8i_79}A-F^E;O_1cJh&4UclR%OUit3* z4foERK3!8&GBwlFRb5kOB9s-S(2zeO!@$6x$w-TTfq{WN``hr45dUJ70Hj2JJ6P}+ zDN&fp38I6)2f=1qGUf^jFbscTBp3u(Y#6wIB!71p*pDy>|AAp(WMK*a7ybfE_un+{ zU|_+E!{>E?Nrme5MX|%*J0GOw5=)>>U4b!3cQp{RQpJT#U&)>}>79 zd>(=n|DoXf3;#p2P>}tH#KlICLQ6rJOw7UAjEsvJ#0;VkLM9_46L9`&&i6%J^1s>t z9tl!dy0|#a5ap zflZyQ99^s&?8*MgH8yc@brGbX_^0T9xBt3M7c2AstH~bx-)8+akmcVQ7FK2u%m2#$ z%PQ~>%ctaQW%gI|Klwte0{@}>f8qXHj{wU*<^PY1`7cZVgZ*o&5V8Qv|K2tsWCDY& z_b@P@VPwQb)I4C1v)=n>sVCi^Wys_qNRwqGf_h9ff!NVqSY{w#@dC0^Y= zJZvp{Q(stXV5AdeEQ>+_01Moa$b-L$ih7Aw$(Tc5{JOk1vK}rk&1al?Ar???!?v4M z@FefuD){U9^R{j#0+?}?|phx@Hu5u7a7 z`lu&|f+2ddJJK;;*#G%#;B!M^A$@&l5$L8sZFpWsyLf)0f;$+*=@MK!IfscYicfjq zqh|1L@KiH_z5!=sKvZ9B(v}TAB%#5(-W-3*=z|UVwAG{j8}xDWLj5OH-k~N&{byznpnZtP^g52^w9Ix^dwK;&K+hwT1}1U)&04+nns>7G&q+8 z@E9hN*HNmpzj9%_crcQbbk=U6&pKi)hZ~Bh8rAzu6zs{%#5E6#7v0A4g|X0peu0U~dG^@DoEHyK%PJzQp9-{OL2)6l3cb$gs*!kA z4FE@iA=Fr15ODotdUlTX!z+Id177>Gvv1C+=p(EkFUQ>(X?lA68bT7M0lP*O#o0o~;$T}#-m;S>k^*-~Q6s%V))|HzwXCRHV&{fZ68 z!j4FU<&HcLu1XyY-!w@brw36{u}#J1-Xi6p6;Z*>z=tO$zrWxRypoVQY5icl4oH8Z z&a+=_&^5zN?#i*F_bUAl9|EPe@la(1}%N{<9>KYeif-N#{uli)?dIYM&nIZO&rbJJ?e- zM-~8O=(fqZI#4LMa7dDye68WG5t#DrP#<^a=bOv7lB!brXTu6EoC;>=)5a5zroQh2 z3u15si|?oFrHA%m7orbpSo1ZSxWF$s7y>@bXYmA{EFNbb`IJIoygh3~-gL(qPh4R_ z{2|p)?O=~xmQTV0Ps>0A+9rYJI|DM=%dbP&m>OF!+(}^-L`eIxa&mE3{zv>UNms{4 z6~Rp1-CoEi51O6HRGrbg2f5NaN7c5PWeuHq*n|?CLAyE|)uiP+T5zRa^Rt5Qmd(I``k{ZPflZp z7pYg`N+&8Qj zF<>hg$s2sClmnXAd7RiuolD6FvPi&sL@?Lz`4A-X zy?g$BJs@25e8(%d{p@YCqfW|iViTT?{m*SKA|Xb;tMFRK^+N$a+J03$v8i#;{O&jq zh`%L6QUTqM1Cg}JAw8WE^d@$Q0y#m!Yew?1B=bFq8phC!H*q8kL6Tm|$xOJJR#}tJ+{H= zj(37y(YzA)ES6(mMfu9tIV6Nc+%Xyq3^M|EV6qyGo40SCIJZ&5Syl1`#II6ek z#;7l*!!Y}X%fSmgrOE8_wei#7pZc 
zTJN1bJh&ErBvYf?X#U)C`5R^|5kW5xaAsS$%)<&PEF+SNs7qMFM47h&lQn;XjthDU(DMe>)}f z%?kYKJh?n9jzFPCkkx%%3#Uv-nGx$2?0IjwTMLE^@UrnOb*aL;Z@%>HA$jVFwYoFS=TSIB=tamNpR;~y ziu)E_YED_-19iDJE|Z5R*mR1T)|n)_dL7ryO>R@7R_jI{3FZ8v-Bbq2PYbK}>f0LK z%$P4tVH^5DJ3~1Ps`QM7tcmZ+Yf&MhkG%);;@X@(hWl)aIwK{@~TFqN^_SJ=SI{-HKspjwsreN2V#}yEo40h*KOTX!o%s8cPhk> z9SgmW-ZF(Jr(|XmJ15pHDWspq^eg1LLWvh&^6t;?(D75tp?YPj0oP6Mzy79Wf{ve$ zvH|@~ldMVoX7y8?|6nD$ulstGneNf2`bRM1=RDu563hu9c;mzXNamlk5ukPkZ!g}3Ae6nYX!03Xz#7P~?CPPc_1L{*ll#{h@M<#Q`XpI0@ zejUNF%Vgl9E$W}xn9Wy?-s3YYb5O|?3mYoJok{BKbn4*7B#g^F3B50YYJ8flTcvn- zzRAQi!TUk*Z#eR2lp&$iSD)mo>N^G>+?l`&K(F@aE`LYg2=6SPddz)56E~_I-2o3@ z+8B~yF}dbB>Xs)LcRDj;++T0dyS%5Vyn)|0(H3XMq4>DZY$eDX@x5+rLk*u%lR2zr zge$pD2e|e`lv3E-F2!C{^xVj~U@X(z?0lem${@8xQQ|IUk<+|6nozx{7l*BBe4xpe zpzHE9AZz*60-7i6BaSr0zKs{jk(F(`vhjF*<`*pley^&1J&hIJ+s3GQr1gD?`?P6l z&c%ASYvcJzuh>pE_asN>uxLd%hb;^njvs({iHPz03gX2mdZGazvn`z$yaPl%RFVAV zKt$Z@t@Ig0@m%|PExCcQ$n&lj^YwHd6Y*P$=&eNXY1FV1LY;+uK5d&pvlwQyn2}Z= zK?MdBSgVT!$6AN6z050_1-;(Z@5$T}C4!O|uUETTVeb!~I49Z!O~;cxxYm#4wgoqs zDH~mpGQCUg;T_t)vC;01SCq-F>lMKq+}p%bJd)PW+i>55mH=Gp{eE8dfnVb>cj@$~ zu0@9ltJEgDoROKr)Dh9jI}{w8E#Zv@smI001zfd|{5(-z!n(qg3wEx*KD{vXBP1=; z(vRI?Z3=4#b0c0m;44_1Ix6vKPHRq0p#Cp-a!VQ4!`r>ySN9YI2!2ZVh6EDIlAKj~@q+-T3$ocXftNQFw z@G2B(af_!1EsPuu3Ji7B0G$o6!sK=Ehmo`~;AS8rU!L{uL)uVTjAq=fnd!_EKCEK7 zIrGi8Ex&_dHUa4Ur~>PMQhA3{tp;WGOx(Ep3?A6!M>B~6E;WL0SX6NyV!_S6{ zGuBzewN43(@~76q&JQi0YE7+K z)7E{mIEvQwdTVW|5e@87nYUSem;MS7$#yo(c{*WQvSX z)ov!yyY{7+0FM^Kod|~Zioo6Bn9P2}R~|^xA)IpB`O4h(?%&d~&-kw<@i`@-6$TeM z0wIO`pcV(gR3gZXUY-L1Is4C<{!SP~!PIArji_oKQiV+cY@g!@?5*Wh)cF@r_*Y@-qEFH0i}*Z=r=b3B!t5C++~)Mp*cG~f94xif?ZdXx6wh04Wk zSBWk@N%{`L6xr}~3d4n%U|v6z>S}$Tr|-sHogwOf>WzzF{iTlE0L=6f224*s$>}Cn z_jkJue#QfOsFQ}ZcZQ%d;|B!IL2|y_ngie#AZ$3}lP0SqGs9bllLMEKyLn9JEW*Em14*Z)liCg4 z_>yajLpD*bu@Sg?jcP^4k+QF<(!Pvrl$2~+Kuq&yU zi$ZU(1x_cVv`Nrrn@I=j-)$8#7*$b9XX0dse-;S9 zx+$Y|^ZbK{K;JVTG+yq7eR(9Y7@d)MhLU&%Uf67r^9`Y1?Q#$YGEAB zhHF1fz{K>9>qn%$R$|&qrYeccC`(0d?Hv2!tXOM^uAuDIu+PW2QH5H^7|KkQj(5ID 
zJ#A-lCAis30*gifF9ak3l0u$ZAJ-(8gMkHMlHU%-+!<=_m`!A;hSsLKWgjCC{UO*4 z@abC$QshTlb7MLa-dOvf;QKPkE6)U08Cb3kljZdVF^C^d=PW}FhJs)R%MRB#5r4yB>AfI2#`m%EExOD{i_%&S)A{~sdMGt)Q%v|0lYy1d4L-j}n5nc~5%sqSmAu4?6zq(LnLs^t8c#{d z^U(|6Tn7wl$w3m723prTK*4_PG1GW8`Y9QXzX#J-g~~^=0v?EBBvGlQdKD(}QJd9) zC7n;qLQtnat%shP5Vm8E%_zns@gOswaTt9@d|6Z(}p@XIOB1&-gRVGIqX%n`HCG?mhbun>mQEX59cfKhmL3 z2OA9d#NyT5wm6(i)OIan5z+K~{u`5faU))q@E~tG!mW7M=X$wgu=8F{|mR z5-D1X!WzB0sW)(KzcO4I;%`O6N!1kR6Ajb-(SFbV?q#)9A8dE?!D8^7UoxJwO)xkg zk*bKwbaBq0XwxftdYs(*@<#E}nOUG%H^;EEDN$Q9VqrDikg~^iN&#W7RiW<@AK_$h z8-J{Ny1JnodbMvop0?RHDY;h>_c6>#AOky2){HUBmFmTwg>TzlVC4h( z=c-DcFGf6L`1L=3OMu9PaDb7;o$?44d+Y5c@@S_LRV)8`hb4IdF zOO8cf^q!GzrRR;-NVpfl1b9b9vRp|pzkJdre-w@&O*7vm`iOtb%CjwKAMieVVB>uy znv+^`%FmI;r`+H5UH74*g$^$pU;N#}`hR(6CRVNwe9!}Gckqnyld z;q78N7WdEctU?$or9CLODeeIba%B1~IJ-WA+@QkryuogojnB`mOIpLAyf#DuUWH0i zQ;JUF^Jm=S&fCWJN>W4Dh0Zo-uLb{i7#fvc|HRt zFLn9@OPsR3gz~o>vZ9sEF~9ReWMfM77NY=(IW(mqNJIMcv(WT+Q3ow{2l7z3uB(rd z&ri0g7U(wG*P9xjk6^9=<2JY#8eqm^qEDWU&xQ3NWrj=*@@%{ddK>+183qf@io7q` z6IesrjhNElf^i+hquQDupR0y+$#vSt&-^6no(1#kUo0;t9;8FRoY(=Q8PqyT~o2eZKQP&IhV^UhPjXpUR*Md(61;Bo^u$^*37KW#x zmr0uqE;5&@ppSb;uDc8<*M9JDPKuA;=gsGD8bg+m_>qrpUr#fh8KxZHwLdBYhmev> z9pzZ;EIyx16ocDly zI1!N?PqrXH4$7w;16BJWnX-q>SQXWt@l&=>SI-_YOUSCGW#(@pra_==pK+G)P*; z8oVE1xM}T8Lh#x(Axk^47R|73C8BEwopbPQ=^lJ5-hAN|fRv8LyFA}ZTr=dkBTrLR zej@PuR;@-9_mOu&k>2uIA?tUneK9q-w@t;uS=uyO3q?ITExAk}L|Ir?;We=z$r_fM z$M<2G$`O&gpwVScUtVpnK3e6bNeRW6@xfxYR<;*wA^03I5nk?uPJgyjo`Zd+((B8D z)`rRG$kEy_4IwRdUpHFig}9_sXD#9MOBBhGE-%KD`bPf9KXF*HrYc6ue>F6{fTc4FEynVRu^ElUcQGl8-EU94eE>ry{tN<;!@|oi zq)RNrJOuM78N*^Y|abA}!+wfAwMAqXI^h5*o|6*?zed&bgX- zc766s57@3)@aDgFl+jO40+a-+#x<~in4(c(9$G-}WCxVC5z4CLk1RPqI4fOlkm11N9BTCP?X)Vqt;UVYXllTr_l?*&emMDZG|7$p@19| z*1!bQ4DIao7smi5*l4ZtI)7Bsti-;!baB1! 
zlKmenoH}SH6&|fpj|ZZA*Q=58d_9d>6H1&D?|NpRaCz*c&z+0c&1x`OwLxAr%+77@cDG-(RZE&&MhIv`lzHCo2^r z-?e@bQANfP9cuMm4Dulp{pd4G6K{}9UZH?|$Q2QeEpRTmX3^HPVQ5S9N>~CGUVx)MJgLe{Wsr z=gmI-WgE1o-fGO0&Ls+KI4;lSl3}LmfLGIwNNQT8a7QO7^Xlj=(%G}y@fHjCeSfqT zmwaKX{==W3LJP9%WyL~b#^BL_-zboF(pVdefg~m-CX*6P5&Cv-b#Qwv3XZ~`S!S!s zdPSQu$o|4hpX#J9deH?r(STc61X;BDG$mmT|D~N324ALpf9!$HufEuQ0J@Ws9mi8o z>getEA`aqj4A`GdR?%WZ1{NNv-N+4Iu?R#EE9p`9Dmy_#+^=rGBx6D17?j2gfyR+9 z!W(Vn*1W{)g~nO4P}afZw_z;~UzR{DWZsPqf(M>YWV3AJ$uKHA=D@LT36g0C#w@zT zk-I}}q-}oVc92wB9_4X(s_45Kt>>>O!TA+pmc<#Gz5WEd&ujFl{iRao*&!uaH`g|5 zrlg9D#M~Z1-oQOWRnyFDU&fu&q+u9%XRfox)5X-8)Ao(}+Z=BNyzB3Lc>Z zrgu^G9GY;P#`6*T?N;^g=u)p0!(^tS1D%-(jv&a!q$ej}(^k%J)j_)>W+ z5Nh>w(HkvqTTq!9B3E|#z!k{?e|pzj6p|I23i?7drwjx$e+iJ7*^KKvd}pT+bB7VZ zpX|5~d#96zsl(G`Faw~$s0!vyC^P<9bSnFPQk584RvK{V&_xthQ#2i&NK3`e9muCf ztMXeVuf7azV(J^fzI@v<}~djDV&3|0U_?>gB@rJA~Y!*APo zKj^t@%;xJI#TXXt)Z(;f*V|yX&jfQ+IhTKn8HInbXA58!9elkyk>RaAhL7;|V3}$6 zl0fpS{Z?93qi;4&@P`kTDvolKe&Ne)PyYzAhx*>)`3q5dQ2Y9&%Vc`qqApuS_b z-qoY_7UiXE1KrmRLJMtApBe;AXz{Y)A(VXjf=8jYpR+{<95@=B5_!-5Q^N97x!HY8 z_k>-s9Xu6bTFeB?Y1FaP;{IIP&CLA4VJ9{D>M(0UOJh6|N=hNpVA91kJT%LgQUqMB zz?Y%HYDY3Zo&ly~a0k?cS4o3S*1rLMp<9m^99+2M8G@Tgbwte+@CIjQA$ijC zV8Uu4&TG)npO_Coi0SiSLzf#Z;EAlXKFk(ES8yhUzk^{!n_J*QBwrdo(U$rpnQtjk zmv4!XYB%TypTxpM6|1~vu0~#Zxht(9;Rqk-2JdS_CF;c!7Hi6Pr$#PP##MHoex zeZ&~T9mbQ$8|Uw9!3$`t>`j$=YmIw_Lh1O_6gSfj7SF(*F_a!%A3y3VWc|&_9g_s# zd0#(KI8IiNU+;=ZURW7e?%*^5Qhcdlg%W)r3Tfzsv+9GX9w)S)ik^B+A&sf^BYnQv z{ouj_OED{YM#%%Fg}+~miDBWgP$_&!DqOvEGI$#*|5U8jX|M|k8sW9Y_>wS;lPtIz z8-}jbVfa2FI z9!>w|s_R0~91d-_&-LKrs!V2?dAQ;+r?WXor=_X57%~X$xN<_J;F7Z3FJVG%gp^55+q6iRoOORhG)h>3g)6|Ui z^_s@zM#iPtQQE!(^?iJ5vO%<&VtwfE^T9en-uIZ-@zo-0%gc4p4 z2cDGPHtu>+eFE%+>Xn9N!2pUN%Lt`yC7Xp^M9o>>8SMEL8B{Rw1itLARSB-}#tM>V zgZi28T$-iXe!H}yg%4xX#^x0?&h9o8wESfs8#085q7h%R#UAGVSOU4S*n_f0s^MdV z)3kT*GQg+u`Ix1toiz8F3cYPUMA01Ybpvi( zg8wWe!aHro$$Qg{Pr45`GKHoJ9UR!wx@_~T6GN=0zrVb)j_$ z3s`7hb~?!FvXSZx0$lr5bojZpA*dVkLhh&zXG+?Kp=iAAx72H#}ZB9TN1XtCn 
zGaSjPUU^JOhfPpXDZkonFF^v1E9Ugl-&rI!jpQO^c(C^y2SJES&Y!CuT_+kkP<)=s zeS}zXBGGR?v~A7hQ1Wos`O*A+a%6#1Tn`-PufCWd6b#G+Zq&YB!4Z@dhUWbfjd zRn1jgTvBgb&c2VyfKN5aTuUlF;K(ru6J5I|=HAUy;Za$($hWJBmLu}Sp%!zg7YxCG zZ&LB~B$DcRISOrl?enAxa=YR>nGFDA`Fn)u%^XSA#UK}Eo=D$$4D>C(l+WCVl>v0P z!Wfve$PU>H$^>G6Y;d)@?(9@Kuu73DZY014cY*MamXUqoVX8U*J zk+Ng_2Ura*I;v)-I;t&h>)#v}Kk{}gadkOCy!jNZ(h;A+NO!*7hBoh7wsm0iFfDqf zOkZ%U`9ja@qv_&m`4Jcd$0kzv7aQrtVlMmJgcWwRe*>&zyGJqj0pek%{)m z@+-4STBB;LCW&!*hdfu<1g4CSxB2(XkfBz(madB!XuI(iI9dJcM%_I44;8t}Yod!r z06V@VW5$M0*eI4UUC&eGEX`gE%AU?tFLtY5=%&WFcg7x;hix`4sdID0QqxrNzt%=$D&63U^VzwR$RNgoimvR%ZF`P)ODgrFS^4|6hZ@rSkrTby zbQ$niZ-CyqbACpqGf_{$1RutzWNL((q9duiG$n&EYyX4dHlAzqGC;F8fNBy{z)v&u*FJ>nk=*0hA$C6sBiz)&40p*^|W8D=*>BTRYlFj|J-FpeV{8e4HyYa>!k%lHX zOc$3+mSX@#8R##2u7+o%h-iHo_0B-|u)bk*BwV1lZg+g)3@1L`2Mme{%n%Imazm~6 zrzBc9W2F|?*lL*2>rWLBP3j;3sJOmdlQCm^U30)6<4A`bkdGiz zlyI>l9l4;3Fs~$)hs#+eTBFFj^(?<9l-B6FfoCM>f|@C}uH=XsW;IVa-#+fQZLkS} z6&=lJ{u&MTxAgc$atB}c8+8ry`!|}xc?}^Gv2Pl&Bt(NAgg?*Pok8z{5i82OxLO~t zEdhVXdrow?9f4xQtzw2@E90&jl9RiCMk-2ICaowO_z=SFjL?Xd_yRSg+g7JRp7xqy z6SlRwlIk3{)gGyJWPO{@&RkU(^cS-ULZ$6K!4~c5gVj@G;2>P56 zn8|AuJ~BRHGu%;!;}T$cjDR`dnGAUU0GzWj2T z;|Fz3+`_@L#%N%>Za6l5v9-%P%_<#QAbgVXhNWy~X?F(x9IoIgiZSa~SWbelQm1?s zRFK*Y^4>a&`(V7BnE3TH_(sPmAJx?N;lkDqOdHKP`P%_db35J+zvGI(y4T5FF2Cpc z9`x}W?UO(jpWoGs(+-f6>wGog zsc0y@qcbkQH@i@i{pecy0u`aHTpaG@PZ;a(D78g{2&3eD=cO~FLxe+m0^nt}AZb|4 zMwhV{hzfivV_?-G%--xUt7SxXbYfJF<_W21--6$(#Jz0E(M6mE$F$P8hqwy@L&=Qq zPfN3Ap8nM1G3>0yRL)!$+j7sfHSSjYZtmoI<33#=gl$^K3AtW4NXi=n71VJYtJX@4h~6{k|eKtK?yz?GK!Vz^&ytP9ZUUk z@UYOHd8La+uMo18#lssC=L%&jE=K6)tVt8O$+X^Sg5nl48(F5a55!M8lkg^c@+{E9 zUB5KGJNmOf>e??DV24MXhGet!CF4Z=E00~SVb<4AHCsw@ng=><*yWvs1NWYfwm9gf z0}a#U(PoiWT9$_kIj^7LzZ|##6Cfq`eAIS-5TJVeCZ-%xPim9RXWv&dqv8t9f%+{0Ut2u{w!(6>>EA55sM&OhMi5pE^ML+-gNG2}-u2h1#*p zw8!H>LtL&Aqhh|zY?4sz`8=!xyxBsHqKJc2f7;k)HnmcZg}^clB39Fw6*GM#v$K)98$15v|f0m{YSsRC8h~tOh$)`7ynM(N$Blu|TCM#^6wfOq;4@6;M zqbObxr??I00;JTV$NM)Yz!cZ7KR0~|<(o~{5_~WgsY@afvW;hlTP;bFwyp`Grs<; 
zF%Lz}0H+u+C*aV{iZWid*;feX!rQ>Sf7dTLs+sWCLS1ILQGM|a)q7s=Z%VzT(q=SH zR0!V{tsH1Gg!w~bB`q2R;xAOzPs!LBoxTS>kjEyuKo+c7%wv~M0K|f=B+s6F?WjHR zup?3ebh%&Iy!+rYDT%_EY2c=oBw1E6?U|K)wJ)C@_XeKEl^dx3=J}A`1WAj@FTY1P zFlZTE6yR4fhcNKGSU%s3s(*^!bz4`R-AzVTcd~($^%F{6b2VbR;^N(f))B2>%PY3q z*|uaT`a4lcX^q)tGGr8V0+O|e?)o*IP{Dp&fSwTTHam*@l@uZVG=Ci97Om`aoC7%1 ziI=9??+CA#iV#-TnR$*JHz1g3&FpZ3gO0>jV}%R(;A02r-$QEB|rVY5t8ElGN`*9@N%3n-9esgdb+ z0*LWaLlkDQ8=W4nCLFEDUOs4T?fWlX@L?Cr28AhGZJWaLcZA?O7;`{`l{(d0jw@1z zS|olQFbdn;#~i!AdFPC}GaRKFzEQ~aC>qb=G5}lQ@5G`{g`8p9=!lJ(OYM_e_@%RG zwt7Gl+`_fk#R-LsyP`aNnm-4t=1-AC>(WI(KQ|9@uv_g&3ShelNqE5+zpefVt{MS_ z;Dm-bKJ!S5i_O-pP7l061D#^-@P~@A#xU@Vl*ln63PacJ69D-01Js<4!#=y5Rv(hg zShgd`u)c<#GTc$!IGu8)w5_0iY$rPy2Bmy29vjdLRK;jf6f?^9YSK-%#je0;Yq^_O zQ1vm;{&u916Pr(M<^6JZ?62;rbkqKc=F&tq^=Nq?^sXs#P1Nwd{KROfEm`07W+O4S z>p>db6hJ-CGOI`;fxjThzg&OT!sP|v8Oh(@2O+)!X5cLGL;QQ&vHG3k6+;(00o2Jw zZWCvKUQfuf>n(%b<7dFZ4-dgOA?KZT%+u}XrP9=$tU5e*8gw3|$`(GBz1mnEN9Yfv z4M_n_b-jjgkKnq>WCa5@TnSCNG~}KpMRCYl=VJ`&)sYgRnp7rP^x(wg2Uwpx&cZp z=g>J}e;;?)Nv}b}&yeC%!n$ZzCghXrL`OZIk7pl``sLNsQ`-_1Z5JI%&3@VKxXH>XU?K=^#fz?v;v1Wcg-!#m!6l{cS$uJS91QZEWC2V+CN2%y>HOn%v5Ceyb4`_ zFFL-=E7pmH;bXUed)Mz@aAM3TI-$>eui*zHgMLhP zyD3gS=i;fvcDHZlkt1pbNl|(Rs_gA&(K|ldw)>|IVoSE$T8EOW*`r2nasGI#$Gxl~ z<#iGEG>hoSW_wwG%WE#Ri%=gR$K8O+d*-mE?{x>mn?uTBiiLU17e4%?dDUgj`Jh`- z+yi9-qQ|q#oA5p!{9kocC28$H;QF02KG!$%4$S@bllh5qr)RVL+gM(p|JcEk#w6XWgJ`#Lj)a~kRdf#@aSx6(;eJ%q=$02Ni_|FVY9$crYkSEP z5{OOd=VyxIK`Y4`i)!Nva|X|-8#}hb zCTB_K$)ySsH#|qtnd~kPNt~D? 
zsAw(GS9xJpO%Ht7vp!^2WsA;|lPJcyh`}!N#tXVK_1^VpJflnE+AP{7e>SJ5Iip9q z=)+cJPNof6zOQl}8^1%#J(*JRs1gdk6j{FN2}w<&5f8YTs?!SjBfb3CmvE6toMV|^ z+zx*9X73#dq*<`qpi|jQC01J|3 z-(24#(|nJ_eB#uMqkL8izb?3w)4pv|r6mVS6+VwEG*l)m&XxBVt^EDj%rF$eMV&0ge{-I>$grYvl&p@ z=Sg!g#9~DorvqxeG|e^&A$DzoTxJm}f9w>9?X3CEr05_Ib-`WI#4sUPJ_^TJ?}Ff% zQI}_ouZldAlE^&8L%sdb(cx_8DG*sB%L#B#C&9kxkx$nZtTN-~`R9}%SwZF5d?~X-rT2XL2b0&Gmen!~C z$8_rRH^mlKnnl1nb3l&R;M~AKSV#Qx#L?qpk3nf^>zLTwgz3H$@VSHj&ttzKp7tNSy*OAJO8M}SCo!A7y_$$%-A4=^Iq{3b0{1}6N zwDf-GD^ihG9gIWLxp#Pda?#Tx5$H6OLP_vcmu5=j{RcixjOKP!y_wq*lID9l3tRel z+jFi9V6mHrS%QczlzOUNzK zrET2-Ph+Cv^%8d8TPSmsqBDll{^WQCyI+aRf?o=XkJrV^pSCOPMuChr&X!^WzT(Wb zgCMmycl5lC-QxIMV$3js>;MX_e_nlYhrm+S3Zo8R3^DT)7o%vX6dX+5R*jWSo zp-13fjS^G&PhtymMeV~IBqod6(R^8N`tncwyOa*`r1GRm@V1p;uBM^di}g;rNz7}d z`?8JP;Zv;MJw>@*JM_Ra5=R%JNq|qF-6P$@Y6B*MRoAt=&!Lkq=qB3B(~HROm#*=v zokvgD2X+jdB=;8lj>)qE#rYnQh|$>V6LUreag<@HqjjvQa1B-~{vcOv2|fl2odm^v zMhp?DQ>}fM;n{Y6CljQFNg1nAoRb8dfm1{AQ+dno@AXLbdLOx92w>;iZ#jEPs_7~UxaHW zAb77$z4toavv|#Dsw<}$J-4y%IV7m%J33n~iEqbZa}tlQHjpq%2$YrE-aBXKYY=h^ z@Tq-8(&kSA+R+heCkC*$f|#$0v+N_kPiJnRILS$bq|YsYEEp$eHE* zc|b3ase0*g0_&2f?sH=Wn||Ee4}Z$j*c@rWnUZ@MpAX(tp4=EL_RcLQ~zL-D`n z{W-8gxaH+QZ(gA2{{vn?p})`n6dKX2xiw&YWYCLY+HV<+A~`IjR!hOTeYd#@@V(<*miAye_Shzo^LDcF{~v{U~nC!#Qq8(h9>n z!1XW$r2gAhoN|h zKkIbo3Z@9Sb}%)MyrhXpQxw$v#i7cZJ_(J$FoJXbS_UqJhO9$U?wM9JWB6ddUH<|; z_;v;`moSa^PYUkz-3AAcFF$~t9Sg8zL{wK=S(bF}^fQ?Y`9ABR_l&~BswkYDr*<|u zYEcBwjbrE3*dg)ih@r_^2bBHj;3q@enybe!3i5lxcJ%Hbck)K>?dS@44>$E!Kp!?b zg>+K{O*f9(Gnf|tv5c8frr;0+C5$>1l;;j=JBa(`4GqHn?3^1^>KrXnM+ff=FEJj^+3pV7FQ8rwNq|$ zpR4#BJvNO9z0dEs?7Vhai?l0#w-hXrqta#2r6%e|bAp!F^7~Ekj~RLoiq0e0 zV!B?7FVnX?85-RiD3>GC@O%Wr|31j73Cb>y5N0Dd&bkBThWrh$aJHU_@y8(XM_#00 z^0}jK>v9!%{|v`ar8h8_`X2WiMzQc7olI3m>f6`!HC;RZtngNebNb=?kEHNUN9>1S z`>QF*88f%3p5{|{ZhY&$g!uMVg`7fgB=62TEEXe@=SdJ4upsP@1kU#vE0^C|CE~-Gh#lF{OanHl{U8{{w;O>)}3ko-8pf; z5ptkLsLJ<3_Qmd52oj*P(OW58d;5|9UMu!?8;HJLQBQCyE#nLvK~UF z=X95T>uJ6CuKwDvmeD!tiz&||;N2xnA3)F@KM{e8@c!tvDE*X4o}-Xy)b7?vfurf% 
ztxfe**ZOrD?VJ?>zi<7Ks}p}RjVyqQWl{;aoN(7b8z)UkZ=5r}kg~lIes=t5g!k~q zBV6a&og$d=7<0zg6OMAIOsFjTwYN`A<(@Vgg%Hm5{{2JDO5R0Zpnh4Ywk|3K%Sg`= z{v7E3$HyfedeJQianj9U?MrhI@LxZv>q~yQ%@zFCSx2^2;PtkpNl`P7`4@plajM1D z5t`=@WYq8Uq&(g>){MUob+A6gsHUZF&&7wLU>)35uVUKu*LubO0-p4v`LqS`RTfQ` ze}L54ina2h)7EFB+Gg+Pfow9P#QPW2aGTEOu-=yNw<(!cRPJW7I^3vIS{pMpPuq^~&c> z;bp$eVT$$KSe0^KInHem@Hg&GmI_DVGN=Uf+K44{(cY1mjh*^yuS*Qpv~oHr&icPs zlJ1vhCKH&W-rfS@Peo6rqZ8ZtM}yuT>XP5BV1)7k1gh(Ox3oGkHs|~%LA!r*S{j8H z@nYG;wU)e3lb2r?a|BY)?22)Ex>guyn*Qc%(=uVU_3zpHkn7(Cm4t6|D{nIC63LTuDWhNdY)cxq>&+ z9oHhv2xr~*q%9mVD>O35m)ytyv(V8&`w*XNozyhx3t3QF*3bQxu$0C4SpcR`Dcqjm zd?a{0>i~o=dFOVq{CQ+q3}2691*ZNNEjLrnTHBe1qus#>zk8_rCk`Vo8r1#37bD2D zMA^pHkE+CJ3TZ}XHrqy?AYMOqEW54U&U^5a^Yyt(n|uGO2JJ=?=F1h z_r`3v%gtR*{G{ddYx_J9@LD;`=f-ddG43&JaD!+9kHry9!A(nH8h1klJBOOlLI~oWlR`Ll44NbYTMfM@;gK0$Boxw+0JbOe zRF<5t1Y|GqY(9B2nV}pzKAEGwOQ-zBlB9vtw_rH* z_5wKSc6-Dx9iQ-?;}?R&OUq#%zkRTG3T_K2{$xllTlvnz%qxz9`3&!3vauo!e(dy- z`AC{u2r?TL8JS#ei(*oiP0;Y+*Twv3hbGM}118IL>xKFw4dN`4)~og-7^nPN^^I#QOwg(bskw;rramIj5y#Yo@@8( z;ffbC7c&~1L7Va3uaa4o#RyHEpWcDsbEN0SBFjAU-jSPoBbcVbz-5|_)U?R`>mQg3 zLRe(MZ5W-UKJq{aKOo$fjzaiRM+XVR!Qt#>2&NkmGQ5*7nV$39w=Tjtlw&pmQp?Vv zguUpbBy`1zY3TL-PY8=Mi0gWC9!Ic$=^XZUCf$la`Z%_evh)&PR(UH*4{`0OHMZXRD6+ z+BTZ@j~-`PI0tj_AnW6z7+0AV(kna#Gj)a;}yQ5gV-bfVPQXBSl72yWgBsXL_9%wb-PY)8DN0 zYGpKTB&tF>n&C8esab0(_52J>$WP&^smHlg;#-%#H0N#*u{#s#V3$pddGt~&o6N$z ziqDE=&bTq0>3rg3OjR@4K;%9c4BCIujBZ%xl}?~D=FS!CQB08Q%(Nd^9Kzq;!&a%J z+nYw*b;XU0$N96%5dwchM|K_+baaa|<6B3W$ku?-&8NG}q-pUx6h^Oej(iE;wnboG zGRf7@*2cg*bGaNr{w)g4tR;9g)1I>s97~~1=d?wT!u$|MTZe+TBSteyACywhj$S#P zMs$+Pk?Uz0O*5YJ%i1}|H>}eZ_7#QMA7HrGtU~dmFBnftih|qxKX)g}5zn0e)q`5z z;f{5~2`DWa-g#h0+zKT7m|ypzD)C(uqzK8+PQ*t9VPtsI)!$*!>NF4TTyK&080XhZ z(=GCp0aXp z84dG0+F`*PL-bP+Ke4Y9$FucyutRY>L3(`(+`VQ50+M{klK;2qpYqj7^wC3xG5VyO z3SvJrau$eq+mQ&|KTy`k5R8=RN%9t7Qzv1mdl;NpV8RP320EVo^Zkrq5u^u>gk~Ov zW}ZlG;7HrLzwZFvK~qj6{QIpaJ1e1W@MKc%0e|l*+6BI+_0)CVUwmCsNZj1T<(S!$ zK=26G*D2EZw99|q!WZlgB5mp?_%8Ff^rY)I(^L*~`~+|Sy}Rf8e{uLhAC-l*evpyP 
zs9s_{T>k-QFqQk)m6o@DIa`Rk65KvlHA>h%iEp!1{kE>`scq#qLDD!j0)c|q2t*>3 zz{WsepPrhWCUS?d!EJ#b&Nr_1r|C#LCpwO7-XVaY^XBIEk;DU6oJ!Rd7_$%HF`nnotr zT)uoZTqa_*W;^!hZ%m4V8!Xr{Esxt3eso{TMQ6$3a~hs4n`vpb`|9naLkE=6^k%Sp z1Hs?#VIpTPMuQ)n>d_spwpCp=h8Ex3W3N+jL zP;lbm{^-xaShH}}Y0T-VddQAsF&}<%UFySm-xHptC*QmK*6jW|Jmy4L6SSg-54! zoVUFg0lEmG>74!`@S>13ZO_t?n*_ZYfwPEoy*H03^Z}O-3YN?8hmA>xg0M=Sf@Sqx zJbU4FtpY0Pmf60pppqxNjehwXgp3=q9EQ+QSPJcwO+l%Lwsp>6Qt}N=92#~0->+6+bMHTx2te1%55D!o~sN2)NKbtpm{Tz-Q(%qCCkC+=Nk zvj$u=HGc`q@bx&c-MO(XrE?Zra|WBe8`2yCF8MAA3l;a@rogo|o+93jESUR5 z*jQTvS)dwUB8f*`LgwL-N4=K8@XoQx%sk>}V2xiFH+LAH7t_{3!X*wfmeD~t&*5Gz zT4xD&T9W|`r<2&hGf0${w+NZAb1?IhQOu%a+(O4%^KkN@JO(Cm+!&npM$nNVN+H)H zr;(nf-^^IdZ-3(a^QkP^xB(_f=hi!0W_BPQOH-ZDD2?=n`BIp|5}2G$KbqijS@Zke zbhd0V9R2=76to(=s`ji6&(UTSDr z6byplZA+Q0D^2250mwrx#2p9SQ(y!%xhLl#oX-QdPQ3{ViSrQDBe5j>?VZpq>0COE zc!U#7`$_7RH%U=|?mG~i!mG4K=pFv9ad@d8g(?3oreY}Xgk6%-y9#Ai$bH5#OJgoK zRp2{L$u!5~SO>$a1i8D|eF!84xBcAudDh$Ak?2o&|5N&B7T&yy!rk(LhE4%)JgotK zkKXxsKixT;@(rSF_oBGA-%elWTHHDc9dr~Oiw$GloxAsA zEZO&hY#M?aGMU#G$@`iGY#v1!U1k?~n}q{%8>G9g-yFtZ&bNQ@wT$F%<%?|Op9Hug z@OGBbGL74D8SB3fiu2!bPB(t&sw=H+ctwl#a`01{l1ZQDL;0A;&b6Jt(s!YqCdkQzaRPjba1YcoLi7(wV0qE;kr(XEHakaJQt#$F1lFgm{--lZU}G0dIN@OslaH+K7?WFE3EZX}?yQ~cx|1+i>0XU}%ZSWqK( z2eq;kUt1r`ByoGKChY#f(;+-ZCa)jQTF{B9e8$Vn!|Ng#SNw=)O`!j9S_qe13r;Av z&MR*u9TkB_j5w$1TJFo{C+3T?A1eGul>QT5e2n15E7gkHe_;YFtP2 zi{%heTMML->9SE$ISZEwDR?stHVV73XQE2xuMn}WGWPBux2=7`eYBs2(Yw)xrcC?A z6C7@9|D?i^LramC0fn7$v$e=M*Xzh{5DfL)qj*P#J!IQmapLoF(BkV6G|opmsn3Q3!kYBzI(;*MGZYQh{B+7TmI1z^PLR56)HGHQQ#> zwS9X(;w^+fsE{5^`WH=uW>A(tdj|>%`N})Q@2*^vO{}jgU&NcrPMn|ic?}WLk_u8h zXdeZ#uk!M&%TNM-zXV)U79G6=iz!fqxHkKA%GLRhOC9&_M432}YtxrsnWpg+%&ru1 zYEI!j7@X=4JR5<2=M(TVo>>O#;537C;IG+-dV>e&z5Upa%L(NVZt8Q<5gfNL5(mFJ z5Sx#5ssIw%3Z#0KHUdmByZ7;G$i%MSpHlAisoE@21n$AX=>*6 z!#DP2g@b83dKM>+YVU=wyzhVD>-0GVp}VE&%(sI3EWA4jhB&s1yNczJx3ugW{Vd9Q z)y<5ONnZhK4Y9 zIa5VkyK#};-Xs^pX}LHWp?&sYY1!Ft=Ta8xcm7c|`XS(nlW?wshkpBE(gOD?a=Bcd 
z&CjAa=y69k>24%K-{}^?bH4Uo9BFB|Yf%XQGao(+?Tn-D3TW$TnwC#|D;wNoW&~~F z+_Zw*cOG$y0X#Z&awGM-mg60i^^60c=bEA*?z*E%2Z$;tw)qE-O}pvEz7f+`fP3*q ze!hhEW(~I+P=ekGU0a_*h8Il#2mJ*v(7AMazHY607M9^Rz|cSppTfL-eHnO9-Dx{;0oV-PYf z(jE0BA3MS$D@Y$4yR;+EgXJ(=a<(Ltva^D2vSWaTw>lPbOyJ=pfd0OEM_L zF%2DN?xOt8`*&;MvEg41sO2NWQ&2qt9>fR5+#-;9$4{z~j%mh(x~Yq%a6fRPx3vN# z=V^cNIv%y_C_H>n2liQ$cch-_qb|rsZ!?`KkbVNqI)$L6%iRza(mHQiCVfb>pedB@ zTZ|(Y{oOgz$Dk9<*?w0*B}4(M;Ge;}wmX>?dfoc%Y+9Q9o!fT`K;fswLFGYnZ~S=6 z!X#j&?~AWa)(2@&Jn$dZ5!b%fM&APevyz-vD@pRsnZ$ z`{sjbB=2TM3PcA0u6uU+Dhn}jYd*J7zV{PP5YGyn2?*s3&LJe$?wA+HcArlpDfk6TJ9#O2;psJwV%-hU>W-@e%p;Y{I^A6a$e7tka$m z!c9*kr$zfm<~!oTftAj1!hb!S22&T(w^cgCbu1%4na)zk^y>h|G_=CK_aIiy+>4xu ze3Y*>QeJ9Xz2IjCLr^0*~}Ru@F3kkb)wwa9cj2FtseyW;k;y4%S>lW zGZzOs`8WAkjAK@VwVobCiYIX~-CCztDNNj=Rb3U7pwD;e^JA>GwO z6SJdBEfHG^IZsXOwwK=^*j&gsaNb!bE5Woh=g(M-fJT_i^`O062BsNZrNO#NhbkT_ zWLlxb8?O^RlimX2IC54wHBFT@S4doPOR^~FnXM^U9`q=9``sBKTGXxwf2O~b_kOug z#oxLLKG#$Gg9l?JpntiJ{ns;-@G8;~!6N8C@Xq6EH?`Bx5E7=Rr9?jyg;mRu@HO)N z?rfA7+Qxe19mQr&qdlpEko&N7D?M8$zit1krQm@~GmX2(T~HC$bN}0S!M7-jIFnH* zJSULm=@XbbyPIWeTrVM<(-H63k1`Qf$b94dDEA7^|DrD*j8GQWMLMn465q}|Ti2mVE#mcz|z{_qIfzM>eIw$lsZ$NWt< zEB5*-X!>`G(1WJ-Pn;lg8~56)kN!PseYN8K>S zrah4X>7eJwT%zzMT0Py#SbQcv>)&l!E-94hNH0BsxK$WAO3@jSs%|lupLG8Oqd1|b z^2-$Up>Qp6w0fl|oAqni#phEBRr+Qe>F~^QY%W+%=aPa+x_`-On)5^!nA$C6_LJrAC8CPSrboR)aoIami3J^O5NX! 
zF!e()PSFh~kF?Axg)@8>#406D4HV00TBZZ~Qy=z04XlT4ph^E>+U6tsGw3)X(WULB zLVp_1T&j50%n&Y}m#iys$28Ndr6uiKUU}bK+T{J$F&93DFASn+)qkC_GjB5o_9V`J zbUvMe?H3(*DK$gfZ7&_q+uwzOPg20>xn$^V3N=dDrfF_*V)4icb%u-&Q5bnlR$8hdD~_Zgy-HYR68s3GF*s4iP?EW z_Z6N+C%)+F5dLmj8rg14fHz{i;@5OtLgIJ5O?PXQBRi^}*8KRPU4q+k6+64AiqANo z!brd`6a9Q40vBdfqyfWiE?5`Mie%pqq)iX=f4N{Vw5?(q@xK-M8r9v!x& zaA@CB06KE@I}L*Kvnn=4$mu)Huova`-X*v0i|%e@P6;Ty-*f=eBm3|iOV(lzJ{54a3BD$vC1V8_q&&3T14R^5Rlx_y9>_6|sM1Jl}Yv)=;~ z&E=8k+S}5Xs_Kgv8;8c@KSDm|aFLO5V*{(n$=o(JlF4!Zub;)GMC`{S+;HO~zeBq| znbGy&uFhGHqm1tjp4T4CUXeyWc91Ov+tQB(oI0#)lp-C-EHZd+9zM!tnoPN$f;s9W zv@xcT(`j^;vFMbw0wyLDvP}>8HSd=Tay>iOdT&L>rlnx%?$e+qo$KpVinQUEon*dS z0_JCX>b#`~^wvVgg(F^_u7>eI0Vi!Ziq^@@QMCRoTI2l6-yb z58?YqGBhfDDehIBUO$R^5ac8q*iE}Q(_0UmW9^QI@XD@K+-?p+GLN2RHi+J$9*cW3Ueyor0 z{n3o=#ws_?Nnbph0S&1Z@reqO5e;X+ zbeZz6CFm62r)U>}NgTa@@-`fLQG~Se<~qm3`Ke_W zCP6PQ!%f+;Xbe6D=BnpiwtMA==RD)bw6Lhx3Xvmk{khi&@6M|93qN3vJAE74#qaeX z0@XfsR&w4*CyWh{N*EhZjb6bmQI*2G9+TbQON25hnR5lEcJcM>UI)=Tnvl2~b<#$u z*#_eMVN7p&)f;z9!94v4!dVA)b+TLzleNs|=UH$>_b`m@=d+WRolSG;?dJcQhvv_f zF#4EZ&ZwBwR@ASU%;sFXl*azBHCfv()@=m>NDzuS$Zt<@pN#{ld8M22X<1gQkO!PM*tTnHinIqQ|Vg3j08NxWso=&kiGFLchVs{ys z8w%y6N}5l9dS){5e)UEK9`76&SZ+sO4=!U4dlged~S+y$YXyy_*`&d*$D3f!~A3mWm zU(9Knrl9}AKTJ*K*TdY=Y7ykC{Kr0JyNjFRv)G>r;K*K@sprEvt(YG_hSxX79p&7$ z?Eb|lI}33HMG09z+0EYROdWbvqy+1!bD0AQnj~%#<31H9j82M%%7b9qnIEJlUB8*0 za~PfR4%~<7j)zd9ICT6IZ{pDW%vye7S!|MrSJZ?z*R^8B#PhXu*SYG0%sNP@(lj(Q z_nSnxyTjSjo5J*MZ#uVVe{_f+HE&@fmh-RtAcFf$oYSE427=p?g4@_B)j;iFjleJ> zcA&z$))0~Z<#B8{&0pn|4)#F%GWIdxZ?aU;$ zCZCuNKjtS6y93)taCwV+P;V)2O~Y(d)Rx0QReBK(W3(lrhC> z{A*nMrp}{V3%Q&wUvw0w3D`}J9_Q?cst@(rA3XiVDCh?v<&q@VYde}2Z#rh_46a&?6-hjY&qnq!|LpbwHrA>h&9#xLt zJcjw_!x8G#Ew@#S-@}ii(F~d1Y@2-TJImvb&*R}e5Q`Oms1x{B=@GiiTO68}?^Q+= z;2t`G`V~XJF1CW}=vb@>05om8Z^lp*t5LL@BVgO1mThc5%ar*+iq-fxhq2B5{{HP+YC?5&$dA}urHZ4K z`)fpFpyx*EZtR(Oa=rD|LZ(L*%Oeqva~SbX8y#&fQha zWn(&yK{9cPsE$W2MRJ50x@n)$Vi3AvoSQ?`7u8&9v+(PGpHO2D#^yF2h;a?E^qz&p6-n!J}ra0w)yE@_muw;ZFn7u 
zlv5`vv#zCgjru_&Q)>0)Fuu*j@*n&!laEiH!J<^Yxcks+HD zI#wMVnwp1&j@W9nSP$S^=btuaau!Ib1c zG51;tD0Pz;d~v$P#dC zSl48`hlJJ=cOSFfGX0(`i3CXkj|6OY1$F(LykSh=pL&6opT{#n%+qw@nX=b|EI9UC z>aN0}RU$jBxxU*iU-d0;YRxuu{@vqqckm86eEX4%T1mG%0d9V|3|@;P%jPq|vKc=^ zrs*8U_w{wmVe>i1G0~iEe&;#YKZkSM=N6Q&Q|IEW#_=S4>-GEK2XI)Lel?tNijZ&T ztWMK4*0)NhXhsumZ@a59{p|LIok?h7Uo zHD|bXyC>L#4kS~u(M#BJ4A?@>Ik~CKH_%S~@{d}6BZ98a#l-?dZPx<=mdzVMUNaJhq>ww~AvLCed{z(B++ZFI$bEF+tih<9Vr?F6|! zHWqec@9Wz!cTg1x_mh550rvmNdlLZ5j_NSGUhm6H&%W;?jYj)wfe-?T9gH!=7C2a7 zLku{!6XP|%?U*nmCQevl8=QbK*fBO>TQ&xp-5`w+AR&a%g7%SSG@58M|efQqFRi{p^r|Q(HQ#PWkN=<~W?(d?b8b#6Sg^bPx-KHyD5#c0$ zQm3*QuI=`qg%IQbrUH^nB3ZH{TaEPN3X?rK+#L1{GI(ZrWYs0nLj{&oNghNT{KZ2Z zVZq6^FvK9bi_U#MjC*nOP*}#bqzI&saE-6`48FmYnq_>Didr>U4;S z0*{R7z28KW8OFVucCX@^W{KS*xSwyUVAAWUBP$V;F6CN4p1Hp4rSwA3lGoRFW{g(G zY&}w}?e?!1p2v*rhvtCPph)u0}(A9vbD;YwiAFams|Ey z`0b(+DbCQOUju93sZNL~m7&kYXtIv5!ck9+z(_==FpoHaCq19~I+}(?qMA0pG6~P+ z{T!9nqXttYt`&ieJ|(1&|BC+Pkl# z6C>@?Gb-IVLEiGmiIGpE516+K@G1IiR-I=$O{@>bdJrbPd#E*RgHb;{*b;U!(>}~M zdluMAu>V~+i>89onmb*ZsRulMHtkX?Jqk$9?@C>4Oazp^)ghtoT&&SOxiCv4RLqoGC+qo!1Dml z$<6*^kat7+EQrrUHsu{#6JM<}9Q?{amy3H#{;ipN*6|wB=LGpvBq@9LF}`w7{?a{# zIxTlQHx+rl!OZ2SchHH!G{-QrLd?!8%z#8};Zxm@W~MW0*D)2t%Rt3vf>S5w7Lse| z7=C~s%~w`-!gWWMdla{IOmX64bkZMuUY1tt(0Gl$(RilfKNUFI%}uj;frHyqWV7nl zuoEUNv)(q;9QMLUQALYIAtO~^%bBw%VqGO!!yL@9e_Ma($Mk8(3~DBBDSL}#w!w^A z7f^#!CyhV;%yK4{nqGyew6d=}c>Ys!P+!tF!hD)b^+JtkBnDNQ1% z7g9Klq8xubD<9XL-JW=_j17kF`lUyQT92HK|Cd+5=r@P(tG8$M*SMUOI}4SWS*K;( zhrr_*-${(B8B_a`{|*EH{mV1PU1UBo!(=A3l79WAKFEws^~mSqU`g6#{%WktDED$X zaNWV(qcnFh;8D~qV#~{tEYhChtE8)uck@9FZ7+}Q zeAPu5ry(ysLB7Z31k(sYFBzH!eT2_F1!Lv9b{-Ne`V0-5h(pQ4M0Alov0}u#&wv9M zPl5DAvb(m2@Xb9q66w;8tPW+&l%)*%iu1^%FKy!-Z~CjZtViJ%n$vprczsulsPJRi zdC9odF#q89!Ce@%B(LT0@%Xs+p(4fU>$KfT*%^czhfk0rgJE6% zxR=qsBWyKoi<~7jz-w2~KpE*fxR?y{ws7o3%}bf~lgws34e2jlk}VMw2B6$kzUFob&2s z-lJ=gKC?n?XXf2Cw<(-t2HT9N)oiH-Iv3**&&v)J~`M?njR;!!Wfp$CQ94qT=bxLEi6mB* znPG)uS}i~0(D>5_@5su+cWET%&_<)_Kl-WCr+((2vRU^VApG>fnPuJ^^MsX3yEC~T 
z8Bw!#6*9rRvVmEkwG3wHU8d$bm<^bEAy|I6QU)3GE5ncv!Kf7}?ub`Q*foTh5qFui zxvzgOnhs(Cw(${l#KF5AlDj3OJKlV(0kMermhRGWk~oeNQl{$w!1#1empCmXnY@gB zCf973`COR!Toi)lv{P(QuZzqA{6wF%jw6{g{jH&*G{GUiDRaV&QVP zkt%L6T)fhm24>${4()tk}ail84sIuqlL4*8GuBYMgG=`0N{?>SRUd!y4V zdONddw_RxmL4`IOx{Nt#b!!_zg*(AG#pT_);o5tA;U=VbH3$3t7m=>TnY&^97HQ)l z`pZ5|`gbIqU`X}!dFLR-ARX`eUQvvEDvHF!RVY>ZVK{A*e``0k*>_}gx$eG%GMUXi zol>YD4cIu4@pL%xSf*OeQ;$GeWX=&k_gWD1i;o_qs*QNfp1$l!wzXZB<&}7fAbpj2 z=3wh<53%o`K4uYV(14ZkDtv8jYYhX(wuHXY+asFOWSFH$qSfWh!HA}LU?^tyl5?7g zC^BR1G~&SqWT@71EVp9*r_7(hHwrG>f2PQKn0nL1a!^}%e4sr%I@pFNFPo_tExse8 zq-aKF3A&5kOBi)0&JrDk>x7OY8Ugu+Koy9zJCiOZAw3(PsQbts2Nc`O&2h$$Kp^z* zFNfE>?Yvlzc71zq7`*jxSiY*vPOfs8!`ZU4(Ec5@g(qflHfI;Fain;7tREa0X-E0N z9aLl(T|$;Hq{g5&uV-zmf$Gf1a+wU+NYv>3hb`AO z|9mi+br{*Ctd)3E9jS&cm0Jh89cz{~hh2{#es+r~$>P6Za2a%P&u}=*a@2}yq4Nlv z5uJ9xScX~iEzNM?nNn3PP;EDaSWHw%t4dH)fmHq;ICpsNX|6{_ArxW)zhma7%dW!7&*>=6xd3gMc0;@4vb& zAF!tn1;nN=DU>=3Qc9NZ7LEgak0sv4fF`9|`t9po!&j4?$M{V=l>?YUG&LWUOo;CS znE1U1FwgppY(MuUwD*;Bv%bvPzv)sJ!vU8k2|DAyoph;F;qriGB#jz8DBE&)E)U&Q zy7`SK(*avGX_%%D^rmM4l$!Rz{UpSqsSm8IGH>^K%e=>Mj|@%Of}j&`Vy5c)^uvAK z;jaEpn09mMBRXtFAy%ZV(|HR1rYlkuU910CzJshg_3j)D?X#Og8{%P$amdPU&Z5~N4(qRFldXEuDok^JvvFnI*2kJ{h6 z(E{Jr+Z+z;Yzx1D=B`U~MLv;m$GZrDRuqtCv(I?*_fCWswWI5)d@OU8)K-+LhSd!aXjBHChj~*jiXHE@1E^XY-NT;c`H8l;0w<@x3f*> ze%HcoMoUD3M2v)1Leb6Vdx&!bVq-H=rsBX+B}?n8Gc?OZEo@O4hxB?kPW_IGnAb)? zDT8~mZ(Pa1mbNI^5gFGr0mhT^|EDiyc1oXMk|qNE*pDPet$znIDr6+u6CU%aQY1bR z{#xI~sWjI9h;dvT?En2!low^>0O0~0wW9abVFpA;kdBbI(xW|mtu~&bgV9)1OXy_M z(0k;!OMmmm@08#1XRW0TFM=tZemJwtJBb#`!-p8^^OFuhC2EuIKqqQuFCF>LZ zaY?taTKQEFQ>n&nFO^(IL?StVA%m<(rk)pea>n^R;{2;SGgFa8#!Xbwl;O|OiG7w1 z#6uz!L0Siv&?wyx?Rm?d_nl^5ZXqf{>cB1FMwoVvqnLLwCqa|tD8J;b`>EBR*bkH5 zrPenLdnY0kw^}JxG1_M8&1~G{0~0|RdP@YFjks~>5YHZ^O^gs-r-vWMCILS~%KIgO>x zn5vY=7`Xn8mgp6ni5$NZHIX!NV|(;l2*m&&y#Iv+{K? 
z@ilDWS;WlTy|@v^W8}u|K2U8md!RsM-nbRhnx5UNel%wGv#PeiG(N z{BL<~)-k6}LT%W0Ue7EGk(i=T#iFiTdbmL9U@GG_Y^0T+sdV6kEj5|AGLtWq9%0hz zQ|#xyTP97u@74`@v6|mT>Ciz*3RI$0Mp#H51c$m68BKf`2kdVoc^;=#MdlsTF;0MS zKH2n|ksuvX#2fKeL590@bt3K@J~}n#9ck}EK{RLH^`y5Z`DTJ}0c+O(1IAsgtdpNA z$;tvx;nzCkI`i+mAcSB3D&l%@k)Nj>OpEPmB{j%FsZcF_siLGj$b2^TEAvv+E#&ne zd&z%|!}jO-*59Z7xlHy<|m$;D(!yX9*Bj)vjb$@U++Cp4AcSo-9@ zeJnd%>-2348y$W2c4nD(s$|VWN9eJRCSp&68c(!8$xU1t6mdw}obYVGZcv+@gC z2g3T+zObB`c^k9xh#5w1p2j#bG}*5XTbKzK4;4t?Eh8JWCtqDh$M#Q8(ZMjnjg2LF zYVdA+GVrzF{PYuS17dCQZLISt@)nHGS&wVtmEgBi*+61ICcbJNeP!4>-g}ubfA{c= zu=8Xo>_LdM55>o$3yQdt>qdvqHh-$pr9$Hu|Kg_X%eP|)7G$hj$7OmhQt5-sFjHqCrdFN=Cp}zhyzV>oa>p z&&pP8;AHgC3`c=m)L8wEZE-&{#;-CX_gV?#zLeSc1#Nw-m-oZtGVZ{9D~9JRI7Zj7 zr6-wr3)PsQQR>~b=ud6V{bY0)WKtkL8Ms?b?qT5Y%U=oM&Fj$?K9@CEGNcwHX3h6V zLG0i&QFW~#^|+@kPA3?l-@Lygv^&`+2Il%BN{_~uF7v;#4C;xenxCvBSPtR%+t$TM zSm<*{b*R+>)pCQtW|m%#VQ0G^1uU6&N?Qt&;_E`sqy%q@`ItU7c#my}nfEaixt#U8 zOgn8*DC?RyBk_3(CQ3+3cqL;jB3CpoyokE^lPue)N$ooK_{<;+GudC;+##6lg)mH? z&D0`|rptG?!u`QF*b27~X%uwY7x*n+{^ZZjJiQ!kiq9l@=Rb;&-?NRl5Ouy_-PpMF zBe-QxZScH5%p9K;zjS?w`-TRNgs!gNF+~~U{>5jUaU1t^HW%?V4CM_QavDkp8g9K- zGVRxn16>$tDLK{2>*qLL8~X%4EJ`Bs8^VHA))l+yA^>Qg5H{og~F=Ms78dk^f6yXZWL|v%|-a z&J924ox!>+N_kP)jc%O)W!IFSX3bk|TOo@!d-m6cFRWjJCOETUl?;>UE&DRLU(w3I zT?2RP$cKLj6aNzHj<=&h`UHDr4}>xji8AYsW`$(M%S1AxC^6eGzhahE(mXkeBCZ8k z5mT=~og2Y?rE49@-)V|U)EVUQbj@rF-Aj?MIQGrml9zh#u|A=RdvyC?IC{AHpg7^` z=s|~KitWnv`X&azz0}`fXMV!8XHbSVD}0d4Q1Z4B^T`U}6&cky_vA0qSiq5^AGsxl zb?wT`gy?kC^%3M%9xQ8(wRkM3z~gAh9kjvyEe5)tWaf4{r2?+yuIDC*ehoA6D^XFH&02X> z)n~728IfGcMw{mW52wIJ?E3R-&&M*`H?!HuRG3V{Y#FCnjbd~db?FOR5vOk^juw=+ z=xFBC5qTcDCLv|q?1dgCzmyCf!>G%QSDYWZG)Q71E9+2-r-8f6GI7~^8gSJUZMJgG zG6-8DdseiC15fvJIA11tsV97;A7{SSkG{HEt8D#L&RI)dC|4 zNA?XMy&6K%U%P|TQVbapHqwMJwg-q$NFs zquONOyz*QA-aF~1uQwp#9rHjGn zVnAg~8S1n8Hz!%0i@D3%!tRHA(WOD$-9R}P(`UVL_I}X)w&Q>%Qacth)3-C1hP)WY z2{QH*Vd4Pz@Nnp7G^O-lqymriJ=!J4SBSLm7(H6&OpLS%Cy^xVX7;Iak+2V}J?ca{0hKi+O|p)01|wdJ)kx0i{7knr&<+`eEn~?~gHV 
zydiau&jbNu;yCln`vz#*80p$GW)vHpYy4O-+_;~VU2LeMAbW3Pk7Wni)+Y`*XtMH- z@-(FvXDNe+%P5THB1UoNWTaW}($%xVnwQK7+kSW=v}64&u1QWQ)_oU7@s&u(@=*fTgZS*{qjscL8E^XP94sfp08zhUJg8hF9MEBZ|KaN=cq*pXwt8$oT zY_rSfqtJMQdNDx9JIn-J+Pzw-J1U@ezpx`5$?MOkC5MTLI;(t)Y!_SH5eAPQ3Z=!9 zC(mZ3o%BPy*+n4a^*MVv<cdWg_SDCP7y&=2 zokzL~#1$S#@En9WdmzdEJ2A+y7fWlrA0-||y6S4Uuh)Z%JftMy9SK}`Fm8!;prS>z zZ^8VZfcgL7tFoDS!E4Mf{cA7aBDAjmd21$mc3^NiK(r($DxQhIT0ZD5L!FL6gcByM zo{a|%6x^?^MKb+QFPLJw9~Lcbw8L1Uid&4Ms^;{@bP5}BmN9I=K(DC@Lo7EAo$fC7|l5!%Xo8tMV$Br;<=U$s1wvPh0ERhf3gr9D_#I zb_|6@E14bnO@HHc;L+E_KULE{aj>cR$TNNJqHM4CROTWstGL&DpWDTN$~;Nw>tIma zg8`J?o7s`Tip5Cg#!&VD*w6PKRC<-Sp=uRq#4V5A_U=q`kV{<)cUyR#x8iS?(NxaT zM);jDZqM3wudWPQBO6*X6Q6_0kxP1YoB)ho2IwhL>@tZ`KNBxwA60k6g=b9zYp^MP zs{{W8(bxR!%saOKnm#C0=$4_{3Fv<^7qwg$z^2Ynmly-R~ZT4?x7B=3N#b3rm zs?*X&WS3SFXT4RJckHdf?D47{C^ZfD(TJzH5ygokQnVu~VO0Yg2=#fD7JK@(R`W4s zHx=ZLAE5KY#N-^5Ku1B|mefR^u{ztb7TfJ)8ty2>*-IRMgKG|ou7`KqQu8d%K9Bv) z>1loSh2d|%b|UO~x-WF1Y&IA>xR_NVVREmr7@aM9A-9MFd7qg$M>Dqi^GZo7-hHOk zSB2=0Vms#&W=Db!DbnaLi63ypLBNy|<+RjEPa=Ib=S7#9T#n)+jV1$z)`B+j5o}2h zvCYX{F{2=-NfW7EC@~EorY~X4VzOd-vuYTA+gr;-YD%08`ClFgVJkCU&(7Y@rBa+nL{*vL>D?ncl@3bR=>(JP zcCV_C=Z9W^SM@HW3Mm}I$19{aq#mPV);;< zai4*0Vbv%+^UQm}g;~vRf_caNR7|Jp>HI8TZPdy*9esNejc%#okXjKdzMjFTd+3DU zs%63C-6DRqW!?W+4f6OrvpVX5Xcg+jf_92)<%u9^Nq5j2b>rpYXfyr{kurW~I& zw>{398|o`7D6~9V*}ip5fZ~5Yha`3tMO-|j3>@5V@+FwB$WiGcSXiq zVczb>klu+PkzcHbsUXE!6ZmM@q^)1epMx+567&^QE3k3mN@+-OH6~>oA}{F#5Kp_l z8ApvE9Ar{3Jh+_>;%+kgiydM$JE5b0mU}#_fOlYi5 zp=Op9&10;gjXjYk4t^Gq;2UF%$tvhb^k?qj%rozcUSAG3n*lBRW5&l(QyhJzs!%<5 ztb=i@7=@*MbF7q)fxOr0;glNfFc;g!7{8NofQBZbJ(jlBR8XAi&J4~2p1-a=now=S zuuE!_lZkRn7~&jKF)OXR7d_wmoT+F=bfoPzHwfsGg_eL6Ob@nMX z8JwE`i_e)A-uki2!vA^my z>Q{84f!)thnJ#Bk%jT5xdC^pj#sfP_4D{-r&F08vV2`_O_IKg8*qo}$-n+X($gIu5eB$HEm81K28-{AXkInaJi|3XTy zcfp(AYOO=(!J;)n2OH*<@lUoBD#lf-;B`-NN#lf|e-sd!ps$Ys&rtv4VW{tJ()Mlo z_&Y;a=fiHzN@&VA+}aXuxNSISq4O6%)>8V|8xYBaSC)V7=GT%x|1-4DTp#)oL6uz( zcT^W==`2M@N1in@`%wxRJpo3*y> 
z_nlBn#hXK{3jCxRlD>?1A-^0}AseLsz+9?}!O2rXQuCf+?MA{f3tRR_ zF?!O%;9&%J&2ZH}IJh~<(V#~U^@Rm8Dd|Xr7lL#!C>^#-t@5qlZQMs`^klMLk7*QY z;c}~%;&B;VVq95>XZBb6qyclwRUze-r%~-@K%!Y-{U_$D#Wh9!nlx6_?;5&?mSC|i zUMkb+*DzX;)OwnB9Iz)58NBQFN*YwQ)1Q4HgROX+tpwF-ce*DH@T!E@;b__%NO*Lv zWnkRZpcq#TFL^LWepb|@%&`BK`{zTs`M%J-Z+EF;|Fq)jaFNH4-8Rg@AA?~d7&dGW z%|Q0;^=0gee;&sE#jtGOZ!m*@7c-R^QJOnMIk+vt*|boCnU{%IRU2J>DzObk?ccOc z)PMZ$Fx2#x(r0dbFvI^4_|G23Z8FY0^WN}oYQY9FwCj)y8f0QuHae8N3BZcszQz1h zqRLJtb`1EL$!L9;9U|7LX$2@RAvV`Gb5zEvQkfI;T(G<|3>?A^%``Jl(^I1HPIRnd z2hxY()39_9u^M2O-mb{=NtXiMbI>m~Ss#Oc+|01|qYx^*V#L%abh!z+zQO3Gg;@xK%lRtP`{xyTr}(S+ozM+j&C$vl3SbdZZsxCMl1}cgrm4s0{L>k^$+SGm)5A{2gMG{( zQ~9~H=W(FYuX*5$SgKh^-CM=Q_R45J*zFJQA9u!ThMLJTdZmgg<2e}E#a6c`_H*ne z+U}cp*RrCF-#pT9%ByO0**CdoT^c3};x|q1^LB7irN4`)rQfsA{qAL+8FnnB5hfG1 z@^xGX5jkyxac|RW`9FTCJ@h?296t7ogQc6_FqBDXt02k06^`P3U-_ng{50(3XTkY= z>I_?4npPlI&-k4aavMhznT`Hid>&vg;ZXmhamHV2`f}-S-Vo)%Bg$#1pOyFXm>^3_q#pg>{G$Hc&wt(zTw<)c@JfBMja;qL#lKg@YvTU_=UvTUaj%$gQv z4#%Gu2-n=W2oX1VLI#bIm^2!vh{4m&^4KeHS{TBIen>}5=Vb?%z(+f>itxEG zPSDY__dMiMjC$x3eG4zR@jUyi$c#v#G^l|G7FBX0!KHs&wu$jT%1^98c9%ucy z%gDzv^JW_S1);U$0z}pS2~*j(fb+ej5C8Zrgoq|cVc;Zsd~imt+&jEOQTk7B!}t3P z!OXdnRn_ycNB&$S=+DRHYMke?mi`2d@(u|4HWm4v(qF$}v;Gq}<3=PuVZ6!zGkrhf z%$s7R{UXEIP#P}3>F=Y zB&f?X)pq6JNh^W<%ua{s@LQZ6ng=P8M4(5v*3ctJUQYYnd~k{G9A@2~Rwq>C&B(h4 zcYN~(X4%U!W?h*c%gb~MBj0!!dB#0{&pj3WI;&m5E(tH|+Kup+9gNJApNO>LN9L^t zcyT~k;iaxu@kHi5USwdpdMT9+Kb{T$BvOdI+#x$!o%5*8wCCKlek|T`y;xdB^N{{mo5?L@2nD%v+}kYDS)k`m#q#;=N+U z*GvB1g+^*CYt!{ErRUfuycxX~q6RRMG8pTfL3BMuNP6m;*&do^c(*TbaVv_Ux$C`?-+8U&u;|Fi z@QSx}b7;#vT&1y2BBSq#eoVje$kRXk1nC2KmuB@6b26EkOqgUoh3igX(% z#%<*|(w$hC^OQ5!)nz>%MM`%b!QhFRTSr=`tA}=C8=m?#W;UEBtcy;#75nB>sZ-M3 zPW?T+hr>`{3Nr6JDH&lY6J?gJ2Ujcn%;)LLV%e{}b>pBSc^zlq#Tmw6CH^dcPxUU~si z0xqQlhmZl7td8OXU^LD!4s=qvJ1~(tB~y;o@LQLe4V=hN7OUnZxwT@xvI%Kf zOj~lChP&;_+5b^yE`!WmTAM3pF1Bvh{X0Sr_SO3t;AYHn8X_;CaCY2tS!?*_Uu_GQ zy$29~a3#I_vx%>F>CmVckst7F|Onw5k=4g-=f$&dn|r! 
z=-D`A-V<4TvJL20DZ%1d*>%+_nRLd?75P87D}+0CFdv4QyLRsZD8eAy#e{y=kDA!7 z)!O}G82HOr7y5Zb3AsMfVN465GZYZ%4QNK3-8Q#5indh(BGj~j^|Sv-IUG;gvLer-&Em+PXK*4<#~f#m z(-}KafG%$B*{A4!5YheUnM-QVip>|D-x;3x4wl}?i%Bap)yTP66lk6Ad$2#;{<$Z^ ztAF9b8a~HyCa+}TGH^37osWLqYuAQN_w5Z2-MJU~kAwqv_hWPiKg{|{^O`~rQk50m zWf;{^Sl&7q7Qv|Jz_7d7^%3WjqBGyW#c2QSYeV^})mafuXNOu$R?76E+v$GJ>se_g z+RY14B#Ya{tn1_vajQ5tuX!NM<7n3lk?1(ljq-Ni|wdSN*DtD`r^LTpR6ELoG7Tsepy3lCTUs zLXAvFZ6HR+Px5A-%w-nib?I_!#4|I0oNoWtO(A@7d)A+Ol9~BD3=_9A=tn)jt@|g@ zG<~%E=D&Xq>+^B>+Y$NxE|h)IZ4f<#=b||rvc=1&i?yh z-h*+=92I}0rDPFU|K0wx;)0&=;Aanqt_2kPbPpnk%6;0iFKP?7yl;Cr_wspR{Z)(N zo^EIQjX;{MXH1;MDYhhCe*N-zsR6y`>7!x$7Bp`Uo(#)a>zlXdXqdVGL}<1Q+=Ck% zK5-In*}+GC(Wen zl9b-8Gq}$k90+pmSz)p9~k4dcz9LtF`W>+!iqRZ>`oH z<>_(;r^U2w48YEm_}sj7i`IfTn#prvk~cHQ9Ex`y^JG@VHp-6~T-Ddaj%u<;Tp~zF zCk{5^!yvZh$h;Xu)dP`Me&Rh3U_OdVH%0z3aj)0x#fsiTA$)c#10>4YwQ)siC05J( zv7g)2vXGAK8!+(SM~wbnv7K#uuchgkf@-bhnTq>VJR9@Jk|t7V;jvS#qjKrRnfGCs zw~7yL@=ujQlzD||VZ=ED6RA5nU8=-@uMW&PK@;C3wJgs#$_R2Ca2roMGw)-2kp#I; zKaE7ln2)m+C04_GhBt%^n$L?w6r_SK}l}T!tQYXtgi49uF_miiPLW-zM}WP?{lAQCnO{v}JD-|EBVj0miWDK0(mRcoH7# zO69llP`OS&)tR^PISQM|qT6_M6zR4;n;!FIEp7Up*R%diAN3dLm;I0d`fBP#whay& z*F-uzSi%U)AC%v8^97~9dHtJQ+x~x*fu9}Bd%<3d(2glLk;(#HJ+;z8cWZ=pk+rw$ z$nsS07E$_R2h@fUgeYo_onsc=h=(aWNJ0sYc`UaORY`X0i{k+)m}xz{eSjSiiZiDV zjAH=N{fU~Q`>cg8<9z0MSG2K*czS35#gs&4Cm3db+i>{AJMRl`{i91c)od>9GZ)}y zq!fcp_#E>v&q(t-Hg-daOLrJn$t!pAQ&n()r++&NA>Q)m%;nT?EZ7>~PWfv5NxY^( zI#T?V(YVdVHO1%;zZz^&1AQb+9&OBw9G6as=I5^r;U9Nmf15BIy&Bdsa9IMwS-`cB zS)(FLwmOw&z~sBQkP5}%@i+S+CpXn*E_lfhMBAQcv& zDCgbTYKmHB>~pPr%PEaF%b995B`_Y#ATe*_A60+_?Df!8q&=nuhs#!k@P~^+_`7>T z`18j?I2Q(P`3$nIW7pZoOsT!+CbZFKVEycMk%5=lu~dp`n*{=%-H&Gn^Gj7up8Y@s_>M?v35IY&r^=RK*gA%nWT1B|JUu`?gqSW9(TOWPh;$`v$wgass9wpCRa*toPJ&YQ~NsQy?Aks^k``<4ql+aGr}*K zAHwC(v7R!vt_tRVv94#X*{^uSizjtzz?3H&UA#FRZi)h8ZvJM8E#6 z+i$&wec}t*t^GaHh8u%GX8oBMy^jvJqvy5E!2g;V_?yFfBIZ1zEV^g=FSX$;fDvQw z!-o)yutZj(lw$24uc))iZ9~(7Q9su>pndZp_~y8kXPodgKT%zpQJgw_Ffs2MBva`% 
z|2frz9IDY_jMmztUSSRB6d{yogb2rzz@Rf}p|H|el#+eY2{tzG96w88&& z!^7bn|I{B|_BtY!&>C4ZF7oj{rVJ2w>;MW8cxa6?9M;97g{s{=?}kYppyM>ZPSANK zMrmF0po@!PjyL@nvsPv~6Lr$#(dD<)siux;Kk+|82Y%nq5FTLL%w{U3r&QT-YX{pU z75BiVj5K~3i5;e$xR?8N*F)R2OW5bThO$K5Xr`wo!(&*+5=kZa-8>TXBcx7vqK-B5 zJ?1y+C8Ryi#jlYT9Eh(ns^~4n>r3%DeysSBW|1(fkIXC~tnfCeSLvj02+w1tw}w2M z$1K;icO@Sx!fr!!`#4P3Wr$ah&;RDq@uNxUPHEah`0qW%p6{(8+yle5{7~|X>$x6L zn(BpVOks;L6wAfIoVpB;Y!2ZspU93Tef2pZym&o~4JK~*6mJnndS=H<%)j(LnY(mC zelT4!*u&5hB}IHNEi4a414SAsQfi>|Af?gxq){Bjbm?9M{inRRW^D)`rd-~4J6p|Y zJL;~)!2_F0`Vcj?&wLa1LGHs&$OjtLLN7-7qxeitp9PRir1p0o4lO6?Wt&(Cqf@P? zk=n^=Yu8R%h!rDyEWWp4_gVYq^`I<-p77B+)&lcB#+q|nu9uz)NVESrh!6)k)n~l+ zRS~qV*Vtw;LUSw)AgUxX&6L($I6s{8Q!~O2P8e(Fgt4h;otspn@}dNy7V$|`h+#!> z>(N5W@M5%yZ@g(JEc`E+3ch4!Xm&qpB)AHZ?g@2mv)I_dRTC_0ap^yG28tbb(n&tS z?B-#vN7$0Gg^m{sUXf6iEaGfRi%(v5L^4h#ID&QQub*NDOvnAMm)B2A7W&k$bO|Xf zNu&1G?Jxoc4sMM>4`b9B(^g*k_E?8w;KEN2&nb=KcJT2RkI{)g9>Py73*o0O2`nmS z0aD!YjdbNn8wp=PJJrWnukxgh#3RyF$y3}@jmYrHMCx?l5{Ko!lR@|IUWYLiX0o+F zDUFQWL5dO#^Eaih7$yzWbkk_czdjPeKR*SdBaX$C^$Hk|;bg-4b%0y7u!~6bQve5e zOTlBk(h}bX?+@Ygn?m?c*D_0GFjtE>c@fTe}82Ne|#IJ&6#+tpd2y=-hv37 zHNe*QGXwul>0__|PQClUE2H~Sd_OywcWU*6Cy&sX9z*oTB572IrY1}Tcu@)G2#u=> z@FimJu`)4Oj4#QtivLEPWXvj{^&z?+M*P}bsWd0TJibm3NRiAsQq>S0;4jghYaUH1 zGU8Nmtd&{Y3w~xr_>0#)6uMs0&fahX*B2X2a>T35`XoV70gU{n8=318_ExWAi`I&k z0rpm-$;@-R+qKjVp34aR2^wBx-UUR(s1;e)19njSk1%bk%GP}$+`B!5@9qfUemYJM zeuxJ?P{ld@Dt0*HHf&;$J}2acU>Jgkx6s0qj%u3iq28^SLpmQ_Hsrer|;K1 zdnP~=sj*v_rpO`#Bx5ZvA?ZY9Z`rSDbBh)G=Ofh0*Hq>%-+lSP5dI3rtyG5tiDFpG zIWAM>ab?&V|2253GfIlqLzj}9-?){*+^ZQ3vaC{zMr}YeQjf=@&bn-SHGjN>lr-zA z%LI?p;oisK`RJBxDPkS8J_iZMb5@3MJ_C(h*J9lECB5dYv-sb75pj@Cwv2{_yP~VbQR^4Ga_xer_$|%c{hg7qN-`AB}3JU zGR;)&Fjpfgwp~O;eTO*u)wR&IA@U0@;RO zE-fgoyy_k2hLaC7XjNQHc(c%4r+LRq@{YrN_ve@2&xKL{MEl9`OWjApZ_GRxezE63 zcoi!h7q=e|^I_l`0zv5$o{2bK#!Z+KGsLDxIO+AjJ~xEb^NGvI#MU6l6a^cynz~@tjd$vz`CJd()o@mG6IZ%St5a1x_w0)hj{>86nw!F`y9?IC#;A$e=Q~55Fbmo02 zGj7|LP>ojI6`Skdbf0c84(ms1i{^!KEAlsgw!?gX|E>^zn>PI2Co^iK+c4E2F>rTC 
z{ML1u*zZ*Sw{ol^hFrSdi1_@i+9X$eUJcE&$*n;yc^Z?x@%ltdX5Sn>P*-gFavf+Q z`{r!;Z%51fbXYj^RJp$W+BawPTLq|+5!x6)G|#hw@|qm;Z&SI+kQLpZM0D@r4iReP zo?COQ^X`*=`Fod#{on7$coDO2++^1KxDIeBKR6$T{MwF_;XllPS;MH`HuFGu8O-|p z_7mvbU^9V;ip<)0O_uK7OP0$IS#N&NkA?93bF+@VR>y+>njmpDUemUbne4BBiLE5} zLNNKQ2wE-F)JfE8{56r+_+WgR!uXNZO;TKrd>yV zd=VP{^3E*A$>evURkBT2V$9-7m-*wB-cvmKEruz+`y4Ouin~#;ZA8AQT}+>)me7iYuI6vz-gH~iXVnHW3LRU`7qy!|$r%&&p75oe=K z?Np`93#qH>ZoKnbA^g!sp6M&BQ?Mnxb^w(@RAp_dmOJC`A=3iZ~{SxY!^JBN!&d9Ub^l3tSVaDyPb{rV7Su-*LQ- znRjJeiMGF`PJWOz?kkXbc+Ryj4Wwm=>iIPddbclWNsv-TKkcl?in29FgLevE?Ih<1`v&>di8O-*?=7g60{6?miEdh%pzy&ZEPL+ zx9`3@ytwl_VHsPOW-O;;)WmA^V5p2)r|@D+S8}U*mvKMHDEc=q4&k*hZ=qJEWM9Yr z78#|p?f=2-hNXdU5e?ln#M8Bom?)86JG)0n$M>HK;UyPTPeWqdPto!Gf45~8xa=UE zadUgd!8l#MxsbuehmlTPxD-7A7)vof$0I7GM?5E)(LGKF`2aKdhZ#IQMPBT~P2B>! zMEy|I@xTz523E|V<7HNK{z61MTv|-?+-YZ2u2;H~Kn5JQ6LEJG9hrS0qY95BbtbiG z!)bKp$-ZoPq{#FkqhYX&-F>hhe6fBOBOeD_%FC}l7{Uj*ii~@lVAK9dMyAO8D%OwF zPJ@(J)gqHoI#@ZI!20HiLNZ_np1<@BwuQmKJ;WsCb0xjW-R+!rQ$Mk)7}Hhe5R-mP z`Zbl_Qhwv7|ETnt7u_Gv7|lR>j5d$eeIj~VtTco*>gP*#pg7fIh_7A z=Uvw#hQ1|)yAFi%YC8RV24ORAsHsCo+$Qoco%OGrAHwUe%t%R%tscVkQ}q9-?;%oq z2*VUGZQ+@m%_`z^E&VG`gz$oO8S^oXDO02~cA(7M{>SYwl)Q5^T=El%eujUf(b<>- z+AsVtgn#$Ei3u<*g6UC;^{q{a-?{EZ`O3lZ9J&fc!kPz7jVcBpoNdrLbN|5*Zr@91 z%KGnxF!C!Fpc%|q{3&Ma?l`cWs$`_W6Yp4H3vA(x`>Z{$n^Vf76*bqkUBA^3O%Y`N z?#Xs{NnRa_pBsKQ$tu9P<0&gmWA8t-ucZCK6E|1K2}yw zll9NEy0`u9Ob&kgJJ~wA+UIZqP9ejD--+Mg`7I%xSUD=fXU7Yz;cS81GD3!j_r=Y} z_1ajo)3HKo;Y>?eGH+{<#aI9>oONrcrSC-E`RHkyKvYa_4(BFPE218W#1p?f(dz`~ zUN?8)YlXOaJ+$q4cM>htlV^vtN{+ zv0e@B3{@7N`#T>ce}4f+_onA$Uy6)4`5o^b>COrNIE_;AuQMBGlXawNJk$+;vx87u z_6{;uk@yv>paHiL=9Sa=x8G-m%7DeKTiO28hNGJH01jlA)46?i3u1o;Qtp))>1wWZLPBn2$hjeKf z?@#Z_mQ|$tOo8>&0oA3;G72Wj6PWif{bRDEr}x-*#Bh~1FmQ}Xi$1c)D(TE{_#xS^ z$nqnijQ;bd99QNuN6ky}7n_uMY5tx=8UN1hA$)}LSAtV*P@+HKIcqsLoIc`S6iYFI zO+8|5l)+*s|8)6HfBn0qzj@KlXY1Cnvxj*vxG*dAXd=}XJW{Scj}Nf{(jh4Fb^WcP z1*W#&$$sA<4ow*8d&kFG|5xiAYuP7ZoDouu%rT_++F;&2%wTpu(#z7!sR~?gM56VQ zVbgsTjG~k{PO%i$p$*&bN7e~c(_DKi{9W5v^1$<#*Z 
zcV||Q!MJZ)!)jAL(@Pm&J7kqFA|sV50e$T8OoLi++L;176b4-3YjhAY@6>6e4&{Mu zA^hJb8Sp5vm?Be&1 zmyS{9m37Q%sfGG04`qWJ2R2hJ^U64^UqaRdlB?^gtEPwWs2ooLh5L;x3RxS zW13ug(lQf%J>&f(JFSj$KbGH*L^t{}bXzpI#P%4XQr z`jS+4UwdH)U*AYOrOz4yS$Wv4V>9R=UkBQcgb zwx&5t-Jrx%$=1qiOWe*tqa2E@U1u8udMa^%5zLzdJYuaI<7a9PO{|@r&?fabAfxPH z_Me$ZwZcl*_+SI^(x*cM8Nb*cFZfP{#uCsn)7YuMOgBwJ_iB1{b-#w67&zg#;+t1Ah z($fe?pJMKfPlxd8i?acm&S8#z<^1-&L(SnJlICNvPboz! ziC~K0omgX46syd(61ybo>K)2s_{=SP*S`#=9UonE<+HF)J{yKUpMGne7j`0MH4TQD zJacX2+6Wa`N`vOyF@V|3fX}U>j1S@fh10_M3iG}} z@}6~Y_Au}8o-A_*Ix&&jcMO^CjA(DPo-DW0vdyoieRFFRUb;AXa9Bm6X@+=`)(niK zXea^n$rNcZnq(riYZeEjpi@z6E4!66H6WUBG>oDU$cD6#ihm)D`W!Ab!snwA-j|C?F&dASIBGHIFaFs4)bzWGGPynQzNWO7{q06+jqL_t*X_dgxN zt@}{WVg@#uj$AaZCwn->4u)jQ8Imm_-FDELboCGpO+lL1`fU^pqdn^fDL=LKi{>*^ z8EI8a#{H%3**dj@CDZG5ra;;}5QLM@B1lV0zxK_i6Krgzxu(8KMP%RHgnkWWC2iB8 zG->;B8Nu>*pmi<-*7?){*RU1k?`J(;MM0%nGw{1OPrrFG13wqTxP)=a%dGSBahT#q z_ss~ym~$p3X3W{Fnb$yP8~vng=JQCF`z#c{`JP32%C$&BdiJZF(}BY-1`1CzU|>-? zy6H!zJ{QKlh<=P__WyeW%A+G7cr2Tne}8}r(>B%J~%s%c~>Vsy%8kNrgl&vFY3X(C7QFY@2adN z^EyZ#eCr9!Y(7RDl!f>WGZ_%yqq{TaT^Zi>`hVI64Y`z~(R5Ven~rz$(}7|SOso=S z<2KoRsKn*F?MO+(Vah=ecV!+#Hyl;u``4#fQlc(QMIsU-;>X$ZW(KRzVM*gZuEQ<> zf%#9}SD%vlMZ< zSSlb(S~iH8^+0Ni&b*P)cg#k~=`upx5?+6E6OvuK)n^S9YRQ>RQyLXv9p_m2%{L#0 zc`vEnlB&>Y0a`4FXMr|X6+eg9GF+8}x zS6ke|RMMs-L;6jadv~8t$8znt+0m7onQ1-&6W`7V#6ilnD`D__6ZAiU=q~eE@oQXm zqIQm7-kfQiPDkfh5l!8%=AG$?eT6T8%6Lzb=KHs0+s3xjnSOguT4&4r)cb3C6@z*t zuMAbc+U3Ez|hyu3*o=qk(qq8{2K*(XidF(GL~Svbo1^OxDP#?Y4WET zcjoKN{YsZ&VSEdwWmFAx_jEh2%V=vag9dHo+xFf>{SXp;*<0;j71pSCgGh9upsu#h`o7pc zNQQPUlq4#FtR>mZOD}dSmB<;U#Wiz_!pk0S4{PsRPd0~dc6Ef_kDd%~zv1a{)yr1J zEv_yc5t+w}gupl;W~%^!)7jNj&7NNX>|7qg>9c ztTiI$uw$FRzIg|;nr6x{4)+S>Kh?zBcAD9$b?m@!IEnq`A(a94gyVLim-VLC?-FgO#l zZaY*j<5u}lake6CdlP}la9BY83Xd?8b(uu_)r~S$t$OXH<9-F@^^WVZisBKJu_NM} znQor&AFf^#!dqdyAA`xRgrPM$sF6{3!vHqZsVSLqW^Y+)7yhHny4CDGQ+v|Q$4W5y zO3@aQ)~I~SyKV`jKS_O(+f?r6vE`9w-qmoW_}%Xw2M*OdHQ;7^o5}y*zbYFD8q{(! 
zp2Uob=u%zqo7&eZmL^DTC%unzcYA6J*(Z~YahdihK$!!*WY)*&_XhS3ht`!%Va5vj zDE=Z7Px&@M9FkE<3S>s*w`uE!^szX`_KlieQ;U&K{J1qFh?r`p+K?aN{b1Ve0+Z=jk!jL;d+5L+i zKRA~)A7&q3p@*vtKhdHU#pa&P;s`WF+dyMZLB_qCcQK*dM>>&$TbjisKjR{+@i8x- zu!V-_^CXL>2KM@#oyoYCsS?l{7{?XNcvny!DnPm)**(asa7)H89$}#2sb!7UCDUie zqIt+SGjsA?#%*~D)$&v0B(d1=}lEXc5Be!@2%%?!|J!gVUjY1TnWHy&yC zJ{1t3N?VH=B$+pr^%==k+`eZX#_v;!O#EDmp${UO7Q|Tz)@?1yJ-9F1@80NOt%y%i z`7X-h#+9cSw`r`_mQy(6PjnUYqxhWsR=KC9*~-#dCw2DFHqIeX>wFR*u0a^~Or$wK z|0kD*4h&BnMNxPEj^p9s@9quv-b@>&>*`*JZjWn#lZz%6i(_VY|8}t3W6QrD3p=+Q z4$Ifg89V#742|bpnD@y?vS-buVsH((978QLqD1#uw(fm)G4CYWD3$hRa+thOEP`=? z(z5{`qH`Jty3j=InuF-5&=JOpP0+|(>4Z{6FIGC+hzEW+kQ-!u%t|@THORKJs5uV4 zBbx@ptN&zqn60u2FoGwiC%%KHi^~49@1@iJB`UdVI31ic7B58l>_vAis_yz82{SUT zRDmla&f}k!o46~98?V!8G}2wi$Pez;cFcR~0RAwtRI_r*D`7s?w zsJt#qki=;5>>nQwaVLRNh8oFC!K>cIX*d-{Pk@85NMx!GN~CjCqN`%4?kG+>{dwfS zAf8BH{GrW|=jyJwvqMHWRj{s!$GoWqez)EqLF}@b`l;r3qXV`|ymF;iUzoY3!CIxC zTJOd7U3{+Pzu<1q(qjEGPp1fG%iN_iLK_3i*$cbF3YhBU*Dnv(-*_l|;UkZQ$G&kS z^sHfb&lDJVBGF{_9~};x?%yAo_n7+1_XwUB&W@s75(e`|Y}VYeBy{ah4s-86 z>bJ9td7sM7J*OV)c%rX}o&G+wGKDdfIxF1{W=|dLA3Vao&JISPLo#AUkzSD*qXj+; z{~zX{>fwVnTK?Gr)=EFpwxBt*%xVg=u#et_0#V28mbmq-n`1sZm^Ck0)EUmZ!k&?~ zk+=8wQv8t&8x9I7<+X^va?m-WFn17vkl#jsl&I`;Oy|gR?shicis`Dx^xdPL@v&zv z6*8ecFoNGhg!$47M#o#EH-4c&oFU6tw{v;IRqiJhRT07HqRD-rSsO=>S1c=fjn5^f z86{6x`rbA){P#0JpgzkiJn}QflVKE-*J%7>(4BJ7qfEkSjBw9Wtlz_o)RAa(dBTWY zuXiSI$9x&fD9dOdin<>^jA9%#YbDFsewrP}NE`Su+&pM2LQ1S=R_B6$4-HjQtEr5N zej9l;7uGS8c|7pIPv=+)P zuF!znq%{hLagAH0w>}K<$h?QCTd{bEVCmY~;jRDes_+kU?+@Sqhy9^v_0$=-lctVU z&0*8`4~FYsM_gkD)iL&L$#5MK8&52A5IIE176=?TaSdeO^=w=BKD(Ir##EI&oTLG; zW6~1VL(GQkSk+Q3G(;<{TeTFMzwGU+!>4X~Bn+=$P{!7aE^Li=FT-ppYss_bwS?}T zw$RR+$t>Qx(R6I%9Pk;ud$36h9Y+TyFk9&~nwUXV-R=b`mF<62N~~x48P#-f%G)oLZQ23%(sG^b!@?f3$K|tTL<^rdq1j>BQh4l3PlWwHc~9tk zd1q*4nWU99?snwmU99`H!}Q$?t1*mH)Yz360!Nz0)TYsyrypmRGCtv3TNx0H%uq77 zdVa;Dgv&g8UIG(hwxf2m-#i!lf$brD{9f82j84TN^RrQ~o~Tt`4P4udCQ=zJ z;BCr~(iF`r!zGHE-bLhxNV8k_VjZ4Dn#2X^(H@2N-<7}=`lw=5r|Vg!+sa_DtRBZ? 
z(_v7GwGVq4eQ1NTuoO|HWkFAEY=v>&_-mJjL%YAn!AB=T=j`Swo#v?2vmG)0&L5sY zMBf{F=CZVyl3-?84|H%zaXt0%A@n;mRUF5vxR=Y%!^elh#@qD$tbx2WpKa-pd0RSl z(uLPJ^WMUUYc~ZKnKy2Q@TqcL^^z4~)je}Kz=KgEM|5@|N^fPKU^AUUbAw_y>r155 z`oU9^4WbaIU#mj&Q;)~0q0er|EK|q_)5pcXc{4b`fiQ@hhxQ!_C%5bg-Pf!SEoxN9 zJj|0cH=n^xwqt2DXJjnSx}LK-3+elKGa+d;Sx5U|+RT#K&I0Ep4KnEFaCE3C9LD&? zLH4p9V5{i?G-eNT9p$^ntr98>g8w**aPL9gH_a^*+Uv-1~(rE}WDz;Tos;TJYg z)gQH7HH%9l7Yi|Nv4C9&?uVVrrICx0GkSq|@_A4sHU1!3inht;)79>!ecX*WzEMRx ziFC%To$|MxAHo}1`YD3>q5+j_&k11-x&iOGHDlnmDZ|zamVpXsNz+`s2~v+eJD>d) zza>A7-o+^+RSfSAjx-Rd#9d3Q-bj4bYX<=Hspo?EQW;PDaGHK(-sElBt#Q*3Z#5K6 z%!-|fJ4krVFRu?Ddj0p2l#suXkxnjM7LmJo4nH&y4)4pD_h{}7-jtWCVcy3!XYIH| z)yO{Taq!C!!mt{IrmU?TM4MWA7t#nN8mmLS@vsP0JGTuW? zurd@O*3_)JM&81=?2cRZU-UitIaRFDuFtNw#Q4Y*lRJdwe~Ilk<-;dJ|DBt{@O`^N z2Zt!MJ=Plr&c#T>^Ugb!{**r}5y4JV$8rk!I8PR*aVI5+ta!%>kbUY$)V$|-d)U`E zBkVlc6n4R^_rR0+odVnVXo7~$e8=ZAB9Hir%jR7e)6 zBFL7V-+R{jPWHpz&wg0@LUGQ9X)lMFuff2@8VvU=VS8am++GL|a0D02@@ zOUgg@SMy4r{P|~u#e*>>YlvGt=U%);s25tyr-F z=j}79h^6OvH`-beHQ;VMF$sjRQxm7}69HaFTA0uEhMNfQWWSq^1l8#GJNZ4tn(E-T zec|L?kB9EtwuL5+k1W9`u-r;}%e90Np*6p71=_C6o>KlK+-h^XzS~G=pTbV&ZXIEU z8}8d4%4(5QMq}60P9c_w(r<+aPn5!kelR!8Vf}NM0i%*+#o38DJA85Wn{v&2x>O!J z&aQ-Gh?VCp$v&igtMiu5469x|Gi>_SQS>TUOSDesU=?sM5XI))efR4iPmjUOW$5S< zhsDgyFNBF-*wz4X!BPk4-_gX+T%LKZWZb%1H&VOP zY*HzXuH@a+%m z4yCn)kCS{vxD^h%yipBOIn7~U=eF8dUN{a%8sYZsH1M!LYp)iC|YxJ_pr zntGxydMIlegC)+URNi6n*V$Sk^QPnf@(vV!wq&%_ur{U$-3l1*Mv)4B>M6)D*n*-~ z2#BM-VmbypHRBTEcl~hr>{SH4MY?z>hAa9xozID;kr`=Dd!j$Zos(^cbnLOMq3^$a zF0{SIgKKXDc?K%YGu5TNmC)CZJQ4aH&_0GkxjuxRD1&h-d7*UK3VZ@+^bfNh*wA)KXQq3p!>?GHt#MmFte4dwIcuw+ z(cZ;+;J{7&m66$LLRg4Q@q88Mf{(@?O^X`y$wFhb(E70eCJ}EcH}_YU*rL_6FDv`W z;Ho6ea!gBt0~ohSO{vmfdhbM>ot>?aTqbuP;tcMg!r-zJOQLD%+F4=obseFfnfgSu zRpO|8H}Cq7R>BECcuX^>f*+;=GS zJ+&+JZrRCuULg91bm91I_l5SGJ{P*~g#l_h(tXlSNA;^h7h8u)35@9nNs>w032pmdcZb8+k#yJEnqQyHk{(V z&-8~M{_N@zG4~KN%b~r!p@}8eBo{F3gtSZJA!4{>F}~{wfFHMu8E!)45#y;J+pvdA 
zwB(}YJYP&LI%v)aR|t7eAYJq}2xP-wGijVL-^0jL-_a4%`_>cDaBJDZAy>Mu5HkyOZ0BXe-PgOfvaK8+4h$a#+stf>79u}S^IqQEA`IldmVR~a`)1f@Zc8@hCMeO4GS>@ zlSG;ZUx4WRyqC`k4}I-W=w37d6NesW^3K?u<+}Cp*sj6Q%glYz3p>KU`nwPH% zCAI2-lV}Hj`ti(Mms)eV2KUru9BKuY^4fANF)z+iGkR~TTmxwK}CkGdU9 za|?$*9rpNEAkFN@AaLfb{RJ#t=F&UOz3J_v<)8b^zbSq4$A3`1;ntSY#uwq?M;p#6 z=1skzj1a~G6Hk^$-z_;7?5r${uaE9uL z)VrF`?(3C4m5RQ4Z3r*FFpJ%I_1`F@PBbyc6y7)l825nte|PN~43mp4;SF^|J0zeqm>5UbiG8A%;%c&fk@B&q4$Z)92J6JxFFGm!FJe9d%)y z3}(UnFOVCfL66ev2|;hXB!pkt$SGHpBjULn$!Px*ycwGyTDMjSGVE_@mB9%v2Q zQ8JTtbwStsRV`u5_l}3Z|KNS$XaB#;*%p|ImeYE4M358GGp8fWI;Ra&xY)6$<4swa z`I+DsQe?I#4-JJo|9|%01J1Iey7ND;bIxgEPtG%vMjA;V2}S~$i~|;6gh|F=du_bI z#$J0_#`fR#djIRl{{6vnfHlb=^EWnN69kM91|gw5(&QX^X1b?)I*0eV-~anNb?f!( zp6;G3M*LOX_uYHzR-MY{)H!wP)W06FH@=^3YKapkC1iU&#$ukh?~GM16!BL637{(E zn}&ogr78OaqkR)L(lKrmh~jHE7Tbo`)!ByIT5Q$zEmp-OLoUMA^I??ho*ApO8jgls z1@T^o4gDp>5OTEECkeSiN-n{#s*cO^5V>J}p35n}n%pVw#X7}NVuCT7lYMiE)??qk zlys)U))>?zdb*4G5EHLErqAX5W?>b_$ zhe55o{B%U0L|(cy3)FFbBE+(TElD57TO6^-KDXAp<3Nd8)ap>0T*UNapN>6Uc@~oV z$W{n`D*$mqQbLC3bAl1!DY!axj2GbxM)FMVY-sOc&T;;}mH+brcIZ$q>~lA)z}d1& zsy|r+mN=hDMQIDt0y)BscJ)}%uYc7F)^M006B$SHo<;rScyE7J6s?P=Fzhg~gh_y` zm(Ap2UT;`z_O>NvUp&ehD}(U@4T-4ex-_fTv7G85 zjbWZTEJ8i;sc!c1KW%URFV|V2xD@I>67Yy0Do~heTT^bkztv|I%^<@I6mffE{D?!Y zU0GreeWb(IY-zStXl6?}EvB;^8_o5bN^FdNrIVoCm=JZB!HCcpcBIGQVhSr#R<)e{ z;u|Zh{oWdDUt4Djme*QCYutdQW)AHTxfE?>l&)8a)_3WE&q7#ql2hdfFwjt6U7WUd zl-ybi;o^8NQ2nT~RB-Z_O^BPk&g;bOi;KRXzUZ zx`Aw#M+=QJYaRQ+u>I@DcG*w;@OTNx=(%H=ps7D`yE zI=y$q2K&aW8q?AF5-W!t@#Je(aT?z@&9;K?T{-|E$-re5B2!PLzm$E~XI4SD4`DX7 z5VNTu+!v<1_K5fPmkamORH+WIS?IZweec3*S>c6HC4Q4a(kJ&J!rlWR0CTOTLDwRd zm3T$OxEo?Do9IrAA;>Z=Aq`|9IBh*shtM_|8LL&ysnuD zj=l?_>6-3{?i5gyfd6rOAlw@1s)I<>rpM91bWr>#oCc}j;N(3)k00HS)^~l4l{MDk z>480*xb5`6J#3}_?Hg7+4`R9$BCPo2Ev5VmF$UfX(DZ%Eos|Mw0(<=59tYnMp1h^v zI=;X=l2Rr_IOK%p_@FWja%AdtwG6_9c;_J%52~MtZ0@}gn@Fc=15IW~Re74l#V<<9 zi8hGy6k^IaL|fdZN>fGN8bpwAdY>|C$(|5k!*wP0$j7@F=g-)i{^LeJ2q7Oy zL}I@DhI)JI?-6ac0SNB+Vx0_6lr1i_fB5gu+57*p%vu-MIESp_3o*Uo7Z%y)-+RC+ 
zH)24jCS~QSZ_pQeF>bnE}d-G#CsKEsX(V{-Lyol8M|3ts>x&gC~fgM z+U(qj_$aQ%m28`9GOfvEgy#`Lk8;auGSqTnnDMAE3UuM_oY0;waImr#Qb^Jf$k<0~ zD{O3Koee(GYu%M)sAVuIq`r=Goq~W%QBY2@4(-WTKU05XrQ7>jeixC+&|0=je&;Vw z`sDDn?^$Ez7=(&06o*WC8>+6=mb4RNa?g{e?W^}4utVP(wE7#+y#UX-engVx46y5O zF0rrtuS1B=o9S0!Sti2vB^Gu5%XPreBw{WXYq638l?q%3X0Fh4e^Z?lA(`xXe9XE! z27HW6?Hk08cye<)IP`k6-+LN$cP3o{-%5UiBu07(t zl{(zcl^uVC-*<9dx)btHM_s^>Qp1X%KFvPxZ+8abE<_;%{UXYG7O~e|_HkL3&JA4U zK+UTjXNPp%5EB~eMSn5q}LF6s5H-CcV&Tsta4re{y z#!MF*?3zH2_77UoHx2+3QJs)1ZhH3*utJ^a*>M|Eiiu_QGzH~ST}H&{5RxDHsXleC z@dsam4ft=eZy#bmjSvX3F4a&Jiw{76_NB}035fPCI3S&iE`qId(dE=sQwX;E=KYYg zxZ4J}-tYePX+(27?5BPO&WJvdPrwyp>G}rnz@d}xt>Qr?ir(vMw1G!Ne>nYY}AK}uppq4tBS6ywb|8Uk`c1OQ8EU2C$iuXvlAHe{a z4s#J|^uz<)NlhRf*ohcs+roeo1r^yOS0$CYl5^Fon<)@SX9HWOj zU;IHKxXQSsV5)u=i9iC^+n%xb^_Dh!-~jcz5+gcDPlPIOP4o#X$09Epkg5f0li5bm^PDWvU?)U(8Zd7P@`%Osb=lb zMZ~8@*Rz~@i1*9nJ@{U|UlDGZoP6IZtbHvF`IwDx$NY*L&SY%xv1hHU^Q;vdKV#Xi zKW9Z9!&Zb5|7soHz@Qobsdf~N0#WcmOew}b`Bkg?8q<;X1o5R5iIAyI6ZwKOm^>nUN znnI!*7I2m|9QbcPiVba)rI3UOtehZ=XlLQFkHcL*H3rd!*oX@fJ{MPsEWw-c!cdHZ z^`eUdZv;6$`1>cVvZ2IY^UgJ)jz~Pz6}Cr~*4eUGSJ~00hp>~6i1eZo@dEg$|44aQvMX)vLLABEBTf^sYVUaz40?p=+?6P-l+w3MVqo`MvrsUOsyxkxk z85Fz<5o8sxl`hQeV&94>PRlUC+=)XwC21JrL6#C{avB?ToVF+8p^8$vdEi2?3lS&1 zc_NsmsA>9)CMbjSm0|WT_VV6Guk77bWOt*@TmcuXVdld^V3aqaFW%g26P+imc@6EL zzK6H!P4|m!e!&v~uU%GbJ*U$4f8M&w8a5xW1vgZ1;0JXFP8j9Tm(v`W)A9JQ4IIc= z<=P^vzp2DIZN<$mGg-(vEwjj`_#+?fw9T(PWvg#woI7pp-asbFAeQ{Pe zN#)CuWl0m0Md8)Ua8i=1T3uvM{Of6Z>1&s|zQB}r5^SaE$?)@vHS`nu&<7tvK^WZS zOPK608R0C`qLTlbz4!C~lKRw}9)6*U&#yh=eg8cHkBk=Vw9*M=zC{aN;b7LTdJ(-r zjp~H?@;cE)qTJxetp(}88#m4!KH&PlK6(^Kv6FZa>*>3LOprTy$i_386=g1`bmi_|M>*iI{`@)4>t!8Ybp_%aMk#0y z`d1@J>ePv zsJ=*CFFC_!b02gK(BqS`f2;gUs-Pz(vQe<-pWb- z9vZE%Cr>rmd^F}Okm}Sx#LH1ulzn!Im30-xi`0tstoqdw$(b=LI(Wv4IlRNWSALNb zk5Lqp(xXgZCyF5=nxwI}9)hf97=sXx@I1^w8HF$paY?K_l!&(nM7Z5wVQEjn=Sm&T z+U*JP4mFS@ob>1AYa6WXjxrm%6h+0nddfWlUWB%oC8?&Dm6}A}GSHLmgEgj11+2pt za~M)-OMx}91yg;@Pw5hXBx^ZX(;l)CT+pY!@%IO9`GzJ}z*LYT#%+P-;B&P9G*C0Y 
zf^ss_>A}sOmGDJpMAR-WwzDUZT5I_&cC48p&+4mqr6{c(`Pz^@^Sut+a`)nZgOc{i zMf7C(5zf1@-Rzfp*glL1JsiZNZpxXch;_yX-$k5qp!!^>TC(KD*z#5!pV` zSBglt)J}km5vW@Yc~nZm30G47m4Ulygn)nkSNHqc;fm|wyl9`ueQ$ipJX>)05$o^C zI03#Sq(sI&9+_|0Tw;A)8T+SSJ7}f#hi%@La$B&r(rR1EtP&;1Qr34fAn<;~%46sc zELu}%8*iQOV*OcaFy|ffJby{8wQsGklPLa_U~TI{w+)HFr6DfW?Zx)wznrm-J&)KO zZ(C$b*ERUMyKB(XMrS*R?a*`G_Q+Q|?aY!g>%baaCL1PKv4^OwbP4%50guo0Iq7On z)YMqr$4=RMHZSqXOTIRCgz><5wLRFuo_J368-)OB@}}-D13Bsk3MZ$#5QoyzojnA; zGNxyC9wAs#dvd354xY%^D3U1THnIEjOkVk}iE0IVmT&&)Hv8gl9=66COPttxfXn<* z`->i(K2s*UC{h%!_1;&LnCcK3X?&kp@uw~OQzPG>mC*vVx%@>N)$ zvVQITmzxD#q^Uadd)}G~JH4~pN)|{fKDJJXelfojk}6zZY+wKAAzRIweho)3=l72( zuwuU6u*q!4X-?Ba1Dz8f=_rZK^XwK6zwJ(SpBJ1{pT727JlET@-$gF?MjwH(A+EjL zUV4sHCTT;Wv28cB2RO0E=R!{nllBC&U28(o4<{oJ1+so6%;GNI z#dn8i>2Dkbl_#U478waa>0q_xUVyk)nw+zsc%+HkH)FCk$e@FIvv zBcf5k?>snS#~vM`-Dv}^Y(GG$!on6f@Yh>_ zZ=7$Re&-%*emQwoAG%Nn4CS#31l$7`_33vZby$ih7v-yBqz8Sc#;t#Enn~h>)u07k zB@sC77qi7%(nCUmN;zI4xKbCBeqtdCPP-o-v4{TYfZhG})xL&5y?iB1W=2@K5djtw zqgp&2^+Hb%|J*%Fag-fAohXv92wa-UkhTG5*m5`g*o5m90BY8DyorO2zO(v-uXzh> zZWwr^%lt|5bn!CyS<(ewmdferf;mW>+P?s0#VeZc2z-XS`ka<_7O$zXM?2U+w3tAb zF~e5AiK4TyOTZZ-Rf27G)ywwzY`P7a}@8 zmfg0kAhm6qy5S4PU7N%^u6tX-cabY;Akhjxf=+AQQo6ZkC6oa4qn_y)C$FsXqw|lB zu)pz-K4Ilw*=O08wpt3pEP~C~;Pl{}v?@z*0Ea2T)Z$ht4Fr7f&@;^HAq=clg$m8Z zCa~fQlztL_!H2sRGOef%L^nM!WCf@C;Z(>w-?D*1d#oJMb|}0>3_(zBr^_s^u%ZU( zH_L?_gy%Va;{Zh5x9Qin*pB|k?8w6dR=QBSCzFI>4hWa^$;FSDcJ10dYG3>0UVGCA zSlY;IqZ=@^@#wuBc9i1^%j;R!lic<)T;#$8qNJk0%9hf4sw%FStilwc*rk(}j_;TD zk=zB;X98f;?ep!yS024Ii2=z-)GzU8<848d+jlDCLKsavsv+9mWN=8rn-}Vodvz0c z#BE^0^tJzW6#Ll|_VPC^cM-k1Qyd~S=yVilGQ!?#@!Byd34^489k$nTXZd4%Fx)3t z>a;=9Vd7K3QfPT|p_OpewXzKot6VpJa2}fcTIp9^PpwZMYb0I^r+4=+x7$CwYp*rm zb@>xEPwZ;^P*hh3z|K?4d?X3csa&~=j-D5J7i?DIGKrCv3+a2yP{u5-K6FJU4dT9K z17XJ(dhGL!&)84?(grJ@(Jlk!ip{uy-8NeH*17ZQGFfK-#pUJbRc$8JG28C7@*?rR z$gXg1@cZ8!D)AFP{T=(+`**^Qto40*K94*%_zTuYh+dlQw=9{x2m}#*9RKozR{lvi z?mN*$XNy}F4p>Aw63i(86amDRMH#AKe$dJsV_$aR_T6ZRGwO1RChFNF;E?#$piCwJ 
z^7k)rLKzU`1hZGIaAwdnFVE@Bjtz8F|@r!>P?@+t9o-5T~~|KUm7ct@+P!g`fB zK@k#hGH?8q)%FpNKFrpG6;&=(+!b(6$Y-N~q%(Sa+~!GX(4Du|oT)ld1)dbLUiouN z?Bj3PX)Uk6Gy;@A$tZ%5%c(wuSTEJ($IQYFS1m7wfFH9bzSwC$|B=nEz^8QO<{|!1 zAIn?%Frm5JRVcWX!>!jMW-p`s(j};30#J+& zMk#G5P5yHXe}trRzhSH~Twm30&;DGe?fv(Dt71*x*P%ldh(Dt$baz9eiYvN$w>mc!re1 zFX_3qK4bFXkFh*+_))v}L)TeNqt5vb<;(?-lQ6dW`8Hu0Z=oN+eWO$$wkGvbe*VR_ zzC7%5%0;~PAFfHxo3;t--YIOOzWx69!p{6VRjHcxesk?t>jmUciaotxs2kA)pU5f&x9U|%^m+vXov@`8+%k1%%D}-Y% z)L|HyD+Vf=!CJwx8Re?SDnM1Z> zV^a>%Yf`ajb-lgmk5<^Heg;wWk3kIK_O7-Qp)5x6Ok396DaqIXb0i+Ok!`+nk?s0W zmp%U3GggP#TN9KEi9ZwQt5yFKi0GR(mD;IqVwi(H*wVg_-DdHc2Kz5hZ?+Rh`>hPZ zt0jzbCU;d3WhcmNtt*8a(-KdNGgIF4{mw!B`5*PHen5Lm6a2@2c`X*0ABCq-9~3dV z`fWZvJ2SqkUqHCeJUwcg?ya|<`c<|%f@?H(R1ZE;Va;3RxMZiU0?c$`%i?_9PHC%- zw!C(}?fB?Ut9sJ@szJ~_vZ-4V~d;cGA$w_FYw7CY0aOOG~nQxv7 zqI0e1jcf9ez0JXH=V|MBO#i{%5 z`EK^!&;2*{1^i#^osc%kR2A1NSQ*bND0L<&e5{6etEZ`hP3du}1iZNwJ+dRrTogt= zy77(w@Q`>oNfr!!KxAI9rr8SX0`V?ZoCLuH5+7m*V0m!FMaMo0cW=F8Homgi$~eN) zK^!{GRudGRgbb2&9g^owaRn>Mpx=4LFAJe9X{($1SwZDuaM_mDq(5NGFKX#k_N+;zWvkzwsB=+Oek|wcaky0B7I# zRhMGSm8luN6~Nt{JuqflKG0-K)-g_Sk(1N&vW?At>3J^aT~nPb{D(fM2;OuFKh5)n z)S=Q>yUQm1yZ)bz_R+UIWtD3X!^2w0l(eLYO=MeZ+43`9^!|Z(L{E z+wc7}!7VDSmRv%eTzWnPE<-(Bh>P{Q;nsP!>K(`I#I9kaAD6#%Om&?`&!lN>sqOij zemnHuE?alY{BybQh+h`MBuz@{KqhK1TaJQsN$UPPMO)_NzO&m{E4S?hre)pFLCm9p z36J>v|6H@g+pFTf_h*74!JB^{&GWn1CvX=QLcs9YMz|)Ns9YZ35t(674E}VjIh?+# z@2m)7?HmD%Q8c<3AsGy>q`vs5*;0tGG{hY{C|L#|Fd_jJW^yz{JWpARf5ogniU8w= zj&)dtj?>etK!+b*lo34goW$Vr_3_SLQ(M&N(Z+#Yz|ooE)Sk%h#<&R_y?Vr08S)5M znzUzbIFH0~0P@dByp!_hWy>E$z(ssk-_&Y%|5lHEe?OE75S1VcX6grB4~^PgADVBk`?R;hxYyCCvTVenERZm*^9ctXU zVx9k+@~$PDe)h??{aBf`wFjlz*zV^*=tAK^_(fHrUJ;BlG=P-81%rqpPil~Fh$klo zxPy4Ptl%t}-@!EDnbfwF6ifo*6Bx{^8pl}wuoT^{Y2qFF)`z)mqo>$CVE2b0;OijZ zqGBRLfim&%JUKPHv|~gmap-dD1dG^Pbo87=F;cQ~yy9eZ=B$YeW?P8q3~M=u7asjr zJrlT@C!e!2wpiHpD5=Tnlsg6X9?0N>(B`c*J0QJ}u{@Dcy*TXik;=}BjWY!#&(ZZA=;%2lZib`MV2*i!J@UejnU2N@ETyQV zW%W>YC005T5qi3fwKozx2CwJ^X(7GblrKn#k4@x&Zx{K?bHa7YO4x9Z8*sbpY 
z0*x;9fGB_FZMsv6>Qt-+&E-l)SmQ?H){B@UyrHsn$|7a_e~kj^2kO_pU%9&3Wl2&k>GC5*#dH zWjJ7@uqm$X`^|5~(VREt2alEvhz$|GLdkGRnhP9$6M%1WD;<1|!x z7w;kgU%0Z~e(`T^v_+dLtm~0c*J4*Wu9&0Vl<$jysqpFJ6L$NX7JEDqqHE1Xf~4a& zfA#Y>*}N5HUVq|(C7QmPPW@fl{i>GEuaDZYo2%>t|9I0R;Z~hTqLr5sDdiQii$OXZ zKF0IDUDTtBr^J)~e_?S$o!s^2mG<&KX|q$?N358M#6>%jE#hFFvj@_)@VW~7#lNEN z(aZ74%5+3PSa-76w;QQpJdG}wC;>7uAt`PL#hg(LD9E&3yvBH*009>iwxFGUx0O_W z7>&QLKo$0*kM-H?&-`<%PAAK<#0%cLx{aR8b9fK|y5^m)f9nL8`)-1A*n2oG^y}RJ z8UxLhSW1A?l{bV|9Vtx4Lyu6y1?m!;Q1|$(ijEhm+eGoHyx;ZA#b;fLq4kl>$S~>c z#Lnj&HKCR{Qfa3Y%Q>=d7uZn z%eZ8$msr2buLM%V-Pp%RLZUOuT{J&O9u;R0j%xiJT=yK&pVv>`<`A(qw5i|!CpX*n z+xFQ5e|XHY=!#ZjRZM+DTkEb=|J2&LBs|(Cd6q*u8rGHAy`NcSH{P`vv3;V8!kC+F zh$%xpk-VH*qA$3%Id=IanKSi6;Kgw;8))Up@faNcn|@^z!Jf7U|MN*}`DyfNsG|$j z84%$<^Te2Kx~I;5=D%L&%8FVBQeQsDIikY9i^=wJmgS35O39{6L2=t8>fz? zK6&S7vTytRcM+fe7N2gXmRF);sRdpt8SLm{YbsqLW3{Lg;)EDW((Vn%cQM&o_ZUZQ zGCMKM)sH7~;S`s3@r+rX0yMNHx@eRAmMwf-8~T->eCJw<8*eU5edM*HoEQ6wK)9uf zIm<9nC)3Gg*E7N+@m8Ml9X(yWB(v)f94mI5={uU&iD<{5x<}qOaN&XArx;KZm|HEV z4`S#Pn&!nP>AV=~_Y=c%=>!O2!bp8M;NT~?k{hJyAsTR+Vq`kbSjiED3ah2~LH=_& z$pLhqkVDlku&8bwn>w+i0YL*VaWG9rxu~c7LHh8Q58FUT#_Co-Pix-Fsj3Nu{hlr+ zRXDFQe+oYlX=;MehN;^3{N7FW)T@r$w?1*$4*koZm90cGx~0&yCMB57G>e zq%Bw0RAB42w1$`I83?`?^{XMGe3Vue*=ygu*2VGLKYP%g{(H6;HgNjbBCQT7jD`!D z;O<#T@)HyDveU#qH}y-^->cAj*znRe`}XG#+G8K%ya9?@xtRJx#2d@(gnZ_rpOgQ9 zy21oV^X}oUaU0r|v1aP<^}pY4TVA=yYMWsKaB)(54iG$Izbt{!R@4<*&mm0EA`!YI zN8Xh$OEA}0002M$NklljwWb7{eex^z**m4`ru(JU(L8+d!HtDbBzb( zx$AqQcE_)^*&E-to^^C4#MJ$agj*RCuEpx4eewyjos3zX6sf)5s(;}o^5J)d)4T3k z29UolEiK5VMG?eAamF!*PkLV2TD~nTZeTF~3k3YnQlEIse-hSwz6kgWhImtLmM8{H zBx?+Z@p`}fA>_;GNjE|mZsQd@qp5W~)1$`+5dEH|=R%A{lp)4hh%s9r!!z#%oEtt~ z0G3L1TdJzhmVa+}E(HYjTig1rFjL$5;Dl}4mQ8KnmdQ4MPEE_#iTBc=^lXSXW6u4i z|EPm#t%qYE-AyNG$@};agz^wu8`uUTaW}EZHWv?0T>UX5t55C}h_&$vw$&8WFgd`L z4%2BnxfHY5tN0*jEm4tgF{J=Gnt}Ui6rQHTfJM1rqL= zMk-Y>o2I$LnEA(%oPT0CkWUT#I7q*P^{sFI`3b9C^*j>pnX@^guMoN;QwIO_FJEYD zZfdi=?>=pheeHzp{z|V6qHrbyET!yumBlFEatb6zT(M1f&($*zGRo5V#Hej~d!x0s 
zhrXjaxf0Iv>*;3Me%5G9wl&!PH?fuM8vuqtdA}!Y$CtaU^J$QMEBmN1YND-3Qdr6- zs6Gi{GNLt1ojE>sV#3N76xym+*Vqle+h*%;ndfJd&tI|EzWO_ycsH+r<5WRE41;{m z)FlfL{0(obn{}=GN|g6X!|(}EQhy6pz!Cn&4R-racH5(0J8nC^a>mY}NLGlA@^T~^ zUVjjV*mRTqTzx#`On&MM8Hmkj&$x{q8mI0HY&rG0`M+@rt*uvc)h1U;fK|cHo~m>hlKn z-LnNUH_;)Va+=VkNVD@xqxSlbF18Kh3E#nk4of}a*5Q(;g%{J|05@r zjd4=hZ`k|pE3*3#tY2mKf+F6rE_{t4a^c(W%%nd3rUQJ_h57&u`mWD$&gDcUv!x!) zD-ESS{%&gi!kskS?b&tol;)JJ+7r6$o{p?-MQuSQ^hnd8Z4WSgrgcAi((nCOFzUeF zj5vJ=1U>|r`YVWtiL5yxim{VT4F$)_(VK_6A>Is9AI%D&$O&Pb5*~lxXh7&hrjnc5 z>0|+ZT8d_xw)+x%OiXtxGV3i zVj;w+M;apM#hxV?&rpAl=X2< z^$P1KD_mM6=AMRN@Ofmal8FbCfB+zgeYB1IcEzS``&Ed z<)3scfDoM4xx0q@5ydE^mw10dVIu&pFs}<2pTa3>PmXiT!uL| zqmQnTVscmD=gTj-X|Q(e3AACHMh0c%#>X|24)x}FpD{~HAvm>@vh6;Fv@y6s z<5>Pa35-DnN6!g7zwzT~&=2)(oN*_GwM%tMFVtn0bH`ik7yrD$p1$X}J@naQcI2zr zq+nM^Im)53pr^4ghRNN5r_=V9&#vUS+@+pu?5DHlA(lr+gWj-s)?hP!kc3G@UU;(y z;(ZLZFdHRm=D5%e-jbBcRg{W!V#Wk6-}41Y zyjAQR7p|_fZCgs>blb{@ZCl4vA5WcBn0)7Z^q##xz=IY(RNNn_lpxLLjpCaf0gO3%youxU9K5iN#mtgZ{CQEBINs)LBAs0$(W z7EEyJy7kpdZK!|DdOC-!hclbcVuQMm6SYQ|G^HuKx`>*v2qas~Hk;NZRUAYzk3&j0 zMOv_u_MJ2ROZXUxWkinB;?;HC9ScC(6E=jog<&q8=S+2YuPMS9N-1ru6Qr~jpYL3( zsXB1aEOxFN?A^apZ-;*7j6M0S4twsK-PZLm+!6S2;9$mH`A6&r2BFKvy!cDrkGgTL z|0Vu2`Q=6N$6TGMT|DQLA#&d)z0P@>tnxWpJ`R{9c3pF&U9an9i#*E%J!$LZ#6Rgx z4AB0AeT*}}DgPC4b(Pd(MNO&IHI?~}hQgW69^qI3^V-P>)Q?7O{j=mJT{e3t__U zXeOIY{eAY`pWBf7*qxnM+g|z$mUzc@po+5+UGok{9DJQ~)}s?+AEfNX<19is=gs|T z@+arFZPM>zo^l7N6y6F80ik6|Iv4R)&y!vA;@PTS#i3-JAm)UU*NFO@IxBBZDZ`i{ zB$Xgkq^kgPmieOhh=iQz5+|Z#cYuAx zD5vl(q!m)ucSnS|$oeWkd}qBW+s68PBb{GPQD$&H82NVcaGyQ=uSe|Ep+W2Cgso9d z?CM3Ubc8j~k|iinvHmL2?GJ-+tIpDhF`KR{nLKMdR^$w2%J=eMN!Z9G3HiukoUR4S z6HWa5NV7a&O~Rd@WJHciFSd`Yi)nwjuDP5Lvf5e%F^V-q)6+NjCb8=>j;hpk_q`mz zvj41|KGDyRD1^wNXi42j=`vQY6IQ`XI#BQr5Mw@O@{8Zjch4Y3&7`p=$bB4cYa+Qc zf>i!A-^NBc=!bh+VKRjEF!XP)AN6>~41Q@QoPy`DxThHdflDU;40~A{6I`!f>SCfO z)D>PfGRlE78S1PbTEG#dewrAVOOja~!&^lCNRpxBdG)MGQxSMX_2Yls3s|0&en|nu zM`4QjUR=VN+FYfWgI#xft#$q)n%Cq*7S|fuIJK^leptoM5+?m6OdfPJYXxdVWlS22 
zcrF5#+FqxUsRO@3*?UpvbhTbv8gm%crsLw_i z9rZcyhaP7-l=HmPpDxLH*7?z0AZP9HCI2;H(B{v1oCU0ji3bpV_j!B z1DUW+6xDmCxr9S@M38(ELdbT*A){$76fu_O=-N)S%@Ny%*=GWWxPlvz;Uo-vlsw2H zr8B~l14+h*Ml5?6v5~`3e^el$D?tb_{W%$^8jvn}XFq9XVFgNFh^}UHDk9*-$$fg; zGq&&J16F${2Tzb?Y1URQ!|dfsQe$E(G5N(Fk2qVh7wfOQ^zKXBE&sXIDmflf$jFJx zMFR3TsyD29IfGMN5mFJ89FOba@y|@8;eJP~uY1V)dPb}tuDJiyC?;8vf`FCT zVq!g<{LN@eF2klUe&-B= za(0o_ zJ}jBu&WKK>w)^Q1Dq51;%gV_)k=5cay342mS|yciWkahhdtabkxYQY5o#8 zT8x&s%g2oJAc#=F_^yL+%rgH8AyxC^pX&!AzUmY1pFAsoxB-lWq}rGn;tJR&UVM}_ z8f{AtSl9g>U_!tX4{idMfJ7`d!8Wmiat(a)a;DcxSogFxI<3}%JSgc+W{gMAOyWl* zl@$?iW<{f$S9L(s%PLQ0KXFBD`)Yg*lLnNyT(cRFzOF6Y_(O2YTD(*oDf?{J;V^PZ zal^}{{YIoUQ?&@9S55r!`=zCGS+OXmrfG7a3x#LB=OfSj=kU#c^)sW3+^h(&i0NSe zuoKw~49wY{e(UcW4@#mu)JjP zLMg#mr<}2Y*eam&x+SA`f(R$+L{_K}&J^NS%~gUQ{495r^w3Lcf-^0oPs%unIA2|H ztvEDKUgJzw4=`h$`J>*Fhsg=ilwp|+$4OU)O?pIwyjn&pw`9z_d!@a86Mp}$TNOAaO;2cR7rn}iDbJN3J z)ps?3CY8d0Ej4EUfrr+QTwc^1Y~_bF*ECz5Qi$5rNwQ~9F4Ck! zbSrO*4^R^z+@eb+7LLpz-q}TL6Vo?|#zX;UaN?$lP{g_Z?iTy}2liXz&2YBQ1Ho2q z;-zfG(x_TAfl?L_La>BTXkA|$v7i2*^KCvNZf~|(%Hk0;z-KNh5U(8*>T#R^`zT3|d%jg({7oyzFE`_1tAvoP3JJr!`J>7jaG%{!>nN%J> z*lpcs`fUH6K05<{i*^tfRw+ZxCwEP9bpVUFNcG0RQC352#YrNgu~qDdD51>xX;lpG601iNQoDf5*WAl|U0YmtA+rQ=;Khl^-nnf0xrnMRUj`sO zJ!il>8I}tq)>r-X4-D9;$JlYJdM8WjE4oYZP2r_4Ax^%mtkfDIiWQX*dCpU4XsY2Z zi9m_f)mJ-*UtEMFuBO750hFIoID9QB#5Sj37vR)KW(qc&|Fp1i?5|&7wuJsk8-Qaa zghX+TwTAQrR2||iT`ZbxTI?(G`XceZBGm|xY`TiQJoH9wBGjCG7N-saqA+Yy&*;>n z=>rq}16H{A1WW>mN*&$6Q{3|P$; zJ+7by2nrKiNWuM;dlYFVRu?LnUew&4h$Ou!fU^ zR`*M_w&mW{d^wNvHO=#cc|JZ<$k=mKV3j!{tcd8sd#IiK=a{{KXeFXfTVL-W1R3!% zgt@!xEChJSPD6~(aNO*`;D8-H&}k=+v#so8pFOgt6A&!3I5#XJqDx$^mWo7#B~q>6 zuhco+I>fB=n%PcPvLLrc9ve?YR(ryo=#nA5YnX|M!LriNfGhc}wRIf%dB=)c6x|SKS5{bkW3@H4)*;?5gM&q+U54TtV(!{{ zCOD-?5D;~X%ayoWTyA*<@&-6w^$A}qcOo6u(+izY2Zd^N9+lhsj)&zv5;6R*n4>ME z1g9kz=?-X##PtW55Fl9eKCDTB#PjMG8OCXu*6C-*x>qb~IY42sV8jYMy9P$=+b1!1 z4b~t(AtQd&=x1yzF}CMoUD1bGD99B}<3}4<=W}5k7ZkMm^mshSBF|oKAspEF;_4g3JOSnDJIG1+pjev#F$--csrb|M 
zA0Y%Gv-*|_yWyvr?4ggIvO2bcNii*ydNM|QpMFoW;ki>4`!$U)D~$4@j$cW}>lyep zY@5-&mQCyp1rt`yy@sm-Kh0f=&w@PH@$eg)iNv-!>fov%E%J0Rj_SgNQitcHD+$S} za<~Gzjn4|7IHCoy^*G{cGn>tVW&=?vF|;8 zh{#Vw2s7N5;3vXTX7k{hDry@c3hY)$IAP_K5vt%~ zzC#;_w@JU~tFT^3kEAktnDAL&!gs$XKgqXzFFnuV?ZNV@Ipm!>7nh7Q-S5&-72%Lh zgzoCcA}Z3&=s%IMV@M!i&A7x3>n_QHh_NOzx>RR%cHO*YyJ^EBc6e4>3&#Yvai&0p zM9S6G*4$cel~olkW)^|2k;qwMW{7Jk1i2g{ECn?YUvaW?7KqfYl+(62DegI;h0~UP zK^jHdOjPJYj4?XQ?gaD^M%l6|g+G0hp27sgS@=W!Fb&=kH?DPU^@kwpr<35sRm({k zP|A3UAo z1}EO?P3OY|s`aLXcu!AXSeRZ|PDd^{3o;YpbIc4-|3(C-l0qp-Lb~aj`uHFNWFe7;)P(}>k7(AWe>%WdHJ&qvck>$4WX39wzSi5&Qi$Y7|`w$!`MmZ z_@sklHiogN5z0DzdJO(57&r)x+t|L(?hh;K> zJ9jN?IHaJpElw$f3rLpmz>(&)E1DJYy-d$S)=5|)bjs|Aox7ez7&KuD8sNNpc)?Xp z$dkhJmitqjkV}5UHy+%@F!5dY;5T{KTDSKjh_MW+$WFOV;&KPz>_ma}b)>C_iRG9? z>CPSNtB|&Oq{f~kUubq)y}j|yr4ZrFMWxY0=GS z!Khr6yPnWZwmwy7i(gV_3sy9^#G%BM?GW`d>#jz$E@^`_?F+`)p`wWf69+BpNWoEr zK5F7?DPWlQvQeZEs(-}snixsxPD*=m$Ha_TY(X?9#z;0cVL zNq`XSO8mY^yf3#Fqo%oG)eWTL{c<*#>a%npYhrLW(%^EAfWzaIUzBv9IXL|UWF!pj zgghc`At|0ix8Omuz|IlG-3xvBMQ9|Yb@K+&Q{EhwN1pN=e5Up-!7XB@FE`GNIeP{X zUb0P0aL9>&n+>LC3Q^$r{PP@0l0eZdBCGb*4YvL#YHZg-{Z`GkFzIZvv&e~ZDU~8i zlj4d?DR>2!h;bmq;klIeG6-`S-->y5%gdhK{4(O z@p4N^yHp5Kb$o?gB+&6L@S6RrigSo}TCyT!V_1h88XRTMHH3sMH3Qg?KXYop206H- z^YEYzot?0=5av-#YNk0_bG(Oo#L8Cz&02&ND;)|UOKBBttR2roHET^}f}}_+shhY) z>F@H>H7zg7uj(y{qz7EmXTziW1V8-MZ$pISuCM^`e+T68C!H?^i(+0 zU`F^}uc4j2nnjoM$|Pxzt_+4j(oAl~yCFfHVT!0pjrg#dTjAiacvjs~VvUQ+t%6Bv z%L3MF*)~*(D7B#()7R*5R8~o2TjFcDWNi<<2$x?+lg|M_n7lCxg%k9WxasezCULq_ z29yp$f8PKSmOdL{`J?Mp55)K^wxIg$*rC(b)!AcbIDK&E1D!0^_5hJxDlR1uVWkpS z6Aw}XO-@{P#a-*>nwl3O&_}VazSLGNFGt~&RYb;$CIv%%ObEKh>;O6sLGlon1H`~6 zK4O24S8Z;ovxQ5Wt#y8bRaI0m;b~%>yT(e3%Y41N8odV9qcq+tA*hl(XmS$P-Y4q- zfX|PMNgkrEwD(7MIDuQjT-8EgNpn(~H zYlc=wXD<)D=nNse4-6gea4UTAy9=+?IU<(PsMNsq0RDmJz_Fy}OY;+2GdyAi2RmRv zNlTLkcZ7{_JhG!ZzU09I_BfV}#+Zq!gJc-+g<8Xu`FMy%NG(7M`AK~8K4ig)Da;E_ zY-q?$0&;neQD?d{*k$?Yt@kdnk?%f;X;x;!DCY=2Nnj?#SqM?~`Ur)`Cof^XxXXFz zJrHHT`&$U2-v}aicdJR%)zgSLe{PxCJ8lVWb(w^FD)J4Pi-l3*ceA-?8YH$Dv 
z3TGxsL%+AT*9r0|mdm=kd+gA@4m-nA$nGDUw1@XGk#{28#a+Z%#8|`=xqqUIz)H9+ zDlD`0jf~skCSR+Mn(L}NmBF`gY9KNWHHj2Q+mCUf!)H)V?19MlfGeDn7LIE}32w1% zxD{LEn-^JAQ==_f(#BS`daed*!2UxSYx1>7AG8LqE9`?O>P5OZ#$vST@6rFoV9GF_Al?$2*mphbiFvB2hiT7pHARSrgA)9^Q z=W3Dsw7a-OVw2Dp&V?+l4W$~}h|)=o1`Td{k6|XMmM-3R)Wx-o4Bj-L1vDAh8bl%G z;2pm)h_V=5C~Re&Tn^%0c!WWU@?+FAR|MzQ1qH$$8WegAg%qd9Z~D5z`i;^j63W*{R0#vI_WgmvFWf>)Nd1t~#@)$iHj} zYmj(4A%e(vE_t4(ehp!itLWl$yhs1sU97!69-*8Z>SBM&lGSZcJh_V?+!0~KyAu%A zX;SuZo6Lojum%U&U)+bbbvHJ!;eNU}MCL5!Qu{lKZSYv34bT;a_hSr1t0)lXBCK5% zL!5Pva|uTrmQqc%D^_jJYV$+4Yq(4k%@xr5CV#r9;CZj9A`5DlAC_^&e z$PF72H`S>VUPa->mD?zD21|UIAw=8QS|5jl&LY7rtVPnWtjNka+@);|id>Cw%nPfq zS_ffnt+dJ-X)=pjhKwOH)%xlA9k7!uhFng@i0$opRGIRM75WG<_*f7EssNLblP4){ zNo&8ax8HiuOE}GVJbkLmjvqZ`M-F$`!QCD9=z}Nhl&_MAk7QjOx=5>Fyy9z;VVM+oix(NM^boh^|EC;*^msIfR%DQE&mOKkLz<1I;HGXqw-!0MV*u=6pHT zjm0NAq`)XW;=7dDX8{gOp>p3GmJ@HbyEWFtXd#F|oX=5a&&S;>&A!SQs0R@-G4X6B zar2+$HU0g3N%(g~l4iK-9itq+{C4khvmf7-r%=eZWI~aHsq*X3i|#*;I3l4|xao*P z$aer+_1)cPtrO+16DLmE!2^eF|9(#I`tET%v!@(1yU3O>2W+wytKI~MSBvC>u2PPP z{YsAK6t3_zl|DpY&q8q8QWtb&ayfi~Kd7Lp#Bc@fS!=g=c^{;_bVrG{(|w4x2e@~# z`ptH(EPmxHG1UrjZdr?gk(MHBUW9@d+;h`Bh%}R(n)(W~lOf<07slmC(s>}1Dj>*zRT2lpMbLkEx9 z3v*7C~ZNYA^ldjrNriKtg?B_YivH-U7P1s zTYYndFB3?kU3EH_iv&5|6*j*f^c>&geJ*%Ev~Q|lHR!+Z4~IU<=vErf6xwlVwqX>c z9{0R$g%{g?b#nP4@xGjTeKR)AHT@h!n`D>2#OM(^J%AZ;)ET;uC%l>m5DTjqLj5zTL`-m6)F-K4k&>$ zB@-f4tf5XULQ|Llt^%bxx-3;O)hudm?L9veF<*)UamjGR-`IN28CGJ-@`XALV1?bZ z5Ge%{2=D1Pv%$!x`H!L!H-8wTg_1$R#;hCV;g?VG$C&)!P zRCDGROd79)DD(7C?_HhUcH;O+J96lR9p>8k#1VVop+l(ee9gg&8!EAT>^0Tb*{z#z zw>s>7mm)q?IumHV;y!F;=F&~VSD-q2kaZ)gBZ9* zKVlXpaajN0YP1c-_4d-GH&7-lFhhK039Vp)dkXF7tR!m?*u=jCUc$daN3@+P_%jjY zdy0v?uM;ELmBN;kvosOPpkF2lMfx;(r{kAXzx5k21N_c%Yg<;~(usyP>bEh}ZIRY^ zXK_BE3>D%bA`yPU5awCRhIrjTo~kPt#>Q2 z7m2rO;Zj|+X`Zs%P)B71XmNH9;;jKL@vQb-&*XgMvu4RQu|ljz*+$qdv~Zm;=TM7d z31WjBydx!pNYq-}El`wkyT`naA3tR&ojcu57|km@DDgb)NjQ#ir|3@aOA6qUM>3dXZ?(! 
zkt@X?anYwbIs+HI|CsIDbHtu}=%D@J&>=#d;vx-5bhy{r%JQYQYSjmALBk4bte($m zhNKcf3w8nwI)VvWRbvk66Z1#PQH3g7B+w$#>cY-NV>4bH^w=4UdqAKi;x1X7a$EL| ztBb98AnyS9--J22_#1X?x?&ailj#IxJ%~ zaMK(>a*7lAjvZ#(-CnL|y6nV5ScdCmc0vze0~m^#i){IlRkq<3WzGS+mbgaXgvNPL zlk>H22&)r8avO-D27bu7|HeX4ul$F;gpwz8+@Cce%pjo{IgJwAaVCgplvi+;cGFro zYK|M7w+uzG#Z}hGK{PcSzEj2oh?LH$8A|VZ6Y)drv);b^c%8SIfV*?YgnJjUR-NM1CfNp`T&w1jhu2R`Bd+!GuC)8Wc=H+&0&vog zf-|E}v0m)vRVC|o0F!a^;rMg>M7k9yl(>TfEE)oC6fft}4xcA~Lt!+ig_JX-mwm+z zu3#3}k??&o{CS>Y@LQH572Fkw?s=m4XT?)p2&bv8RVESr9yl2ky|VRm`CA$-d)s2? z>{Cp3Jc>sqDtkMl)Be*)q)(nWZO4wDfI~WBJD)jd-~WFP*gg}N#2|JzpWh}_bE<9K zyfzM-r~~hgvzMGLLekvv=j?_yJf}|%W+IxI}Qxk$-PKn_Kn%l zL4kZKpUJ8SYe-9X!R1GF=jkNhk~DcE3_31w^4`Zo zHj;WLn)aOImth%Uj9mxAC&&Xk4occFKfIyDmcOjV7Obkad7KO1G_TTX8_NCAk_60@ zoARXmc+&g`vQHc|#;%5oWsk7^J-%>H>cD+MV}y@6|J=i5q#3*>KVIOIw#%MHMDe|Y zFcdqmox#LhZRnCYh;&g|35SwA&Y9jHa){fvYIb(j+>6Bfa;jEGiQX>&DOhRsurSPK z1qmT^t;9+=$~oNV;2_HA^tLITIP^XZ+Au7q_Gsr~I7ne)%~GdZX=c=ghY;)3(!uCl z3LQk-3L)lqz}WL@veM>x!dJh=h81Xvun5xmmU@&JdS7{@GT&obC}%`PL} z%dnxddA_CALuA`(r;PsztkMl2%9M7l)*6o-K5hpRBK_!>57^G3eI$64i@LN{F3whF zjd9cBRfuKRd3%gth(cR%dI!6$tgFSAEWX|rHm|g%nt4`HTJ7S?V3-2JB@?#fIU+#- zzA&B2pi5GUgtLo8IxR&p5$Ud=ELMn)W)+HJwe7{$vK0bNTQ)D?s8Nja)HZTphP1co zJ9F))jYLG16`Ec|Bvjqy66n|f>O$%?POQ9qN<$C+@4-gksC&_@?(7(_jzj%+VlTvc z7yV}!hGq^Td_&8>qzyvd5H$VEZj!zN;|`#1h80l*!W$-)A+1TsyeH-Hgp!}&HNyw> zAiSmht%;J{IEHUVsPo|?^nWzztJW9V{LSUI_#HL2kag;|cGjqyuxux()(p-k68MLD z4e*^%&y@TO@z3j()USv0=Ol*UheVS)$V)Kq@f`Sq;Y*7=2)|<18H+>VS?~JBa+XUT zb(gJU`TIBGsG%LN2@o!HfbgteiO10;vVCVzOzSq=!!|eJmt{!p6ffXJJXp-hpjcq` zHbrBPcVX&&9j1Eq+>6Bf66=;`^Ysih5=}Kv;%)A9vsrqY2$&ju1#7qsK@senuykHQ z89xZ2(3#D2yZ|kBEItxOZ_z*8`4wVddsksw5dUT%MycJ$+4lm$p`$wE?Xg7s<@7rV zNzYk2Y6@#xqnsICfPofuj&TlyC_0QbB}zWQx|-t31t`RF*jd>YE+NK_ZcG3AW-I!M zm-!?jguTq)*R3`C%pudhYtvRPWm|(xB5*hh14%hzR2{dnuD7JpQd{dRwF=8e%UUe8 z04YMUooARmue}iH)8eFAlRkLhsO@_0pgsPbefHg*djWiaOJx(0W?KtHdgG!MYzJEH z++&8x!#L9I36Ny}Q3!VS3_D>*ZCR?#)-Bs&3tNInJP>Sl4#@Zj+>wly2rTF@hoeZ~ 
z1>=NQbDXrAfNIxzbcVVg>%oL21Y-h??gG>~s+Jd7{n8R^0l~GjS2`!%h$eRpHnXeX zq;)LiY)h1TRn>)* z_J&c`8$L|`Vm#DbS8U6+R@#zHwGiuC_MTV!ITErEE)9y$Y12Ar{9*A~;rQHf@H=Owx<02S;x z=)^mj5X5x2N5U)kM9yV_SBS$nqDD)^-DMgC>dXw=fVNLdD{9Bkl<1f#_S$c zQa^ML+e3mz`~;fOg&in$HS?|k;Soc8pQ=FcvxwAFY&Ds9di&n zAaRP~&8>N<_CsqudX#Iti+%{>R*6=3-7<*u%gU{FNrg4RNjI@pS__euxchuW+PjBp z#JfnSqv>4km9DUSVjax3D*Z`PD%UB8aRMV_;j+}jO>XUE%lyltA1L0N% zUL@YCg-dbwplNqDJ@$hy?zHb5f0nPhB#~Ia)o3d@ez3CPMzr#o zEQv@%pwsLL9zKZ@kMGe8X$V=$zA>uv)}I})o{1qKG}`q`Hrt}s)z)0Iz$(i`u(gH_ zQ6ugPieRfLL3EL$Rx?NK?$I8UA5XES3XUyhowNpSdco#OYh79afq+P}PO2?JRket_ z=Q211(4?z{N>zU&-AOK&Cb^2vyRP~!Wy_^W2Gob-hFGgU!@m0C2Yc=CbG>%t>3-{c z8m@W|x*l4OZ)1~GE$hU$GD#$oCNeJiW=!e%;$6r~emucd<)oN3Ni{9S#8h^<#a)Z& z4q;or8*YE#z%Z8Gide6%vz5QJz=?Gm#JUbs%hQ(|^113z?ojXf>0Z#Y1|apPe|O5O zH=y2l@x1$&@S}BM>Byj{XMIS@x>zH|G_P@Vt95e60;HYcJIh+8(-no|oFD@T$)OWt zts{#tV+_Zt7=PS6&6rHgiyAfeoO}!o9$rLja{-d?=e$S%i@M7Y3u_|miyP_xE^7Pt zrau0Yhk~g5&6m_GE-LjT@QcLzQff_cI@x?Hm(8|v6cU|K8@L1Aa{uW+3auHWj`);Q zrwb7LHU5-Hyeg%LYG%(l;;n%B5g@thw+Rx`p6C>_*>p3+n|+&dBWyz{I?Z5dW6w7f zB_tK$C`5SnUyz2zW0qNQ;cvdziZG_2K{Wx~@k1TZJT$!}7ViO`#MErv8rQd#^;WPF zg8J$kt>~5wa9Xi!gb5f)30<%Y+=0re=t4HhrAb5Lbg)4$Tj@@n?DVzieY+0YGe6jE z4}SJJ+ih&yuwz{FxfZfAU1#g(ufWj68b`QkCQoU^YSL;xEAbn3N($U@UJ)v0;Bcf+ zHaIb8U0j%&uyu?G z0iTC^E-%W53aggU4L5|SwTn~sj_vQYgFDXJktYYN^KlN-KzXiY5nOc(CQTFZx9TXY zQ8R>G^c`TF|92XJ_WDpRG|spfJEBOQC%St$po2C@u|<<>V1Nn4t{&UZvJN)(?7G+0 z*{vT~ZX0i0VaqUm+|=CQCPDM_A!6-LX#x0>xM~N%kM7E%GN`+L{H|x5(vs*JTjtgS zt~xycab_*}I2Q_g)&YU;*7t;tiOPr&ixU<{EaKb>&YYJ3%}-a)e(|Hbmc|^6Uakoj z2=_#0pN;l@B=yO+{DpG$e2L2QYv4uVonIRl^SqV7+Z7>o0~k>qEikw8n$2m@O0+wN zo4`OzA@-_+1-ASStBxMtooMIHIyF-_*5uN^u*nTB2z(Km=7l~BjWCk9jKZ849qw7& zbc#u2_Fs-)`^R^uUA-ajFwF?12rL*qjb?0$?z zR@lmUtL(0MYptyw^|bPOKdeART?Bio(pk*$wQ+Z`x7w9%p{5XrDDl76rAOhW$IoE% z9mH9~7OeJ{RM@`k0&_GaTIB^c zI`Cl#_ouU4zl9Z`&g}EC#y!2JUL@YrYve-Tw?7y{PDpG{C(Q5gGr>@&t)x@O)5NBS zSLbG=4|R4~C5KBCax$39U`1dF8wjo<1+H5@ju`X~kxMUP3(_4odp5M}mO6%}IaZTg 
z=`DdBvK7+tb!v&q5gShI?Xls{z1_-Rely_1cMprWdSc@e8~WB3xNigu?epFCuyP^P zlyizBHwOX^^p6FagUd(yFEv zcKd>LHm_lc)l@J^L8>CeE3vpvRtlg4k`4fWxWD;?+KUIF))K&IH%ep@dn0Age(8D$ zRQt`9Hg5&j63%lR#bv&Na_OpwW?lK&RsDDS$AiWy(f2rhLjBVgjI}wcL{X+XjnP<4pJ3}V z5F!wL;{-bEhI2j%f!1-vQdC7r&yFE+;}kky^Od~^;W3OhxdK;XqI{_MkuWPQ=a^%H zO6y0#A@59K5oZjubwvfC;Y7IafBr<}`L*~W@y@S}i+SGuVbJ6@b^f((Wnyv|oC`$* zM4FEa5l7FaX$~Oo>W7a)loWxtgeZpbmH6e4HhpJnp*?fTCVu(VR?^()Y0D&PAtweI zCLlGSocIINwaG=WSgTH*0EwUwGP11J#y|E;R(1XQkncQvki@9X_=!|*Zrjfg;5 za%>iI7{dfaqc{3G-}A2+W^iE0&4h|j?{c|vODU0!VG^_VkO*~B=}FauTqJp!1#gJMR({4H#GT_kh$XYO5*6W_7T<0QAl!A8 z%~rwz%|hBzPP>)}wpX<{YVf8?898bZ>d_MrYW7f9=&rV#7WSfEy$#fllUM@gh0wbvlzd2TS}zp=kvR4YMC1g- zL;dx2P}*6X@_zQr5T~gP+M(xqY|o?JcJyIRSbJ*Ricu~rUr=Z@E3iVgI@FiM;O7!+ zC(%>#sgkSwItw~Jhk6Pev=iubLZCOAJ2#N|)-x?^ zo{w=+4hTAUh&X*mC{5Pxyy15H@wa`zUV8gxTZzO=Curq6NqM|AVq476cKP9_JkK;n z^BYsrJ=JlCh%~rjAJ;*QF&$I=(+*m5mWH(mweIziNYj|gmI$@3nI=9{GV-JJLZ#}^ z_KEZXJKVI}OLqU8RU1DP_agDWglZQ#WI$(8P=GzQrB1vr51m}1_cliTJ9?XWd zv?IeRVVjsZ$i!(N`b+fT9hcSNH4MVBO|6!?=XE)5Sj4+XmcmdZN@;yb+b+%yTT1)D zIhrrKsnI50x5-8}t+1low{Sudi=CcEgrB4<8H}qaMQw)!BSP zO-Q*ZISomN4>SBKPr7vOboV$&R$IG@7ud^|-Dr#0i``J&#<7DzEU9y*(}*^eKt$Rz zfV}!%q|dT?ZjIUrwK!^R%i_?I2{>wreH*Zhwc=&fHh(3=dP$XY)H1j+n~mulp-w8$ z2{erroA#0l=Jn4{YzM*4^%1YgNw5kdQLGMhkRlUjLR$yd39e&^Vzsk@GsT@Sd=o9qj}bl*@n1e)!EXM3zC14j)9jRpwEesC5vmeN>o;Hk(O zQDj_%=(q@R^09sWw)?~Zayw+N*m9S>>Fw{Zm)w4%tysB~u@`oAs3>t9#Wqo&y6`S~ z;C_5?O-Dq@pqpT8Q_=pz5O2cej9G_|32)%hHL$YSB^KE9j#HNT>i-B9(G&;HQnt|x zA7=fm7X_HYJ{w)eNxQeNw#>5mHpV{i(se7Xc+q@pfhBHwexiT8=OK}Z{585Gl8N^i zyqCSgZq!zvpe?MW8Z(yKK)NS_$*?rDGEwo5bO=}7qZi0i6dB@Y9#yBLG&G} zDZ-npYGat`cjZZ3wz!u8S3DbVP4Uf3w^)1Y8f&Uuz}7A%Eg3XSk1kua^vwvX}EZnp2~tF=|z zQk6wW5Pgb(3y`n`2q7EUL$=8ZNl3^t`(_Zr%m4sD07*naRLp&!@A=aq1{W zjc?kqlW@~mO;iEVgpk3xB%3XVzu8uZg}17zjpxd z_UY65(Ty{rH>@_3>>Wh=*KF%v-PTWcFPALMM<(W7tMm zIjI5j2fu^QT*E@97N9=ujLEzuygSsAEYVLL{;RrqZl$`B2v`k~;aT~psrK$sGi-SA zILpr~c9!Deup1OqTO>%dtHTdE&lA)2)Dq*?l2f|&b-4aRkuh2E^lTe@{U94XwZKMA 
z9Fz>U(W%H-n$*v=cwq3u(BDgACH=PE^7sgV@rFPeu5K@Tankz%X+^pdD0qO@UZPm# zwb#A$3_VY;k@h-Q?Y-q!ok7nxihFVQ;1Xzyz4#R&OdOje7L^Mnl)>ZNivz_mrR4mCRa4bkH_99ta&n!!mcba+zKE!(++v_ z$3TFu$j{gp_*o1* zKoBlow$f|t4ndJ)%s0pfrWLEJQ2@%_$zf6uZSFRSX6YYp;`RR8Np7ya&-+XFQwX5# z!ht(>@FuUXiO)W^8x?0(Iv#usBEwud=5Yc0?vyuoh~-g15H<7oW=qAl*zRYfj%(_g z&gMPa)nWv-Ntml-6}IdXXldWQOaUr)k1KTs8&S?S86i8S!|ykhnC1_3xeZ)5Y&=X~ z8r+k$#hn(0I5;BHe}4_d*cE7}ljz8s`XHR~09Qn%adz!1*|mWtPTqIot03$&@a`8> zKB=gGGR}m@W$`;Xi}}J?+3(r8{DvK*`ai8?h*X@Xe=AWx z4X2n{nR;`NM&Cr?%HS4{Tq*k4!&D0D93BTY<6c}2Y|ZH9Ro#id9n*;GU{chpVYKzs z9zT9#8L6M0mp$dRNccilzzKYM{5IbFIDsVXkL^)l7oPFwCD%?uknYH5V;~;w!_XH8 z*A^*huceJjwtqHCU=v<;{6;5_TzQpa0`#(+A@`Wf#C zl~tbrfuRVOFg4zT!wfx*`2OV8b4iq}#`=KZC5YwnNG>^1C!*7i%y!Gx30;5AXxuTl z>9)7XZMBKQNTS%%DtY~IbE$6kb~3GWu}Jl&@jgHNG;KijxDqX|nr1et%Acg?r?l@e z5x`p>%ft{x=Tz@5%9>Pi3vt)OwnS{z1X{tZ^5iHGv-$O_?(YX~=L*e7*#jX1|TkBmbjx=!+p+f_!NvR}LP*=j2MqXx~tWT==QSll()j$znFR2%;A)BO8M zbf`a{JJGyu2a+@xvo&HVGCFHWQncPZJ)r&DFgmTmU@MC45`8?L+u!KAsAj8hT)tGj zVLvU>xS8Ya2vAw;=_s+livq-g5x)M*uvQl?5#$i8lW{`Qk zNkJr_Sz9cJ@@MAHqpFo@e}Yl|d(?r$o$5mDtnk}LlC>QhoX6k$q`B zVKnz!3)#Wd4jL7eIK(lNHQk9EcT2T3-I(w^4$Xf?*zZOA>?S;I`KXpMsrl+ss|0cl zpw<#=hlTdQsp;9hv|Y|a%&y-bK%|BkeymYPGTSW}(XxpM%Srt8!$A5e!eh}hUnb5Dyr^wL3#JdM8_f!AkMB=^h zS>fs+=pfR;DJG5EdQX;1opt}x-5C!WK-CIS>GV;i&-Sss)&0ri)B5vo#v5S!9L4D| z2Z^quqs}<+G`J|W*)`k3pmU}?QY)3Cal(~GDX7+RG{MIzxD%w(DRXlDYx+Fc0~!$` zF0XpC-nZIewPw;Nz$H}3Z`xMC9?Fi(S5VqW!XBaZ7YE{BwfN00T%IJjfi6S?#UE-9 zehXAbgAZIsOlXw1ze?RsM#{Kt78LPBsWm^X4Z}(71%&;uCUi(#MR99%o8|+q!QEH) za!EQYZ6R!s&)FFik-oCyUqt6cN+bh>+1sm>e64b^l^=^~_tFvx*>Ljud*W;E3t)ukL2psG|9Ioh8vanTkO z?!MVnRL!cw7ZQpv3~%|32V8#yn0fi{3iz!l^rf#%%(Ds9#-n#%#2kpUy#&q4rhUSS z*=FGlS{$xgi9BJv%v4=AJs}_H$kA8SOCGcL&|HL^_x9M6G_h1boPD`PGlJvBpVEtB zTkyOKKllZM=tsgF(+MOYX#Da;dRHR9D9X&XG{9vTs|wB)EBiM%B_nM}tG+4eyuS9uapMH@8=!O0LfgsZ2TOoWwPvSIRgORB2E2Bave8gx zZ7IDLh61SasDXIg!9cXSD=lhQNRcp2C+^0~6-TT5>@fjqJ-?JjFt~{ZpP+nntUf{E z;)T580CM)yCOJ;D>Ecm8HR`)AuGSnNI+L|E4en2_qJGxQYmrcmR=`O=72!&3mrs9% 
z{&~^i(wY!j<$-B1*PXbr5PkDPv~>N)*I=V47Q3^!a{dWUnNtS2ZK%z?ep5$Gl^P9g4uTX=?*kV|(= zsqex5*cb{11}bjzQRQB?v=H!;=d0f>$#R{zzstI}1}PeDqKdr9ysRJsZ>=n-P|!j-j74s7m{X({A9JV#wZfT)v-Ix9n@Y|?cwTmX&KcW zyd&?nV*&9z(-HEm(%kcB6=n*bDnIu-NG<*K-RVAU30PvTjql?6SbQsFwz2K;7R7Me zWkiUqg)+YIs3qHCb}^&4J+`9o`gOqQ`<84MP`#9o$3Skh1ni%WMcZu_VkLe$oHj2bPlM{ogl~}|V8L~MWDz*t zwK(KB=y*Dm{p_#hnKc1g|5gLpY295g9*#HKmASL)cxvBCWX!K`*mtARGHpXg*Hai( zais!UY}*-*)7B3)Nj4(*`rGY(V0eGQ-Np<)jEUAg&LSq_5Fo?wPK=d?0d3QUKWZ>W zN!qG5YVgR2Z1C{y6y_`jPdmmOTSmF9LD}m1zKrOk>=v(hLan9*g`jk4V7Saf*}8yt zH#K~cGvjP$TDy~Ah|N$n#ewlk>@V56lRH02B7w^eTXWrv??Nk2tEBA@-DI`V-B_QM za8LVYqa3p)L}(YgFW>cF6W7j~wmN5Vja?A}|*!+%P9@!Hu7&qf#mMtx{PvJ`&j!0Ljd0Mtty~ zkKo|I;_fT1+vconY}`nR*`e%WJ-P(!*vUavX~>iSD;xd6Lzke}b+sl8{DdCrnBAh` z%|8-@!KyEYyR_sR(<r~LuTF<}y zR+yv1`t?Rh;0bhOk8!-@cK^Y_YD;UV{p&3S5!as)>Z1`dsxrxO%)6+Q!mWarM!|y6 zE#ec}sm00lw>c2bxABBC^nVjZ!JKQT!rFbR<4%O5<h9X~3xn@~xt@MFelIn}b3;`$(I^B!!r|oDf&F^oSul$n? zT|O25+Xr4N6D#v0IOHMG_8pY=$Cp_G%d2E6hYM|`m!5s!t1CHu-%Vha<(I#r>dOge z0G_O67VXYKoliuKY6a+qGV|d?CmOOWETN?Qt$5M*vPyrrWt)pNoeNMHol}Qzn|=U0 zr>>U>_99x}5eRhS5@L4fg5-~6l8`wUavQSRY%^<0Kqc-I*GTRgNnMVT@Z4_d9VI!) 
zN>gPG+(MsD)bov$vauMfy4lQ)KI!gNc%F zI#~;au9v1A3FGIaTgmMFG-)jjhwH2jR&`31+A$H4%8=MbP(w6=VGdGP|>=pPPaWmnP za5sWQM(Z)j^n9ml8EXw?T7FOm#p?J8Ap=~NC3L+JYBW{uv~!ObAvtiqLRfQ%+Bpv; zveqGuUO{B%)kcCx3|1t+!Acc;mmBqXNvC9Noi;*f#{0Veb#*uz_GRv8CDuf5r3t^b zK`aAiZSEqQD4V z?f-aZmt*~`0SwPDu?R2Xw$=gdRZ>G$4(G`y7Hjyl*DvN+);`({?3OtA*|(&qh%qS# zoXv?*VgS(!i&eel0m>RS9#U37N&yoZOVKJkoh4!iNn@ptS)OpWI(w9-GTI5|f#x>4 zIw%?A?3C=^zcJA?a5T`gp&iR#JllA0PNfaXPW9WPf{p+!R>!5;M?`r6=e$@V2KV2G zvO)hutK>v*N;Tz}yN{x+DbUmYDv$?!K|Wlw%$y`3npJRDwTa!F#dy$$$Zlixyd*Ol zkTN}$4Lmhexzv6tuHLY^UpL})cvjOSm?Hk;2VI`FDfb5UmBKMe=1rGxNkh3#Wnv_e zRafCEC&1 zH28c%|M8WpKDu3#F}gx$*X>r}kw{fk;v$48Zp|pf!vrP%P=K^8N$hGVCX|u&gT)kw zz-39v^p9D`QzcXQ!kn?4+OJZDitjzsakjgE1t2+5x>YlU=Iq&DdMoeM%Ss+ZB^0}} zE$U=xTMj|`&~~s=E8UlQ4x0_xL3F2Qv%bbn9uZou$TU8g=2+}Mbw&pu9dAHlcy!qw zN%dNW&pkB{VjAXv_n)2NO+wSfu{jq5yZxU`qeA86fOG5p5jkj6al4q|wZ0yiXjs+L zmUL0n4m`A2`>g;2mf}X;tJ^6+&hFyhKoUGb-U>fN0!P*6#W~suW|`->2`@n0P8GCB z$i2pzs}99xEW@iFi*ZUPW#jVO#u>(l0`!_RCC!fe5#jCOR~&aqIz5=H;V;Xg;ljpr zH^=`S)LD2p5x6{Zw6D~7By|Qu=nF3sB6QMX;z6pl9EUMtGf!NE&-&3N9K4xNn`l_( zJ#ydcr!zM9=t3_jrEq(!?c7U_%9*6b#;K7X>e7`Fa0P=?|kqo$lVq7vWv(S#6C zBH#v3EuSv>xxGaT5T?8@)n5qdoDMADbayeXm+po*Z8agYZC2d>HLE~~zM+s|SSj9K z5?ueOM1_gH2pa5BgjDrQrQ+eaOVsWM4iXU(*5J~{Fv3ynhu7ieI+%@YdTg4ytX=|o zrFdW^%D&=yPlBY!dy1>fNrx;*E61Y^yl4B81dqV3BPAgo&ukmb68rYZJS^|2(-)2| z3#;2TUbPTdOLUzjG~NUZQOO!OTum@@s}CRf($)o1bBsuUww9v{J?~RP;Rz^NQcSXR zQEB#p;{uCIvFl5_Rs2X)&nu^Ap@xyIaE-^J4NH|i3nw9xR*&!6yC((VI`Q{^dS*52 z(hK6g28zS(2kC_>=;{&LQlEg#q4VVU(c`OSE>TR-*0FrIX|$56(h7}qBGh{cl=&ds zPB~9O##~AU-v*(Zu@Z-gNbA-^q_lu)9C5qYT^U1AYn*F9k_t%M{c`H;{HRbIFCnxZ zW{C))j?(l~jFc&Exn>DDwB;(7r8)ges;{GMi$v$?CE9D93b0_i!reHHZo41QOA_#`Xrdil&%a%)7N?=2P1mU)wm-R_)JkK6J zWRRAzZ;&Nx^sHTa##U4@8fwpZ)@Bi(9w-fGt7 zw1wSOuIQ`i5|~2@ExP5zT71OyeJsC`5FfH}z)0dK%IeDktioyXB`RZWm8B)2m%7tL%Vz zW=J~`v=ZgbcQJO^8%I6NK&S5%+HHeE5G{VM7K+8&ghsFD#d`1722U@~zIVBucCd>n zDC1r<(44T=iOq0lbYGuM*<8FSA>zaFEv&zJp#`~_Y5yexm7Eo`NY}D< zlR$K*PAbey!R-dHrL11=)@h%yzj#p3VX>;3x=`!PdS77FdWY1<6;yx~u=`==Vew|M 
zWz15pNTpEjbjN2kOB=K)J=U??5224LrkRvG>DZb@eP%b5wEje~M>b6tYHYnwXAry? zbxxH@%Yj~|A}%0XMB)t6i<)-^D!V7Jh9T`GCz~M5>YpH*26pz~LendMs{?(@o_haS zoCM#C`1U9tf^a-<@0Q}pk+zN{ce?%Ei-hiDdVE;)`Cjjw8M6g@erS&PSKKSf-EX*~ zzs;rzYb{70*WsW&0|W)A@L7mh!DFSrge&FpbOvhtFu+Kbx*-R$gc*?&YPzW6tOx{0b!=Hrj{2nSa4!_y5>VGJaC zoN#HLR0O}`U{n(iIH0HWeB~Z&*_tYbJSwi0)QYAj8`y&G3KgR5cW*I?l(QZvK9g@w zi!yul{#6y29Lg>kZB>v_L5aZ7b`m9Q!fAI(@Ng;gpQj7Qc2lYRcIR|xQZLh3&v(qq{)^&fvp1h_8g}x_P=Spr;vv6Fy$>Ozp)iD)uT>+3UW&Pc>FER9j#;A}#_NkPRBjGSiNBwrzllno0eg_jIH3H@91 z*?kGR#A5kq9Bx&z$EPpGl!&WqB-&*|P4RAFuF3?!EdNz1CC>~VzkvYbSW8#|CXTA- zxV)%Zi&xcUKSaPVd91NWtH~)qs|pr^p28{02#frjc!!S~(<{_ys?>%Eq7yO*jj_du{7^Pu;QU7Y32>Frjj{w@`pL{Pva^+NE zt5DX)prZ(gCk@>Dy~GhYo#wkQ+%ZTgSo)qw1&|MZ<;&Ryo(fG{?jxr!0a0{kG|vqnX0(c@ z7AXhphj|a$Ua`kFAnwgZnF!WU@SS8{#SiF$_Knz7Aqq=M5WWf4A>L}?N)fjuCdS0b zcmLvCuD8KJK#iUudR8pP?A{*sbNNfxWy^v03vpr5H~X^pUyX4}EE?NvjAIJPKHR`nT>8QNaBh?42UQ zMwLG z;qk9*UoE&B%}bGmRz*R3wiG6d$!B@p7|U)e9$!2lnqsEwScNlflZPjo5~;BOfe?;2 zc@1@5jh23?3`kB_!Kx5;x?yNWzs<04QTix428K*AgzXpdI4FKgHCU~NI2)pZqvz;U zOQEdo;VXHf+uTWYO_jpgFNQAoO=ZZ$*er@0@F78$}f#fS^nPC zL2MIJ3%!m-YCt)#$hj2mH9IDknrPw@WLnJd*gG%liE=FvfA0~QbR#p1Y83Rmth!Zo zbMbe^@2`^@m&?-lBc)4+#|O))@#1l9&`c;6aK5hyqg`}50M37 z&+ulII~v{|QkubfykQEgSJw1L7tFQovMj5njR9~-<+yo9;hb_y6z0*L| z(G3%k*N-lx9Buo(lGFr`iWwv%Hg*O2xX-3;=Vk6Hd_`~ZpJIEP&B0tD3DfqaW|OLu z%N8X{+>PM5Y&``+@AGEc?u5rsD(y?j9-NKm3%y`Ot2(_LKOE(UvljhvQNbB!P_IO! 
zJ9A;Jz;f>Aody6JC55%Lw+HogH($rUc|(O>LK3kaIUMyY!V57R(;+U%SRQmzfjH={Ia6mr|D%Y^sGVPkUr3!g)dr3ECD3r$SP=VYX*-)U=;_t0 z)wAQIUdQ;I1g!GgfunW5?$_oR%sq>C@}!*~CtEWGGv34RDZ^ZTU5j=v&D9gl>QyIF z-`R`EtaAe4U-Q#zGlkN()5ceU3;!?(v^^@%^&+{^Zm) zenVExEs&)}?OHDR&g@R~yz&>1C`66UK}xM1%QTY=wX;~js^5>>$9d~8vM06=zE(0O z?Ix42-Z0L%Itos$kW+f8+tEpfvE$e|Y98?KZSn1x7Hx&4sjE~*;U>@U-x%3bZ~siq z#d8bnDIT`f!1km9XY?Rk3ib~!qWwSKI$!|E%^{%`BZ|-0%)9TM=;r<1-Q|X_an!~I zT7kh|^_zg>=x&KJ(=J z;=xnO*(9x2&AEFhoSe|~x5v9=EaYv+Z;|8s^ON=22&(hMvlS5F%MLVS#dQhJW1y0K zfh=*C!>Mx2BzU<%A*Z=S3vj3?hcRvsF2x)l9T!iD<7#kUVmCS|kqSCC=pZCI_kM*b z3plYS{82YfI~7Zj`(@w&%|w<<*zLOv20;Fqq2h6sX9om%sv}LT>J3uruuo7**AD6S zi26ccWWra*%sX@35i_w0ZTF<3fYQa&t6QD6nZf1^1Cll|gc_l@O&`cVsdK~%_Z&6( z_JvE9puv_iI@dkVlitQo_DAHhfFluK7cQsH;q82*){5R-ggH&8)OszNs|ys8+)+&wMOqXPD7lNo$IuhEGF?UgFLC*eEP$_(T5jX6 zu6*>%d=QD6uXKT?;UqSwX%gMub0Mq76abTlZ!t zYW=xa;7T&fGFG>NC7S;~*fob~*K5|8$)UBdD07&7{YIvKm+vR>yUvMyX_4cWLdnk8 zgqBj={=4>>ylTuw1#$>U(`ApJzFJHm)R`i$MsH+?dietukAGk?&cs+lt@dC26$|RA z@CpxAUN3YMG0|be7kPK{%U-?1Vg-3;a@mkOt)SK8%36c~%CB%N;i}1+&bjb*%{>c} zRZ96pT7)+Fmk6a?#a?c>(&q9wc=}52vr6|plPT{c+Yjb8e=?04O1EIvXTH@&|AGdn^tZV!eq;e$AN@n5$5(s`{5E$WQI+@8ue)ao6Z zdphTc+Be7uJ&};1DqTGli@faysTK#Z++OoC4qUlnbaS}rqsSDL`DsZ##UQv5J|5Kv z3)Z`Fx6~+m2>ZTT651G`F0V-Nn~$vpf~av@j8%#M6f>A>_r{md zrpzCRmrYqAe2A!86qfv*>XeGjCvizS`=;g*OW}-nn$_x!xxJz+Uw_PU2xtC@S
x@t#7&Vbx-^PM zJ4ARS+DvnImNmGk3Vc?1*-h&<(Vp<@CRz4I8mju}GTL?TMC~rGjTU8~MMUOUE>;R2 zCuM`it5!|jqaiZr_{KO!PC&@*wp2L5)WJDF)ZN)LhWl$d?6`>A6y-JP*GuM6m|wudga3^Wjv}y#)HVecZ~KmYkee95JC2T zM^g4HT&({M9QK7DXhgtDs}i2<3;gDtB2vMdudHYS%=(nPgsS9USvzY2)(^bAU6RYo1j8^ZOA*wegC+F(Y z`%UKkozrU-AnS?Q*WXN&i0Ay9rtd!0w8aw70+#H4Sf)iWR2TT_B^4 zEF7k=vs6+iq5IO}5s2;DZT$|r<+SccsKdlx(_zD5HCzEyCn?9$jUS%@E;!u<^7Hjzral}FJs`jZJl0gywQ%>Wlqvj>4Mo^oTw-ae9ZFQ2XO8ipKe zg;1zn$8P`>8d4r|L*6pv8imvztwmY$WOW1%s2VSeZW!_S_esY zXV(K;r6&^r%J6Pc#SEk27&6st#^Vqec$?!yVbtjpwvubO^vw6lfT~8mDBBYk`2KRa zs{MX=Xy3aKzpU?TM=9cz)*Fwg8?@zgAzChxV)A43+aXUUq3q7t z*b5w)>7v)0U3WlIUeme{B+nHz?DEmJg?Y4dTrKXv1wJHi3dUiQtgjW9c-AAfOvF6H zq%F`G;^VF~G9+^hO@#@*cc%BpmlmJJQ;)-e+^HR{fxSNht@)s-2d{u7O@|@3nVx6u zK773vXbHD4FN^a|b0ugTtbzFLmrXeD+#s_3&`x_*R{npIh%b&MLB%7&#z_XvNrZp@ z0%*<2dz8?l>v3Q=mt!?fKa3Q>HM1B?si-vT{iod??JogWSo*Km@#ui-w^SvkGSnp~zhbxP}{XS*ea2YV-&nU7O67)ifU+fC?^`EK|7K2}M| zJ)bl09R`>yg7YytAHzWq1KnFFyIw+q5qJ+yI=pXj^Kb6y7CwEn0zVZ6GY5lD_k6^Y zSA}F>N$-KI+PTpdDC_8)MR*Z|Qg7Bujs9KU*Xi}l)1x17jN5Th<1h%(>N{Q==YRjBDcd)(r{Acr7L`p<$ zJ@!7=Iz$brmSJ^xyDCfgfF(1zVAn;_%;zS|OhLPv*o<%PmI>I9#YjP!ajmn#-0wa$ z;GJ98M0O`RY!IF0X%y&*L9t{vbi8|2KkT$V=RB{rI?n^DRFmYG%j}2%`H z5xew(W(rN6*uqySyD=S$yC$4#o_AXSFeHIjI!@kK|jB1XG|t`#dC43Wo|cr8%0Lebc-GYh$3&j-Y)m; z*y>|5;AWs8a+ale&-n`JVPzghVbOw7o7+{7Jzcr}2GPx7;agwI0dA`(J?#Tn$H5{L zhh?H^R2xT^&qjq-nX&N?B{Gvhv##tcV7<|xWghDzI-lFF@sk+zX(Erdld5ly#)NpiaaYhv~K;w*YvF#)CLZV`_I%I)iUMKWN4s zl%lD(Q01&l&)mhU&uN4G-gXVd(sT<%%SRl)?Y0c~9f7L&Mtk=&UPUp&)`meqMEf>h z<^(OO>aPlmVbkx6pSJ9U7Vi-j+;7Xn9xARYIATmWdB)3kkVucc>+t1sJ~+3#SDu4= z-49#Pl>yt_G)uKdxIe}@`#tlHxTGvB^jSlm75WOhoN!sn0t$~E8a8DND?sJ7mv5(? 
z_k$uI5}EvuKV@U$GSwb{v)>_1o;Cc`6697W9S&lC#Eml(vaV++Tdb5B@&@)=qqH+D zja4(%-YCGY@fx(s%U&hwt>+JXHlk~_o2AbNi1dcYYVEAe=vN1~8=;j{Wq~on$7dAt zoelST^`Es}yB!&05y1m)#l!k{!c-qw%{Z>ge>O~h@#bJu#i zF)W@#t>e63Ed}t-{{GL+(`z1=&+NIT+<{1@w@c-^eC|TWbFF9gIqdEBKT~3J>?HmT z>7Qu`f0w-;W_D;`(?$~|HnLT>MEi?03|ElXxvofTe9>vAjaYb1LZ>0}rnZBQZT~CVQ>NM0T zBO*5~MoZ>?!A}=yDcIOHn*LM(kA9e!p)Asl+%ov6|LOT0OomyuVg$l5HRvFi4VYaWUFN)GY&EgAX*exZj^2AaDKAlY-4 z1>UP&#NEbmm8yb8a2M9bY3?MK??=`VO0ZJf6t{~*8~)q4@#9axkK2-_yjLWFA@RIK zQC34SQ7I91j5PnGprYiVn1;GUHwW`g4I*dPNUqLAiyz5yoIaX4liq9SnVe=dUx*Bk zs34=t`M-ypoU%4^1OX!5k`V5X-FYFa%&nUcr+fgrS9kasB%wh1{I0>E?V!?4xrMQSE=J4k@J5 zeD!D^DS+3_gz>uXHHwn|_PH6{ig8G3=||qh8FdpJk{!q5ZUT?f1elX(0s;p<_R$EA? zYdE6kw@&7wfO3i!F^i3@Pp=H`K_QtJ7e6t|l{fylmZxw~vgiODup}jf!haRy7N!)Ep#&?`u8O z*8M(ZLKI;8^NWy{qr}g+8n#R3&-hi1p?p5t?UGax6StZ?JBp1}i=E*mqyX&ehJ(sZ zYDXGAh5mM>vD^_TC0t>>ks@RCP;c-QRN&Nk{h(NbYgKiohgw;YcsvXiTR*fptH-U@ z$7*+~U2jPmBp?O2UI>Z7ork0_h6Q5IDV=edE7}A*is*KEPLxucxH?!yS>8lvD z9_LvW#9C;h4{xK;^BY3y_MBn00J_UkwB+#EhzmFrLZS>$&KxpN z{4*ch_4-b69HMc#B4e@E_fp(^#(-F79FOOyA~On87#0Zz$p9Vu=-)PgfwuKhfb5z6 z4~EDxy>@L=o7_xZP3EH%QC9tY=oYWO-7P}P9N)sP7jI_2k1h$%rHC%^=b0*Qt20@= zl*y^!}hO1|+)zwpqM#jzySAUODp>5DoPYg&bzKRa1q(wps|XC_lpGndL-W=Z`s0G6`%4vooLexy`pH*rx4OdM#nE-(Sd*Ki`DHWHUI3wOO`dD+OYf z_>HYM2u;S9?Rj>uvD_un#sl)^ z(>7@zXNu&NZv#ih;xWSi&P;EIhNK|`=kG6t%+q3CsBpJeyz@C%4kUa_mk=}UFB`Vr zUIorcmZs~E-G*H|dREk*`S{57{P zUP#v~mG!Es@hPZYhT${AcS z@SPG>WB_V_C{vCYSpw2!2q0rNfc&-!>1??AZOep3xO_+Iki7e|R>h;FH#_w01lBlqIMbTvRJ0jSwsVd?0SWnd_<7xnEC>?fBXuA3*{nhcl42kM(Muwct1Zc9 zf{yd$pWTEsZZ=L&rrp;btsd;-Y5_4*wB=)icl`x3t3G}|YW~|v9pLHuAI?sQp8H*A z0PDc%y?qp~ozN0dI$tLja`dxpdTsIm^b~(;>jdFHqo|EQ=&^@`B*kqp$qIC=j&$Z7 z_0MDVNxs-eFm(-HnK^tkYz)pv+lUax$ne5=jDO=S@K5b=?y!%08->Xgs^9h+EiL+2 zVV!mVcoCS8i3N6&NPFA`NbVxO-9enN!H~EZ_03dy4eaDA!YtPbB+wb?{-z==g|0A@ zWV5TYsHDh&sWFb(c=$d5JxMLeYF8}vh)sw7NzNc_lc95C>SJ!twf^~$*LujBkpX_) z5SxT1Akah}&X|g>ZqGSst83F9lj6EQa%Nf&*|0ik?E7i6L}X7hy$;gzX$r)>aDIQFtJG9*EH{A}^EMS+BHaG2Y 
zK->%3U;CT2c_}*Yl2dhGka|_ANL6stmcPPC%gs^xAz=T9j%<-`6|QhGlAbRGNM8k? zF9TgD^dCi0sm;d>Wf{cM-M8C&tJY{T{V^%IQ|@^`J!FU+e})l8fi$RMdqv2z=0^v* zAVU}v#o&iY}S z8IRiyf_L7J#Q(zF?4#yw06Ue`kr(q@i-q~-9&16O=n{L@NgRc;PW~Co1Y3KQ zwS5;tq56)zh7#-{3^X71>EwpmW@Smm&2I-a#_L<8Ys3wQ{3d%KzufCdQGac`b zMOmr;x_0rCg5=SY_X-Uo_8fH%2Cbj4lZ!!16*g*Mi4rvsqsOS<>UauvIhu_5nvY(J zh`tQ^V*{R6-)v|a&}{N^F$=S#6x+yfOZvDcWkFJF%S~Z&tnvFu zYJ92gSGi`A-fV?lb?0MD6Ex_W{W)sKPt3yh()05z>C~qI!dUku7+Ep*?e&(&)~@KW z75S4}Mj+2SxY*C#p|Ykiwm!WRQ|u`Gf$K(Pjm@dpM{G7aeEhChOBGY z^&g%-$@SQdQ*U;p-C21X;?O`aa-@p$%bvYS1M&q$4Jk}1qe*m6X9=XhXVS+F1LB2V z>dsx{3(hQ)vg2vYvRNuY;C;*hXbk$t>s5&f ze|Lm6?@#>BmylLj1KomUsp z7wyy!Fk>0UB$U?%M<-Se=Ksjk%p!J6UXT93R{`w_N5c#ZeIGkBziY~R(b=LxjBLxs z4La$|6C=Ktw+5!GU1x(<%puD$w{!6X2COupDL$3Ki%t*ilk@zmg7GLEN2S_mq)}vX z#Q!-jpC`r&yJfp)ZT=Ib+kb-S!-5aiOUl$eUEM2GAU)98>pYuR5PY;g&5j`Gv<7QbaxKN_KdE((dfs)5zxm@Q%HtXD7J85PYgnFJGE0v&wYG`>+Jyx< zaoJt&p5DLUp#0B5d1;XhX0`#*yqyVKB{V!nxY3|E)sM7;6hEc-#EjuarJ#V;gchgm0FVmNyd3FME zQ%vj+=rQj9V>mxeCmAd=;g!}>v_Pz;WMW?=Fra$>??x0UF=Mr3^a%LgTb_%TQfDkE z?459_iFZ^B#jL*gv4GoqMxoYeD@fvpMi#Fh&93vD6`UD|O3LR|`akNd<&n-=HMqI0 zxkz9o3BFeH!@94iyxn6PqKSuh+&a!t%S(;;LEiQrdEDAZ@V;OTT!`8~!k=ZF3ZP)K z`Gv~$|I$d#ur1?r5D?G_a*|>i*Q(q3&`ut;@XqitW~ffRi?-~hC!qMK970xVnx3Ei zsbt<8Hd66Fg3s|%Q?7N+;K^1%K2MmpOF5X|$^MUuBZOs^zry|6fgsfNxC0Br{UiOz z%sSiUm+(g+4I;9OEMw2qr~Q>TOiA)*Ivmc`lYU+LOzCLbdYd9U%zRJ3v|6c6qR}T@ zZxuBB3_y|p59N6v88mDIT&W;m@2P3t)n2&Yk`1~UYmPVzh`T;ikS(n zZxYj<2(|dBlX|lM0OKx(IC zTn>ghln4frc1U$l!3T^dDUQbm!n-_JQM_?RNoAM}5UuQvEEdpNr3Z1fR@XWKgO6kJ zLwUetu0bR~5z}iimBvWtV&9)Z^oJbM{f++>oM0#$Y=rG>a#^6Yy%%$O$k8jiW7D`F%keV0%3upgZla&fV0lLR^`w))Dz{`rk<<+mgEqA&JmLSUZ5SkX zK3FL4W^pw#wMAT)nq9q~Wt(pOtn>xrh=fAG5oXta49tJXUF36|SI`jZ1kY5YRNJYP ylGa!CTXfbU=tAU@|1;?S|NnpQKo$|~y9j0+k_@vf^LL2PPfki%5+rUC^8Wx|wfHUo From 8ef8035bf201f87d05139c2278138bbbf340d628 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Renault?= Date: Mon, 21 Oct 2024 08:28:33 +0200 Subject: [PATCH 090/111] Fix CI --- .github/workflows/test-suite.yml | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 7dbd7d866..ce7fb30b6 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -169,5 +169,5 @@ jobs: # Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate # we are going to create an empty file where rustfmt expects it. run: | - echo -ne "\n" > benchmarks/benches/datasets_paths.rs + echo -ne "\n" > crates/benchmarks/benches/datasets_paths.rs cargo fmt --all -- --check From ee72f622c72892d2dce6f97ebed8b90afab99c60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Renault?= Date: Mon, 28 Oct 2024 14:06:46 +0100 Subject: [PATCH 091/111] Update benchmarks to match the new crates subfolder --- .github/workflows/benchmarks-push-search-geo.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmarks-push-search-geo.yml b/.github/workflows/benchmarks-push-search-geo.yml index 82881b41b..22218cd6e 100644 --- a/.github/workflows/benchmarks-push-search-geo.yml +++ b/.github/workflows/benchmarks-push-search-geo.yml @@ -40,7 +40,7 @@ jobs: # Run benchmarks - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }} run: | - cd benchmarks + cd crates/benchmarks cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }} # Generate critcmp files From b02a72c0c0d68068c5c20e77b4f5c9d2e151375f Mon Sep 17 00:00:00 2001 From: Pedro Turik Firmino Date: Tue, 29 Oct 2024 19:30:11 -0300 Subject: [PATCH 092/111] Applies optimizations to some integration tests --- .../tests/documents/update_documents.rs | 73 +++++++++---------- 1 file changed, 36 insertions(+), 37 deletions(-) diff --git a/crates/meilisearch/tests/documents/update_documents.rs b/crates/meilisearch/tests/documents/update_documents.rs index 
195dca914..c0703e81b 100644 --- a/crates/meilisearch/tests/documents/update_documents.rs +++ b/crates/meilisearch/tests/documents/update_documents.rs @@ -23,8 +23,8 @@ async fn error_document_update_create_index_bad_uid() { #[actix_rt::test] async fn document_update_with_primary_key() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let documents = json!([ { @@ -32,15 +32,14 @@ async fn document_update_with_primary_key() { "content": "foo", } ]); - let (_response, code) = index.update_documents(documents, Some("primary")).await; + let (response, code) = index.update_documents(documents, Some("primary")).await; assert_eq!(code, 202); - index.wait_task(0).await; + index.wait_task(response.uid()).await.succeeded(); - let (response, code) = index.get_task(0).await; + let (response, code) = index.get_task(response.uid()).await; assert_eq!(code, 200); assert_eq!(response["status"], "succeeded"); - assert_eq!(response["uid"], 0); assert_eq!(response["type"], "documentAdditionOrUpdate"); assert_eq!(response["details"]["indexedDocuments"], 1); assert_eq!(response["details"]["receivedDocuments"], 1); @@ -52,8 +51,8 @@ async fn document_update_with_primary_key() { #[actix_rt::test] async fn update_document() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let documents = json!([ { @@ -62,10 +61,10 @@ async fn update_document() { } ]); - let (_response, code) = index.add_documents(documents, None).await; + let (response, code) = index.add_documents(documents, None).await; assert_eq!(code, 202); - index.wait_task(0).await; + index.wait_task(response.uid()).await.succeeded(); let documents = json!([ { @@ -77,9 +76,9 @@ async fn update_document() { let (response, code) = index.update_documents(documents, None).await; assert_eq!(code, 202, "response: {}", response); - 
index.wait_task(1).await; + index.wait_task(response.uid()).await.succeeded(); - let (response, code) = index.get_task(1).await; + let (response, code) = index.get_task(response.uid()).await; assert_eq!(code, 200); assert_eq!(response["status"], "succeeded"); @@ -96,8 +95,8 @@ async fn update_document() { #[actix_rt::test] async fn update_document_gzip_encoded() { - let server = Server::new().await; - let index = server.index_with_encoder("test", Encoder::Gzip); + let server = Server::new_shared(); + let index = server.unique_index_with_encoder(Encoder::Gzip); let documents = json!([ { @@ -106,10 +105,10 @@ async fn update_document_gzip_encoded() { } ]); - let (_response, code) = index.add_documents(documents, None).await; + let (response, code) = index.add_documents(documents, None).await; assert_eq!(code, 202); - index.wait_task(0).await; + index.wait_task(response.uid()).await.succeeded(); let documents = json!([ { @@ -121,9 +120,9 @@ async fn update_document_gzip_encoded() { let (response, code) = index.update_documents(documents, None).await; assert_eq!(code, 202, "response: {}", response); - index.wait_task(1).await; + index.wait_task(response.uid()).await.succeeded(); - let (response, code) = index.get_task(1).await; + let (response, code) = index.get_task(response.uid()).await; assert_eq!(code, 200); assert_eq!(response["status"], "succeeded"); @@ -140,12 +139,12 @@ async fn update_document_gzip_encoded() { #[actix_rt::test] async fn update_larger_dataset() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let documents = serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(); - index.update_documents(documents, None).await; - index.wait_task(0).await; - let (response, code) = index.get_task(0).await; + let (task, _code) = index.update_documents(documents, None).await; + index.wait_task(task.uid()).await.succeeded(); + let (response, code) = 
index.get_task(task.uid()).await; assert_eq!(code, 200); assert_eq!(response["type"], "documentAdditionOrUpdate"); assert_eq!(response["details"]["indexedDocuments"], 77); @@ -158,8 +157,8 @@ async fn update_larger_dataset() { #[actix_rt::test] async fn error_update_documents_bad_document_id() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); index.create(Some("docid")).await; let documents = json!([ { @@ -167,8 +166,8 @@ async fn error_update_documents_bad_document_id() { "content": "foobar" } ]); - index.update_documents(documents, None).await; - let response = index.wait_task(1).await; + let (task, _code) = index.update_documents(documents, None).await; + let response = index.wait_task(task.uid()).await; assert_eq!(response["status"], json!("failed")); assert_eq!( response["error"]["message"], @@ -186,8 +185,8 @@ async fn error_update_documents_bad_document_id() { #[actix_rt::test] async fn error_update_documents_missing_document_id() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); index.create(Some("docid")).await; let documents = json!([ { @@ -195,8 +194,8 @@ async fn error_update_documents_missing_document_id() { "content": "foobar" } ]); - index.update_documents(documents, None).await; - let response = index.wait_task(1).await; + let (task, _code) = index.update_documents(documents, None).await; + let response = index.wait_task(task.uid()).await; assert_eq!(response["status"], "failed"); assert_eq!( response["error"]["message"], @@ -212,8 +211,8 @@ async fn error_update_documents_missing_document_id() { #[actix_rt::test] async fn update_faceted_document() { - let server = Server::new().await; - let index = server.index("test"); + let server = Server::new_shared(); + let index = server.unique_index(); let (response, code) = index .update_settings(json!({ @@ -221,7 
+220,7 @@ async fn update_faceted_document() { })) .await; assert_eq!("202", code.as_str(), "{:?}", response); - index.wait_task(0).await; + index.wait_task(response.uid()).await.succeeded(); let documents: Vec<_> = (0..1000) .map(|id| { @@ -232,10 +231,10 @@ async fn update_faceted_document() { }) .collect(); - let (_response, code) = index.add_documents(documents.into(), None).await; + let (response, code) = index.add_documents(documents.into(), None).await; assert_eq!(code, 202); - index.wait_task(1).await; + index.wait_task(response.uid()).await.succeeded(); let documents = json!([ { @@ -247,7 +246,7 @@ async fn update_faceted_document() { let (response, code) = index.update_documents(documents, None).await; assert_eq!(code, 202, "response: {}", response); - index.wait_task(2).await; + index.wait_task(response.uid()).await.succeeded(); index .search(json!({"limit": 10}), |response, code| { From 186326fe40af73956e520e294cedeaeb96093a78 Mon Sep 17 00:00:00 2001 From: Tamo Date: Mon, 4 Nov 2024 16:33:04 +0100 Subject: [PATCH 093/111] update the macos version --- .github/workflows/publish-binaries.yml | 6 +++--- .github/workflows/test-suite.yml | 2 +- bors.toml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/publish-binaries.yml b/.github/workflows/publish-binaries.yml index 016a9d282..c53946fea 100644 --- a/.github/workflows/publish-binaries.yml +++ b/.github/workflows/publish-binaries.yml @@ -65,9 +65,9 @@ jobs: strategy: fail-fast: false matrix: - os: [macos-12, windows-2022] + os: [macos-13, windows-2022] include: - - os: macos-12 + - os: macos-13 artifact_name: meilisearch asset_name: meilisearch-macos-amd64 - os: windows-2022 @@ -90,7 +90,7 @@ jobs: publish-macos-apple-silicon: name: Publish binary for macOS silicon - runs-on: macos-12 + runs-on: macos-13 needs: check-version strategy: matrix: diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index ce7fb30b6..90fb03538 100644 --- 
a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -51,7 +51,7 @@ jobs: strategy: fail-fast: false matrix: - os: [macos-12, windows-2022] + os: [macos-13, windows-2022] steps: - uses: actions/checkout@v3 - name: Cache dependencies diff --git a/bors.toml b/bors.toml index 8750ed993..96e9ef65e 100644 --- a/bors.toml +++ b/bors.toml @@ -1,6 +1,6 @@ status = [ 'Tests on ubuntu-20.04', - 'Tests on macos-12', + 'Tests on macos-13', 'Tests on windows-2022', 'Run Clippy', 'Run Rustfmt', From 362836efb7d5924a485fa3e15171257f40214509 Mon Sep 17 00:00:00 2001 From: Tamo Date: Mon, 28 Oct 2024 11:57:02 +0100 Subject: [PATCH 094/111] make an upgrade module where we'll be able to shove each version instead of putting everything in the same file --- crates/meilitool/src/main.rs | 428 +-------------------------------- meilitool/src/upgrade/mod.rs | 46 ++++ meilitool/src/upgrade/v1_10.rs | 279 +++++++++++++++++++++ meilitool/src/upgrade/v1_9.rs | 100 ++++++++ 4 files changed, 430 insertions(+), 423 deletions(-) create mode 100644 meilitool/src/upgrade/mod.rs create mode 100644 meilitool/src/upgrade/v1_10.rs create mode 100644 meilitool/src/upgrade/v1_9.rs diff --git a/crates/meilitool/src/main.rs b/crates/meilitool/src/main.rs index 9dbff2486..ef137f746 100644 --- a/crates/meilitool/src/main.rs +++ b/crates/meilitool/src/main.rs @@ -2,7 +2,7 @@ use std::fs::{read_dir, read_to_string, remove_file, File}; use std::io::BufWriter; use std::path::PathBuf; -use anyhow::{bail, Context}; +use anyhow::Context; use clap::{Parser, Subcommand}; use dump::{DumpWriter, IndexMetadata}; use file_store::FileStore; @@ -10,15 +10,16 @@ use meilisearch_auth::AuthController; use meilisearch_types::heed::types::{SerdeJson, Str}; use meilisearch_types::heed::{Database, Env, EnvOpenOptions, RoTxn, RwTxn, Unspecified}; use meilisearch_types::milli::documents::{obkv_to_object, DocumentsBatchReader}; -use meilisearch_types::milli::index::{db_name, main_key}; use 
meilisearch_types::milli::{obkv_to_json, BEU32}; use meilisearch_types::tasks::{Status, Task}; -use meilisearch_types::versioning::{create_version_file, get_version, parse_version}; +use meilisearch_types::versioning::{get_version, parse_version}; use meilisearch_types::Index; use time::macros::format_description; use time::OffsetDateTime; +use upgrade::OfflineUpgrade; use uuid_codec::UuidCodec; +mod upgrade; mod uuid_codec; #[derive(Parser)] @@ -72,7 +73,7 @@ enum Command { /// /// Supported upgrade paths: /// - /// - v1.9.0 -> v1.10.0 + /// - v1.9.0 -> v1.10.0 -> v1.11.0 OfflineUpgrade { #[arg(long)] target_version: String, @@ -96,425 +97,6 @@ fn main() -> anyhow::Result<()> { } } -struct OfflineUpgrade { - db_path: PathBuf, - current_version: (String, String, String), - target_version: (String, String, String), -} - -impl OfflineUpgrade { - fn upgrade(self) -> anyhow::Result<()> { - // TODO: if we make this process support more versions, introduce a more flexible way of checking for the version - // currently only supports v1.9 to v1.10 - let (current_major, current_minor, current_patch) = &self.current_version; - - match (current_major.as_str(), current_minor.as_str(), current_patch.as_str()) { - ("1", "9", _) => {} - _ => { - bail!("Unsupported current version {current_major}.{current_minor}.{current_patch}. Can only upgrade from v1.9") - } - } - - let (target_major, target_minor, target_patch) = &self.target_version; - - match (target_major.as_str(), target_minor.as_str(), target_patch.as_str()) { - ("1", "10", _) => {} - _ => { - bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. 
Can only upgrade to v1.10") - } - } - - println!("Upgrading from {current_major}.{current_minor}.{current_patch} to {target_major}.{target_minor}.{target_patch}"); - - self.v1_9_to_v1_10()?; - - println!("Writing VERSION file"); - - create_version_file(&self.db_path, target_major, target_minor, target_patch) - .context("while writing VERSION file after the upgrade")?; - - println!("Success"); - - Ok(()) - } - - fn v1_9_to_v1_10(&self) -> anyhow::Result<()> { - // 2 changes here - - // 1. date format. needs to be done before opening the Index - // 2. REST embedders. We don't support this case right now, so bail - - let index_scheduler_path = self.db_path.join("tasks"); - let env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&index_scheduler_path) } - .with_context(|| { - format!("While trying to open {:?}", index_scheduler_path.display()) - })?; - - let mut sched_wtxn = env.write_txn()?; - - let index_mapping: Database = - try_opening_database(&env, &sched_wtxn, "index-mapping")?; - - let index_stats: Database = - try_opening_database(&env, &sched_wtxn, "index-stats").with_context(|| { - format!("While trying to open {:?}", index_scheduler_path.display()) - })?; - - let index_count = - index_mapping.len(&sched_wtxn).context("while reading the number of indexes")?; - - // FIXME: not ideal, we have to pre-populate all indexes to prevent double borrow of sched_wtxn - // 1. immutably for the iteration - // 2. mutably for updating index stats - let indexes: Vec<_> = index_mapping - .iter(&sched_wtxn)? 
- .map(|res| res.map(|(uid, uuid)| (uid.to_owned(), uuid))) - .collect(); - - let mut rest_embedders = Vec::new(); - - let mut unwrapped_indexes = Vec::new(); - - // check that update can take place - for (index_index, result) in indexes.into_iter().enumerate() { - let (uid, uuid) = result?; - let index_path = self.db_path.join("indexes").join(uuid.to_string()); - - println!( - "[{}/{index_count}]Checking that update can take place for `{uid}` at `{}`", - index_index + 1, - index_path.display() - ); - - let index_env = unsafe { - // FIXME: fetch the 25 magic number from the index file - EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { - format!("while opening index {uid} at '{}'", index_path.display()) - })? - }; - - let index_txn = index_env.read_txn().with_context(|| { - format!( - "while obtaining a write transaction for index {uid} at {}", - index_path.display() - ) - })?; - - println!("\t- Checking for incompatible embedders (REST embedders)"); - let rest_embedders_for_index = find_rest_embedders(&uid, &index_env, &index_txn)?; - - if rest_embedders_for_index.is_empty() { - unwrapped_indexes.push((uid, uuid)); - } else { - // no need to add to unwrapped indexes because we'll exit early - rest_embedders.push((uid, rest_embedders_for_index)); - } - } - - if !rest_embedders.is_empty() { - let rest_embedders = rest_embedders - .into_iter() - .flat_map(|(index, embedders)| std::iter::repeat(index.clone()).zip(embedders)) - .map(|(index, embedder)| format!("\t- embedder `{embedder}` in index `{index}`")) - .collect::>() - .join("\n"); - bail!("The update cannot take place because there are REST embedder(s). 
Remove them before proceeding with the update:\n{rest_embedders}\n\n\ - The database has not been modified and is still a valid v1.9 database."); - } - - println!("Update can take place, updating"); - - for (index_index, (uid, uuid)) in unwrapped_indexes.into_iter().enumerate() { - let index_path = self.db_path.join("indexes").join(uuid.to_string()); - - println!( - "[{}/{index_count}]Updating index `{uid}` at `{}`", - index_index + 1, - index_path.display() - ); - - let index_env = unsafe { - // FIXME: fetch the 25 magic number from the index file - EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { - format!("while opening index {uid} at '{}'", index_path.display()) - })? - }; - - let mut index_wtxn = index_env.write_txn().with_context(|| { - format!( - "while obtaining a write transaction for index `{uid}` at `{}`", - index_path.display() - ) - })?; - - println!("\t- Updating index stats"); - update_index_stats(index_stats, &uid, uuid, &mut sched_wtxn)?; - println!("\t- Updating date format"); - update_date_format(&uid, &index_env, &mut index_wtxn)?; - - index_wtxn.commit().with_context(|| { - format!( - "while committing the write txn for index `{uid}` at {}", - index_path.display() - ) - })?; - } - - sched_wtxn.commit().context("while committing the write txn for the index-scheduler")?; - - println!("Upgrading database succeeded"); - - Ok(()) - } -} - -pub mod v1_9 { - pub type FieldDistribution = std::collections::BTreeMap; - - /// The statistics that can be computed from an `Index` object. - #[derive(serde::Serialize, serde::Deserialize, Debug)] - pub struct IndexStats { - /// Number of documents in the index. - pub number_of_documents: u64, - /// Size taken up by the index' DB, in bytes. 
- /// - /// This includes the size taken by both the used and free pages of the DB, and as the free pages - /// are not returned to the disk after a deletion, this number is typically larger than - /// `used_database_size` that only includes the size of the used pages. - pub database_size: u64, - /// Size taken by the used pages of the index' DB, in bytes. - /// - /// As the DB backend does not return to the disk the pages that are not currently used by the DB, - /// this value is typically smaller than `database_size`. - pub used_database_size: u64, - /// Association of every field name with the number of times it occurs in the documents. - pub field_distribution: FieldDistribution, - /// Creation date of the index. - pub created_at: time::OffsetDateTime, - /// Date of the last update of the index. - pub updated_at: time::OffsetDateTime, - } - - use serde::{Deserialize, Serialize}; - - #[derive(Debug, Deserialize, Serialize)] - pub struct IndexEmbeddingConfig { - pub name: String, - pub config: EmbeddingConfig, - } - - #[derive(Debug, Clone, Default, serde::Deserialize, serde::Serialize)] - pub struct EmbeddingConfig { - /// Options of the embedder, specific to each kind of embedder - pub embedder_options: EmbedderOptions, - } - - /// Options of an embedder, specific to each kind of embedder. 
- #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] - pub enum EmbedderOptions { - HuggingFace(hf::EmbedderOptions), - OpenAi(openai::EmbedderOptions), - Ollama(ollama::EmbedderOptions), - UserProvided(manual::EmbedderOptions), - Rest(rest::EmbedderOptions), - } - - impl Default for EmbedderOptions { - fn default() -> Self { - Self::OpenAi(openai::EmbedderOptions { api_key: None, dimensions: None }) - } - } - - mod hf { - #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] - pub struct EmbedderOptions { - pub model: String, - pub revision: Option, - } - } - mod openai { - - #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] - pub struct EmbedderOptions { - pub api_key: Option, - pub dimensions: Option, - } - } - mod ollama { - #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] - pub struct EmbedderOptions { - pub embedding_model: String, - pub url: Option, - pub api_key: Option, - } - } - mod manual { - #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] - pub struct EmbedderOptions { - pub dimensions: usize, - } - } - mod rest { - #[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize, serde::Serialize, Hash)] - pub struct EmbedderOptions { - pub api_key: Option, - pub dimensions: Option, - pub url: String, - pub input_field: Vec, - // path to the array of embeddings - pub path_to_embeddings: Vec, - // shape of a single embedding - pub embedding_object: Vec, - } - } - - pub type OffsetDateTime = time::OffsetDateTime; -} - -pub mod v1_10 { - use crate::v1_9; - - pub type FieldDistribution = std::collections::BTreeMap; - - /// The statistics that can be computed from an `Index` object. - #[derive(serde::Serialize, serde::Deserialize, Debug)] - pub struct IndexStats { - /// Number of documents in the index. - pub number_of_documents: u64, - /// Size taken up by the index' DB, in bytes. 
- /// - /// This includes the size taken by both the used and free pages of the DB, and as the free pages - /// are not returned to the disk after a deletion, this number is typically larger than - /// `used_database_size` that only includes the size of the used pages. - pub database_size: u64, - /// Size taken by the used pages of the index' DB, in bytes. - /// - /// As the DB backend does not return to the disk the pages that are not currently used by the DB, - /// this value is typically smaller than `database_size`. - pub used_database_size: u64, - /// Association of every field name with the number of times it occurs in the documents. - pub field_distribution: FieldDistribution, - /// Creation date of the index. - #[serde(with = "time::serde::rfc3339")] - pub created_at: time::OffsetDateTime, - /// Date of the last update of the index. - #[serde(with = "time::serde::rfc3339")] - pub updated_at: time::OffsetDateTime, - } - - impl From for IndexStats { - fn from( - v1_9::IndexStats { - number_of_documents, - database_size, - used_database_size, - field_distribution, - created_at, - updated_at, - }: v1_9::IndexStats, - ) -> Self { - IndexStats { - number_of_documents, - database_size, - used_database_size, - field_distribution, - created_at, - updated_at, - } - } - } - - #[derive(serde::Serialize, serde::Deserialize)] - #[serde(transparent)] - pub struct OffsetDateTime(#[serde(with = "time::serde::rfc3339")] pub time::OffsetDateTime); -} - -fn update_index_stats( - index_stats: Database, - index_uid: &str, - index_uuid: uuid::Uuid, - sched_wtxn: &mut RwTxn, -) -> anyhow::Result<()> { - let ctx = || format!("while updating index stats for index `{index_uid}`"); - - let stats: Option = index_stats - .remap_data_type::>() - .get(sched_wtxn, &index_uuid) - .with_context(ctx)?; - - if let Some(stats) = stats { - let stats: v1_10::IndexStats = stats.into(); - - index_stats - .remap_data_type::>() - .put(sched_wtxn, &index_uuid, &stats) - .with_context(ctx)?; - } - - 
Ok(()) -} - -fn update_date_format( - index_uid: &str, - index_env: &Env, - index_wtxn: &mut RwTxn, -) -> anyhow::Result<()> { - let main = try_opening_poly_database(index_env, index_wtxn, db_name::MAIN) - .with_context(|| format!("while updating date format for index `{index_uid}`"))?; - - date_round_trip(index_wtxn, index_uid, main, main_key::CREATED_AT_KEY)?; - date_round_trip(index_wtxn, index_uid, main, main_key::UPDATED_AT_KEY)?; - - Ok(()) -} - -fn find_rest_embedders( - index_uid: &str, - index_env: &Env, - index_txn: &RoTxn, -) -> anyhow::Result> { - let main = try_opening_poly_database(index_env, index_txn, db_name::MAIN) - .with_context(|| format!("while checking REST embedders for index `{index_uid}`"))?; - - let mut rest_embedders = vec![]; - - for config in main - .remap_types::>>() - .get(index_txn, main_key::EMBEDDING_CONFIGS)? - .unwrap_or_default() - { - if let v1_9::EmbedderOptions::Rest(_) = config.config.embedder_options { - rest_embedders.push(config.name); - } - } - - Ok(rest_embedders) -} - -fn date_round_trip( - wtxn: &mut RwTxn, - index_uid: &str, - db: Database, - key: &str, -) -> anyhow::Result<()> { - let datetime = - db.remap_types::>().get(wtxn, key).with_context( - || format!("could not read `{key}` while updating date format for index `{index_uid}`"), - )?; - - if let Some(datetime) = datetime { - db.remap_types::>() - .put(wtxn, key, &v1_10::OffsetDateTime(datetime)) - .with_context(|| { - format!( - "could not write `{key}` while updating date format for index `{index_uid}`" - ) - })?; - } - - Ok(()) -} - /// Clears the task queue located at `db_path`. 
fn clear_task_queue(db_path: PathBuf) -> anyhow::Result<()> { let path = db_path.join("tasks"); diff --git a/meilitool/src/upgrade/mod.rs b/meilitool/src/upgrade/mod.rs new file mode 100644 index 000000000..053c61c14 --- /dev/null +++ b/meilitool/src/upgrade/mod.rs @@ -0,0 +1,46 @@ +mod v1_10; +mod v1_9; + +use std::path::PathBuf; + +use anyhow::{bail, Context}; +use meilisearch_types::versioning::create_version_file; + +use v1_10::v1_9_to_v1_10; + +pub struct OfflineUpgrade { + pub db_path: PathBuf, + pub current_version: (String, String, String), + pub target_version: (String, String, String), +} + +impl OfflineUpgrade { + pub fn upgrade(self) -> anyhow::Result<()> { + let (current_major, current_minor, current_patch) = &self.current_version; + let (target_major, target_minor, target_patch) = &self.target_version; + + println!("Upgrading from {current_major}.{current_minor}.{current_patch} to {target_major}.{target_minor}.{target_patch}"); + + match ( + (current_major.as_str(), current_minor.as_str(), current_patch.as_str()), + (target_major.as_str(), target_minor.as_str(), target_patch.as_str()), + ) { + (("1", "9", _), ("1", "10", _)) => v1_9_to_v1_10(&self.db_path)?, + ((major, minor, _), _) if major != "1" && minor != "9" => + bail!("Unsupported current version {current_major}.{current_minor}.{current_patch}. Can only upgrade from v1.9"), + (_, (major, minor, _)) if major != "1" && minor != "10" => + bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. Can only upgrade to v1.10"), + _ => + bail!("Unsupported upgrade from {current_major}.{current_minor}.{current_patch} to {target_major}.{target_minor}.{target_patch}. 
Can only upgrade from v1.9 to v1.10"), + } + + println!("Writing VERSION file"); + + create_version_file(&self.db_path, target_major, target_minor, target_patch) + .context("while writing VERSION file after the upgrade")?; + + println!("Success"); + + Ok(()) + } +} diff --git a/meilitool/src/upgrade/v1_10.rs b/meilitool/src/upgrade/v1_10.rs new file mode 100644 index 000000000..96af99c39 --- /dev/null +++ b/meilitool/src/upgrade/v1_10.rs @@ -0,0 +1,279 @@ +use anyhow::bail; +use std::path::Path; + +use anyhow::Context; +use meilisearch_types::{ + heed::{ + types::{SerdeJson, Str}, + Database, Env, EnvOpenOptions, RoTxn, RwTxn, Unspecified, + }, + milli::index::{db_name, main_key}, +}; + +use crate::{try_opening_database, try_opening_poly_database, uuid_codec::UuidCodec}; + +use super::v1_9; + +pub type FieldDistribution = std::collections::BTreeMap; + +/// The statistics that can be computed from an `Index` object. +#[derive(serde::Serialize, serde::Deserialize, Debug)] +pub struct IndexStats { + /// Number of documents in the index. + pub number_of_documents: u64, + /// Size taken up by the index' DB, in bytes. + /// + /// This includes the size taken by both the used and free pages of the DB, and as the free pages + /// are not returned to the disk after a deletion, this number is typically larger than + /// `used_database_size` that only includes the size of the used pages. + pub database_size: u64, + /// Size taken by the used pages of the index' DB, in bytes. + /// + /// As the DB backend does not return to the disk the pages that are not currently used by the DB, + /// this value is typically smaller than `database_size`. + pub used_database_size: u64, + /// Association of every field name with the number of times it occurs in the documents. + pub field_distribution: FieldDistribution, + /// Creation date of the index. + #[serde(with = "time::serde::rfc3339")] + pub created_at: time::OffsetDateTime, + /// Date of the last update of the index. 
+ #[serde(with = "time::serde::rfc3339")] + pub updated_at: time::OffsetDateTime, +} + +impl From for IndexStats { + fn from( + v1_9::IndexStats { + number_of_documents, + database_size, + used_database_size, + field_distribution, + created_at, + updated_at, + }: v1_9::IndexStats, + ) -> Self { + IndexStats { + number_of_documents, + database_size, + used_database_size, + field_distribution, + created_at, + updated_at, + } + } +} + +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(transparent)] +pub struct OffsetDateTime(#[serde(with = "time::serde::rfc3339")] pub time::OffsetDateTime); + +fn update_index_stats( + index_stats: Database, + index_uid: &str, + index_uuid: uuid::Uuid, + sched_wtxn: &mut RwTxn, +) -> anyhow::Result<()> { + let ctx = || format!("while updating index stats for index `{index_uid}`"); + + let stats: Option = index_stats + .remap_data_type::>() + .get(sched_wtxn, &index_uuid) + .with_context(ctx)?; + + if let Some(stats) = stats { + let stats: self::IndexStats = stats.into(); + + index_stats + .remap_data_type::>() + .put(sched_wtxn, &index_uuid, &stats) + .with_context(ctx)?; + } + + Ok(()) +} + +fn update_date_format( + index_uid: &str, + index_env: &Env, + index_wtxn: &mut RwTxn, +) -> anyhow::Result<()> { + let main = try_opening_poly_database(index_env, index_wtxn, db_name::MAIN) + .with_context(|| format!("while updating date format for index `{index_uid}`"))?; + + date_round_trip(index_wtxn, index_uid, main, main_key::CREATED_AT_KEY)?; + date_round_trip(index_wtxn, index_uid, main, main_key::UPDATED_AT_KEY)?; + + Ok(()) +} + +fn find_rest_embedders( + index_uid: &str, + index_env: &Env, + index_txn: &RoTxn, +) -> anyhow::Result> { + let main = try_opening_poly_database(index_env, index_txn, db_name::MAIN) + .with_context(|| format!("while checking REST embedders for index `{index_uid}`"))?; + + let mut rest_embedders = vec![]; + + for config in main + .remap_types::>>() + .get(index_txn, main_key::EMBEDDING_CONFIGS)? 
+ .unwrap_or_default() + { + if let v1_9::EmbedderOptions::Rest(_) = config.config.embedder_options { + rest_embedders.push(config.name); + } + } + + Ok(rest_embedders) +} + +fn date_round_trip( + wtxn: &mut RwTxn, + index_uid: &str, + db: Database, + key: &str, +) -> anyhow::Result<()> { + let datetime = + db.remap_types::>().get(wtxn, key).with_context( + || format!("could not read `{key}` while updating date format for index `{index_uid}`"), + )?; + + if let Some(datetime) = datetime { + db.remap_types::>() + .put(wtxn, key, &self::OffsetDateTime(datetime)) + .with_context(|| { + format!( + "could not write `{key}` while updating date format for index `{index_uid}`" + ) + })?; + } + + Ok(()) +} + +pub fn v1_9_to_v1_10(db_path: &Path) -> anyhow::Result<()> { + // 2 changes here + + // 1. date format. needs to be done before opening the Index + // 2. REST embedders. We don't support this case right now, so bail + + let index_scheduler_path = db_path.join("tasks"); + let env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&index_scheduler_path) } + .with_context(|| format!("While trying to open {:?}", index_scheduler_path.display()))?; + + let mut sched_wtxn = env.write_txn()?; + + let index_mapping: Database = + try_opening_database(&env, &sched_wtxn, "index-mapping")?; + + let index_stats: Database = + try_opening_database(&env, &sched_wtxn, "index-stats").with_context(|| { + format!("While trying to open {:?}", index_scheduler_path.display()) + })?; + + let index_count = + index_mapping.len(&sched_wtxn).context("while reading the number of indexes")?; + + // FIXME: not ideal, we have to pre-populate all indexes to prevent double borrow of sched_wtxn + // 1. immutably for the iteration + // 2. mutably for updating index stats + let indexes: Vec<_> = index_mapping + .iter(&sched_wtxn)? 
+ .map(|res| res.map(|(uid, uuid)| (uid.to_owned(), uuid))) + .collect(); + + let mut rest_embedders = Vec::new(); + + let mut unwrapped_indexes = Vec::new(); + + // check that update can take place + for (index_index, result) in indexes.into_iter().enumerate() { + let (uid, uuid) = result?; + let index_path = db_path.join("indexes").join(uuid.to_string()); + + println!( + "[{}/{index_count}]Checking that update can take place for `{uid}` at `{}`", + index_index + 1, + index_path.display() + ); + + let index_env = unsafe { + // FIXME: fetch the 25 magic number from the index file + EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { + format!("while opening index {uid} at '{}'", index_path.display()) + })? + }; + + let index_txn = index_env.read_txn().with_context(|| { + format!( + "while obtaining a write transaction for index {uid} at {}", + index_path.display() + ) + })?; + + println!("\t- Checking for incompatible embedders (REST embedders)"); + let rest_embedders_for_index = find_rest_embedders(&uid, &index_env, &index_txn)?; + + if rest_embedders_for_index.is_empty() { + unwrapped_indexes.push((uid, uuid)); + } else { + // no need to add to unwrapped indexes because we'll exit early + rest_embedders.push((uid, rest_embedders_for_index)); + } + } + + if !rest_embedders.is_empty() { + let rest_embedders = rest_embedders + .into_iter() + .flat_map(|(index, embedders)| std::iter::repeat(index.clone()).zip(embedders)) + .map(|(index, embedder)| format!("\t- embedder `{embedder}` in index `{index}`")) + .collect::>() + .join("\n"); + bail!("The update cannot take place because there are REST embedder(s). 
Remove them before proceeding with the update:\n{rest_embedders}\n\n\ + The database has not been modified and is still a valid v1.9 database."); + } + + println!("Update can take place, updating"); + + for (index_index, (uid, uuid)) in unwrapped_indexes.into_iter().enumerate() { + let index_path = db_path.join("indexes").join(uuid.to_string()); + + println!( + "[{}/{index_count}]Updating index `{uid}` at `{}`", + index_index + 1, + index_path.display() + ); + + let index_env = unsafe { + // FIXME: fetch the 25 magic number from the index file + EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { + format!("while opening index {uid} at '{}'", index_path.display()) + })? + }; + + let mut index_wtxn = index_env.write_txn().with_context(|| { + format!( + "while obtaining a write transaction for index `{uid}` at `{}`", + index_path.display() + ) + })?; + + println!("\t- Updating index stats"); + update_index_stats(index_stats, &uid, uuid, &mut sched_wtxn)?; + println!("\t- Updating date format"); + update_date_format(&uid, &index_env, &mut index_wtxn)?; + + index_wtxn.commit().with_context(|| { + format!("while committing the write txn for index `{uid}` at {}", index_path.display()) + })?; + } + + sched_wtxn.commit().context("while committing the write txn for the index-scheduler")?; + + println!("Upgrading database succeeded"); + + Ok(()) +} diff --git a/meilitool/src/upgrade/v1_9.rs b/meilitool/src/upgrade/v1_9.rs new file mode 100644 index 000000000..faa2d9814 --- /dev/null +++ b/meilitool/src/upgrade/v1_9.rs @@ -0,0 +1,100 @@ +use serde::{Deserialize, Serialize}; + +pub type FieldDistribution = std::collections::BTreeMap; + +/// The statistics that can be computed from an `Index` object. +#[derive(serde::Serialize, serde::Deserialize, Debug)] +pub struct IndexStats { + /// Number of documents in the index. + pub number_of_documents: u64, + /// Size taken up by the index' DB, in bytes. 
+ /// + /// This includes the size taken by both the used and free pages of the DB, and as the free pages + /// are not returned to the disk after a deletion, this number is typically larger than + /// `used_database_size` that only includes the size of the used pages. + pub database_size: u64, + /// Size taken by the used pages of the index' DB, in bytes. + /// + /// As the DB backend does not return to the disk the pages that are not currently used by the DB, + /// this value is typically smaller than `database_size`. + pub used_database_size: u64, + /// Association of every field name with the number of times it occurs in the documents. + pub field_distribution: FieldDistribution, + /// Creation date of the index. + pub created_at: time::OffsetDateTime, + /// Date of the last update of the index. + pub updated_at: time::OffsetDateTime, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct IndexEmbeddingConfig { + pub name: String, + pub config: EmbeddingConfig, +} + +#[derive(Debug, Clone, Default, serde::Deserialize, serde::Serialize)] +pub struct EmbeddingConfig { + /// Options of the embedder, specific to each kind of embedder + pub embedder_options: EmbedderOptions, +} + +/// Options of an embedder, specific to each kind of embedder. 
+#[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] +pub enum EmbedderOptions { + HuggingFace(hf::EmbedderOptions), + OpenAi(openai::EmbedderOptions), + Ollama(ollama::EmbedderOptions), + UserProvided(manual::EmbedderOptions), + Rest(rest::EmbedderOptions), +} + +impl Default for EmbedderOptions { + fn default() -> Self { + Self::OpenAi(openai::EmbedderOptions { api_key: None, dimensions: None }) + } +} + +mod hf { + #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] + pub struct EmbedderOptions { + pub model: String, + pub revision: Option, + } +} +mod openai { + + #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] + pub struct EmbedderOptions { + pub api_key: Option, + pub dimensions: Option, + } +} +mod ollama { + #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] + pub struct EmbedderOptions { + pub embedding_model: String, + pub url: Option, + pub api_key: Option, + } +} +mod manual { + #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] + pub struct EmbedderOptions { + pub dimensions: usize, + } +} +mod rest { + #[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize, serde::Serialize, Hash)] + pub struct EmbedderOptions { + pub api_key: Option, + pub dimensions: Option, + pub url: String, + pub input_field: Vec, + // path to the array of embeddings + pub path_to_embeddings: Vec, + // shape of a single embedding + pub embedding_object: Vec, + } +} + +pub type OffsetDateTime = time::OffsetDateTime; From ddd03e9b370f145787bca447b8791aeff5485c94 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 29 Oct 2024 02:46:14 +0100 Subject: [PATCH 095/111] implement the upgrade from v1.10 to v1.11 in meilitool --- Cargo.lock | 28 +++++++++++-- crates/meilitool/Cargo.toml | 2 + crates/milli/Cargo.toml | 2 +- meilitool/src/upgrade/mod.rs | 60 +++++++++++++++++++-------- meilitool/src/upgrade/v1_10.rs | 7 +++- 
meilitool/src/upgrade/v1_11.rs | 76 ++++++++++++++++++++++++++++++++++ 6 files changed, 150 insertions(+), 25 deletions(-) create mode 100644 meilitool/src/upgrade/v1_11.rs diff --git a/Cargo.lock b/Cargo.lock index 500f28454..43a93bb05 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -404,6 +404,25 @@ dependencies = [ "thiserror", ] +[[package]] +name = "arroy" +version = "0.5.0" +source = "git+https://github.com/meilisearch/arroy/?rev=3908c9e#3908c9edfba77ba18cc50bda41c88166ba5ebd37" +dependencies = [ + "bytemuck", + "byteorder", + "heed", + "log", + "memmap2", + "nohash", + "ordered-float", + "rand", + "rayon", + "roaring", + "tempfile", + "thiserror", +] + [[package]] name = "assert-json-diff" version = "2.0.2" @@ -707,9 +726,9 @@ checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" [[package]] name = "bytemuck" -version = "1.16.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" +checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" dependencies = [ "bytemuck_derive", ] @@ -2556,7 +2575,7 @@ name = "index-scheduler" version = "1.11.0" dependencies = [ "anyhow", - "arroy", + "arroy 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "big_s", "bincode", "crossbeam", @@ -3517,6 +3536,7 @@ name = "meilitool" version = "1.11.0" dependencies = [ "anyhow", + "arroy 0.5.0 (git+https://github.com/meilisearch/arroy/?rev=3908c9e)", "clap", "dump", "file-store", @@ -3547,7 +3567,7 @@ dependencies = [ name = "milli" version = "1.11.0" dependencies = [ - "arroy", + "arroy 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "big_s", "bimap", "bincode", diff --git a/crates/meilitool/Cargo.toml b/crates/meilitool/Cargo.toml index ce6c1ad5b..937a484e2 100644 --- a/crates/meilitool/Cargo.toml +++ b/crates/meilitool/Cargo.toml @@ -18,3 +18,5 @@ meilisearch-types = { path = "../meilisearch-types" } 
serde = { version = "1.0.209", features = ["derive"] } time = { version = "0.3.36", features = ["formatting"] } uuid = { version = "1.10.0", features = ["v4"], default-features = false } +arroy_v04_to_v05 = { package = "arroy", git = "https://github.com/meilisearch/arroy/", rev = "3908c9e" } + diff --git a/crates/milli/Cargo.toml b/crates/milli/Cargo.toml index df0e59496..7b43fbf33 100644 --- a/crates/milli/Cargo.toml +++ b/crates/milli/Cargo.toml @@ -15,7 +15,7 @@ license.workspace = true bimap = { version = "0.6.3", features = ["serde"] } bincode = "1.3.3" bstr = "1.9.1" -bytemuck = { version = "1.16.1", features = ["extern_crate_alloc"] } +bytemuck = { version = "1.18.0", features = ["extern_crate_alloc"] } byteorder = "1.5.0" charabia = { version = "0.9.1", default-features = false } concat-arrays = "0.1.2" diff --git a/meilitool/src/upgrade/mod.rs b/meilitool/src/upgrade/mod.rs index 053c61c14..9a1e4286f 100644 --- a/meilitool/src/upgrade/mod.rs +++ b/meilitool/src/upgrade/mod.rs @@ -1,13 +1,16 @@ mod v1_10; +mod v1_11; mod v1_9; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use anyhow::{bail, Context}; use meilisearch_types::versioning::create_version_file; use v1_10::v1_9_to_v1_10; +use crate::upgrade::v1_11::v1_10_to_v1_11; + pub struct OfflineUpgrade { pub db_path: PathBuf, pub current_version: (String, String, String), @@ -16,29 +19,50 @@ pub struct OfflineUpgrade { impl OfflineUpgrade { pub fn upgrade(self) -> anyhow::Result<()> { + let upgrade_list = [ + (v1_9_to_v1_10 as fn(&Path) -> Result<(), anyhow::Error>, "1", "10", "0"), + (v1_10_to_v1_11, "1", "11", "0"), + ]; + let (current_major, current_minor, current_patch) = &self.current_version; + + let start_at = match ( + current_major.as_str(), + current_minor.as_str(), + current_patch.as_str(), + ) { + ("1", "9", _) => 0, + ("1", "10", _) => 1, + _ => { + bail!("Unsupported current version {current_major}.{current_minor}.{current_patch}. 
Can only upgrade from v1.9") + } + }; + let (target_major, target_minor, target_patch) = &self.target_version; - println!("Upgrading from {current_major}.{current_minor}.{current_patch} to {target_major}.{target_minor}.{target_patch}"); + let ends_at = match (target_major.as_str(), target_minor.as_str(), target_patch.as_str()) { + ("v1", "10", _) => 0, + ("v1", "11", _) => 1, + _ => { + bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. Can only upgrade to v1.11") + } + }; - match ( - (current_major.as_str(), current_minor.as_str(), current_patch.as_str()), - (target_major.as_str(), target_minor.as_str(), target_patch.as_str()), - ) { - (("1", "9", _), ("1", "10", _)) => v1_9_to_v1_10(&self.db_path)?, - ((major, minor, _), _) if major != "1" && minor != "9" => - bail!("Unsupported current version {current_major}.{current_minor}.{current_patch}. Can only upgrade from v1.9"), - (_, (major, minor, _)) if major != "1" && minor != "10" => - bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. Can only upgrade to v1.10"), - _ => - bail!("Unsupported upgrade from {current_major}.{current_minor}.{current_patch} to {target_major}.{target_minor}.{target_patch}. Can only upgrade from v1.9 to v1.10"), + println!("Starting the upgrade from {current_major}.{current_minor}.{current_patch} to {target_major}.{target_minor}.{target_patch}"); + + #[allow(clippy::needless_range_loop)] + for index in start_at..=ends_at { + let (func, major, minor, patch) = upgrade_list[index]; + (func)(&self.db_path)?; + println!("Done"); + // We're writing the version file just in case an issue arise _while_ upgrading. + // We don't want the DB to fail in an unknown state. 
+ println!("Writing VERSION file"); + + create_version_file(&self.db_path, major, minor, patch) + .context("while writing VERSION file after the upgrade")?; } - println!("Writing VERSION file"); - - create_version_file(&self.db_path, target_major, target_minor, target_patch) - .context("while writing VERSION file after the upgrade")?; - println!("Success"); Ok(()) diff --git a/meilitool/src/upgrade/v1_10.rs b/meilitool/src/upgrade/v1_10.rs index 96af99c39..99fe104e3 100644 --- a/meilitool/src/upgrade/v1_10.rs +++ b/meilitool/src/upgrade/v1_10.rs @@ -79,7 +79,8 @@ fn update_index_stats( let stats: Option = index_stats .remap_data_type::>() .get(sched_wtxn, &index_uuid) - .with_context(ctx)?; + .with_context(ctx) + .with_context(|| "While reading value")?; if let Some(stats) = stats { let stats: self::IndexStats = stats.into(); @@ -87,7 +88,8 @@ fn update_index_stats( index_stats .remap_data_type::>() .put(sched_wtxn, &index_uuid, &stats) - .with_context(ctx)?; + .with_context(ctx) + .with_context(|| "While writing value")?; } Ok(()) @@ -155,6 +157,7 @@ fn date_round_trip( } pub fn v1_9_to_v1_10(db_path: &Path) -> anyhow::Result<()> { + println!("Upgrading from v1.9.0 to v1.10.0"); // 2 changes here // 1. date format. needs to be done before opening the Index diff --git a/meilitool/src/upgrade/v1_11.rs b/meilitool/src/upgrade/v1_11.rs new file mode 100644 index 000000000..26c4234f6 --- /dev/null +++ b/meilitool/src/upgrade/v1_11.rs @@ -0,0 +1,76 @@ +//! The breaking changes that happened between the v1.10 and the v1.11 are: +//! - Arroy went from the v0.4.0 to the v0.5.0, see this release note to get the whole context: https://github.com/meilisearch/arroy/releases/tag/v0.5.0 +//! - The `angular` distance has been renamed to `cosine` => We only need to update the string in the metadata. +//! - Reorganize the `NodeId` to make the appending of vectors work => We'll have to update the keys of almost all items in the DB. +//! 
- Store the list of updated IDs directly in LMDB instead of a roaring bitmap => This shouldn't be an issue since we are never supposed to commit this roaring bitmap, but it's not forbidden by arroy so ensuring it works is probably better than anything. + +use std::path::Path; + +use anyhow::Context; +use meilisearch_types::{ + heed::{types::Str, Database, EnvOpenOptions}, + milli::index::db_name, +}; + +use crate::{try_opening_database, try_opening_poly_database, uuid_codec::UuidCodec}; + +pub fn v1_10_to_v1_11(db_path: &Path) -> anyhow::Result<()> { + println!("Upgrading from v1.10.0 to v1.11.0"); + + let index_scheduler_path = db_path.join("tasks"); + let env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&index_scheduler_path) } + .with_context(|| format!("While trying to open {:?}", index_scheduler_path.display()))?; + + let sched_rtxn = env.read_txn()?; + + let index_mapping: Database = + try_opening_database(&env, &sched_rtxn, "index-mapping")?; + + let index_count = + index_mapping.len(&sched_rtxn).context("while reading the number of indexes")?; + + let indexes: Vec<_> = index_mapping + .iter(&sched_rtxn)? + .map(|res| res.map(|(uid, uuid)| (uid.to_owned(), uuid))) + .collect(); + + // check that update can take place + for (index_index, result) in indexes.into_iter().enumerate() { + let (uid, uuid) = result?; + let index_path = db_path.join("indexes").join(uuid.to_string()); + + println!( + "[{}/{index_count}]Checking that update can take place for `{uid}` at `{}`", + index_index + 1, + index_path.display() + ); + + let index_env = unsafe { + EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { + format!("while opening index {uid} at '{}'", index_path.display()) + })? 
+ }; + + let index_rtxn = index_env.read_txn().with_context(|| { + format!( + "while obtaining a read transaction for index {uid} at {}", + index_path.display() + ) + })?; + let mut index_wtxn = index_env.write_txn().with_context(|| { + format!( + "while obtaining a write transaction for index {uid} at {}", + index_path.display() + ) + })?; + + let database = try_opening_poly_database(&index_env, &index_rtxn, db_name::VECTOR_ARROY) + .with_context(|| format!("while updating date format for index `{uid}`"))?; + + arroy_v04_to_v05::ugrade_from_prev_version(&index_rtxn, &mut index_wtxn, database)?; + + index_wtxn.commit()?; + } + + Ok(()) +} From a9b61c84349e23cf34ce9ed342ec46339c36eb9a Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 29 Oct 2024 02:51:26 +0100 Subject: [PATCH 096/111] fix the version parsing and improve error handling --- meilitool/src/upgrade/mod.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/meilitool/src/upgrade/mod.rs b/meilitool/src/upgrade/mod.rs index 9a1e4286f..ae095b6bd 100644 --- a/meilitool/src/upgrade/mod.rs +++ b/meilitool/src/upgrade/mod.rs @@ -41,8 +41,11 @@ impl OfflineUpgrade { let (target_major, target_minor, target_patch) = &self.target_version; let ends_at = match (target_major.as_str(), target_minor.as_str(), target_patch.as_str()) { - ("v1", "10", _) => 0, - ("v1", "11", _) => 1, + ("1", "10", _) => 0, + ("1", "11", _) => 1, + (major, _, _) if major.starts_with('v') => { + bail!("Target version must not starts with a `v`. Instead of writing `v1.9.0` write `1.9.0` for example.") + } _ => { bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. 
Can only upgrade to v1.11") } From 690eb42fc09db277d8426aeaa1d54e54001e1501 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 29 Oct 2024 03:27:26 +0100 Subject: [PATCH 097/111] update the version of arroy --- Cargo.lock | 4 ++-- crates/meilitool/Cargo.toml | 3 +-- meilitool/src/upgrade/v1_11.rs | 16 +++++++++++++--- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 43a93bb05..fd14a4a7d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -407,7 +407,7 @@ dependencies = [ [[package]] name = "arroy" version = "0.5.0" -source = "git+https://github.com/meilisearch/arroy/?rev=3908c9e#3908c9edfba77ba18cc50bda41c88166ba5ebd37" +source = "git+https://github.com/meilisearch/arroy/?rev=32670e7dd8b93640fcb53261ace89bda1c06497b#32670e7dd8b93640fcb53261ace89bda1c06497b" dependencies = [ "bytemuck", "byteorder", @@ -3536,7 +3536,7 @@ name = "meilitool" version = "1.11.0" dependencies = [ "anyhow", - "arroy 0.5.0 (git+https://github.com/meilisearch/arroy/?rev=3908c9e)", + "arroy 0.5.0 (git+https://github.com/meilisearch/arroy/?rev=32670e7dd8b93640fcb53261ace89bda1c06497b)", "clap", "dump", "file-store", diff --git a/crates/meilitool/Cargo.toml b/crates/meilitool/Cargo.toml index 937a484e2..693de6da8 100644 --- a/crates/meilitool/Cargo.toml +++ b/crates/meilitool/Cargo.toml @@ -18,5 +18,4 @@ meilisearch-types = { path = "../meilisearch-types" } serde = { version = "1.0.209", features = ["derive"] } time = { version = "0.3.36", features = ["formatting"] } uuid = { version = "1.10.0", features = ["v4"], default-features = false } -arroy_v04_to_v05 = { package = "arroy", git = "https://github.com/meilisearch/arroy/", rev = "3908c9e" } - +arroy_v04_to_v05 = { package = "arroy", git = "https://github.com/meilisearch/arroy/", rev = "32670e7dd8b93640fcb53261ace89bda1c06497b" } diff --git a/meilitool/src/upgrade/v1_11.rs b/meilitool/src/upgrade/v1_11.rs index 26c4234f6..4105879fd 100644 --- a/meilitool/src/upgrade/v1_11.rs +++ 
b/meilitool/src/upgrade/v1_11.rs @@ -57,6 +57,10 @@ pub fn v1_10_to_v1_11(db_path: &Path) -> anyhow::Result<()> { index_path.display() ) })?; + let index_read_database = + try_opening_poly_database(&index_env, &index_rtxn, db_name::VECTOR_ARROY) + .with_context(|| format!("while updating date format for index `{uid}`"))?; + let mut index_wtxn = index_env.write_txn().with_context(|| { format!( "while obtaining a write transaction for index {uid} at {}", @@ -64,10 +68,16 @@ pub fn v1_10_to_v1_11(db_path: &Path) -> anyhow::Result<()> { ) })?; - let database = try_opening_poly_database(&index_env, &index_rtxn, db_name::VECTOR_ARROY) - .with_context(|| format!("while updating date format for index `{uid}`"))?; + let index_write_database = + try_opening_poly_database(&index_env, &index_wtxn, db_name::VECTOR_ARROY) + .with_context(|| format!("while updating date format for index `{uid}`"))?; - arroy_v04_to_v05::ugrade_from_prev_version(&index_rtxn, &mut index_wtxn, database)?; + arroy_v04_to_v05::ugrade_from_prev_version( + &index_rtxn, + index_read_database, + &mut index_wtxn, + index_write_database, + )?; index_wtxn.commit()?; } From 5f57306858b86c4ca8755cffbb4e3d2dd36ffbfa Mon Sep 17 00:00:00 2001 From: Tamo Date: Mon, 4 Nov 2024 11:46:36 +0100 Subject: [PATCH 098/111] update the arroy version in meilitool --- Cargo.lock | 4 ++-- crates/meilitool/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fd14a4a7d..04812fd1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -407,7 +407,7 @@ dependencies = [ [[package]] name = "arroy" version = "0.5.0" -source = "git+https://github.com/meilisearch/arroy/?rev=32670e7dd8b93640fcb53261ace89bda1c06497b#32670e7dd8b93640fcb53261ace89bda1c06497b" +source = "git+https://github.com/meilisearch/arroy/?rev=053807bf38dc079f25b003f19fc30fbf3613f6e7#053807bf38dc079f25b003f19fc30fbf3613f6e7" dependencies = [ "bytemuck", "byteorder", @@ -3536,7 +3536,7 @@ name = "meilitool" version = 
"1.11.0" dependencies = [ "anyhow", - "arroy 0.5.0 (git+https://github.com/meilisearch/arroy/?rev=32670e7dd8b93640fcb53261ace89bda1c06497b)", + "arroy 0.5.0 (git+https://github.com/meilisearch/arroy/?rev=053807bf38dc079f25b003f19fc30fbf3613f6e7)", "clap", "dump", "file-store", diff --git a/crates/meilitool/Cargo.toml b/crates/meilitool/Cargo.toml index 693de6da8..f2c8920c9 100644 --- a/crates/meilitool/Cargo.toml +++ b/crates/meilitool/Cargo.toml @@ -18,4 +18,4 @@ meilisearch-types = { path = "../meilisearch-types" } serde = { version = "1.0.209", features = ["derive"] } time = { version = "0.3.36", features = ["formatting"] } uuid = { version = "1.10.0", features = ["v4"], default-features = false } -arroy_v04_to_v05 = { package = "arroy", git = "https://github.com/meilisearch/arroy/", rev = "32670e7dd8b93640fcb53261ace89bda1c06497b" } +arroy_v04_to_v05 = { package = "arroy", git = "https://github.com/meilisearch/arroy/", rev = "053807bf38dc079f25b003f19fc30fbf3613f6e7" } From 4eef0cd332168e60c38b9115560e1180d0a13d8e Mon Sep 17 00:00:00 2001 From: Tamo Date: Mon, 4 Nov 2024 15:50:38 +0100 Subject: [PATCH 099/111] fix the update from v1_9 to v1_10 by providing a custom datetime formatter myself --- meilitool/src/upgrade/v1_10.rs | 19 +++++++++++++------ meilitool/src/upgrade/v1_9.rs | 12 +++++++++--- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/meilitool/src/upgrade/v1_10.rs b/meilitool/src/upgrade/v1_10.rs index 99fe104e3..671f4d6d2 100644 --- a/meilitool/src/upgrade/v1_10.rs +++ b/meilitool/src/upgrade/v1_10.rs @@ -58,8 +58,8 @@ impl From for IndexStats { database_size, used_database_size, field_distribution, - created_at, - updated_at, + created_at: created_at.0, + updated_at: updated_at.0, } } } @@ -76,6 +76,13 @@ fn update_index_stats( ) -> anyhow::Result<()> { let ctx = || format!("while updating index stats for index `{index_uid}`"); + let stats: Option<&str> = index_stats + .remap_data_type::() + .get(sched_wtxn, &index_uuid) + 
.with_context(ctx) + .with_context(|| "While reading value")?; + dbg!(stats); + let stats: Option = index_stats .remap_data_type::>() .get(sched_wtxn, &index_uuid) @@ -139,13 +146,13 @@ fn date_round_trip( key: &str, ) -> anyhow::Result<()> { let datetime = - db.remap_types::>().get(wtxn, key).with_context( - || format!("could not read `{key}` while updating date format for index `{index_uid}`"), - )?; + db.remap_types::>().get(wtxn, key).with_context(|| { + format!("could not read `{key}` while updating date format for index `{index_uid}`") + })?; if let Some(datetime) = datetime { db.remap_types::>() - .put(wtxn, key, &self::OffsetDateTime(datetime)) + .put(wtxn, key, &self::OffsetDateTime(datetime.0)) .with_context(|| { format!( "could not write `{key}` while updating date format for index `{index_uid}`" diff --git a/meilitool/src/upgrade/v1_9.rs b/meilitool/src/upgrade/v1_9.rs index faa2d9814..3e6cfde6c 100644 --- a/meilitool/src/upgrade/v1_9.rs +++ b/meilitool/src/upgrade/v1_9.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use time::OffsetDateTime; pub type FieldDistribution = std::collections::BTreeMap; @@ -21,9 +22,9 @@ pub struct IndexStats { /// Association of every field name with the number of times it occurs in the documents. pub field_distribution: FieldDistribution, /// Creation date of the index. - pub created_at: time::OffsetDateTime, + pub created_at: LegacyTime, /// Date of the last update of the index. 
- pub updated_at: time::OffsetDateTime, + pub updated_at: LegacyTime, } #[derive(Debug, Deserialize, Serialize)] @@ -97,4 +98,9 @@ mod rest { } } -pub type OffsetDateTime = time::OffsetDateTime; +// 2024-11-04 13:32:08.48368 +00:00:00 +time::serde::format_description!(legacy_datetime, OffsetDateTime, "[year]-[month]-[day] [hour]:[minute]:[second].[subsecond] [offset_hour sign:mandatory]:[offset_minute]:[offset_second]"); + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +#[serde(transparent)] +pub struct LegacyTime(#[serde(with = "legacy_datetime")] pub OffsetDateTime); From 106cc7fe3a8dd295b9230fd77c3a98c3d8f86ace Mon Sep 17 00:00:00 2001 From: Tamo Date: Mon, 4 Nov 2024 17:51:40 +0100 Subject: [PATCH 100/111] fmt --- .../src/routes/indexes/search_analytics.rs | 20 +++++++++---------- .../src/routes/indexes/settings.rs | 2 +- .../src/routes/indexes/settings_analytics.rs | 7 ++++--- .../src/routes/indexes/similar_analytics.rs | 8 +++----- crates/meilisearch/src/routes/multi_search.rs | 3 +-- .../src/routes/multi_search_analytics.rs | 6 ++---- crates/meilisearch/tests/common/index.rs | 3 +-- 7 files changed, 21 insertions(+), 28 deletions(-) diff --git a/crates/meilisearch/src/routes/indexes/search_analytics.rs b/crates/meilisearch/src/routes/indexes/search_analytics.rs index 8bbb1781f..b16e2636e 100644 --- a/crates/meilisearch/src/routes/indexes/search_analytics.rs +++ b/crates/meilisearch/src/routes/indexes/search_analytics.rs @@ -1,18 +1,16 @@ -use once_cell::sync::Lazy; -use regex::Regex; -use serde_json::{json, Value}; use std::collections::{BTreeSet, BinaryHeap, HashMap}; use meilisearch_types::locales::Locale; +use once_cell::sync::Lazy; +use regex::Regex; +use serde_json::{json, Value}; -use crate::{ - aggregate_methods, - analytics::{Aggregate, AggregateMethod}, - search::{ - SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, - DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, - DEFAULT_SEMANTIC_RATIO, - 
}, +use crate::aggregate_methods; +use crate::analytics::{Aggregate, AggregateMethod}; +use crate::search::{ + SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER, + DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT, + DEFAULT_SEMANTIC_RATIO, }; aggregate_methods!( diff --git a/crates/meilisearch/src/routes/indexes/settings.rs b/crates/meilisearch/src/routes/indexes/settings.rs index bca763a99..a9d8d3053 100644 --- a/crates/meilisearch/src/routes/indexes/settings.rs +++ b/crates/meilisearch/src/routes/indexes/settings.rs @@ -1,4 +1,3 @@ -use super::settings_analytics::*; use actix_web::web::Data; use actix_web::{web, HttpRequest, HttpResponse}; use deserr::actix_web::AwebJson; @@ -11,6 +10,7 @@ use meilisearch_types::settings::{settings, SecretPolicy, Settings, Unchecked}; use meilisearch_types::tasks::KindWithContent; use tracing::debug; +use super::settings_analytics::*; use crate::analytics::Analytics; use crate::extractors::authentication::policies::*; use crate::extractors::authentication::GuardedData; diff --git a/crates/meilisearch/src/routes/indexes/settings_analytics.rs b/crates/meilisearch/src/routes/indexes/settings_analytics.rs index de01b72e8..32bddcbdd 100644 --- a/crates/meilisearch/src/routes/indexes/settings_analytics.rs +++ b/crates/meilisearch/src/routes/indexes/settings_analytics.rs @@ -3,15 +3,16 @@ //! through the sub-settings route directly without any manipulation. //! This is why we often use a `Option<&Vec<_>>` instead of a `Option<&[_]>`. 
+use std::collections::{BTreeMap, BTreeSet, HashSet}; + +use meilisearch_types::facet_values_sort::FacetValuesSort; use meilisearch_types::locales::{Locale, LocalizedAttributesRuleView}; use meilisearch_types::milli::update::Setting; use meilisearch_types::milli::vector::settings::EmbeddingSettings; use meilisearch_types::settings::{ - FacetingSettings, PaginationSettings, ProximityPrecisionView, TypoSettings, + FacetingSettings, PaginationSettings, ProximityPrecisionView, RankingRuleView, TypoSettings, }; -use meilisearch_types::{facet_values_sort::FacetValuesSort, settings::RankingRuleView}; use serde::Serialize; -use std::collections::{BTreeMap, BTreeSet, HashSet}; use crate::analytics::Aggregate; diff --git a/crates/meilisearch/src/routes/indexes/similar_analytics.rs b/crates/meilisearch/src/routes/indexes/similar_analytics.rs index 69685a56c..726839c3a 100644 --- a/crates/meilisearch/src/routes/indexes/similar_analytics.rs +++ b/crates/meilisearch/src/routes/indexes/similar_analytics.rs @@ -4,11 +4,9 @@ use once_cell::sync::Lazy; use regex::Regex; use serde_json::{json, Value}; -use crate::{ - aggregate_methods, - analytics::{Aggregate, AggregateMethod}, - search::{SimilarQuery, SimilarResult}, -}; +use crate::aggregate_methods; +use crate::analytics::{Aggregate, AggregateMethod}; +use crate::search::{SimilarQuery, SimilarResult}; aggregate_methods!( SimilarPOST => "Similar POST", diff --git a/crates/meilisearch/src/routes/multi_search.rs b/crates/meilisearch/src/routes/multi_search.rs index b7bd31716..f8b1bc6ee 100644 --- a/crates/meilisearch/src/routes/multi_search.rs +++ b/crates/meilisearch/src/routes/multi_search.rs @@ -9,6 +9,7 @@ use meilisearch_types::keys::actions; use serde::Serialize; use tracing::debug; +use super::multi_search_analytics::MultiSearchAggregator; use crate::analytics::Analytics; use crate::error::MeilisearchHttpError; use crate::extractors::authentication::policies::ActionPolicy; @@ -21,8 +22,6 @@ use crate::search::{ }; use 
crate::search_queue::SearchQueue; -use super::multi_search_analytics::MultiSearchAggregator; - pub fn configure(cfg: &mut web::ServiceConfig) { cfg.service(web::resource("").route(web::post().to(SeqHandler(multi_search_with_post)))); } diff --git a/crates/meilisearch/src/routes/multi_search_analytics.rs b/crates/meilisearch/src/routes/multi_search_analytics.rs index be1218399..3d07f471c 100644 --- a/crates/meilisearch/src/routes/multi_search_analytics.rs +++ b/crates/meilisearch/src/routes/multi_search_analytics.rs @@ -2,10 +2,8 @@ use std::collections::HashSet; use serde_json::json; -use crate::{ - analytics::Aggregate, - search::{FederatedSearch, SearchQueryWithIndex}, -}; +use crate::analytics::Aggregate; +use crate::search::{FederatedSearch, SearchQueryWithIndex}; #[derive(Default)] pub struct MultiSearchAggregator { diff --git a/crates/meilisearch/tests/common/index.rs b/crates/meilisearch/tests/common/index.rs index 784067c2d..221333fd7 100644 --- a/crates/meilisearch/tests/common/index.rs +++ b/crates/meilisearch/tests/common/index.rs @@ -9,8 +9,7 @@ use urlencoding::encode as urlencode; use super::encoder::Encoder; use super::service::Service; -use super::Value; -use super::{Owned, Shared}; +use super::{Owned, Shared, Value}; use crate::json; pub struct Index<'a, State = Owned> { From 99a9fde37f18b0498cdbc7b88a1510f8912d00b9 Mon Sep 17 00:00:00 2001 From: Tamo Date: Mon, 4 Nov 2024 17:55:55 +0100 Subject: [PATCH 101/111] push back the removed files --- crates/meilitool/src/upgrade/mod.rs | 73 +++++++ crates/meilitool/src/upgrade/v1_10.rs | 289 ++++++++++++++++++++++++++ crates/meilitool/src/upgrade/v1_11.rs | 86 ++++++++ crates/meilitool/src/upgrade/v1_9.rs | 106 ++++++++++ 4 files changed, 554 insertions(+) create mode 100644 crates/meilitool/src/upgrade/mod.rs create mode 100644 crates/meilitool/src/upgrade/v1_10.rs create mode 100644 crates/meilitool/src/upgrade/v1_11.rs create mode 100644 crates/meilitool/src/upgrade/v1_9.rs diff --git 
a/crates/meilitool/src/upgrade/mod.rs b/crates/meilitool/src/upgrade/mod.rs new file mode 100644 index 000000000..ae095b6bd --- /dev/null +++ b/crates/meilitool/src/upgrade/mod.rs @@ -0,0 +1,73 @@ +mod v1_10; +mod v1_11; +mod v1_9; + +use std::path::{Path, PathBuf}; + +use anyhow::{bail, Context}; +use meilisearch_types::versioning::create_version_file; + +use v1_10::v1_9_to_v1_10; + +use crate::upgrade::v1_11::v1_10_to_v1_11; + +pub struct OfflineUpgrade { + pub db_path: PathBuf, + pub current_version: (String, String, String), + pub target_version: (String, String, String), +} + +impl OfflineUpgrade { + pub fn upgrade(self) -> anyhow::Result<()> { + let upgrade_list = [ + (v1_9_to_v1_10 as fn(&Path) -> Result<(), anyhow::Error>, "1", "10", "0"), + (v1_10_to_v1_11, "1", "11", "0"), + ]; + + let (current_major, current_minor, current_patch) = &self.current_version; + + let start_at = match ( + current_major.as_str(), + current_minor.as_str(), + current_patch.as_str(), + ) { + ("1", "9", _) => 0, + ("1", "10", _) => 1, + _ => { + bail!("Unsupported current version {current_major}.{current_minor}.{current_patch}. Can only upgrade from v1.9") + } + }; + + let (target_major, target_minor, target_patch) = &self.target_version; + + let ends_at = match (target_major.as_str(), target_minor.as_str(), target_patch.as_str()) { + ("1", "10", _) => 0, + ("1", "11", _) => 1, + (major, _, _) if major.starts_with('v') => { + bail!("Target version must not starts with a `v`. Instead of writing `v1.9.0` write `1.9.0` for example.") + } + _ => { + bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. 
Can only upgrade to v1.11") + } + }; + + println!("Starting the upgrade from {current_major}.{current_minor}.{current_patch} to {target_major}.{target_minor}.{target_patch}"); + + #[allow(clippy::needless_range_loop)] + for index in start_at..=ends_at { + let (func, major, minor, patch) = upgrade_list[index]; + (func)(&self.db_path)?; + println!("Done"); + // We're writing the version file just in case an issue arise _while_ upgrading. + // We don't want the DB to fail in an unknown state. + println!("Writing VERSION file"); + + create_version_file(&self.db_path, major, minor, patch) + .context("while writing VERSION file after the upgrade")?; + } + + println!("Success"); + + Ok(()) + } +} diff --git a/crates/meilitool/src/upgrade/v1_10.rs b/crates/meilitool/src/upgrade/v1_10.rs new file mode 100644 index 000000000..671f4d6d2 --- /dev/null +++ b/crates/meilitool/src/upgrade/v1_10.rs @@ -0,0 +1,289 @@ +use anyhow::bail; +use std::path::Path; + +use anyhow::Context; +use meilisearch_types::{ + heed::{ + types::{SerdeJson, Str}, + Database, Env, EnvOpenOptions, RoTxn, RwTxn, Unspecified, + }, + milli::index::{db_name, main_key}, +}; + +use crate::{try_opening_database, try_opening_poly_database, uuid_codec::UuidCodec}; + +use super::v1_9; + +pub type FieldDistribution = std::collections::BTreeMap; + +/// The statistics that can be computed from an `Index` object. +#[derive(serde::Serialize, serde::Deserialize, Debug)] +pub struct IndexStats { + /// Number of documents in the index. + pub number_of_documents: u64, + /// Size taken up by the index' DB, in bytes. + /// + /// This includes the size taken by both the used and free pages of the DB, and as the free pages + /// are not returned to the disk after a deletion, this number is typically larger than + /// `used_database_size` that only includes the size of the used pages. + pub database_size: u64, + /// Size taken by the used pages of the index' DB, in bytes. 
+ /// + /// As the DB backend does not return to the disk the pages that are not currently used by the DB, + /// this value is typically smaller than `database_size`. + pub used_database_size: u64, + /// Association of every field name with the number of times it occurs in the documents. + pub field_distribution: FieldDistribution, + /// Creation date of the index. + #[serde(with = "time::serde::rfc3339")] + pub created_at: time::OffsetDateTime, + /// Date of the last update of the index. + #[serde(with = "time::serde::rfc3339")] + pub updated_at: time::OffsetDateTime, +} + +impl From for IndexStats { + fn from( + v1_9::IndexStats { + number_of_documents, + database_size, + used_database_size, + field_distribution, + created_at, + updated_at, + }: v1_9::IndexStats, + ) -> Self { + IndexStats { + number_of_documents, + database_size, + used_database_size, + field_distribution, + created_at: created_at.0, + updated_at: updated_at.0, + } + } +} + +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(transparent)] +pub struct OffsetDateTime(#[serde(with = "time::serde::rfc3339")] pub time::OffsetDateTime); + +fn update_index_stats( + index_stats: Database, + index_uid: &str, + index_uuid: uuid::Uuid, + sched_wtxn: &mut RwTxn, +) -> anyhow::Result<()> { + let ctx = || format!("while updating index stats for index `{index_uid}`"); + + let stats: Option<&str> = index_stats + .remap_data_type::() + .get(sched_wtxn, &index_uuid) + .with_context(ctx) + .with_context(|| "While reading value")?; + dbg!(stats); + + let stats: Option = index_stats + .remap_data_type::>() + .get(sched_wtxn, &index_uuid) + .with_context(ctx) + .with_context(|| "While reading value")?; + + if let Some(stats) = stats { + let stats: self::IndexStats = stats.into(); + + index_stats + .remap_data_type::>() + .put(sched_wtxn, &index_uuid, &stats) + .with_context(ctx) + .with_context(|| "While writing value")?; + } + + Ok(()) +} + +fn update_date_format( + index_uid: &str, + index_env: &Env, + 
index_wtxn: &mut RwTxn, +) -> anyhow::Result<()> { + let main = try_opening_poly_database(index_env, index_wtxn, db_name::MAIN) + .with_context(|| format!("while updating date format for index `{index_uid}`"))?; + + date_round_trip(index_wtxn, index_uid, main, main_key::CREATED_AT_KEY)?; + date_round_trip(index_wtxn, index_uid, main, main_key::UPDATED_AT_KEY)?; + + Ok(()) +} + +fn find_rest_embedders( + index_uid: &str, + index_env: &Env, + index_txn: &RoTxn, +) -> anyhow::Result> { + let main = try_opening_poly_database(index_env, index_txn, db_name::MAIN) + .with_context(|| format!("while checking REST embedders for index `{index_uid}`"))?; + + let mut rest_embedders = vec![]; + + for config in main + .remap_types::>>() + .get(index_txn, main_key::EMBEDDING_CONFIGS)? + .unwrap_or_default() + { + if let v1_9::EmbedderOptions::Rest(_) = config.config.embedder_options { + rest_embedders.push(config.name); + } + } + + Ok(rest_embedders) +} + +fn date_round_trip( + wtxn: &mut RwTxn, + index_uid: &str, + db: Database, + key: &str, +) -> anyhow::Result<()> { + let datetime = + db.remap_types::>().get(wtxn, key).with_context(|| { + format!("could not read `{key}` while updating date format for index `{index_uid}`") + })?; + + if let Some(datetime) = datetime { + db.remap_types::>() + .put(wtxn, key, &self::OffsetDateTime(datetime.0)) + .with_context(|| { + format!( + "could not write `{key}` while updating date format for index `{index_uid}`" + ) + })?; + } + + Ok(()) +} + +pub fn v1_9_to_v1_10(db_path: &Path) -> anyhow::Result<()> { + println!("Upgrading from v1.9.0 to v1.10.0"); + // 2 changes here + + // 1. date format. needs to be done before opening the Index + // 2. REST embedders. 
We don't support this case right now, so bail + + let index_scheduler_path = db_path.join("tasks"); + let env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&index_scheduler_path) } + .with_context(|| format!("While trying to open {:?}", index_scheduler_path.display()))?; + + let mut sched_wtxn = env.write_txn()?; + + let index_mapping: Database = + try_opening_database(&env, &sched_wtxn, "index-mapping")?; + + let index_stats: Database = + try_opening_database(&env, &sched_wtxn, "index-stats").with_context(|| { + format!("While trying to open {:?}", index_scheduler_path.display()) + })?; + + let index_count = + index_mapping.len(&sched_wtxn).context("while reading the number of indexes")?; + + // FIXME: not ideal, we have to pre-populate all indexes to prevent double borrow of sched_wtxn + // 1. immutably for the iteration + // 2. mutably for updating index stats + let indexes: Vec<_> = index_mapping + .iter(&sched_wtxn)? + .map(|res| res.map(|(uid, uuid)| (uid.to_owned(), uuid))) + .collect(); + + let mut rest_embedders = Vec::new(); + + let mut unwrapped_indexes = Vec::new(); + + // check that update can take place + for (index_index, result) in indexes.into_iter().enumerate() { + let (uid, uuid) = result?; + let index_path = db_path.join("indexes").join(uuid.to_string()); + + println!( + "[{}/{index_count}]Checking that update can take place for `{uid}` at `{}`", + index_index + 1, + index_path.display() + ); + + let index_env = unsafe { + // FIXME: fetch the 25 magic number from the index file + EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { + format!("while opening index {uid} at '{}'", index_path.display()) + })? 
+ }; + + let index_txn = index_env.read_txn().with_context(|| { + format!( + "while obtaining a write transaction for index {uid} at {}", + index_path.display() + ) + })?; + + println!("\t- Checking for incompatible embedders (REST embedders)"); + let rest_embedders_for_index = find_rest_embedders(&uid, &index_env, &index_txn)?; + + if rest_embedders_for_index.is_empty() { + unwrapped_indexes.push((uid, uuid)); + } else { + // no need to add to unwrapped indexes because we'll exit early + rest_embedders.push((uid, rest_embedders_for_index)); + } + } + + if !rest_embedders.is_empty() { + let rest_embedders = rest_embedders + .into_iter() + .flat_map(|(index, embedders)| std::iter::repeat(index.clone()).zip(embedders)) + .map(|(index, embedder)| format!("\t- embedder `{embedder}` in index `{index}`")) + .collect::>() + .join("\n"); + bail!("The update cannot take place because there are REST embedder(s). Remove them before proceeding with the update:\n{rest_embedders}\n\n\ + The database has not been modified and is still a valid v1.9 database."); + } + + println!("Update can take place, updating"); + + for (index_index, (uid, uuid)) in unwrapped_indexes.into_iter().enumerate() { + let index_path = db_path.join("indexes").join(uuid.to_string()); + + println!( + "[{}/{index_count}]Updating index `{uid}` at `{}`", + index_index + 1, + index_path.display() + ); + + let index_env = unsafe { + // FIXME: fetch the 25 magic number from the index file + EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { + format!("while opening index {uid} at '{}'", index_path.display()) + })? 
+ }; + + let mut index_wtxn = index_env.write_txn().with_context(|| { + format!( + "while obtaining a write transaction for index `{uid}` at `{}`", + index_path.display() + ) + })?; + + println!("\t- Updating index stats"); + update_index_stats(index_stats, &uid, uuid, &mut sched_wtxn)?; + println!("\t- Updating date format"); + update_date_format(&uid, &index_env, &mut index_wtxn)?; + + index_wtxn.commit().with_context(|| { + format!("while committing the write txn for index `{uid}` at {}", index_path.display()) + })?; + } + + sched_wtxn.commit().context("while committing the write txn for the index-scheduler")?; + + println!("Upgrading database succeeded"); + + Ok(()) +} diff --git a/crates/meilitool/src/upgrade/v1_11.rs b/crates/meilitool/src/upgrade/v1_11.rs new file mode 100644 index 000000000..4105879fd --- /dev/null +++ b/crates/meilitool/src/upgrade/v1_11.rs @@ -0,0 +1,86 @@ +//! The breaking changes that happened between the v1.10 and the v1.11 are: +//! - Arroy went from the v0.4.0 to the v0.5.0, see this release note to get the whole context: https://github.com/meilisearch/arroy/releases/tag/v0.5.0 +//! - The `angular` distance has been renamed to `cosine` => We only need to update the string in the metadata. +//! - Reorganize the `NodeId` to make the appending of vectors work => We'll have to update the keys of almost all items in the DB. +//! - Store the list of updated IDs directly in LMDB instead of a roaring bitmap => This shouldn't be an issue since we are never supposed to commit this roaring bitmap, but it's not forbidden by arroy so ensuring it works is probably better than anything. 
+ +use std::path::Path; + +use anyhow::Context; +use meilisearch_types::{ + heed::{types::Str, Database, EnvOpenOptions}, + milli::index::db_name, +}; + +use crate::{try_opening_database, try_opening_poly_database, uuid_codec::UuidCodec}; + +pub fn v1_10_to_v1_11(db_path: &Path) -> anyhow::Result<()> { + println!("Upgrading from v1.10.0 to v1.11.0"); + + let index_scheduler_path = db_path.join("tasks"); + let env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&index_scheduler_path) } + .with_context(|| format!("While trying to open {:?}", index_scheduler_path.display()))?; + + let sched_rtxn = env.read_txn()?; + + let index_mapping: Database = + try_opening_database(&env, &sched_rtxn, "index-mapping")?; + + let index_count = + index_mapping.len(&sched_rtxn).context("while reading the number of indexes")?; + + let indexes: Vec<_> = index_mapping + .iter(&sched_rtxn)? + .map(|res| res.map(|(uid, uuid)| (uid.to_owned(), uuid))) + .collect(); + + // check that update can take place + for (index_index, result) in indexes.into_iter().enumerate() { + let (uid, uuid) = result?; + let index_path = db_path.join("indexes").join(uuid.to_string()); + + println!( + "[{}/{index_count}]Checking that update can take place for `{uid}` at `{}`", + index_index + 1, + index_path.display() + ); + + let index_env = unsafe { + EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { + format!("while opening index {uid} at '{}'", index_path.display()) + })? 
+ }; + + let index_rtxn = index_env.read_txn().with_context(|| { + format!( + "while obtaining a read transaction for index {uid} at {}", + index_path.display() + ) + })?; + let index_read_database = + try_opening_poly_database(&index_env, &index_rtxn, db_name::VECTOR_ARROY) + .with_context(|| format!("while updating date format for index `{uid}`"))?; + + let mut index_wtxn = index_env.write_txn().with_context(|| { + format!( + "while obtaining a write transaction for index {uid} at {}", + index_path.display() + ) + })?; + + let index_write_database = + try_opening_poly_database(&index_env, &index_wtxn, db_name::VECTOR_ARROY) + .with_context(|| format!("while updating date format for index `{uid}`"))?; + + arroy_v04_to_v05::ugrade_from_prev_version( + &index_rtxn, + index_read_database, + &mut index_wtxn, + index_write_database, + )?; + + index_wtxn.commit()?; + } + + Ok(()) +} diff --git a/crates/meilitool/src/upgrade/v1_9.rs b/crates/meilitool/src/upgrade/v1_9.rs new file mode 100644 index 000000000..3e6cfde6c --- /dev/null +++ b/crates/meilitool/src/upgrade/v1_9.rs @@ -0,0 +1,106 @@ +use serde::{Deserialize, Serialize}; +use time::OffsetDateTime; + +pub type FieldDistribution = std::collections::BTreeMap; + +/// The statistics that can be computed from an `Index` object. +#[derive(serde::Serialize, serde::Deserialize, Debug)] +pub struct IndexStats { + /// Number of documents in the index. + pub number_of_documents: u64, + /// Size taken up by the index' DB, in bytes. + /// + /// This includes the size taken by both the used and free pages of the DB, and as the free pages + /// are not returned to the disk after a deletion, this number is typically larger than + /// `used_database_size` that only includes the size of the used pages. + pub database_size: u64, + /// Size taken by the used pages of the index' DB, in bytes. 
+ /// + /// As the DB backend does not return to the disk the pages that are not currently used by the DB, + /// this value is typically smaller than `database_size`. + pub used_database_size: u64, + /// Association of every field name with the number of times it occurs in the documents. + pub field_distribution: FieldDistribution, + /// Creation date of the index. + pub created_at: LegacyTime, + /// Date of the last update of the index. + pub updated_at: LegacyTime, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct IndexEmbeddingConfig { + pub name: String, + pub config: EmbeddingConfig, +} + +#[derive(Debug, Clone, Default, serde::Deserialize, serde::Serialize)] +pub struct EmbeddingConfig { + /// Options of the embedder, specific to each kind of embedder + pub embedder_options: EmbedderOptions, +} + +/// Options of an embedder, specific to each kind of embedder. +#[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] +pub enum EmbedderOptions { + HuggingFace(hf::EmbedderOptions), + OpenAi(openai::EmbedderOptions), + Ollama(ollama::EmbedderOptions), + UserProvided(manual::EmbedderOptions), + Rest(rest::EmbedderOptions), +} + +impl Default for EmbedderOptions { + fn default() -> Self { + Self::OpenAi(openai::EmbedderOptions { api_key: None, dimensions: None }) + } +} + +mod hf { + #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] + pub struct EmbedderOptions { + pub model: String, + pub revision: Option, + } +} +mod openai { + + #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] + pub struct EmbedderOptions { + pub api_key: Option, + pub dimensions: Option, + } +} +mod ollama { + #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] + pub struct EmbedderOptions { + pub embedding_model: String, + pub url: Option, + pub api_key: Option, + } +} +mod manual { + #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, 
serde::Serialize)] + pub struct EmbedderOptions { + pub dimensions: usize, + } +} +mod rest { + #[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize, serde::Serialize, Hash)] + pub struct EmbedderOptions { + pub api_key: Option, + pub dimensions: Option, + pub url: String, + pub input_field: Vec, + // path to the array of embeddings + pub path_to_embeddings: Vec, + // shape of a single embedding + pub embedding_object: Vec, + } +} + +// 2024-11-04 13:32:08.48368 +00:00:00 +time::serde::format_description!(legacy_datetime, OffsetDateTime, "[year]-[month]-[day] [hour]:[minute]:[second].[subsecond] [offset_hour sign:mandatory]:[offset_minute]:[offset_second]"); + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +#[serde(transparent)] +pub struct LegacyTime(#[serde(with = "legacy_datetime")] pub OffsetDateTime); From a1f228f662f5fd76b15fab8acabcbf3b7f40080e Mon Sep 17 00:00:00 2001 From: Tamo Date: Mon, 4 Nov 2024 18:19:36 +0100 Subject: [PATCH 102/111] remove the uneeded files after the rebase --- meilitool/src/upgrade/mod.rs | 73 --------- meilitool/src/upgrade/v1_10.rs | 289 --------------------------------- meilitool/src/upgrade/v1_11.rs | 86 ---------- meilitool/src/upgrade/v1_9.rs | 106 ------------ 4 files changed, 554 deletions(-) delete mode 100644 meilitool/src/upgrade/mod.rs delete mode 100644 meilitool/src/upgrade/v1_10.rs delete mode 100644 meilitool/src/upgrade/v1_11.rs delete mode 100644 meilitool/src/upgrade/v1_9.rs diff --git a/meilitool/src/upgrade/mod.rs b/meilitool/src/upgrade/mod.rs deleted file mode 100644 index ae095b6bd..000000000 --- a/meilitool/src/upgrade/mod.rs +++ /dev/null @@ -1,73 +0,0 @@ -mod v1_10; -mod v1_11; -mod v1_9; - -use std::path::{Path, PathBuf}; - -use anyhow::{bail, Context}; -use meilisearch_types::versioning::create_version_file; - -use v1_10::v1_9_to_v1_10; - -use crate::upgrade::v1_11::v1_10_to_v1_11; - -pub struct OfflineUpgrade { - pub db_path: PathBuf, - pub current_version: (String, String, String), - pub 
target_version: (String, String, String), -} - -impl OfflineUpgrade { - pub fn upgrade(self) -> anyhow::Result<()> { - let upgrade_list = [ - (v1_9_to_v1_10 as fn(&Path) -> Result<(), anyhow::Error>, "1", "10", "0"), - (v1_10_to_v1_11, "1", "11", "0"), - ]; - - let (current_major, current_minor, current_patch) = &self.current_version; - - let start_at = match ( - current_major.as_str(), - current_minor.as_str(), - current_patch.as_str(), - ) { - ("1", "9", _) => 0, - ("1", "10", _) => 1, - _ => { - bail!("Unsupported current version {current_major}.{current_minor}.{current_patch}. Can only upgrade from v1.9") - } - }; - - let (target_major, target_minor, target_patch) = &self.target_version; - - let ends_at = match (target_major.as_str(), target_minor.as_str(), target_patch.as_str()) { - ("1", "10", _) => 0, - ("1", "11", _) => 1, - (major, _, _) if major.starts_with('v') => { - bail!("Target version must not starts with a `v`. Instead of writing `v1.9.0` write `1.9.0` for example.") - } - _ => { - bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. Can only upgrade to v1.11") - } - }; - - println!("Starting the upgrade from {current_major}.{current_minor}.{current_patch} to {target_major}.{target_minor}.{target_patch}"); - - #[allow(clippy::needless_range_loop)] - for index in start_at..=ends_at { - let (func, major, minor, patch) = upgrade_list[index]; - (func)(&self.db_path)?; - println!("Done"); - // We're writing the version file just in case an issue arise _while_ upgrading. - // We don't want the DB to fail in an unknown state. 
- println!("Writing VERSION file"); - - create_version_file(&self.db_path, major, minor, patch) - .context("while writing VERSION file after the upgrade")?; - } - - println!("Success"); - - Ok(()) - } -} diff --git a/meilitool/src/upgrade/v1_10.rs b/meilitool/src/upgrade/v1_10.rs deleted file mode 100644 index 671f4d6d2..000000000 --- a/meilitool/src/upgrade/v1_10.rs +++ /dev/null @@ -1,289 +0,0 @@ -use anyhow::bail; -use std::path::Path; - -use anyhow::Context; -use meilisearch_types::{ - heed::{ - types::{SerdeJson, Str}, - Database, Env, EnvOpenOptions, RoTxn, RwTxn, Unspecified, - }, - milli::index::{db_name, main_key}, -}; - -use crate::{try_opening_database, try_opening_poly_database, uuid_codec::UuidCodec}; - -use super::v1_9; - -pub type FieldDistribution = std::collections::BTreeMap; - -/// The statistics that can be computed from an `Index` object. -#[derive(serde::Serialize, serde::Deserialize, Debug)] -pub struct IndexStats { - /// Number of documents in the index. - pub number_of_documents: u64, - /// Size taken up by the index' DB, in bytes. - /// - /// This includes the size taken by both the used and free pages of the DB, and as the free pages - /// are not returned to the disk after a deletion, this number is typically larger than - /// `used_database_size` that only includes the size of the used pages. - pub database_size: u64, - /// Size taken by the used pages of the index' DB, in bytes. - /// - /// As the DB backend does not return to the disk the pages that are not currently used by the DB, - /// this value is typically smaller than `database_size`. - pub used_database_size: u64, - /// Association of every field name with the number of times it occurs in the documents. - pub field_distribution: FieldDistribution, - /// Creation date of the index. - #[serde(with = "time::serde::rfc3339")] - pub created_at: time::OffsetDateTime, - /// Date of the last update of the index. 
- #[serde(with = "time::serde::rfc3339")] - pub updated_at: time::OffsetDateTime, -} - -impl From for IndexStats { - fn from( - v1_9::IndexStats { - number_of_documents, - database_size, - used_database_size, - field_distribution, - created_at, - updated_at, - }: v1_9::IndexStats, - ) -> Self { - IndexStats { - number_of_documents, - database_size, - used_database_size, - field_distribution, - created_at: created_at.0, - updated_at: updated_at.0, - } - } -} - -#[derive(serde::Serialize, serde::Deserialize)] -#[serde(transparent)] -pub struct OffsetDateTime(#[serde(with = "time::serde::rfc3339")] pub time::OffsetDateTime); - -fn update_index_stats( - index_stats: Database, - index_uid: &str, - index_uuid: uuid::Uuid, - sched_wtxn: &mut RwTxn, -) -> anyhow::Result<()> { - let ctx = || format!("while updating index stats for index `{index_uid}`"); - - let stats: Option<&str> = index_stats - .remap_data_type::() - .get(sched_wtxn, &index_uuid) - .with_context(ctx) - .with_context(|| "While reading value")?; - dbg!(stats); - - let stats: Option = index_stats - .remap_data_type::>() - .get(sched_wtxn, &index_uuid) - .with_context(ctx) - .with_context(|| "While reading value")?; - - if let Some(stats) = stats { - let stats: self::IndexStats = stats.into(); - - index_stats - .remap_data_type::>() - .put(sched_wtxn, &index_uuid, &stats) - .with_context(ctx) - .with_context(|| "While writing value")?; - } - - Ok(()) -} - -fn update_date_format( - index_uid: &str, - index_env: &Env, - index_wtxn: &mut RwTxn, -) -> anyhow::Result<()> { - let main = try_opening_poly_database(index_env, index_wtxn, db_name::MAIN) - .with_context(|| format!("while updating date format for index `{index_uid}`"))?; - - date_round_trip(index_wtxn, index_uid, main, main_key::CREATED_AT_KEY)?; - date_round_trip(index_wtxn, index_uid, main, main_key::UPDATED_AT_KEY)?; - - Ok(()) -} - -fn find_rest_embedders( - index_uid: &str, - index_env: &Env, - index_txn: &RoTxn, -) -> anyhow::Result> { - let main = 
try_opening_poly_database(index_env, index_txn, db_name::MAIN) - .with_context(|| format!("while checking REST embedders for index `{index_uid}`"))?; - - let mut rest_embedders = vec![]; - - for config in main - .remap_types::>>() - .get(index_txn, main_key::EMBEDDING_CONFIGS)? - .unwrap_or_default() - { - if let v1_9::EmbedderOptions::Rest(_) = config.config.embedder_options { - rest_embedders.push(config.name); - } - } - - Ok(rest_embedders) -} - -fn date_round_trip( - wtxn: &mut RwTxn, - index_uid: &str, - db: Database, - key: &str, -) -> anyhow::Result<()> { - let datetime = - db.remap_types::>().get(wtxn, key).with_context(|| { - format!("could not read `{key}` while updating date format for index `{index_uid}`") - })?; - - if let Some(datetime) = datetime { - db.remap_types::>() - .put(wtxn, key, &self::OffsetDateTime(datetime.0)) - .with_context(|| { - format!( - "could not write `{key}` while updating date format for index `{index_uid}`" - ) - })?; - } - - Ok(()) -} - -pub fn v1_9_to_v1_10(db_path: &Path) -> anyhow::Result<()> { - println!("Upgrading from v1.9.0 to v1.10.0"); - // 2 changes here - - // 1. date format. needs to be done before opening the Index - // 2. REST embedders. 
We don't support this case right now, so bail - - let index_scheduler_path = db_path.join("tasks"); - let env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&index_scheduler_path) } - .with_context(|| format!("While trying to open {:?}", index_scheduler_path.display()))?; - - let mut sched_wtxn = env.write_txn()?; - - let index_mapping: Database = - try_opening_database(&env, &sched_wtxn, "index-mapping")?; - - let index_stats: Database = - try_opening_database(&env, &sched_wtxn, "index-stats").with_context(|| { - format!("While trying to open {:?}", index_scheduler_path.display()) - })?; - - let index_count = - index_mapping.len(&sched_wtxn).context("while reading the number of indexes")?; - - // FIXME: not ideal, we have to pre-populate all indexes to prevent double borrow of sched_wtxn - // 1. immutably for the iteration - // 2. mutably for updating index stats - let indexes: Vec<_> = index_mapping - .iter(&sched_wtxn)? - .map(|res| res.map(|(uid, uuid)| (uid.to_owned(), uuid))) - .collect(); - - let mut rest_embedders = Vec::new(); - - let mut unwrapped_indexes = Vec::new(); - - // check that update can take place - for (index_index, result) in indexes.into_iter().enumerate() { - let (uid, uuid) = result?; - let index_path = db_path.join("indexes").join(uuid.to_string()); - - println!( - "[{}/{index_count}]Checking that update can take place for `{uid}` at `{}`", - index_index + 1, - index_path.display() - ); - - let index_env = unsafe { - // FIXME: fetch the 25 magic number from the index file - EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { - format!("while opening index {uid} at '{}'", index_path.display()) - })? 
- }; - - let index_txn = index_env.read_txn().with_context(|| { - format!( - "while obtaining a write transaction for index {uid} at {}", - index_path.display() - ) - })?; - - println!("\t- Checking for incompatible embedders (REST embedders)"); - let rest_embedders_for_index = find_rest_embedders(&uid, &index_env, &index_txn)?; - - if rest_embedders_for_index.is_empty() { - unwrapped_indexes.push((uid, uuid)); - } else { - // no need to add to unwrapped indexes because we'll exit early - rest_embedders.push((uid, rest_embedders_for_index)); - } - } - - if !rest_embedders.is_empty() { - let rest_embedders = rest_embedders - .into_iter() - .flat_map(|(index, embedders)| std::iter::repeat(index.clone()).zip(embedders)) - .map(|(index, embedder)| format!("\t- embedder `{embedder}` in index `{index}`")) - .collect::>() - .join("\n"); - bail!("The update cannot take place because there are REST embedder(s). Remove them before proceeding with the update:\n{rest_embedders}\n\n\ - The database has not been modified and is still a valid v1.9 database."); - } - - println!("Update can take place, updating"); - - for (index_index, (uid, uuid)) in unwrapped_indexes.into_iter().enumerate() { - let index_path = db_path.join("indexes").join(uuid.to_string()); - - println!( - "[{}/{index_count}]Updating index `{uid}` at `{}`", - index_index + 1, - index_path.display() - ); - - let index_env = unsafe { - // FIXME: fetch the 25 magic number from the index file - EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { - format!("while opening index {uid} at '{}'", index_path.display()) - })? 
- }; - - let mut index_wtxn = index_env.write_txn().with_context(|| { - format!( - "while obtaining a write transaction for index `{uid}` at `{}`", - index_path.display() - ) - })?; - - println!("\t- Updating index stats"); - update_index_stats(index_stats, &uid, uuid, &mut sched_wtxn)?; - println!("\t- Updating date format"); - update_date_format(&uid, &index_env, &mut index_wtxn)?; - - index_wtxn.commit().with_context(|| { - format!("while committing the write txn for index `{uid}` at {}", index_path.display()) - })?; - } - - sched_wtxn.commit().context("while committing the write txn for the index-scheduler")?; - - println!("Upgrading database succeeded"); - - Ok(()) -} diff --git a/meilitool/src/upgrade/v1_11.rs b/meilitool/src/upgrade/v1_11.rs deleted file mode 100644 index 4105879fd..000000000 --- a/meilitool/src/upgrade/v1_11.rs +++ /dev/null @@ -1,86 +0,0 @@ -//! The breaking changes that happened between the v1.10 and the v1.11 are: -//! - Arroy went from the v0.4.0 to the v0.5.0, see this release note to get the whole context: https://github.com/meilisearch/arroy/releases/tag/v0.5.0 -//! - The `angular` distance has been renamed to `cosine` => We only need to update the string in the metadata. -//! - Reorganize the `NodeId` to make the appending of vectors work => We'll have to update the keys of almost all items in the DB. -//! - Store the list of updated IDs directly in LMDB instead of a roaring bitmap => This shouldn't be an issue since we are never supposed to commit this roaring bitmap, but it's not forbidden by arroy so ensuring it works is probably better than anything. 
- -use std::path::Path; - -use anyhow::Context; -use meilisearch_types::{ - heed::{types::Str, Database, EnvOpenOptions}, - milli::index::db_name, -}; - -use crate::{try_opening_database, try_opening_poly_database, uuid_codec::UuidCodec}; - -pub fn v1_10_to_v1_11(db_path: &Path) -> anyhow::Result<()> { - println!("Upgrading from v1.10.0 to v1.11.0"); - - let index_scheduler_path = db_path.join("tasks"); - let env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&index_scheduler_path) } - .with_context(|| format!("While trying to open {:?}", index_scheduler_path.display()))?; - - let sched_rtxn = env.read_txn()?; - - let index_mapping: Database = - try_opening_database(&env, &sched_rtxn, "index-mapping")?; - - let index_count = - index_mapping.len(&sched_rtxn).context("while reading the number of indexes")?; - - let indexes: Vec<_> = index_mapping - .iter(&sched_rtxn)? - .map(|res| res.map(|(uid, uuid)| (uid.to_owned(), uuid))) - .collect(); - - // check that update can take place - for (index_index, result) in indexes.into_iter().enumerate() { - let (uid, uuid) = result?; - let index_path = db_path.join("indexes").join(uuid.to_string()); - - println!( - "[{}/{index_count}]Checking that update can take place for `{uid}` at `{}`", - index_index + 1, - index_path.display() - ); - - let index_env = unsafe { - EnvOpenOptions::new().max_dbs(25).open(&index_path).with_context(|| { - format!("while opening index {uid} at '{}'", index_path.display()) - })? 
- }; - - let index_rtxn = index_env.read_txn().with_context(|| { - format!( - "while obtaining a read transaction for index {uid} at {}", - index_path.display() - ) - })?; - let index_read_database = - try_opening_poly_database(&index_env, &index_rtxn, db_name::VECTOR_ARROY) - .with_context(|| format!("while updating date format for index `{uid}`"))?; - - let mut index_wtxn = index_env.write_txn().with_context(|| { - format!( - "while obtaining a write transaction for index {uid} at {}", - index_path.display() - ) - })?; - - let index_write_database = - try_opening_poly_database(&index_env, &index_wtxn, db_name::VECTOR_ARROY) - .with_context(|| format!("while updating date format for index `{uid}`"))?; - - arroy_v04_to_v05::ugrade_from_prev_version( - &index_rtxn, - index_read_database, - &mut index_wtxn, - index_write_database, - )?; - - index_wtxn.commit()?; - } - - Ok(()) -} diff --git a/meilitool/src/upgrade/v1_9.rs b/meilitool/src/upgrade/v1_9.rs deleted file mode 100644 index 3e6cfde6c..000000000 --- a/meilitool/src/upgrade/v1_9.rs +++ /dev/null @@ -1,106 +0,0 @@ -use serde::{Deserialize, Serialize}; -use time::OffsetDateTime; - -pub type FieldDistribution = std::collections::BTreeMap; - -/// The statistics that can be computed from an `Index` object. -#[derive(serde::Serialize, serde::Deserialize, Debug)] -pub struct IndexStats { - /// Number of documents in the index. - pub number_of_documents: u64, - /// Size taken up by the index' DB, in bytes. - /// - /// This includes the size taken by both the used and free pages of the DB, and as the free pages - /// are not returned to the disk after a deletion, this number is typically larger than - /// `used_database_size` that only includes the size of the used pages. - pub database_size: u64, - /// Size taken by the used pages of the index' DB, in bytes. 
- /// - /// As the DB backend does not return to the disk the pages that are not currently used by the DB, - /// this value is typically smaller than `database_size`. - pub used_database_size: u64, - /// Association of every field name with the number of times it occurs in the documents. - pub field_distribution: FieldDistribution, - /// Creation date of the index. - pub created_at: LegacyTime, - /// Date of the last update of the index. - pub updated_at: LegacyTime, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct IndexEmbeddingConfig { - pub name: String, - pub config: EmbeddingConfig, -} - -#[derive(Debug, Clone, Default, serde::Deserialize, serde::Serialize)] -pub struct EmbeddingConfig { - /// Options of the embedder, specific to each kind of embedder - pub embedder_options: EmbedderOptions, -} - -/// Options of an embedder, specific to each kind of embedder. -#[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] -pub enum EmbedderOptions { - HuggingFace(hf::EmbedderOptions), - OpenAi(openai::EmbedderOptions), - Ollama(ollama::EmbedderOptions), - UserProvided(manual::EmbedderOptions), - Rest(rest::EmbedderOptions), -} - -impl Default for EmbedderOptions { - fn default() -> Self { - Self::OpenAi(openai::EmbedderOptions { api_key: None, dimensions: None }) - } -} - -mod hf { - #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] - pub struct EmbedderOptions { - pub model: String, - pub revision: Option, - } -} -mod openai { - - #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] - pub struct EmbedderOptions { - pub api_key: Option, - pub dimensions: Option, - } -} -mod ollama { - #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, serde::Serialize)] - pub struct EmbedderOptions { - pub embedding_model: String, - pub url: Option, - pub api_key: Option, - } -} -mod manual { - #[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Deserialize, 
serde::Serialize)] - pub struct EmbedderOptions { - pub dimensions: usize, - } -} -mod rest { - #[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize, serde::Serialize, Hash)] - pub struct EmbedderOptions { - pub api_key: Option, - pub dimensions: Option, - pub url: String, - pub input_field: Vec, - // path to the array of embeddings - pub path_to_embeddings: Vec, - // shape of a single embedding - pub embedding_object: Vec, - } -} - -// 2024-11-04 13:32:08.48368 +00:00:00 -time::serde::format_description!(legacy_datetime, OffsetDateTime, "[year]-[month]-[day] [hour]:[minute]:[second].[subsecond] [offset_hour sign:mandatory]:[offset_minute]:[offset_second]"); - -#[derive(Debug, serde::Serialize, serde::Deserialize)] -#[serde(transparent)] -pub struct LegacyTime(#[serde(with = "legacy_datetime")] pub OffsetDateTime); From 48ab898ca2d8cd125458aac1ea500ecf324b7bc8 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 5 Nov 2024 10:30:53 +0100 Subject: [PATCH 103/111] fix the datetime of v1.9 --- crates/meilitool/Cargo.toml | 2 +- crates/meilitool/src/upgrade/v1_10.rs | 6 +-- crates/meilitool/src/upgrade/v1_9.rs | 70 +++++++++++++++++++++++---- 3 files changed, 65 insertions(+), 13 deletions(-) diff --git a/crates/meilitool/Cargo.toml b/crates/meilitool/Cargo.toml index f2c8920c9..353d44e9a 100644 --- a/crates/meilitool/Cargo.toml +++ b/crates/meilitool/Cargo.toml @@ -16,6 +16,6 @@ file-store = { path = "../file-store" } meilisearch-auth = { path = "../meilisearch-auth" } meilisearch-types = { path = "../meilisearch-types" } serde = { version = "1.0.209", features = ["derive"] } -time = { version = "0.3.36", features = ["formatting"] } +time = { version = "0.3.36", features = ["formatting", "parsing", "alloc"] } uuid = { version = "1.10.0", features = ["v4"], default-features = false } arroy_v04_to_v05 = { package = "arroy", git = "https://github.com/meilisearch/arroy/", rev = "053807bf38dc079f25b003f19fc30fbf3613f6e7" } diff --git a/crates/meilitool/src/upgrade/v1_10.rs 
b/crates/meilitool/src/upgrade/v1_10.rs index 671f4d6d2..3dd7c72a2 100644 --- a/crates/meilitool/src/upgrade/v1_10.rs +++ b/crates/meilitool/src/upgrade/v1_10.rs @@ -146,9 +146,9 @@ fn date_round_trip( key: &str, ) -> anyhow::Result<()> { let datetime = - db.remap_types::>().get(wtxn, key).with_context(|| { - format!("could not read `{key}` while updating date format for index `{index_uid}`") - })?; + db.remap_types::>().get(wtxn, key).with_context( + || format!("could not read `{key}` while updating date format for index `{index_uid}`"), + )?; if let Some(datetime) = datetime { db.remap_types::>() diff --git a/crates/meilitool/src/upgrade/v1_9.rs b/crates/meilitool/src/upgrade/v1_9.rs index 3e6cfde6c..96cbfe68c 100644 --- a/crates/meilitool/src/upgrade/v1_9.rs +++ b/crates/meilitool/src/upgrade/v1_9.rs @@ -1,10 +1,10 @@ use serde::{Deserialize, Serialize}; -use time::OffsetDateTime; +use time::{Date, OffsetDateTime, Time, UtcOffset}; pub type FieldDistribution = std::collections::BTreeMap; /// The statistics that can be computed from an `Index` object. -#[derive(serde::Serialize, serde::Deserialize, Debug)] +#[derive(serde::Deserialize, Debug)] pub struct IndexStats { /// Number of documents in the index. pub number_of_documents: u64, @@ -22,9 +22,9 @@ pub struct IndexStats { /// Association of every field name with the number of times it occurs in the documents. pub field_distribution: FieldDistribution, /// Creation date of the index. - pub created_at: LegacyTime, + pub created_at: LegacyDateTime, /// Date of the last update of the index. 
- pub updated_at: LegacyTime, + pub updated_at: LegacyDateTime, } #[derive(Debug, Deserialize, Serialize)] @@ -98,9 +98,61 @@ mod rest { } } -// 2024-11-04 13:32:08.48368 +00:00:00 -time::serde::format_description!(legacy_datetime, OffsetDateTime, "[year]-[month]-[day] [hour]:[minute]:[second].[subsecond] [offset_hour sign:mandatory]:[offset_minute]:[offset_second]"); +/// A datetime from Meilisearch v1.9 with an unspecified format. +#[derive(Debug)] +pub struct LegacyDateTime(pub OffsetDateTime); -#[derive(Debug, serde::Serialize, serde::Deserialize)] -#[serde(transparent)] -pub struct LegacyTime(#[serde(with = "legacy_datetime")] pub OffsetDateTime); +impl<'de> Deserialize<'de> for LegacyDateTime { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct Visitor; + impl<'de> serde::de::Visitor<'de> for Visitor { + type Value = OffsetDateTime; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "a valid datetime") + } + + // Comes from a binary. The legacy format is: + // 2024-11-04 13:32:08.48368 +00:00:00 + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + let format = time::macros::format_description!("[year]-[month]-[day] [hour]:[minute]:[second].[subsecond] [offset_hour sign:mandatory]:[offset_minute]:[offset_second]"); + OffsetDateTime::parse(v, format).map_err(E::custom) + } + + // Comes from the docker image, the legacy format is: + // [2024, 309, 17, 15, 1, 698184971, 0,0,0] + // year, day in year, hour, minute, sec, subsec , offset stuff + fn visit_seq
(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + let mut vec = Vec::new(); + // We must deserialize the value as `i64` because the largest values are `u32` and `i32` + while let Some(el) = seq.next_element::()? { + vec.push(el); + } + if vec.len() != 9 { + return Err(serde::de::Error::custom(format!( + "Invalid datetime, received an array of {} elements instead of 9", + vec.len() + ))); + } + Ok(OffsetDateTime::new_in_offset( + Date::from_ordinal_date(vec[0] as i32, vec[1] as u16) + .map_err(serde::de::Error::custom)?, + Time::from_hms_nano(vec[2] as u8, vec[3] as u8, vec[4] as u8, vec[5] as u32) + .map_err(serde::de::Error::custom)?, + UtcOffset::from_hms(vec[6] as i8, vec[7] as i8, vec[8] as i8) + .map_err(serde::de::Error::custom)?, + )) + } + } + deserializer.deserialize_any(Visitor).map(LegacyDateTime) + } +} From 9799812b27b0fee47b969a1e3bdba771f29b93bc Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 5 Nov 2024 15:08:01 +0100 Subject: [PATCH 104/111] fix the benchmarks --- .github/workflows/benchmarks-manual.yml | 2 +- .github/workflows/benchmarks-pr.yml | 2 +- .github/workflows/benchmarks-push-indexing.yml | 2 +- .github/workflows/benchmarks-push-search-songs.yml | 2 +- .github/workflows/benchmarks-push-search-wiki.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/benchmarks-manual.yml b/.github/workflows/benchmarks-manual.yml index da33bf803..14b77c83d 100644 --- a/.github/workflows/benchmarks-manual.yml +++ b/.github/workflows/benchmarks-manual.yml @@ -43,7 +43,7 @@ jobs: # Run benchmarks - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }} run: | - cd benchmarks + cd crates/benchmarks cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }} # Generate critcmp files diff --git a/.github/workflows/benchmarks-pr.yml b/.github/workflows/benchmarks-pr.yml index 
f9d609d6e..a083baa3c 100644 --- a/.github/workflows/benchmarks-pr.yml +++ b/.github/workflows/benchmarks-pr.yml @@ -88,7 +88,7 @@ jobs: # Run benchmarks - name: Run benchmarks - Dataset ${{ steps.command.outputs.command-arguments }} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }} run: | - cd benchmarks + cd crates/benchmarks cargo bench --bench ${{ steps.command.outputs.command-arguments }} -- --save-baseline ${{ steps.file.outputs.basename }} # Generate critcmp files diff --git a/.github/workflows/benchmarks-push-indexing.yml b/.github/workflows/benchmarks-push-indexing.yml index 1fdd5fd67..4495b4b9d 100644 --- a/.github/workflows/benchmarks-push-indexing.yml +++ b/.github/workflows/benchmarks-push-indexing.yml @@ -41,7 +41,7 @@ jobs: # Run benchmarks - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }} run: | - cd benchmarks + cd crates/benchmarks cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }} # Generate critcmp files diff --git a/.github/workflows/benchmarks-push-search-songs.yml b/.github/workflows/benchmarks-push-search-songs.yml index b6169ddf7..e9744a434 100644 --- a/.github/workflows/benchmarks-push-search-songs.yml +++ b/.github/workflows/benchmarks-push-search-songs.yml @@ -40,7 +40,7 @@ jobs: # Run benchmarks - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }} run: | - cd benchmarks + cd crates/benchmarks cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }} # Generate critcmp files diff --git a/.github/workflows/benchmarks-push-search-wiki.yml b/.github/workflows/benchmarks-push-search-wiki.yml index dd3146a14..bc9e1bcd0 100644 --- a/.github/workflows/benchmarks-push-search-wiki.yml +++ b/.github/workflows/benchmarks-push-search-wiki.yml @@ -40,7 +40,7 
@@ jobs: # Run benchmarks - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }} run: | - cd benchmarks + cd crates/benchmarks cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }} # Generate critcmp files From f193c3a67c5d0a39d94e8437ef683aaa27b0e377 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 5 Nov 2024 15:13:32 +0100 Subject: [PATCH 105/111] Update crates/meilitool/src/main.rs Co-authored-by: Louis Dureuil --- crates/meilitool/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/meilitool/src/main.rs b/crates/meilitool/src/main.rs index ef137f746..978824356 100644 --- a/crates/meilitool/src/main.rs +++ b/crates/meilitool/src/main.rs @@ -73,7 +73,7 @@ enum Command { /// /// Supported upgrade paths: /// - /// - v1.9.0 -> v1.10.0 -> v1.11.0 + /// - v1.9.x -> v1.10.x -> v1.11.x OfflineUpgrade { #[arg(long)] target_version: String, From 66b7e0824efd310b335be45b12f461695f99e1b4 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 5 Nov 2024 15:13:40 +0100 Subject: [PATCH 106/111] Update crates/meilitool/src/upgrade/mod.rs Co-authored-by: Louis Dureuil --- crates/meilitool/src/upgrade/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/meilitool/src/upgrade/mod.rs b/crates/meilitool/src/upgrade/mod.rs index ae095b6bd..0fd903ffe 100644 --- a/crates/meilitool/src/upgrade/mod.rs +++ b/crates/meilitool/src/upgrade/mod.rs @@ -34,7 +34,7 @@ impl OfflineUpgrade { ("1", "9", _) => 0, ("1", "10", _) => 1, _ => { - bail!("Unsupported current version {current_major}.{current_minor}.{current_patch}. Can only upgrade from v1.9") + bail!("Unsupported current version {current_major}.{current_minor}.{current_patch}. 
Can only upgrade from v1.9 and v1.10") } }; From e4993aa705a8e8a3a870a4616c845bfd143fd5f9 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 5 Nov 2024 15:13:50 +0100 Subject: [PATCH 107/111] Update crates/meilitool/src/upgrade/mod.rs Co-authored-by: Louis Dureuil --- crates/meilitool/src/upgrade/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/meilitool/src/upgrade/mod.rs b/crates/meilitool/src/upgrade/mod.rs index 0fd903ffe..36630c3b3 100644 --- a/crates/meilitool/src/upgrade/mod.rs +++ b/crates/meilitool/src/upgrade/mod.rs @@ -47,7 +47,7 @@ impl OfflineUpgrade { bail!("Target version must not starts with a `v`. Instead of writing `v1.9.0` write `1.9.0` for example.") } _ => { - bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. Can only upgrade to v1.11") + bail!("Unsupported target version {target_major}.{target_minor}.{target_patch}. Can only upgrade to v1.10 and v1.11") } }; From 0f74a933467b0e372898975fa18a69cc3d1dd5b9 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 5 Nov 2024 15:14:02 +0100 Subject: [PATCH 108/111] Update crates/meilitool/src/upgrade/v1_11.rs Co-authored-by: Louis Dureuil --- crates/meilitool/src/upgrade/v1_11.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/meilitool/src/upgrade/v1_11.rs b/crates/meilitool/src/upgrade/v1_11.rs index 4105879fd..de852f3dc 100644 --- a/crates/meilitool/src/upgrade/v1_11.rs +++ b/crates/meilitool/src/upgrade/v1_11.rs @@ -34,7 +34,6 @@ pub fn v1_10_to_v1_11(db_path: &Path) -> anyhow::Result<()> { .map(|res| res.map(|(uid, uuid)| (uid.to_owned(), uuid))) .collect(); - // check that update can take place for (index_index, result) in indexes.into_iter().enumerate() { let (uid, uuid) = result?; let index_path = db_path.join("indexes").join(uuid.to_string()); From a5d138ac34448c7fc2410dee1e16ebca91b1a248 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 5 Nov 2024 15:23:27 +0100 Subject: [PATCH 109/111] use a tag while importing arroy instead of a loose 
branch or rev --- Cargo.lock | 4 ++-- crates/meilitool/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 04812fd1b..cef8e9c8a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -407,7 +407,7 @@ dependencies = [ [[package]] name = "arroy" version = "0.5.0" -source = "git+https://github.com/meilisearch/arroy/?rev=053807bf38dc079f25b003f19fc30fbf3613f6e7#053807bf38dc079f25b003f19fc30fbf3613f6e7" +source = "git+https://github.com/meilisearch/arroy/?tag=DO-NOT-DELETE-upgrade-v04-to-v05#053807bf38dc079f25b003f19fc30fbf3613f6e7" dependencies = [ "bytemuck", "byteorder", @@ -3536,7 +3536,7 @@ name = "meilitool" version = "1.11.0" dependencies = [ "anyhow", - "arroy 0.5.0 (git+https://github.com/meilisearch/arroy/?rev=053807bf38dc079f25b003f19fc30fbf3613f6e7)", + "arroy 0.5.0 (git+https://github.com/meilisearch/arroy/?tag=DO-NOT-DELETE-upgrade-v04-to-v05)", "clap", "dump", "file-store", diff --git a/crates/meilitool/Cargo.toml b/crates/meilitool/Cargo.toml index 353d44e9a..048da6232 100644 --- a/crates/meilitool/Cargo.toml +++ b/crates/meilitool/Cargo.toml @@ -18,4 +18,4 @@ meilisearch-types = { path = "../meilisearch-types" } serde = { version = "1.0.209", features = ["derive"] } time = { version = "0.3.36", features = ["formatting", "parsing", "alloc"] } uuid = { version = "1.10.0", features = ["v4"], default-features = false } -arroy_v04_to_v05 = { package = "arroy", git = "https://github.com/meilisearch/arroy/", rev = "053807bf38dc079f25b003f19fc30fbf3613f6e7" } +arroy_v04_to_v05 = { package = "arroy", git = "https://github.com/meilisearch/arroy/", tag = "DO-NOT-DELETE-upgrade-v04-to-v05" } From 7415ef7ff5498bdc93ef835713f865df80c4b144 Mon Sep 17 00:00:00 2001 From: Tamo Date: Tue, 5 Nov 2024 15:37:59 +0100 Subject: [PATCH 110/111] Update crates/meilitool/src/upgrade/v1_11.rs Co-authored-by: Louis Dureuil --- crates/meilitool/src/upgrade/v1_11.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/crates/meilitool/src/upgrade/v1_11.rs b/crates/meilitool/src/upgrade/v1_11.rs index de852f3dc..0c84d3842 100644 --- a/crates/meilitool/src/upgrade/v1_11.rs +++ b/crates/meilitool/src/upgrade/v1_11.rs @@ -39,7 +39,7 @@ pub fn v1_10_to_v1_11(db_path: &Path) -> anyhow::Result<()> { let index_path = db_path.join("indexes").join(uuid.to_string()); println!( - "[{}/{index_count}]Checking that update can take place for `{uid}` at `{}`", + "[{}/{index_count}]Updating embeddings for `{uid}` at `{}`", index_index + 1, index_path.display() ); From 2e4d4b398d574f42f1085107c8ec3becbd5b2df1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 12:57:04 +0000 Subject: [PATCH 111/111] Bump Swatinem/rust-cache from 2.7.1 to 2.7.5 Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.7.1 to 2.7.5. - [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/v2.7.1...v2.7.5) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/test-suite.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 90fb03538..e142b15b6 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -33,7 +33,7 @@ jobs: - name: Setup test with Rust stable uses: dtolnay/rust-toolchain@1.79 - name: Cache dependencies - uses: Swatinem/rust-cache@v2.7.1 + uses: Swatinem/rust-cache@v2.7.5 - name: Run cargo check without any default features uses: actions-rs/cargo@v1 with: @@ -55,7 +55,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Cache dependencies - uses: Swatinem/rust-cache@v2.7.1 + uses: Swatinem/rust-cache@v2.7.5 - uses: dtolnay/rust-toolchain@1.79 - name: Run cargo check without any default features uses: actions-rs/cargo@v1 @@ -127,7 +127,7 @@ jobs: apt-get install build-essential -y - uses: dtolnay/rust-toolchain@1.79 - name: Cache dependencies - uses: Swatinem/rust-cache@v2.7.1 + uses: Swatinem/rust-cache@v2.7.5 - name: Run tests in debug uses: actions-rs/cargo@v1 with: @@ -144,7 +144,7 @@ jobs: profile: minimal components: clippy - name: Cache dependencies - uses: Swatinem/rust-cache@v2.7.1 + uses: Swatinem/rust-cache@v2.7.5 - name: Run cargo clippy uses: actions-rs/cargo@v1 with: @@ -163,7 +163,7 @@ jobs: override: true components: rustfmt - name: Cache dependencies - uses: Swatinem/rust-cache@v2.7.1 + uses: Swatinem/rust-cache@v2.7.5 - name: Run cargo fmt # Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file. # Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate