Make clippy happy

Clément Renault 2025-01-08 16:16:21 +01:00
parent 0ee4671a91
commit 71e5605daa
No known key found for this signature in database
GPG Key ID: F250A4C4E3AE5F5F
19 changed files with 55 additions and 59 deletions

View File

@@ -1,7 +1,7 @@
/*!
This crate defines the index scheduler, which is responsible for:
1. Keeping references to meilisearch's indexes and mapping them to their
-user-defined names.
+   user-defined names.
2. Scheduling tasks given by the user and executing them, in batch if possible.
When an `IndexScheduler` is created, a new thread containing a reference to the
@@ -513,7 +513,7 @@ impl IndexScheduler {
/// the user.
///
/// 1. IndexSwap tasks are not publicly associated with any index, but they are associated
-/// with many indexes internally.
+///    with many indexes internally.
/// 2. The user may not have the rights to access the tasks (internally) associated with all indexes.
pub fn get_tasks_from_authorized_indexes(
&self,
@@ -532,7 +532,7 @@ impl IndexScheduler {
/// the user.
///
/// 1. IndexSwap tasks are not publicly associated with any index, but they are associated
-/// with many indexes internally.
+///    with many indexes internally.
/// 2. The user may not have the rights to access the tasks (internally) associated with all indexes.
pub fn get_task_ids_from_authorized_indexes(
&self,
@@ -551,7 +551,7 @@ impl IndexScheduler {
/// the user.
///
/// 1. IndexSwap tasks are not publicly associated with any index, but they are associated
-/// with many indexes internally.
+///    with many indexes internally.
/// 2. The user may not have the rights to access the tasks (internally) associated with all indexes.
pub fn get_batches_from_authorized_indexes(
&self,
@@ -570,7 +570,7 @@ impl IndexScheduler {
/// the user.
///
/// 1. IndexSwap tasks are not publicly associated with any index, but they are associated
-/// with many indexes internally.
+///    with many indexes internally.
/// 2. The user may not have the rights to access the tasks (internally) associated with all indexes.
pub fn get_batch_ids_from_authorized_indexes(
&self,
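The hunks in this file all touch the same kind of line: a doc-comment list item wraps onto a second line, and the re-indented `+` lines above move that continuation under the item's text, which is what clippy's doc-list lints ask for. A minimal sketch of the before/after shape, with a hypothetical `doc_list_example` item hosting the comment (the commit message only says "make clippy happy", so the exact lint name is an assumption):

    // Before (flagged): the wrapped line reads as a lazy continuation of item 1.
    //
    // /// 1. IndexSwap tasks are not publicly associated with any index, but they are associated
    // /// with many indexes internally.
    //
    // After: the wrapped line is indented to sit under the item's text.

    /// 1. IndexSwap tasks are not publicly associated with any index, but they are associated
    ///    with many indexes internally.
    /// 2. The user may not have the rights to access the tasks (internally) associated with all indexes.
    fn doc_list_example() {}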

View File

@@ -444,7 +444,7 @@ impl Queue {
/// the user.
///
/// 1. IndexSwap tasks are not publicly associated with any index, but they are associated
-/// with many indexes internally.
+///    with many indexes internally.
/// 2. The user may not have the rights to access the tasks (internally) associated with all indexes.
pub(crate) fn get_batch_ids_from_authorized_indexes(
&self,

View File

@@ -106,7 +106,7 @@ impl IndexScheduler {
progress.update_progress(DumpCreationProgress::DumpTheIndexes);
let nb_indexes = self.index_mapper.index_mapping.len(&rtxn)? as u32;
let mut count = 0;
-self.index_mapper.try_for_each_index(&rtxn, |uid, index| -> Result<()> {
+let () = self.index_mapper.try_for_each_index(&rtxn, |uid, index| -> Result<()> {
progress.update_progress(VariableNameStep::new(uid.to_string(), count, nb_indexes));
count += 1;
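The only change here wraps the call in `let () = ...`, which pins the expression to the unit type. A small self-contained sketch of the idea, using a hypothetical `visit_all` helper in place of `try_for_each_index`:

    use std::io;

    // Hypothetical stand-in for `try_for_each_index`: runs some work per index name.
    fn visit_all(names: &[&str]) -> io::Result<()> {
        for name in names {
            println!("visiting {name}");
        }
        Ok(())
    }

    fn run() -> io::Result<()> {
        // `let () = ...?` asserts the success value is `()`: if `visit_all` ever starts
        // returning something meaningful, this line stops compiling instead of
        // silently discarding the value.
        let () = visit_all(&["catto", "doggo"])?;
        Ok(())
    }

    fn main() {
        run().unwrap();
    }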

View File

@@ -426,13 +426,8 @@ impl Segment {
&AuthFilter::default(),
) {
// Replace the version number with the prototype name if any.
-let version = if let Some(prototype) = build_info::DescribeResult::from_build()
-    .and_then(|describe| describe.as_prototype())
-{
-    prototype
-} else {
-    env!("CARGO_PKG_VERSION")
-};
+let version = build_info::DescribeResult::from_build()
+    .and_then(|describe| describe.as_prototype()).unwrap_or(env!("CARGO_PKG_VERSION"));
let _ = self
.batcher
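This hunk replaces an `if let Some(..) { .. } else { .. }` over an `Option<&str>` with `unwrap_or`, the combinator form clippy's manual-unwrap-or style lints suggest; since `env!("CARGO_PKG_VERSION")` is a compile-time constant, the eager `unwrap_or` argument costs nothing. A reduced sketch with a hypothetical `version_label` helper:

    fn version_label(prototype: Option<&'static str>) -> &'static str {
        // Before:
        //     if let Some(prototype) = prototype { prototype } else { env!("CARGO_PKG_VERSION") }
        // After: the same fallback, expressed as a combinator.
        prototype.unwrap_or(env!("CARGO_PKG_VERSION"))
    }

    fn main() {
        assert_eq!(version_label(Some("prototype-v0.1.0")), "prototype-v0.1.0");
        println!("{}", version_label(None)); // falls back to the crate version
    }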

View File

@@ -188,13 +188,13 @@ impl tracing_actix_web::RootSpanBuilder for AwebTracingLogger {
if let Some(error) = response.response().error() {
// use the status code already constructed for the outgoing HTTP response
span.record("error", &tracing::field::display(error.as_response_error()));
span.record("error", tracing::field::display(error.as_response_error()));
}
}
Err(error) => {
let code: i32 = error.error_response().status().as_u16().into();
span.record("status_code", code);
span.record("error", &tracing::field::display(error.as_response_error()));
span.record("error", tracing::field::display(error.as_response_error()));
}
};
}
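Both edits in this file drop a `&` in front of `tracing::field::display(..)`: the value goes through a generic parameter, so the extra borrow only adds indirection, which is what the needless-borrow family of clippy lints flags. A generic illustration with a hypothetical `record_status` sink rather than the tracing API:

    use std::fmt::Display;

    // Hypothetical sink that takes any displayable value by value.
    fn record_status(value: impl Display) {
        println!("status_code={value}");
    }

    fn main() {
        let code: i32 = 500;
        // `record_status(&code)` compiles too (references forward `Display`), but the
        // borrow adds nothing for a by-value generic parameter, so clippy asks for
        // the plain value.
        record_status(code);
    }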

View File

@@ -545,5 +545,5 @@ pub async fn get_health(
index_scheduler.health().unwrap();
auth_controller.health().unwrap();
-Ok(HttpResponse::Ok().json(&HealthResponse::default()))
+Ok(HttpResponse::Ok().json(HealthResponse::default()))
}

View File

@@ -73,8 +73,8 @@ async fn get_and_paginate_indexes() {
let server = Server::new().await;
const NB_INDEXES: usize = 50;
for i in 0..NB_INDEXES {
-server.index(&format!("test_{i:02}")).create(None).await;
-server.index(&format!("test_{i:02}")).wait_task(i as u64).await;
+server.index(format!("test_{i:02}")).create(None).await;
+server.index(format!("test_{i:02}")).wait_task(i as u64).await;
}
// basic

View File

@@ -132,12 +132,12 @@ impl<'t, 'b, 'bitmap> FacetRangeSearch<'t, 'b, 'bitmap> {
///
/// 1. So long as the element's range is less than the left bound, we do nothing and keep iterating
/// 2. If the element's range is fully contained by the bounds, then all of its docids are added to
-/// the roaring bitmap.
+///    the roaring bitmap.
/// 3. If the element's range merely intersects the bounds, then we call the algorithm recursively
-/// on the children of the element from the level below.
+///    on the children of the element from the level below.
/// 4. If the element's range is greater than the right bound, we do nothing and stop iterating.
-/// Note that the right bound is found through either the `left_bound` of the *next* element,
-/// or from the `rightmost_bound` argument
+///    Note that the right bound is found through either the `left_bound` of the *next* element,
+///    or from the `rightmost_bound` argument
///
/// ## Arguments
/// - `level`: the level being visited
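For readers skimming this hunk, the surrounding doc comment describes how each element of a facet level is classified against the requested bounds. A stand-alone sketch of just that classification over plain integer ranges (the real code walks heed iterators and roaring bitmaps; `Outcome` and `classify` are illustrative names, not milli's):

    /// Simplified outcome of comparing one element's range against the query bounds.
    enum Outcome {
        Skip,    // 1. entirely below the left bound: keep iterating
        TakeAll, // 2. fully contained: add all of the element's docids
        Recurse, // 3. partial overlap: descend into the level below
        Stop,    // 4. entirely above the right bound: stop iterating
    }

    fn classify(elem: (u32, u32), left: u32, right: u32) -> Outcome {
        let (lo, hi) = elem;
        if hi < left {
            Outcome::Skip
        } else if left <= lo && hi <= right {
            Outcome::TakeAll
        } else if lo > right {
            Outcome::Stop
        } else {
            Outcome::Recurse
        }
    }

    fn main() {
        // Query bounds [10, 20]; an element covering [12, 15] is fully contained.
        assert!(matches!(classify((12, 15), 10, 20), Outcome::TakeAll));
        // An element starting past the right bound ends the iteration.
        assert!(matches!(classify((25, 30), 10, 20), Outcome::Stop));
    }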

View File

@@ -18,10 +18,10 @@ pub struct DistinctOutput {
/// Return a [`DistinctOutput`] containing:
/// - `remaining`: a set of docids built such that exactly one element from `candidates`
-/// is kept for each distinct value inside the given field. If the field does not exist, it
-/// is considered unique.
+///   is kept for each distinct value inside the given field. If the field does not exist, it
+///   is considered unique.
/// - `excluded`: the set of document ids that contain a value for the given field that occurs
-/// in the given candidates.
+///   in the given candidates.
pub fn apply_distinct_rule(
ctx: &mut SearchContext<'_>,
field_id: u16,
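The doc comment above defines the remaining/excluded split returned by `apply_distinct_rule`. A toy version over in-memory candidates, using strings for facet values; note that in the real function `excluded` also covers documents outside `candidates` that share those values, which this sketch does not model:

    use std::collections::HashSet;

    /// Keep one candidate per distinct field value; candidates without a value
    /// for the field are always kept ("considered unique").
    fn apply_distinct(candidates: &[(u32, Option<&str>)]) -> (Vec<u32>, Vec<u32>) {
        let mut seen = HashSet::new();
        let (mut remaining, mut excluded) = (Vec::new(), Vec::new());
        for &(docid, value) in candidates {
            match value {
                None => remaining.push(docid),
                Some(v) if seen.insert(v) => remaining.push(docid),
                Some(_) => excluded.push(docid),
            }
        }
        (remaining, excluded)
    }

    fn main() {
        let (remaining, excluded) =
            apply_distinct(&[(0, Some("red")), (1, Some("red")), (2, None), (3, Some("blue"))]);
        assert_eq!(remaining, vec![0, 2, 3]);
        assert_eq!(excluded, vec![1]);
    }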

View File

@@ -149,7 +149,7 @@ pub type WordId = u16;
/// A given token can partially match a query word for several reasons:
/// - split words
/// - multi-word synonyms
-/// In these cases we need to match consecutively several tokens to consider that the match is full.
+///   In these cases we need to match consecutively several tokens to consider that the match is full.
#[derive(Debug, PartialEq)]
pub enum MatchType<'a> {
Full { char_count: usize, byte_len: usize, ids: &'a RangeInclusive<WordId> },

View File

@@ -21,9 +21,9 @@ use crate::Result;
/// 1. `Start` : unique, represents the start of the query
/// 2. `End` : unique, represents the end of a query
/// 3. `Deleted` : represents a node that was deleted.
-/// All deleted nodes are unreachable from the start node.
+///    All deleted nodes are unreachable from the start node.
/// 4. `Term` is a regular node representing a word or combination of words
-/// from the user query.
+///    from the user query.
#[derive(Clone)]
pub struct QueryNode {
pub data: QueryNodeData,

View File

@@ -8,7 +8,7 @@ with them, they are "unconditional". These kinds of edges are used to "skip" a n
The algorithm uses a depth-first search. It benefits from two main optimisations:
- The list of all possible costs to go from any node to the END node is precomputed
- The `DeadEndsCache` reduces the number of valid paths drastically, by making some edges
-untraversable depending on what other edges were selected.
+  untraversable depending on what other edges were selected.
These two optimisations are meant to avoid traversing edges that wouldn't lead
to a valid path. In practically all cases, we avoid the exponential complexity
@@ -24,6 +24,7 @@ For example, the DeadEndsCache could say the following:
- if we take `g`, then `[f]` is also forbidden
- etc.
- etc.
As we traverse the graph, we also traverse the `DeadEndsCache` and keep a list of forbidden
conditions in memory. Then, we know to avoid all edges which have a condition that is forbidden.
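As background for this hunk: the module doc describes carrying a set of forbidden conditions while walking paths, and growing it whenever a taken edge implies more exclusions (the real `DeadEndsCache` tracks implications that depend on the whole set of edges taken so far). A toy, single-level model of that bookkeeping with made-up edge names:

    use std::collections::{HashMap, HashSet};

    /// Toy cache: taking an edge forbids the listed edges for the rest of the path.
    struct DeadEnds {
        implied: HashMap<char, Vec<char>>,
    }

    impl DeadEnds {
        /// Record that `taken` was traversed, growing the forbidden set accordingly.
        fn take(&self, taken: char, forbidden: &mut HashSet<char>) {
            if let Some(edges) = self.implied.get(&taken) {
                forbidden.extend(edges.iter().copied());
            }
        }
    }

    fn main() {
        // "if we take `g`, then `[f]` is also forbidden"
        let cache = DeadEnds { implied: HashMap::from([('g', vec!['f'])]) };
        let mut forbidden = HashSet::new();
        cache.take('g', &mut forbidden);
        assert!(forbidden.contains(&'f'));
    }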

View File

@@ -58,7 +58,7 @@ pub struct ComputedCondition {
/// 2. The cost of traversing this edge
/// 3. The condition associated with it
/// 4. The list of nodes that have to be skipped
-/// if this edge is traversed.
+///    if this edge is traversed.
#[derive(Clone)]
pub struct Edge<E> {
pub source_node: Interned<QueryNode>,

View File

@@ -14,7 +14,7 @@ This module tests the following properties about the exactness ranking rule:
3. those that contain the most exact words from the remaining query
- if it is followed by other graph-based ranking rules (`typo`, `proximity`, `attribute`).
-Then these rules will only work with
+  Then these rules will only work with
1. the exact terms selected by `exactness`
2. the full query term otherwise
*/

View File

@@ -4,15 +4,14 @@ This module tests the Proximity ranking rule:
1. A proximity of >7 always has the same cost.
2. Phrase terms can be in proximity to other terms via their start and end words,
-but we need to make sure that the phrase exists in the document that meets this
-proximity condition. This is especially relevant with split words and synonyms.
+   but we need to make sure that the phrase exists in the document that meets this
+   proximity condition. This is especially relevant with split words and synonyms.
3. An ngram has the same proximity cost as its component words being consecutive.
-e.g. `sunflower` equivalent to `sun flower`.
+   e.g. `sunflower` equivalent to `sun flower`.
4. The prefix databases can be used to find the proximity between two words, but
-they store fewer proximities than the regular word proximity DB.
+   they store fewer proximities than the regular word proximity DB.
*/
use std::collections::BTreeMap;

View File

@@ -11,7 +11,7 @@ This module tests the following properties:
8. 2grams can have 1 typo if they are larger than `min_word_len_two_typos`
9. 3grams are not typo tolerant (but they can be split into two words)
10. The `typo` ranking rule assumes the role of the `words` ranking rule implicitly
-if `words` doesn't exist before it.
+    if `words` doesn't exist before it.
11. The `typo` ranking rule places documents with the same number of typos in the same bucket
12. Prefix tolerance costs nothing according to the typo ranking rule
13. Split words cost 1 typo according to the typo ranking rule

View File

@@ -2,11 +2,11 @@
This module tests the following properties:
1. The `last` term matching strategy starts removing terms from the query
-starting from the end if no more results match it.
+   starting from the end if no more results match it.
2. Phrases are never deleted by the `last` term matching strategy
3. Duplicate words don't affect the ranking of a document according to the `words` ranking rule
4. The proximity of the first and last word of a phrase to its adjacent terms is taken into
-account by the proximity ranking rule.
+   account by the proximity ranking rule.
5. Unclosed double quotes still make a phrase
6. The `all` term matching strategy does not remove any term from the query
7. The search is capable of returning no results if no documents match the query

View File

@@ -21,29 +21,30 @@ use crate::{CboRoaringBitmapCodec, Index, Result};
/// Enum used as a return value for the facet incremental indexing.
///
/// - `ModificationResult::InPlace` means that modifying the `facet_value` into the `level` did not have
-/// an effect on the number of keys in that level. Therefore, it did not increase the number of children
-/// of the parent node.
+///   an effect on the number of keys in that level. Therefore, it did not increase the number of children
+///   of the parent node.
///
/// - `ModificationResult::Insert` means that modifying the `facet_value` into the `level` resulted
-/// in the addition of a new key in that level, and that therefore the number of children
-/// of the parent node should be incremented.
+///   in the addition of a new key in that level, and that therefore the number of children
+///   of the parent node should be incremented.
///
/// - `ModificationResult::Remove` means that modifying the `facet_value` into the `level` resulted in a change in the
-/// number of keys in the level. For example, removing a document id from the facet value `3` could
-/// cause it to have no corresponding document in level 0 anymore, and therefore the key was deleted
-/// entirely. In that case, `ModificationResult::Remove` is returned. The parent of the deleted key must
-/// then adjust its group size. If its group size falls to 0, then it will need to be deleted as well.
+///   number of keys in the level. For example, removing a document id from the facet value `3` could
+///   cause it to have no corresponding document in level 0 anymore, and therefore the key was deleted
+///   entirely. In that case, `ModificationResult::Remove` is returned. The parent of the deleted key must
+///   then adjust its group size. If its group size falls to 0, then it will need to be deleted as well.
///
/// - `ModificationResult::Reduce/Expand` means that modifying the `facet_value` into the `level` resulted in a change in the
-/// bounds of the keys of the level. For example, removing a document id from the facet value
-/// `3` might have caused the facet value `3` to have no corresponding document in level 0. Therefore,
-/// in level 1, the key with the left bound `3` had to be changed to the next facet value (e.g. 4).
-/// In that case `ModificationResult::Reduce` is returned. The parent of the reduced key may need to adjust
-/// its left bound as well.
+///   bounds of the keys of the level. For example, removing a document id from the facet value
+///   `3` might have caused the facet value `3` to have no corresponding document in level 0. Therefore,
+///   in level 1, the key with the left bound `3` had to be changed to the next facet value (e.g. 4).
+///   In that case `ModificationResult::Reduce` is returned. The parent of the reduced key may need to adjust
+///   its left bound as well.
///
/// - `ModificationResult::Nothing` means that modifying the `facet_value` didn't have any impact on the `level`.
-/// This case is reachable when a document id is removed from a sub-level node but is still present in another one.
-/// For example, removing `2` from a document containing `2` and `3`, the document id will be removed from the `level 0` but should remain in the group node [1..4] in `level 1`.
+///   This case is reachable when a document id is removed from a sub-level node but is still present in another one.
+///   For example, removing `2` from a document containing `2` and `3`, the document id will be removed from the `level 0`
+///   but should remain in the group node [1..4] in `level 1`.
enum ModificationResult {
InPlace,
Expand,

View File

@@ -29,9 +29,9 @@ use std::cell::RefCell;
/// - An example of a type that verifies (1) and (2) is [`std::rc::Rc`] (when `T` is `Send` and `Sync`).
/// - An example of a type that doesn't verify (1) is thread-local data.
/// - An example of a type that doesn't verify (2) is [`std::sync::MutexGuard`]: a lot of mutex implementations require that
-/// a lock is returned to the operating system on the same thread that initially locked the mutex, failing to uphold this
-/// invariant will cause Undefined Behavior
-/// (see last § in [the nomicon](https://doc.rust-lang.org/nomicon/send-and-sync.html)).
+///   a lock is returned to the operating system on the same thread that initially locked the mutex, failing to uphold this
+///   invariant will cause Undefined Behavior
+///   (see last § in [the nomicon](https://doc.rust-lang.org/nomicon/send-and-sync.html)).
///
/// It is **always safe** to implement this trait on a type that is `Send`, but no placeholder impl is provided due to limitations in
/// coherency. Use the [`FullySend`] wrapper in this situation.
@@ -86,7 +86,7 @@ impl<T: MostlySend> MostlySendWrapper<T> {
/// # Safety
///
/// 1. `T` is [`MostlySend`], so by its safety contract it can be accessed by any thread and all of its operations are available
-/// from any thread.
+///    from any thread.
/// 2. (P1) of `MostlySendWrapper::new` forces the user to never access the value from multiple threads concurrently.
unsafe impl<T: MostlySend> Send for MostlySendWrapper<T> {}
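As a companion to the two safety conditions spelled out above, here is a stripped-down sketch of the wrapper pattern this file documents, with simplified types and none of milli's machinery; the real `MostlySend` and `MostlySendWrapper` carry more invariants than this:

    /// Safety contract (simplified): the value may be accessed from any thread,
    /// but never from two threads at the same time.
    unsafe trait MostlySend {}

    // `i32` is already `Send`, so the (simplified) contract trivially holds.
    unsafe impl MostlySend for i32 {}

    struct MostlySendWrapper<T>(T);

    impl<T: MostlySend> MostlySendWrapper<T> {
        /// # Safety
        /// (P1) The caller must never access the wrapped value from multiple threads concurrently.
        unsafe fn new(value: T) -> Self {
            Self(value)
        }

        fn get(&self) -> &T {
            &self.0
        }
    }

    /// Sound because of the `MostlySend` contract (condition 1 above) combined
    /// with the (P1) precondition of `new` (condition 2 above).
    unsafe impl<T: MostlySend> Send for MostlySendWrapper<T> {}

    fn main() {
        // SAFETY: the value is moved into the spawned thread and only accessed there.
        let wrapped = unsafe { MostlySendWrapper::new(42_i32) };
        std::thread::spawn(move || println!("{}", wrapped.get())).join().unwrap();
    }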