mirror of https://github.com/meilisearch/meilisearch.git
Make clippy happy
This commit is contained in:
  parent 0ee4671a91
  commit 71e5605daa
@@ -1,7 +1,7 @@
 /*!
 This crate defines the index scheduler, which is responsible for:
 1. Keeping references to meilisearch's indexes and mapping them to their
-user-defined names.
+   user-defined names.
 2. Scheduling tasks given by the user and executing them, in batch if possible.
 
 When an `IndexScheduler` is created, a new thread containing a reference to the
@@ -106,7 +106,7 @@ impl IndexScheduler {
     progress.update_progress(DumpCreationProgress::DumpTheIndexes);
     let nb_indexes = self.index_mapper.index_mapping.len(&rtxn)? as u32;
     let mut count = 0;
-    self.index_mapper.try_for_each_index(&rtxn, |uid, index| -> Result<()> {
+    let () = self.index_mapper.try_for_each_index(&rtxn, |uid, index| -> Result<()> {
     progress.update_progress(VariableNameStep::new(uid.to_string(), count, nb_indexes));
     count += 1;
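The `let () = …` binding is a small, clippy-friendly idiom: destructuring the unit value makes the compiler verify that the expression really evaluates to `()`, so the call's result cannot be silently ignored if its return type changes later. A minimal sketch of the idiom, using a hypothetical helper rather than meilisearch code:

    // If `for_each_even` ever starts returning a value (e.g. a Result),
    // the `let () =` binding below becomes a compile error instead of a
    // silently discarded result.
    fn for_each_even(nums: &[i32], mut f: impl FnMut(i32)) {
        nums.iter().copied().filter(|n| n % 2 == 0).for_each(&mut f);
    }

    fn main() {
        let mut sum = 0;
        let () = for_each_even(&[1, 2, 3, 4], |n| sum += n);
        assert_eq!(sum, 6);
    }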
@@ -426,13 +426,8 @@ impl Segment {
     &AuthFilter::default(),
 ) {
     // Replace the version number with the prototype name if any.
-    let version = if let Some(prototype) = build_info::DescribeResult::from_build()
-        .and_then(|describe| describe.as_prototype())
-    {
-        prototype
-    } else {
-        env!("CARGO_PKG_VERSION")
-    };
+    let version = build_info::DescribeResult::from_build()
+        .and_then(|describe| describe.as_prototype())
+        .unwrap_or(env!("CARGO_PKG_VERSION"));
 
     let _ = self
         .batcher
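This Segment change collapses a manual `if let Some(x) … else default` into `Option::unwrap_or`, the rewrite clippy's `manual_unwrap_or` lint suggests (assuming that is the lint firing here). A self-contained sketch of the same transformation, with a hypothetical `version_label` standing in for the analytics code:

    fn version_label(prototype: Option<&'static str>) -> &'static str {
        // Before: if let Some(p) = prototype { p } else { env!("CARGO_PKG_VERSION") }
        prototype.unwrap_or(env!("CARGO_PKG_VERSION"))
    }

    fn main() {
        assert_eq!(version_label(Some("prototype-v1")), "prototype-v1");
        assert_eq!(version_label(None), env!("CARGO_PKG_VERSION"));
    }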
@@ -188,13 +188,13 @@ impl tracing_actix_web::RootSpanBuilder for AwebTracingLogger {
         if let Some(error) = response.response().error() {
             // use the status code already constructed for the outgoing HTTP response
-            span.record("error", &tracing::field::display(error.as_response_error()));
+            span.record("error", tracing::field::display(error.as_response_error()));
         }
     }
     Err(error) => {
         let code: i32 = error.error_response().status().as_u16().into();
         span.record("status_code", code);
-        span.record("error", &tracing::field::display(error.as_response_error()));
+        span.record("error", tracing::field::display(error.as_response_error()));
     }
 };
 }
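Both `span.record` fixes drop a borrow that is redundant because the value is handed to a generic parameter (`Span::record` is generic over `tracing::field::Value`), the pattern clippy's `needless_borrows_for_generic_args` lint targets, assuming that is the one firing here. A minimal illustration with `Display` in place of tracing's trait:

    use std::fmt::Display;

    // Generic over the trait, like `Span::record` is over `Value`:
    // the function can take the value itself, no `&` needed.
    fn record(value: impl Display) -> String {
        value.to_string()
    }

    fn main() {
        let status = 404;
        // `record(&status)` also compiles, but the borrow adds nothing.
        assert_eq!(record(status), "404");
    }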
@@ -545,5 +545,5 @@ pub async fn get_health(
     index_scheduler.health().unwrap();
     auth_controller.health().unwrap();
 
-    Ok(HttpResponse::Ok().json(&HealthResponse::default()))
+    Ok(HttpResponse::Ok().json(HealthResponse::default()))
 }
@@ -73,8 +73,8 @@ async fn get_and_paginate_indexes() {
     let server = Server::new().await;
     const NB_INDEXES: usize = 50;
     for i in 0..NB_INDEXES {
-        server.index(&format!("test_{i:02}")).create(None).await;
-        server.index(&format!("test_{i:02}")).wait_task(i as u64).await;
+        server.index(format!("test_{i:02}")).create(None).await;
+        server.index(format!("test_{i:02}")).wait_task(i as u64).await;
     }
 
     // basic
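The test fix is the same shape: `format!` already yields an owned `String`, and `Server::index` evidently accepts anything string-like, so the extra `&` only creates a borrow the callee has to see through. A hedged sketch of such an API surface (the `Index` type and `index` function here are hypothetical):

    struct Index {
        uid: String,
    }

    // Accepting `impl AsRef<str>` lets callers pass `String` or `&str` alike,
    // so `index(format!(...))` needs no leading `&`.
    fn index(uid: impl AsRef<str>) -> Index {
        Index { uid: uid.as_ref().to_owned() }
    }

    fn main() {
        for i in 0..3 {
            let idx = index(format!("test_{i:02}"));
            assert_eq!(idx.uid.len(), 7); // "test_00", "test_01", ...
        }
    }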
@@ -8,7 +8,7 @@ with them, they are "unconditional". These kinds of edges are used to "skip" a n
 The algorithm uses a depth-first search. It benefits from two main optimisations:
 - The list of all possible costs to go from any node to the END node is precomputed
 - The `DeadEndsCache` reduces the number of valid paths drastically, by making some edges
-untraversable depending on what other edges were selected.
+  untraversable depending on what other edges were selected.
 
 These two optimisations are meant to avoid traversing edges that wouldn't lead
 to a valid path. In practically all cases, we avoid the exponential complexity
@@ -24,6 +24,7 @@ For example, the DeadEndsCache could say the following:
 - if we take `g`, then `[f]` is also forbidden
-- etc.
+- etc.
+
 As we traverse the graph, we also traverse the `DeadEndsCache` and keep a list of forbidden
 conditions in memory. Then, we know to avoid all edges which have a condition that is forbidden.
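The two optimisations describe a pruned depth-first search: a precomputed cost table bounds what is reachable, and a growing set of forbidden conditions (the `DeadEndsCache`'s role) makes edges untraversable mid-path. The sketch below illustrates only the pruning shape with toy data structures, not milli's actual implementation:

    use std::collections::{HashMap, HashSet};

    // `dead_ends[c]` lists conditions that become forbidden once an edge with
    // condition `c` is taken, like "if we take `f`, then `[g, h]` is also
    // forbidden" in the example above.
    fn dfs(
        node: usize,
        end: usize,
        edges: &[Vec<(usize, u32)>],
        dead_ends: &HashMap<u32, Vec<u32>>,
        forbidden: &HashSet<u32>,
        path: &mut Vec<u32>,
        paths: &mut Vec<Vec<u32>>,
    ) {
        if node == end {
            paths.push(path.clone());
            return;
        }
        for &(next, condition) in &edges[node] {
            if forbidden.contains(&condition) {
                continue; // edge is untraversable on this path: prune the subtree
            }
            // Extend the forbidden set with whatever this condition rules out.
            let mut forbidden = forbidden.clone();
            forbidden.extend(dead_ends.get(&condition).into_iter().flatten().copied());
            path.push(condition);
            dfs(next, end, edges, dead_ends, &forbidden, path, paths);
            path.pop();
        }
    }

    fn main() {
        // Two routes from node 0 to node 3. Taking condition 10 forbids 12,
        // which kills the 0 -> 1 -> 3 route but leaves 0 -> 2 -> 3 intact.
        let edges = vec![vec![(1, 10), (2, 11)], vec![(3, 12)], vec![(3, 13)], vec![]];
        let dead_ends = HashMap::from([(10, vec![12])]);
        let (mut path, mut paths) = (Vec::new(), Vec::new());
        dfs(0, 3, &edges, &dead_ends, &HashSet::new(), &mut path, &mut paths);
        assert_eq!(paths, vec![vec![11, 13]]); // only the surviving route remains
    }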
@@ -14,7 +14,7 @@ This module tests the following properties about the exactness ranking rule:
 3. those that contain the most exact words from the remaining query
 
 - if it is followed by other graph-based ranking rules (`typo`, `proximity`, `attribute`).
-Then these rules will only work with
+  Then these rules will only work with
 1. the exact terms selected by `exactness`
 2. the full query term otherwise
 */
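A toy rendering of property 3 above, ranking documents by how many exact query words they contain; the helper is purely illustrative, not milli's implementation:

    fn exact_word_count(query: &[&str], document: &str) -> usize {
        query
            .iter()
            .filter(|w| document.split_whitespace().any(|d| d == **w))
            .count()
    }

    fn main() {
        let query = ["quick", "brown", "fox"];
        let mut docs = vec!["the quick dog", "quick brown foxes", "quick brown fox"];
        // Most exact words from the query ranks first.
        docs.sort_by_key(|d| std::cmp::Reverse(exact_word_count(&query, d)));
        assert_eq!(docs, vec!["quick brown fox", "quick brown foxes", "the quick dog"]);
    }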
@@ -4,15 +4,14 @@ This module tests the Proximity ranking rule:
 1. A proximity of >7 always has the same cost.
 
 2. Phrase terms can be in proximity to other terms via their start and end words,
-but we need to make sure that the phrase exists in the document that meets this
-proximity condition. This is especially relevant with split words and synonyms.
+   but we need to make sure that the phrase exists in the document that meets this
+   proximity condition. This is especially relevant with split words and synonyms.
 
 3. An ngram has the same proximity cost as its component words being consecutive.
-e.g. `sunflower` equivalent to `sun flower`.
+   e.g. `sunflower` equivalent to `sun flower`.
 
 4. The prefix databases can be used to find the proximity between two words, but
-they store fewer proximities than the regular word proximity DB.
-
+   they store fewer proximities than the regular word proximity DB.
 */
 
 use std::collections::BTreeMap;
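Property 1 can be pictured as a saturating cost function: every positional distance above 7 lands in one final bucket. A hedged sketch; the cap value and the function name are illustrative, not milli's internals:

    fn proximity_cost(distance: u32) -> u32 {
        distance.min(8) // everything above 7 collapses into the same bucket
    }

    fn main() {
        assert_eq!(proximity_cost(3), 3);
        assert_eq!(proximity_cost(8), proximity_cost(1000)); // same cost past the cap
    }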
@@ -11,7 +11,7 @@ This module tests the following properties:
 8. 2grams can have 1 typo if they are larger than `min_word_len_two_typos`
 9. 3grams are not typo tolerant (but they can be split into two words)
 10. The `typo` ranking rule assumes the role of the `words` ranking rule implicitly
-if `words` doesn't exist before it.
+    if `words` doesn't exist before it.
 11. The `typo` ranking rule places documents with the same number of typos in the same bucket
 12. Prefix tolerance costs nothing according to the typo ranking rule
 13. Split words cost 1 typo according to the typo ranking rule
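Properties 8 and 9 sit on top of a length-based typo budget. A hedged sketch of that rule; meilisearch exposes the thresholds as `minWordSizeForTypos` (defaults 5 and 9), but this exact function is illustrative:

    fn allowed_typos(word_len: usize, min_one: usize, min_two: usize) -> usize {
        if word_len >= min_two {
            2 // long enough for two typos
        } else if word_len >= min_one {
            1 // long enough for one typo
        } else {
            0 // too short for typo tolerance
        }
    }

    fn main() {
        assert_eq!(allowed_typos(4, 5, 9), 0);
        assert_eq!(allowed_typos(6, 5, 9), 1);
        assert_eq!(allowed_typos(10, 5, 9), 2);
    }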
@@ -2,11 +2,11 @@
 This module tests the following properties:
 
 1. The `last` term matching strategy starts removing terms from the query
-starting from the end if no more results match it.
+   starting from the end if no more results match it.
 2. Phrases are never deleted by the `last` term matching strategy
 3. Duplicate words don't affect the ranking of a document according to the `words` ranking rule
 4. The proximity of the first and last word of a phrase to its adjacent terms is taken into
-account by the proximity ranking rule.
+   account by the proximity ranking rule.
 5. Unclosed double quotes still make a phrase
 6. The `all` term matching strategy does not remove any term from the query
 7. The search is capable of returning no results if no documents match the query
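Property 1 describes a fallback ladder: try the full query, then keep dropping the last term until something matches. A toy sketch of the candidate queries that strategy would generate (illustrative only; phrases, which property 2 protects, are ignored here):

    fn last_strategy_candidates(terms: &[&str]) -> Vec<Vec<String>> {
        // Longest prefix of the query first, then progressively shorter ones.
        (1..=terms.len())
            .rev()
            .map(|n| terms[..n].iter().map(|s| s.to_string()).collect())
            .collect()
    }

    fn main() {
        let queries = last_strategy_candidates(&["the", "quick", "brown", "fox"]);
        assert_eq!(queries.first().unwrap().len(), 4); // full query tried first
        assert_eq!(queries.last().unwrap(), &vec!["the".to_string()]); // last resort
    }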
@@ -43,7 +43,8 @@ use crate::{CboRoaringBitmapCodec, Index, Result};
 ///
 /// - `ModificationResult::Nothing` means that modifying the `facet_value` didn't have any impact into the `level`.
 ///   This case is reachable when a document id is removed from a sub-level node but is still present in another one.
-/// For example, removing `2` from a document containing `2` and `3`, the document id will be removed from the `level 0` but should remain in the group node [1..4] in `level 1`.
+/// For example, removing `2` from a document containing `2` and `3`, the document id will be removed from the `level 0`
+/// but should remain in the group node [1..4] in `level 1`.
 enum ModificationResult {
     InPlace,
     Expand,
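The `Nothing` case is easiest to see with concrete sets. The snippet below only illustrates the situation the doc comment describes, with plain maps standing in for the facet databases:

    use std::collections::{BTreeMap, BTreeSet};

    fn main() {
        // level 0: facet value -> document ids
        let mut level0: BTreeMap<u32, BTreeSet<u32>> = BTreeMap::new();
        level0.entry(2).or_default().insert(42);
        level0.entry(3).or_default().insert(42);

        // Remove document 42 from facet value `2` only (the `level 0` change).
        level0.get_mut(&2).unwrap().remove(&42);

        // The level-1 group covering values [1..4] still sees document 42
        // through facet value `3`, so the group node must not be touched.
        let group: BTreeSet<u32> =
            (1..4).filter_map(|v| level0.get(&v)).flatten().copied().collect();
        assert!(group.contains(&42));
    }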