Mirror of https://github.com/meilisearch/meilisearch.git (synced 2024-11-23 02:27:40 +08:00)
Execute cargo clippy --fix

This commit is contained in:
parent d8fed1f7a9
commit 811f156031
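
The diff below is a single `cargo clippy --fix` pass: every hunk applies one of Clippy's machine-applicable suggestions.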

@@ -1234,7 +1234,7 @@ pub(crate) mod tests {
         {
             let builder = IndexDocuments::new(
                 wtxn,
-                &self,
+                self,
                 &self.indexer_config,
                 self.index_documents_config.clone(),
                 |_| (),
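
This first hunk is Clippy's `needless_borrow` fix: `self` is already a reference in this method, so `&self` creates a double reference that the compiler has to coerce back down. A minimal sketch of the pattern, with illustrative names rather than the codebase's own:

    fn word_count(text: &str) -> usize {
        text.split_whitespace().count()
    }

    fn main() {
        let line = String::from("hello world america");
        let borrowed: &String = &line;
        // `&borrowed` would be a `&&String`; it still compiles via deref
        // coercion, but the extra borrow is what needless_borrow flags.
        assert_eq!(word_count(borrowed), 3);
    }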

@@ -610,7 +610,11 @@ fn query_pair_proximity_docids(
         }
         (QueryKind::Exact { word: left, .. }, QueryKind::Tolerant { typo, word: right }) => {
             let r_words = word_derivations(right, prefix, *typo, ctx.words_fst(), wdcache)?;
+<<<<<<< HEAD
             all_word_pair_overall_proximity_docids(ctx, &[(left, 0)], r_words, proximity)
+=======
+            all_word_pair_proximity_docids(ctx, &[(left, 0)], r_words, proximity)
+>>>>>>> 08fe530b (Execute cargo clippy --fix)
         }
         (
             QueryKind::Tolerant { typo: l_typo, word: left },
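
Note that this hunk resolves nothing: it adds merge-conflict markers to the file. Reading the markers, the side between `<<<<<<< HEAD` and `=======` keeps `all_word_pair_overall_proximity_docids`, the side between `=======` and `>>>>>>> 08fe530b (Execute cargo clippy --fix)` calls `all_word_pair_proximity_docids`, and the disagreement still has to be resolved by hand before the code compiles.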

@@ -866,7 +870,7 @@ pub mod test {

         let mut keys = word_docids.keys().collect::<Vec<_>>();
         keys.sort_unstable();
-        let words_fst = fst::Set::from_iter(keys).unwrap().map_data(|v| Cow::Owned(v)).unwrap();
+        let words_fst = fst::Set::from_iter(keys).unwrap().map_data(Cow::Owned).unwrap();

         TestContext {
             words_fst,
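
`|v| Cow::Owned(v)` only forwards its argument, so Clippy's `redundant_closure` fix passes the `Cow::Owned` constructor directly. The same rewrite in a self-contained sketch (the data here is made up; only the closure-to-constructor step matches the hunk):

    use std::borrow::Cow;

    fn main() {
        let buffers: Vec<Vec<u8>> = vec![b"fst".to_vec(), b"set".to_vec()];
        // Before: .map(|v| Cow::Owned(v))
        // After:  .map(Cow::Owned), since an enum constructor is a plain function.
        let cows: Vec<Cow<'static, [u8]>> = buffers.into_iter().map(Cow::Owned).collect();
        assert_eq!(cows.len(), 2);
    }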

@@ -110,7 +110,7 @@ mod test {
         addition.execute().unwrap();

         let fields_map = index.fields_ids_map(&txn).unwrap();
-        let fid = fields_map.id(&distinct).unwrap();
+        let fid = fields_map.id(distinct).unwrap();

         let documents = DocumentsBatchReader::from_reader(Cursor::new(JSON.as_slice())).unwrap();
         let map = (0..documents.documents_count() as u32).collect();

@@ -133,7 +133,7 @@ mod test {
                 let s = value.to_string();
                 assert!(seen.insert(s));
             }
-            Value::Array(values) => values.into_iter().for_each(|value| test(seen, value)),
+            Value::Array(values) => values.iter().for_each(|value| test(seen, value)),
         }
     }

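
Because `values` is bound by reference in the `Value::Array(values)` arm, `into_iter()` cannot consume the vector and silently degrades to iteration by reference; Clippy's `into_iter_on_ref` fix writes `iter()` to say what actually happens. A sketch, assuming the match scrutinee is a `&serde_json::Value`:

    use serde_json::{json, Value};

    fn count_strings(value: &Value) -> usize {
        match value {
            Value::String(_) => 1,
            // `values` is a `&Vec<Value>`, so `.into_iter()` would yield
            // `&Value` anyway; `.iter()` makes that explicit.
            Value::Array(values) => values.iter().map(count_strings).sum(),
            _ => 0,
        }
    }

    fn main() {
        let v = json!(["a", ["b", "c"], 42]);
        assert_eq!(count_strings(&v), 3);
    }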

@@ -314,8 +314,8 @@ pub fn snap_field_id_docid_facet_strings(index: &Index) -> String {
 pub fn snap_documents_ids(index: &Index) -> String {
     let rtxn = index.read_txn().unwrap();
     let documents_ids = index.documents_ids(&rtxn).unwrap();
-    let snap = display_bitmap(&documents_ids);
-    snap
+
+    display_bitmap(&documents_ids)
 }
 pub fn snap_stop_words(index: &Index) -> String {
     let rtxn = index.read_txn().unwrap();
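
This hunk and the next two are Clippy's `let_and_return` fix: binding a value just to return it on the following line adds nothing, so the trailing expression is returned directly (the machine fix leaves a blank line where the `let` was). The shape in isolation:

    fn snapshot(items: &[u32]) -> String {
        // Before:
        //     let snap = format!("{} items", items.len());
        //     snap
        // After: return the expression itself.
        format!("{} items", items.len())
    }

    fn main() {
        assert_eq!(snapshot(&[1, 2, 3]), "3 items");
    }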

@@ -326,8 +326,8 @@ pub fn snap_stop_words(index: &Index) -> String {
 pub fn snap_soft_deleted_documents_ids(index: &Index) -> String {
     let rtxn = index.read_txn().unwrap();
     let soft_deleted_documents_ids = index.soft_deleted_documents_ids(&rtxn).unwrap();
-    let soft_deleted_documents_ids = display_bitmap(&soft_deleted_documents_ids);
-    soft_deleted_documents_ids
+
+    display_bitmap(&soft_deleted_documents_ids)
 }
 pub fn snap_field_distributions(index: &Index) -> String {
     let rtxn = index.read_txn().unwrap();

@@ -350,8 +350,8 @@ pub fn snap_fields_ids_map(index: &Index) -> String {
 pub fn snap_geo_faceted_documents_ids(index: &Index) -> String {
     let rtxn = index.read_txn().unwrap();
     let geo_faceted_documents_ids = index.geo_faceted_documents_ids(&rtxn).unwrap();
-    let snap = display_bitmap(&geo_faceted_documents_ids);
-    snap
+
+    display_bitmap(&geo_faceted_documents_ids)
 }
 pub fn snap_external_documents_ids(index: &Index) -> String {
     let rtxn = index.read_txn().unwrap();

@@ -641,7 +641,7 @@ mod tests {
         external_ids: &[&str],
         disable_soft_deletion: bool,
     ) -> Vec<u32> {
-        let external_document_ids = index.external_documents_ids(&wtxn).unwrap();
+        let external_document_ids = index.external_documents_ids(wtxn).unwrap();
         let ids_to_delete: Vec<u32> = external_ids
             .iter()
             .map(|id| external_document_ids.get(id.as_bytes()).unwrap())

@@ -858,7 +858,7 @@ mod tests {
         assert!(!results.documents_ids.is_empty());
         for id in results.documents_ids.iter() {
             assert!(
-                !deleted_internal_ids.contains(&id),
+                !deleted_internal_ids.contains(id),
                 "The document {} was supposed to be deleted",
                 id
             );
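
In this assertion (and the two identical ones below), the loop already iterates by reference, so `id` is a `&u32` and `contains(&id)` passes a `&&u32` that only works through deref coercion; dropping the borrow matches the `contains(&u32)` signature exactly. A sketch assuming plain `Vec<u32>` collections, which the diff does not confirm:

    fn main() {
        let documents_ids: Vec<u32> = vec![1, 2, 3];
        let deleted_internal_ids: Vec<u32> = vec![4, 5];
        for id in documents_ids.iter() {
            // `id: &u32` is already the type `contains` expects.
            assert!(!deleted_internal_ids.contains(id));
        }
    }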

@@ -922,7 +922,7 @@ mod tests {
         assert!(!results.documents_ids.is_empty());
         for id in results.documents_ids.iter() {
             assert!(
-                !deleted_internal_ids.contains(&id),
+                !deleted_internal_ids.contains(id),
                 "The document {} was supposed to be deleted",
                 id
             );

@@ -986,7 +986,7 @@ mod tests {
         assert!(!results.documents_ids.is_empty());
         for id in results.documents_ids.iter() {
             assert!(
-                !deleted_internal_ids.contains(&id),
+                !deleted_internal_ids.contains(id),
                 "The document {} was supposed to be deleted",
                 id
             );

@@ -772,7 +772,7 @@ mod tests {

         let docs = index.documents(&rtxn, vec![0, 1, 2]).unwrap();
         let (_id, obkv) = docs.iter().find(|(_id, kv)| kv.get(0) == Some(br#""kevin""#)).unwrap();
-        let kevin_uuid: String = serde_json::from_slice(&obkv.get(1).unwrap()).unwrap();
+        let kevin_uuid: String = serde_json::from_slice(obkv.get(1).unwrap()).unwrap();
         drop(rtxn);

         // Second we send 1 document with the generated uuid, to erase the previous ones.

@@ -1811,7 +1811,7 @@ mod tests {
         let long_word = "lol".repeat(1000);
         let doc1 = documents! {[{
             "id": "1",
-            "title": long_word.clone(),
+            "title": long_word,
         }]};

         index.add_documents(doc1).unwrap();
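
`long_word.clone()` was the value's last use, so Clippy's `redundant_clone` fix moves the `String` instead of duplicating a 3000-byte allocation. Minimal shape of the fix:

    fn main() {
        let long_word = "lol".repeat(1000);
        // Before: `long_word.clone()`. The original is never touched
        // again, so the clone only duplicated the allocation.
        let title = long_word; // move instead of clone
        assert_eq!(title.len(), 3000);
    }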

@@ -574,7 +574,7 @@ mod tests {
         expected_prefixes: &[&str],
     ) {
         let mut actual_prefixes = vec![];
-        trie.for_each_prefix_of(word.as_bytes(), &mut Vec::new(), &search_start, |x| {
+        trie.for_each_prefix_of(word.as_bytes(), &mut Vec::new(), search_start, |x| {
             let s = String::from_utf8(x.to_owned()).unwrap();
             actual_prefixes.push(s);
         });

@@ -19,7 +19,7 @@ mod query_criteria;
 mod sort;
 mod typo_tolerance;

-pub const TEST_QUERY: &'static str = "hello world america";
+pub const TEST_QUERY: &str = "hello world america";

 pub const EXTERNAL_DOCUMENTS_IDS: &[&str; 17] =
     &["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q"];
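
The `&'static str` change is Clippy's `redundant_static_lifetimes` fix: `const` and `static` items already have the `'static` lifetime implicitly, so spelling it out is noise. For example:

    // Before: pub const TEST_QUERY: &'static str = ...
    pub const TEST_QUERY: &str = "hello world america";

    fn main() {
        // The elided lifetime is still 'static.
        let s: &'static str = TEST_QUERY;
        assert_eq!(s.len(), 19);
    }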

@@ -177,7 +177,7 @@ fn execute_filter(filter: &str, document: &TestDocument) -> Option<String> {
         {
             id = Some(document.id.clone())
         }
-    } else if let Some((field, filter)) = filter.split_once("=") {
+    } else if let Some((field, filter)) = filter.split_once('=') {
         if field == "tag" && document.tag == filter {
             id = Some(document.id.clone())
         } else if field == "asc_desc_rank"
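
`split_once("=")` to `split_once('=')` (and the `'<'` and `'>'` changes in the next hunk) is Clippy's `single_char_pattern` fix: a `char` pattern is matched by a simple character comparison instead of substring search, and it signals that exactly one character is intended. For instance:

    fn main() {
        let filter = "asc_desc_rank<42";
        // A char pattern is cheaper and clearer than "<" here.
        if let Some((field, value)) = filter.split_once('<') {
            assert_eq!(field, "asc_desc_rank");
            assert_eq!(value.parse::<u32>().unwrap(), 42);
        }
    }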

@@ -185,11 +185,11 @@ fn execute_filter(filter: &str, document: &TestDocument) -> Option<String> {
         {
             id = Some(document.id.clone())
         }
-    } else if let Some(("asc_desc_rank", filter)) = filter.split_once("<") {
+    } else if let Some(("asc_desc_rank", filter)) = filter.split_once('<') {
         if document.asc_desc_rank < filter.parse().unwrap() {
             id = Some(document.id.clone())
         }
-    } else if let Some(("asc_desc_rank", filter)) = filter.split_once(">") {
+    } else if let Some(("asc_desc_rank", filter)) = filter.split_once('>') {
         if document.asc_desc_rank > filter.parse().unwrap() {
             id = Some(document.id.clone())
         }

@@ -200,14 +200,12 @@ test_criterion!(
 #[test]
 fn criteria_mixup() {
     use Criterion::*;
-    let index = search::setup_search_index_with_criteria(&vec![
-        Words,
+    let index = search::setup_search_index_with_criteria(&[Words,
         Attribute,
         Desc(S("asc_desc_rank")),
         Exactness,
         Proximity,
-        Typo,
-    ]);
+        Typo]);

     #[rustfmt::skip]
     let criteria_mix = {
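
The final hunk is Clippy's `useless_vec` fix: the criteria are only read through a slice borrow, so a borrowed array `&[...]` does the job of `&vec![...]` without a heap allocation. The machine fix splices the expression inline, which is why the one-criterion-per-line layout collapses. The pattern in isolation:

    fn count_criteria(criteria: &[&str]) -> usize {
        criteria.len()
    }

    fn main() {
        // Before: count_criteria(&vec!["Words", "Attribute", "Typo"])
        // allocated a Vec first; a borrowed array is enough for a &[_].
        assert_eq!(count_criteria(&["Words", "Attribute", "Typo"]), 3);
    }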