diff --git a/milli/src/update/index_documents/transform.rs b/milli/src/update/index_documents/transform.rs
index 04c9e9256..65007aa32 100644
--- a/milli/src/update/index_documents/transform.rs
+++ b/milli/src/update/index_documents/transform.rs
@@ -122,7 +122,7 @@ impl<'a, 'i> Transform<'a, 'i> {
         // We initialize the sorter with the user indexing settings.
         let original_sorter = create_sorter(
             grenad::SortAlgorithm::Stable,
-            merge_function.clone(),
+            merge_function,
             indexer_settings.chunk_compression_type,
             indexer_settings.chunk_compression_level,
             indexer_settings.max_nb_chunks,
diff --git a/milli/src/update/new/channel.rs b/milli/src/update/new/channel.rs
index 237581cb3..d9823096e 100644
--- a/milli/src/update/new/channel.rs
+++ b/milli/src/update/new/channel.rs
@@ -518,7 +518,7 @@ impl DocumentSender<'_> {
 impl Drop for DocumentSender<'_> {
     fn drop(&mut self) {
         if let Some(sender) = self.0.take() {
-            sender.send(MergerOperation::FinishedDocument);
+            let _ = sender.send(MergerOperation::FinishedDocument);
         }
     }
 }
diff --git a/milli/src/update/new/extract/mod.rs b/milli/src/update/new/extract/mod.rs
index d1f6bb787..6e60a4063 100644
--- a/milli/src/update/new/extract/mod.rs
+++ b/milli/src/update/new/extract/mod.rs
@@ -58,7 +58,7 @@ pub mod perm_json_p {
         seeker: &mut impl FnMut(&str, &Value) -> Result<()>,
     ) -> Result<()> {
         if value.is_empty() {
-            seeker(&base_key, &Value::Object(Map::with_capacity(0)))?;
+            seeker(base_key, &Value::Object(Map::with_capacity(0)))?;
         }
 
         for (key, value) in value.iter() {
@@ -103,7 +103,7 @@ pub mod perm_json_p {
         seeker: &mut impl FnMut(&str, &Value) -> Result<()>,
     ) -> Result<()> {
         if values.is_empty() {
-            seeker(&base_key, &Value::Array(vec![]))?;
+            seeker(base_key, &Value::Array(vec![]))?;
         }
 
         for value in values {
@@ -128,10 +128,10 @@ pub mod perm_json_p {
     ) -> bool {
         selectors.map_or(true, |selectors| {
             selectors.iter().any(|selector| {
-                contained_in(selector, &field_name) || contained_in(&field_name, selector)
+                contained_in(selector, field_name) || contained_in(field_name, selector)
             })
         }) && !skip_selectors.iter().any(|skip_selector| {
-            contained_in(skip_selector, &field_name) || contained_in(&field_name, skip_selector)
+            contained_in(skip_selector, field_name) || contained_in(field_name, skip_selector)
         })
     }
 }
diff --git a/milli/src/update/new/extract/searchable/tokenize_document.rs b/milli/src/update/new/extract/searchable/tokenize_document.rs
index d2795114e..fda619013 100644
--- a/milli/src/update/new/extract/searchable/tokenize_document.rs
+++ b/milli/src/update/new/extract/searchable/tokenize_document.rs
@@ -48,7 +48,7 @@ impl<'a> DocumentTokenizer<'a> {
                 .entry(field_id)
                 .and_modify(|counter| *counter += MAX_DISTANCE)
                 .or_insert(0);
-            if *position as u32 >= self.max_positions_per_attributes {
+            if *position >= self.max_positions_per_attributes {
                 return Ok(());
             }
 
@@ -72,7 +72,7 @@ impl<'a> DocumentTokenizer<'a> {
                 *position,
                 self.tokenizer.tokenize_with_allow_list(text.as_str(), locales),
             )
-            .take_while(|(p, _)| (*p as u32) < self.max_positions_per_attributes);
+            .take_while(|(p, _)| *p < self.max_positions_per_attributes);
 
             for (index, token) in tokens {
                 // keep a word only if it is not empty and fit in a LMDB key.
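The recurring change in the `extract/mod.rs` hunks above is clippy's `needless_borrow` fix: `base_key` and `field_name` are already `&str` values, so borrowing them again only builds a `&&str` that the compiler auto-dereferences at the call site. A minimal standalone sketch of the pattern (identifiers are illustrative, not taken from milli):

```rust
fn takes_str(s: &str) -> usize {
    s.len()
}

fn main() {
    let base_key: &str = "document.title";

    // Flagged by clippy::needless_borrow: `&base_key` is a `&&str`
    // that must be auto-dereferenced back down to `&str`.
    let borrowed = takes_str(&base_key);

    // Preferred: pass the existing reference through unchanged.
    let direct = takes_str(base_key);

    assert_eq!(borrowed, direct);
}
```

Both calls compile to identical behavior; the fix only removes noise. The `merge_function.clone()` removal in `transform.rs` is likely the same kind of mechanical cleanup, dropping a clone of a value that can be passed directly.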
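The `channel.rs` hunk is worth a note beyond lint silence: inside `Drop::drop`, the `Result` returned by `send` is now explicitly discarded with `let _ =`. By the time the sender is dropped, the receiving half of the channel may already be gone, in which case `send` fails; unwrapping or propagating that error inside `drop` risks a panic while another panic may already be unwinding. A minimal sketch of the same pattern over `std::sync::mpsc` (the `FinishOnDrop` type is invented for illustration):

```rust
use std::sync::mpsc::Sender;

/// Announces completion when it goes out of scope.
struct FinishOnDrop {
    sender: Option<Sender<&'static str>>,
}

impl Drop for FinishOnDrop {
    fn drop(&mut self) {
        if let Some(sender) = self.sender.take() {
            // The receiver may already be dropped, in which case `send`
            // returns `Err`. `let _ =` discards the Result deliberately
            // and silences the `unused_must_use` warning on it.
            let _ = sender.send("finished");
        }
    }
}
```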
diff --git a/milli/src/update/new/indexer/document_operation.rs b/milli/src/update/new/indexer/document_operation.rs
index f088370fb..572ea8528 100644
--- a/milli/src/update/new/indexer/document_operation.rs
+++ b/milli/src/update/new/indexer/document_operation.rs
@@ -2,7 +2,7 @@ use std::borrow::Cow;
 use std::collections::{BTreeMap, HashMap};
 use std::sync::Arc;
 
-use heed::types::{Bytes, DecodeIgnore};
+use heed::types::Bytes;
 use heed::RoTxn;
 use memmap2::Mmap;
 use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
diff --git a/milli/src/update/new/indexer/mod.rs b/milli/src/update/new/indexer/mod.rs
index 5187e4f4c..57821c51a 100644
--- a/milli/src/update/new/indexer/mod.rs
+++ b/milli/src/update/new/indexer/mod.rs
@@ -6,7 +6,7 @@ pub use document_deletion::DocumentDeletion;
 pub use document_operation::DocumentOperation;
 use heed::{RoTxn, RwTxn};
 pub use partial_dump::PartialDump;
-use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
+use rayon::iter::{IndexedParallelIterator, IntoParallelIterator};
 use rayon::ThreadPool;
 pub use update_by_function::UpdateByFunction;
 
@@ -229,7 +229,8 @@ fn extract_and_send_docids<E: DocidsExtractor, D: DatabaseType>(
     sender: &ExtractorSender,
 ) -> Result<()> {
     let merger = E::run_extraction(index, fields_ids_map, indexer, document_changes)?;
-    Ok(sender.send_searchable::<D>(merger).unwrap())
+    sender.send_searchable::<D>(merger).unwrap();
+    Ok(())
 }
 
 /// Returns the primary key *field id* that has already been set for this index or the
diff --git a/milli/src/update/new/indexer/partial_dump.rs b/milli/src/update/new/indexer/partial_dump.rs
index 5f8743e31..43a89c46c 100644
--- a/milli/src/update/new/indexer/partial_dump.rs
+++ b/milli/src/update/new/indexer/partial_dump.rs
@@ -1,4 +1,4 @@
-use rayon::iter::{IndexedParallelIterator, ParallelBridge, ParallelIterator};
+use rayon::iter::IndexedParallelIterator;
 
 use super::DocumentChanges;
 use crate::documents::{DocumentIdExtractionError, PrimaryKey};
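The `indexer/mod.rs` hunk above is more than tidying: `send_searchable::<D>(merger).unwrap()` evaluates to `()`, and wrapping that unit value directly in `Ok(...)` obscures that the call is made for its side effect; clippy's `unit_arg` lint targets this shape and suggests the statement-then-`Ok(())` form. A contrived before/after sketch, with a hypothetical `do_send` standing in for the channel call:

```rust
fn do_send() -> Result<(), String> {
    Ok(())
}

// Before: the `()` produced by `unwrap()` is passed as an argument
// to `Ok`, which clippy flags as `unit_arg`.
fn flagged() -> Result<(), String> {
    Ok(do_send().unwrap())
}

// After: the side-effecting call stands on its own line and the
// unit success value is returned explicitly.
fn fixed() -> Result<(), String> {
    do_send().unwrap();
    Ok(())
}

fn main() {
    assert_eq!(flagged(), fixed());
}
```

The remaining hunks (`DecodeIgnore`, `ParallelIterator`, and `ParallelBridge` disappearing from the `use` lists) are plain `unused_imports` cleanups.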