diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index d35e78c70..5be5a506b 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -66,6 +66,24 @@ jobs: with: command: clippy + clippy: + name: Run Clippy + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v3 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: clippy + - name: Cache dependencies + uses: Swatinem/rust-cache@v2.0.0 + - name: Run cargo clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + fmt: name: Run Rustfmt runs-on: ubuntu-20.04 diff --git a/milli/src/search/criteria/mod.rs b/milli/src/search/criteria/mod.rs index 8d0e3af05..d1c482b38 100644 --- a/milli/src/search/criteria/mod.rs +++ b/milli/src/search/criteria/mod.rs @@ -432,18 +432,10 @@ pub fn resolve_phrase(ctx: &dyn Context, phrase: &[Option<String>]) -> Result<RoaringBitmap> { subgroup.push(positions.iter().map(|p| (p, 0, p)).collect()) diff --git a/milli/src/search/query_tree.rs b/milli/src/search/query_tree.rs index 5042f4762..3237bb9d3 100755 --- a/milli/src/search/query_tree.rs +++ b/milli/src/search/query_tree.rs @@ -591,7 +591,7 @@ fn create_matching_words( (0..words.len()).into_iter().map(|i| id + i as PrimitiveWordId).collect(); let words = words .into_iter() - .filter_map(|w| w) + .flatten() .map(|w| MatchingWord::new(w, 0, false)) .collect(); matching_words.push((words, ids)); diff --git a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs b/milli/src/update/index_documents/extract/extract_docid_word_positions.rs index f1d595039..8eae0caee 100644 --- a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs +++ b/milli/src/update/index_documents/extract/extract_docid_word_positions.rs @@ -80,7 +80,7 @@ pub fn extract_docid_word_positions( .map_err(|_| SerializationError::InvalidNumberSerialization)?; let position = absolute_from_relative_position(field_id, position); docid_word_positions_sorter - 
.insert(&key_buffer, &position.to_ne_bytes())?; + .insert(&key_buffer, position.to_ne_bytes())?; } } } diff --git a/milli/src/update/index_documents/extract/extract_facet_string_docids.rs b/milli/src/update/index_documents/extract/extract_facet_string_docids.rs index 221356ba0..182538683 100644 --- a/milli/src/update/index_documents/extract/extract_facet_string_docids.rs +++ b/milli/src/update/index_documents/extract/extract_facet_string_docids.rs @@ -43,7 +43,7 @@ pub fn extract_facet_string_docids( let key_bytes = FacetGroupKeyCodec::<StrRefCodec>::bytes_encode(&key).unwrap(); // document id is encoded in native-endian because of the CBO roaring bitmap codec - facet_string_docids_sorter.insert(&key_bytes, &document_id.to_ne_bytes())?; + facet_string_docids_sorter.insert(&key_bytes, document_id.to_ne_bytes())?; } sorter_into_reader(facet_string_docids_sorter, indexer) diff --git a/milli/src/update/index_documents/extract/extract_word_pair_proximity_docids.rs b/milli/src/update/index_documents/extract/extract_word_pair_proximity_docids.rs index 0c7700a33..6707fc268 100644 --- a/milli/src/update/index_documents/extract/extract_word_pair_proximity_docids.rs +++ b/milli/src/update/index_documents/extract/extract_word_pair_proximity_docids.rs @@ -145,7 +145,7 @@ fn document_word_positions_into_sorter( key_buffer.push(0); key_buffer.extend_from_slice(w2.as_bytes()); - word_pair_proximity_docids_sorter.insert(&key_buffer, &document_id.to_ne_bytes())?; + word_pair_proximity_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?; } Ok(()) diff --git a/milli/src/update/index_documents/extract/extract_word_position_docids.rs b/milli/src/update/index_documents/extract/extract_word_position_docids.rs index d4a3eda2c..b7015d2ce 100644 --- a/milli/src/update/index_documents/extract/extract_word_position_docids.rs +++ b/milli/src/update/index_documents/extract/extract_word_position_docids.rs @@ -41,7 +41,7 @@ pub fn extract_word_position_docids( 
key_buffer.extend_from_slice(word_bytes); key_buffer.extend_from_slice(&position.to_be_bytes()); - word_position_docids_sorter.insert(&key_buffer, &document_id.to_ne_bytes())?; + word_position_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?; } } diff --git a/milli/src/update/index_documents/transform.rs b/milli/src/update/index_documents/transform.rs index 7c9a912b3..59f18b22d 100644 --- a/milli/src/update/index_documents/transform.rs +++ b/milli/src/update/index_documents/transform.rs @@ -248,7 +248,7 @@ impl<'a, 'i> Transform<'a, 'i> { skip_insertion = true; } else { // we associate the base document with the new key, everything will get merged later. - self.original_sorter.insert(&docid.to_be_bytes(), base_obkv)?; + self.original_sorter.insert(docid.to_be_bytes(), base_obkv)?; match self.flatten_from_fields_ids_map(KvReader::new(base_obkv))? { Some(buffer) => { self.flattened_sorter.insert(docid.to_be_bytes(), &buffer)? @@ -261,7 +261,7 @@ impl<'a, 'i> Transform<'a, 'i> { if !skip_insertion { self.new_documents_ids.insert(docid); // We use the extracted/generated user id as the key for this document. - self.original_sorter.insert(&docid.to_be_bytes(), obkv_buffer.clone())?; + self.original_sorter.insert(docid.to_be_bytes(), obkv_buffer.clone())?; match self.flatten_from_fields_ids_map(KvReader::new(&obkv_buffer))? { Some(buffer) => self.flattened_sorter.insert(docid.to_be_bytes(), &buffer)?,