Fix clippy errors
Add clippy job to CI

commit 3009981d31 (parent 401e956128)
.github/workflows/rust.yml (+18 lines)
@@ -66,6 +66,24 @@ jobs:
         with:
           command: clippy

+  clippy:
+    name: Run Clippy
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+          components: clippy
+      - name: Cache dependencies
+        uses: Swatinem/rust-cache@v2.0.0
+      - name: Run cargo clippy
+        uses: actions-rs/cargo@v1
+        with:
+          command: clippy
+
   fmt:
     name: Run Rustfmt
     runs-on: ubuntu-20.04
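A hedged reading of the new job: it mirrors the neighbouring `fmt` job's structure, with a minimal stable toolchain carrying the `clippy` component, `Swatinem/rust-cache` for dependency caching, and `actions-rs/cargo` driving the command. Note that plain `cargo clippy` exits non-zero only on deny-level lints, so ordinary warnings would not fail this job unless something like `args: -- -D warnings` (a supported `actions-rs/cargo` input) were added.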
@@ -432,18 +432,10 @@ pub fn resolve_phrase(ctx: &dyn Context, phrase: &[Option<String>]) -> Result<Ro
             // Get all the documents with the matching distance for each word pairs.
             let mut bitmaps = Vec::with_capacity(winsize.pow(2));
             for (offset, s1) in win.iter().enumerate().filter_map(|(index, word)| {
-                if let Some(word) = word {
-                    Some((index, word))
-                } else {
-                    None
-                }
+                word.as_ref().map(|word| (index, word))
             }) {
                 for (dist, s2) in win.iter().skip(offset + 1).enumerate().filter_map(|(index, word)| {
-                    if let Some(word) = word {
-                        Some((index, word))
-                    } else {
-                        None
-                    }
+                    word.as_ref().map(|word| (index, word))
                 }) {
                     if dist == 0 {
                         match ctx.word_pair_proximity_docids(s1, s2, 1)? {
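This hunk is a textbook `clippy::manual_map` fix: an `if let Some(x) = … { Some(…) } else { None }` expression collapses to a single `Option::map`. A minimal, self-contained sketch of the same rewrite (the data is illustrative, not meilisearch's):

```rust
fn main() {
    let win: Vec<Option<String>> = vec![Some("hello".into()), None, Some("world".into())];

    // Before: manually re-implementing Option::map inside filter_map.
    let verbose: Vec<(usize, &String)> = win
        .iter()
        .enumerate()
        .filter_map(|(index, word)| {
            if let Some(word) = word {
                Some((index, word))
            } else {
                None
            }
        })
        .collect();

    // After: as_ref().map keeps the borrow and drops the explicit None arm.
    let idiomatic: Vec<(usize, &String)> = win
        .iter()
        .enumerate()
        .filter_map(|(index, word)| word.as_ref().map(|word| (index, word)))
        .collect();

    assert_eq!(verbose, idiomatic); // [(0, "hello"), (2, "world")]
}
```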
@@ -488,7 +488,7 @@ fn resolve_plane_sweep_candidates(
             }
             // make a consecutive plane-sweep on the subgroup of words.
             let mut subgroup = Vec::with_capacity(words.len());
-            for word in words.into_iter().map(|w| w.as_deref().unwrap()) {
+            for word in words.iter().map(|w| w.as_deref().unwrap()) {
                 match words_positions.get(word) {
                     Some(positions) => {
                         subgroup.push(positions.iter().map(|p| (p, 0, p)).collect())
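The only change here swaps `into_iter()` for `iter()`. If `words` is borrowed at this point (as the surrounding code suggests), `into_iter()` on a reference already yields references, and clippy's `into_iter_on_ref` lint asks you to say `iter()` outright. A hedged illustration under that assumption:

```rust
fn main() {
    let owned: Vec<Option<String>> = vec![Some("plane".into()), Some("sweep".into())];
    let words = &owned; // assume the real code holds a borrow here

    // On a &Vec<_>, into_iter() and iter() are the same iterator over
    // references; clippy::into_iter_on_ref prefers the explicit spelling.
    for word in words.iter().map(|w| w.as_deref().unwrap()) {
        println!("{word}");
    }
}
```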
@@ -591,7 +591,7 @@ fn create_matching_words(
                 (0..words.len()).into_iter().map(|i| id + i as PrimitiveWordId).collect();
             let words = words
                 .into_iter()
-                .filter_map(|w| w)
+                .flatten()
                 .map(|w| MatchingWord::new(w, 0, false))
                 .collect();
             matching_words.push((words, ids));
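`.filter_map(|w| w)` passes each `Option` through unchanged, which is exactly what clippy's `filter_map_identity` lint catches; `flatten()` states the intent (keep the `Some`s, drop the `None`s) directly. A small sketch:

```rust
fn main() {
    let words: Vec<Option<String>> = vec![Some("word".into()), None, Some("pair".into())];

    // flatten() on an iterator of Options unwraps the Somes and skips the
    // Nones, replacing the identity closure filter_map was given before.
    let kept: Vec<String> = words.into_iter().flatten().collect();

    assert_eq!(kept, vec!["word".to_string(), "pair".to_string()]);
}
```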
@@ -80,7 +80,7 @@ pub fn extract_docid_word_positions<R: io::Read + io::Seek>(
                     .map_err(|_| SerializationError::InvalidNumberSerialization)?;
                 let position = absolute_from_relative_position(field_id, position);
                 docid_word_positions_sorter
-                    .insert(&key_buffer, &position.to_ne_bytes())?;
+                    .insert(&key_buffer, position.to_ne_bytes())?;
             }
         }
     }
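This and the remaining sorter hunks are all the same `clippy::needless_borrow` fix: `to_ne_bytes()` already returns an owned `[u8; N]`, so borrowing it buys nothing when the parameter accepts anything byte-slice-like. A hedged sketch with an illustrative `insert` signature (the real callee is a grenad sorter, whose exact signature isn't shown here):

```rust
// Stand-in for the sorter's insert; only the AsRef<[u8]>-style
// parameters matter for the lint being fixed.
fn insert(key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    println!("key: {:?}, value: {:?}", key.as_ref(), value.as_ref());
}

fn main() {
    let position: u32 = 42;
    let key_buffer = vec![0u8, 1, 2];

    // Before: insert(&key_buffer, &position.to_ne_bytes());
    // The [u8; 4] returned by to_ne_bytes() already satisfies AsRef<[u8]>,
    // so the extra `&` is what clippy::needless_borrow flags.
    insert(&key_buffer, position.to_ne_bytes());
}
```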
@@ -43,7 +43,7 @@ pub fn extract_facet_string_docids<R: io::Read + io::Seek>(
         let key_bytes = FacetGroupKeyCodec::<StrRefCodec>::bytes_encode(&key).unwrap();

         // document id is encoded in native-endian because of the CBO roaring bitmap codec
-        facet_string_docids_sorter.insert(&key_bytes, &document_id.to_ne_bytes())?;
+        facet_string_docids_sorter.insert(&key_bytes, document_id.to_ne_bytes())?;
     }

     sorter_into_reader(facet_string_docids_sorter, indexer)
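The comment kept in this hunk explains the endianness choice: as it implies, milli's CBO roaring bitmap codec stores very small document-id sets as raw native-endian `u32`s rather than a full serialized roaring bitmap, so values written with `to_ne_bytes()` round-trip through it. A heavily hedged sketch of that small-set fast path, not milli's actual implementation:

```rust
// Illustrative only: tiny id sets become raw native-endian u32s.
fn encode_small_set(ids: &[u32]) -> Vec<u8> {
    let mut out = Vec::with_capacity(ids.len() * 4);
    for id in ids {
        out.extend_from_slice(&id.to_ne_bytes());
    }
    out
}

fn main() {
    // One document id -> exactly four native-endian bytes.
    assert_eq!(encode_small_set(&[7]).len(), 4);
}
```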
@@ -145,7 +145,7 @@ fn document_word_positions_into_sorter(
             key_buffer.push(0);
             key_buffer.extend_from_slice(w2.as_bytes());

-            word_pair_proximity_docids_sorter.insert(&key_buffer, &document_id.to_ne_bytes())?;
+            word_pair_proximity_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?;
         }

     Ok(())
@@ -41,7 +41,7 @@ pub fn extract_word_position_docids<R: io::Read + io::Seek>(
             key_buffer.extend_from_slice(word_bytes);
             key_buffer.extend_from_slice(&position.to_be_bytes());

-            word_position_docids_sorter.insert(&key_buffer, &document_id.to_ne_bytes())?;
+            word_position_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?;
         }
     }

@@ -248,7 +248,7 @@ impl<'a, 'i> Transform<'a, 'i> {
                     skip_insertion = true;
                 } else {
                     // we associate the base document with the new key, everything will get merged later.
-                    self.original_sorter.insert(&docid.to_be_bytes(), base_obkv)?;
+                    self.original_sorter.insert(docid.to_be_bytes(), base_obkv)?;
                     match self.flatten_from_fields_ids_map(KvReader::new(base_obkv))? {
                         Some(buffer) => {
                             self.flattened_sorter.insert(docid.to_be_bytes(), &buffer)?
@@ -261,7 +261,7 @@ impl<'a, 'i> Transform<'a, 'i> {
             if !skip_insertion {
                 self.new_documents_ids.insert(docid);
                 // We use the extracted/generated user id as the key for this document.
-                self.original_sorter.insert(&docid.to_be_bytes(), obkv_buffer.clone())?;
+                self.original_sorter.insert(docid.to_be_bytes(), obkv_buffer.clone())?;

                 match self.flatten_from_fields_ids_map(KvReader::new(&obkv_buffer))? {
                     Some(buffer) => self.flattened_sorter.insert(docid.to_be_bytes(), &buffer)?,
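A detail worth noting across these two `Transform` hunks: keys use `to_be_bytes()` while the extractor values above used `to_ne_bytes()`. That split makes sense because big-endian bytes compare lexicographically in the same order as the integers they encode, which is what a sorted key space needs; values are never compared, so native-endian is fine there. A quick self-contained check:

```rust
fn main() {
    let (a, b): (u32, u32) = (1, 256);

    // Big-endian keys sort byte-wise in numeric order:
    // [0, 0, 0, 1] < [0, 0, 1, 0]
    assert!(a.to_be_bytes() < b.to_be_bytes());

    // On a little-endian machine the native encoding inverts the order:
    // a.to_ne_bytes() == [1, 0, 0, 0], b.to_ne_bytes() == [0, 1, 0, 0].
}
```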