Mirror of https://github.com/meilisearch/meilisearch.git — synced 2025-01-18 08:48:32 +08:00
Merge #673

673: Add clippy job r=ManyTheFish a=unvalley

# Pull Request

## Related issue
Fixes #231

## What does this PR do?
- fix some remaining clippy errors
- add a clippy job to CI (I set `nightly` as the toolchain)

## PR checklist
Please check if your PR fulfills the following requirements:
- [x] Does this PR fix an existing issue, or have you listed the changes applied in the PR description (and why they are needed)?
- [x] Have you read the contributing guidelines?
- [x] Have you made sure that the title is accurate and descriptive of the changes?

Co-authored-by: unvalley <kirohi.code@gmail.com>
commit cf76ec7b37

.github/workflows/rust.yml (18 changes)
@@ -48,6 +48,24 @@ jobs:
           command: test
           args: --release --all
 
+  clippy:
+    name: Run Clippy
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: nightly
+          override: true
+          components: clippy
+      - name: Cache dependencies
+        uses: Swatinem/rust-cache@v2.0.0
+      - name: Run cargo clippy
+        uses: actions-rs/cargo@v1
+        with:
+          command: clippy
+
   fmt:
     name: Run Rustfmt
     runs-on: ubuntu-20.04
@@ -2,6 +2,7 @@ status = [
     'Tests on ubuntu-20.04',
     'Tests on macos-latest',
     'Tests on windows-latest',
+    'Run Clippy',
     'Run Rustfmt',
 ]
 # 3 hours timeout
@@ -431,20 +431,17 @@ pub fn resolve_phrase(ctx: &dyn Context, phrase: &[Option<String>]) -> Result<Ro
     for win in phrase.windows(winsize) {
         // Get all the documents with the matching distance for each word pairs.
         let mut bitmaps = Vec::with_capacity(winsize.pow(2));
-        for (offset, s1) in win.iter().enumerate().filter_map(|(index, word)| {
-            if let Some(word) = word {
-                Some((index, word))
-            } else {
-                None
-            }
-        }) {
-            for (dist, s2) in win.iter().skip(offset + 1).enumerate().filter_map(|(index, word)| {
-                if let Some(word) = word {
-                    Some((index, word))
-                } else {
-                    None
-                }
-            }) {
+        for (offset, s1) in win
+            .iter()
+            .enumerate()
+            .filter_map(|(index, word)| word.as_ref().map(|word| (index, word)))
+        {
+            for (dist, s2) in win
+                .iter()
+                .skip(offset + 1)
+                .enumerate()
+                .filter_map(|(index, word)| word.as_ref().map(|word| (index, word)))
+            {
                 if dist == 0 {
                     match ctx.word_pair_proximity_docids(s1, s2, 1)? {
                         Some(m) => bitmaps.push(m),
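The hunk above collapses an `if let Some(..) { Some(..) } else { None }` closure into `Option::as_ref().map(..)`, which is the shape Clippy's `manual_map`-style lints point at. A minimal, self-contained sketch of the same pattern — the `indexed_words` helper and its data are illustrative, not milli code:

```rust
/// Pair each populated slot with its index, skipping `None` slots.
/// Before: `.filter_map(|(i, w)| if let Some(w) = w { Some((i, w)) } else { None })`
/// After:  `.filter_map(|(i, w)| w.as_ref().map(|w| (i, w)))`
fn indexed_words(win: &[Option<String>]) -> Vec<(usize, &str)> {
    win.iter()
        .enumerate()
        .filter_map(|(index, word)| word.as_ref().map(|word| (index, word.as_str())))
        .collect()
}

fn main() {
    let win = [Some("quick".to_string()), None, Some("fox".to_string())];
    // Only the populated slots survive, paired with their original index.
    assert_eq!(indexed_words(&win), vec![(0, "quick"), (2, "fox")]);
}
```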
@@ -488,7 +488,7 @@ fn resolve_plane_sweep_candidates(
                 }
                 // make a consecutive plane-sweep on the subgroup of words.
                 let mut subgroup = Vec::with_capacity(words.len());
-                for word in words.into_iter().map(|w| w.as_deref().unwrap()) {
+                for word in words.iter().map(|w| w.as_deref().unwrap()) {
                     match words_positions.get(word) {
                         Some(positions) => {
                             subgroup.push(positions.iter().map(|p| (p, 0, p)).collect())
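Switching from `into_iter()` to `iter()` here iterates by reference instead of consuming the collection; when the elements are only borrowed anyway (as with `as_deref()` above), Clippy tends to flag the consuming form. A small sketch of the difference, with invented names:

```rust
fn main() {
    let words: Vec<Option<String>> = vec![Some("plane".into()), Some("sweep".into())];

    // Iterate by reference: each `w` is `&Option<String>`, `as_deref()` yields
    // `Option<&str>`, and `words` is not moved by the loop.
    for word in words.iter().map(|w| w.as_deref().unwrap()) {
        println!("{word}");
    }

    // Still usable here; `into_iter()` in the loop above would have consumed it.
    assert_eq!(words.len(), 2);
}
```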
@@ -225,7 +225,7 @@ fn bytes_to_highlight(source: &str, target: &str) -> usize {
     for (col, char_t) in target.chars().enumerate() {
         let col = col + 1;
         let last_match_row = *last_row.get(&char_t).unwrap_or(&0);
-        let cost = if char_s == char_t { 0 } else { 1 };
+        let cost = usize::from(char_s != char_t);
 
         let dist_add = matrix[(row, col + 1)] + 1;
         let dist_del = matrix[(row + 1, col)] + 1;
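The substitution cost in the distance matrix above goes from an `if`/`else` returning `0`/`1` to `usize::from(bool)`, relying on the standard `From<bool> for usize` impl (`false` → 0, `true` → 1); this is the rewrite Clippy's `bool_to_int_with_if` lint suggests. A standalone sketch:

```rust
fn substitution_cost(a: char, b: char) -> usize {
    // `From<bool> for usize` maps false -> 0 and true -> 1,
    // so this is equivalent to `if a == b { 0 } else { 1 }`.
    usize::from(a != b)
}

fn main() {
    assert_eq!(substitution_cost('a', 'a'), 0);
    assert_eq!(substitution_cost('a', 'b'), 1);
}
```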
@@ -589,11 +589,8 @@ fn create_matching_words(
             PrimitiveQueryPart::Phrase(words) => {
                 let ids: Vec<_> =
                     (0..words.len()).into_iter().map(|i| id + i as PrimitiveWordId).collect();
-                let words = words
-                    .into_iter()
-                    .filter_map(|w| w)
-                    .map(|w| MatchingWord::new(w, 0, false))
-                    .collect();
+                let words =
+                    words.into_iter().flatten().map(|w| MatchingWord::new(w, 0, false)).collect();
                 matching_words.push((words, ids));
             }
         }
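`filter_map(|w| w)` over an iterator of `Option`s is an identity filter-map; Clippy suggests the equivalent `flatten()`, which states the intent directly. A minimal sketch that does not use milli's `MatchingWord` type:

```rust
fn main() {
    let words = vec![Some("add".to_string()), None, Some("clippy".to_string())];

    // `flatten()` on an iterator of `Option<T>` keeps only the `Some` values,
    // exactly like `filter_map(|w| w)`, just more explicit about the intent.
    let kept: Vec<String> = words.into_iter().flatten().collect();

    assert_eq!(kept, vec!["add".to_string(), "clippy".to_string()]);
}
```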
@@ -329,7 +329,7 @@ impl FacetsUpdateIncrementalInner {
 
             let key =
                 FacetGroupKey { field_id, level, left_bound: insertion_key.left_bound.clone() };
-            let value = FacetGroupValue { size: size_left as u8, bitmap: values_left };
+            let value = FacetGroupValue { size: size_left, bitmap: values_left };
             (key, value)
         };
 
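This and the next few facet-update hunks drop `as u8` casts on values that are, judging by the fact that the new code compiles without them, already `u8`; such no-op conversions are what Clippy reports as unnecessary casts. A toy sketch with made-up types:

```rust
#[derive(Debug, PartialEq)]
struct GroupValue {
    size: u8,
}

fn make_value(size_left: u8) -> GroupValue {
    // `size_left` is already a `u8`, so `size_left as u8` would be a no-op cast
    // that Clippy flags; using the value directly is equivalent.
    GroupValue { size: size_left }
}

fn main() {
    assert_eq!(make_value(4), GroupValue { size: 4 });
}
```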
@@ -345,7 +345,7 @@ impl FacetsUpdateIncrementalInner {
                 }
 
                 let key = FacetGroupKey { field_id, level, left_bound: right_left_bound.to_vec() };
-                let value = FacetGroupValue { size: size_right as u8, bitmap: values_right };
+                let value = FacetGroupValue { size: size_right, bitmap: values_right };
                 (key, value)
             };
             drop(iter);
@@ -373,8 +373,7 @@ impl FacetsUpdateIncrementalInner {
 
         let highest_level = get_highest_level(txn, self.db, field_id)?;
 
-        let result =
-            self.insert_in_level(txn, field_id, highest_level as u8, facet_value, docids)?;
+        let result = self.insert_in_level(txn, field_id, highest_level, facet_value, docids)?;
         match result {
             InsertionResult::InPlace => return Ok(()),
             InsertionResult::Expand => return Ok(()),
@@ -425,7 +424,7 @@ impl FacetsUpdateIncrementalInner {
                 level: highest_level + 1,
                 left_bound: first_key.unwrap().left_bound,
             };
-            let value = FacetGroupValue { size: group_size as u8, bitmap: values };
+            let value = FacetGroupValue { size: group_size, bitmap: values };
             to_add.push((key.into_owned(), value));
         }
         // now we add the rest of the level, in case its size is > group_size * min_level_size
@@ -584,8 +583,7 @@ impl FacetsUpdateIncrementalInner {
         }
         let highest_level = get_highest_level(txn, self.db, field_id)?;
 
-        let result =
-            self.delete_in_level(txn, field_id, highest_level as u8, facet_value, docids)?;
+        let result = self.delete_in_level(txn, field_id, highest_level, facet_value, docids)?;
         match result {
             DeletionResult::InPlace => return Ok(()),
             DeletionResult::Reduce { .. } => return Ok(()),
@@ -80,7 +80,7 @@ pub fn extract_docid_word_positions<R: io::Read + io::Seek>(
                     .map_err(|_| SerializationError::InvalidNumberSerialization)?;
                 let position = absolute_from_relative_position(field_id, position);
                 docid_word_positions_sorter
-                    .insert(&key_buffer, &position.to_ne_bytes())?;
+                    .insert(&key_buffer, position.to_ne_bytes())?;
             }
         }
     }
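Here and in the following extractor and `Transform` hunks, the extra `&` in front of `*.to_ne_bytes()` / `*.to_be_bytes()` is dropped: the methods already return a small byte array that the sorter's `insert` can accept (presumably through an `AsRef<[u8]>`-style bound), so the borrow is redundant and Clippy flags it as needless. A self-contained sketch with a hypothetical `Sorter::insert` signature, not milli's real one:

```rust
use std::collections::BTreeMap;

/// Hypothetical stand-in for the sorter: `insert` takes anything viewable as bytes.
#[derive(Default)]
struct Sorter {
    entries: BTreeMap<Vec<u8>, Vec<u8>>,
}

impl Sorter {
    fn insert<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) {
        self.entries.insert(key.as_ref().to_vec(), value.as_ref().to_vec());
    }
}

fn main() {
    let mut sorter = Sorter::default();
    let position: u32 = 42;

    // `to_ne_bytes()` returns a `[u8; 4]`, which already satisfies `AsRef<[u8]>`;
    // writing `&position.to_ne_bytes()` would add a borrow Clippy reports as needless.
    sorter.insert("key", position.to_ne_bytes());

    assert_eq!(sorter.entries.len(), 1);
}
```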
@@ -43,7 +43,7 @@ pub fn extract_facet_string_docids<R: io::Read + io::Seek>(
         let key_bytes = FacetGroupKeyCodec::<StrRefCodec>::bytes_encode(&key).unwrap();
 
         // document id is encoded in native-endian because of the CBO roaring bitmap codec
-        facet_string_docids_sorter.insert(&key_bytes, &document_id.to_ne_bytes())?;
+        facet_string_docids_sorter.insert(&key_bytes, document_id.to_ne_bytes())?;
     }
 
     sorter_into_reader(facet_string_docids_sorter, indexer)
@@ -145,7 +145,7 @@ fn document_word_positions_into_sorter(
             key_buffer.push(0);
             key_buffer.extend_from_slice(w2.as_bytes());
 
-            word_pair_proximity_docids_sorter.insert(&key_buffer, &document_id.to_ne_bytes())?;
+            word_pair_proximity_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?;
         }
 
     Ok(())
@@ -41,7 +41,7 @@ pub fn extract_word_position_docids<R: io::Read + io::Seek>(
             key_buffer.extend_from_slice(word_bytes);
             key_buffer.extend_from_slice(&position.to_be_bytes());
 
-            word_position_docids_sorter.insert(&key_buffer, &document_id.to_ne_bytes())?;
+            word_position_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?;
         }
     }
 
@@ -248,7 +248,7 @@ impl<'a, 'i> Transform<'a, 'i> {
                     skip_insertion = true;
                 } else {
                     // we associate the base document with the new key, everything will get merged later.
-                    self.original_sorter.insert(&docid.to_be_bytes(), base_obkv)?;
+                    self.original_sorter.insert(docid.to_be_bytes(), base_obkv)?;
                     match self.flatten_from_fields_ids_map(KvReader::new(base_obkv))? {
                         Some(buffer) => {
                             self.flattened_sorter.insert(docid.to_be_bytes(), &buffer)?
@@ -261,7 +261,7 @@ impl<'a, 'i> Transform<'a, 'i> {
             if !skip_insertion {
                 self.new_documents_ids.insert(docid);
                 // We use the extracted/generated user id as the key for this document.
-                self.original_sorter.insert(&docid.to_be_bytes(), obkv_buffer.clone())?;
+                self.original_sorter.insert(docid.to_be_bytes(), obkv_buffer.clone())?;
 
                 match self.flatten_from_fields_ids_map(KvReader::new(&obkv_buffer))? {
                     Some(buffer) => self.flattened_sorter.insert(docid.to_be_bytes(), &buffer)?,
@@ -36,7 +36,7 @@ impl<'t, 'u, 'i> WordsPrefixesFst<'t, 'u, 'i> {
     /// Default value is `4` bytes. This value must be between 1 and 25 will be clamped
     /// to these bounds, otherwise.
     pub fn max_prefix_length(&mut self, value: usize) -> &mut Self {
-        self.max_prefix_length = value.min(25).max(1); // clamp [1, 25]
+        self.max_prefix_length = value.clamp(1, 25);
        self
     }
 
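`value.min(25).max(1)` and `value.clamp(1, 25)` produce the same result for these bounds, but `clamp` states the intent directly and no longer needs the trailing comment; this matches the suggestion of Clippy's `manual_clamp` lint. A quick sketch:

```rust
fn bounded_prefix_length(value: usize) -> usize {
    // Equivalent to `value.min(25).max(1)`: anything below 1 becomes 1,
    // anything above 25 becomes 25, and values in between pass through.
    value.clamp(1, 25)
}

fn main() {
    assert_eq!(bounded_prefix_length(0), 1);
    assert_eq!(bounded_prefix_length(4), 4);
    assert_eq!(bounded_prefix_length(100), 25);
}
```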