Merge #4054
4054: Update version for the next release (v1.2.1) in Cargo.toml r=curquiza a=meili-bot
⚠️ This PR is automatically generated. Check that the new version is the expected one and that Cargo.lock has been updated before merging.
Co-authored-by: curquiza <curquiza@users.noreply.github.com>
Co-authored-by: ManyTheFish <many@meilisearch.com>
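
A single version bump in the root Cargo.toml fans out to the thirteen workspace crates in Cargo.lock below because of Cargo's workspace package-field inheritance: each member crate takes its version from [workspace.package]. A minimal sketch of that wiring, with an abbreviated member list (the real manifests live in the repository):

# Root Cargo.toml: the one place the release number is bumped.
[workspace]
members = ["meilisearch", "milli"]

[workspace.package]
version = "1.2.1"

# A member crate's Cargo.toml (e.g. milli/Cargo.toml) then inherits it:
# [package]
# name = "milli"
# version.workspace = true

With this layout, the next lockfile-touching cargo command rewrites every member's version in Cargo.lock, which is why the description above asks reviewers to check that Cargo.lock was updated.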
This commit is contained in: commit 75e4fe4e55
Cargo.lock (generated): 26 changed lines
@@ -463,7 +463,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
 
 [[package]]
 name = "benchmarks"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "anyhow",
  "bytes",
@@ -1209,7 +1209,7 @@ dependencies = [
 
 [[package]]
 name = "dump"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "anyhow",
  "big_s",
@@ -1428,7 +1428,7 @@ dependencies = [
 
 [[package]]
 name = "file-store"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "faux",
  "tempfile",
@@ -1450,7 +1450,7 @@ dependencies = [
 
 [[package]]
 name = "filter-parser"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "insta",
  "nom",
@@ -1476,7 +1476,7 @@ dependencies = [
 
 [[package]]
 name = "flatten-serde-json"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "criterion",
  "serde_json",
@@ -1959,7 +1959,7 @@ dependencies = [
 
 [[package]]
 name = "index-scheduler"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "anyhow",
  "big_s",
@@ -2113,7 +2113,7 @@ dependencies = [
 
 [[package]]
 name = "json-depth-checker"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "criterion",
  "serde_json",
@@ -2539,7 +2539,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
 
 [[package]]
 name = "meili-snap"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "insta",
  "md5",
@@ -2548,7 +2548,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "actix-cors",
  "actix-http",
@@ -2636,7 +2636,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-auth"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "base64 0.21.0",
  "enum-iterator",
@@ -2655,7 +2655,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-types"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "actix-web",
  "anyhow",
@@ -2709,7 +2709,7 @@ dependencies = [
 
 [[package]]
 name = "milli"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "big_s",
  "bimap",
@@ -3064,7 +3064,7 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
 
 [[package]]
 name = "permissive-json-pointer"
-version = "1.2.0"
+version = "1.2.1"
 dependencies = [
  "big_s",
  "serde_json",
Cargo.toml

@@ -17,7 +17,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.2.0"
+version = "1.2.1"
 authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
 description = "Meilisearch HTTP server"
 homepage = "https://meilisearch.com"
@@ -223,7 +223,9 @@ impl IndexMap {
         enable_mdb_writemap: bool,
         map_size_growth: usize,
     ) {
-        let Some(index) = self.available.remove(uuid) else { return; };
+        let Some(index) = self.available.remove(uuid) else {
+            return;
+        };
         self.close(*uuid, index, enable_mdb_writemap, map_size_growth);
     }
 
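This hunk and most of the ones that follow are formatting-only changes to Rust let-else statements, which either bind a pattern or take a divergent else branch (return, continue, panic!, and so on). A minimal standalone sketch of the construct, with illustrative names not taken from the diff:

// let-else: bind `n` on success, or take the divergent else branch.
fn first_even(numbers: &[i32]) -> i32 {
    let Some(n) = numbers.iter().find(|n| *n % 2 == 0) else {
        // This branch must diverge; here we leave the function early.
        return -1;
    };
    *n
}

fn main() {
    assert_eq!(first_even(&[1, 3, 4]), 4);
    assert_eq!(first_even(&[1, 3]), -1);
}

The rule visible throughout the diff: when the whole statement fits within the line width, the else block collapses onto one line (else { return None }); otherwise the scrutinee and the else keyword each break onto their own lines.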
@@ -147,9 +147,7 @@ impl Key {
 fn parse_expiration_date(
     string: Option<String>,
 ) -> std::result::Result<Option<OffsetDateTime>, ParseOffsetDateTimeError> {
-    let Some(string) = string else {
-        return Ok(None)
-    };
+    let Some(string) = string else { return Ok(None) };
     let datetime = if let Ok(datetime) = OffsetDateTime::parse(&string, &Rfc3339) {
         datetime
     } else if let Ok(primitive_datetime) = PrimitiveDateTime::parse(
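For context on the function reformatted above: parse_expiration_date takes an optional string, tries it as a full RFC 3339 timestamp, then falls back to more lenient formats. A rough sketch of that shape, assuming the time crate (0.3) with its parsing feature enabled; the fallback format below is illustrative, since the real ones fall outside this hunk:

use time::format_description::well_known::Rfc3339;
use time::{OffsetDateTime, PrimitiveDateTime};

// Hypothetical helper: accept a full RFC 3339 timestamp first, then an
// offset-less date-time that we assume to be UTC.
fn parse_expiration(input: &str) -> Option<OffsetDateTime> {
    if let Ok(datetime) = OffsetDateTime::parse(input, &Rfc3339) {
        return Some(datetime);
    }
    // Illustrative fallback format, parsed at runtime.
    let format =
        time::format_description::parse("[year]-[month]-[day]T[hour]:[minute]:[second]").ok()?;
    let naive = PrimitiveDateTime::parse(input, &format).ok()?;
    Some(naive.assume_utc())
}

fn main() {
    assert!(parse_expiration("2023-06-05T12:00:00Z").is_some());
    assert!(parse_expiration("2023-06-05T12:00:00").is_some());
}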
@@ -125,7 +125,12 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
             continue;
         }
 
-        let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(ctx, logger, &ranking_rule_universes[cur_ranking_rule_index])? else {
+        let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(
+            ctx,
+            logger,
+            &ranking_rule_universes[cur_ranking_rule_index],
+        )?
+        else {
             back!();
             continue;
         };
@@ -193,9 +193,10 @@ impl<'ctx, G: RankingRuleGraphTrait> RankingRule<'ctx, QueryGraph> for GraphBase
             .all_costs
             .get(state.graph.query_graph.root_node)
             .iter()
-            .find(|c| **c >= state.cur_cost) else {
-            self.state = None;
-            return Ok(None);
+            .find(|c| **c >= state.cur_cost)
+        else {
+            self.state = None;
+            return Ok(None);
         };
         state.cur_cost = cost + 1;
 
@@ -80,7 +80,9 @@ impl MatchingWords {
                 let word = self.word_interner.get(*word);
                 // if the word is a prefix we match using starts_with.
                 if located_words.is_prefix && token.lemma().starts_with(word) {
-                    let Some((char_index, c)) = word.char_indices().take(located_words.original_char_count).last() else {
+                    let Some((char_index, c)) =
+                        word.char_indices().take(located_words.original_char_count).last()
+                    else {
                         continue;
                     };
                     let prefix_length = char_index + c.len_utf8();
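The statement rewrapped here computes the byte length of a prefix from a character count, so that multi-byte UTF-8 characters are never sliced in half. A self-contained sketch of the same computation, using a hypothetical helper name:

// Byte length of the first `char_count` characters of `word`:
// take the last of those chars and add its UTF-8 width to its byte offset.
fn prefix_byte_len(word: &str, char_count: usize) -> Option<usize> {
    let (char_index, c) = word.char_indices().take(char_count).last()?;
    Some(char_index + c.len_utf8())
}

fn main() {
    // 'h' is 1 byte and 'é' is 2 bytes, so the 2-char prefix spans 3 bytes.
    assert_eq!(prefix_byte_len("héllo", 2), Some(3));
    assert_eq!(&"héllo"[..3], "hé");
}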
@@ -176,9 +176,7 @@ impl QueryTermSubset
 
     pub fn use_prefix_db(&self, ctx: &SearchContext) -> Option<Word> {
         let original = ctx.term_interner.get(self.original);
-        let Some(use_prefix_db) = original.zero_typo.use_prefix_db else {
-            return None
-        };
+        let Some(use_prefix_db) = original.zero_typo.use_prefix_db else { return None };
         let word = match &self.zero_typo_subset {
             NTypoTermSubset::All => Some(use_prefix_db),
             NTypoTermSubset::Subset { words, phrases: _ } => {
@@ -264,13 +262,15 @@ impl QueryTermSubset
 
         match &self.one_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo
+                else {
                     panic!()
                 };
                 result.extend(one_typo.iter().copied().map(Word::Derived))
             }
             NTypoTermSubset::Subset { words, phrases: _ } => {
-                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo
+                else {
                     panic!()
                 };
                 result.extend(one_typo.intersection(words).copied().map(Word::Derived));
@@ -280,15 +280,11 @@ impl QueryTermSubset
 
         match &self.two_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else {
-                    panic!()
-                };
+                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else { panic!() };
                 result.extend(two_typos.iter().copied().map(Word::Derived));
             }
             NTypoTermSubset::Subset { words, phrases: _ } => {
-                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else {
-                    panic!()
-                };
+                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else { panic!() };
                 result.extend(two_typos.intersection(words).copied().map(Word::Derived));
             }
             NTypoTermSubset::Nothing => {}
@@ -312,13 +308,15 @@ impl QueryTermSubset
 
         match &self.one_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo
+                else {
                     panic!();
                 };
                 result.extend(split_words.iter().copied());
             }
             NTypoTermSubset::Subset { phrases, .. } => {
-                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo
+                else {
                     panic!();
                 };
                 if let Some(split_words) = split_words {
@@ -18,7 +18,7 @@ pub fn build_edges(
         return Ok(vec![(
             (right_ngram_length - 1) as u32,
             conditions_interner.insert(ProximityCondition::Term { term: right_term.clone() }),
-        )])
+        )]);
     };
 
     if left_term.positions.end() + 1 != *right_term.positions.start() {
@@ -2045,10 +2045,11 @@ mod tests {
             "branch_id_number": 0
         }]};
 
-        let Err(Error::UserError(UserError::MultiplePrimaryKeyCandidatesFound {
-            candidates
-        })) =
-            index.add_documents(doc_multiple_ids) else { panic!("Expected Error::UserError(MultiplePrimaryKeyCandidatesFound)") };
+        let Err(Error::UserError(UserError::MultiplePrimaryKeyCandidatesFound { candidates })) =
+            index.add_documents(doc_multiple_ids)
+        else {
+            panic!("Expected Error::UserError(MultiplePrimaryKeyCandidatesFound)")
+        };
 
         assert_eq!(candidates, vec![S("id"), S("project_id"), S("public_uid"),]);
 