Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-01-30 23:13:09 +08:00)

resolve merge conflicts

Commit 4fb5c39b92
Cargo.lock (generated), 39 changed lines
@@ -503,7 +503,7 @@ source = "git+https://github.com/meilisearch/bbqueue#cbb87cc707b5af415ef203bdaf2

[[package]]
name = "benchmarks"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"anyhow",
"bumpalo",

@@ -694,7 +694,7 @@ dependencies = [

[[package]]
name = "build-info"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"anyhow",
"time",

@@ -1671,7 +1671,7 @@ dependencies = [

[[package]]
name = "dump"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"anyhow",
"big_s",

@@ -1873,7 +1873,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"

[[package]]
name = "file-store"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"tempfile",
"thiserror 2.0.9",

@@ -1895,7 +1895,7 @@ dependencies = [

[[package]]
name = "filter-parser"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"insta",
"nom",

@@ -1915,7 +1915,7 @@ dependencies = [

[[package]]
name = "flatten-serde-json"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"criterion",
"serde_json",

@@ -2054,7 +2054,7 @@ dependencies = [

[[package]]
name = "fuzzers"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"arbitrary",
"bumpalo",

@@ -2743,7 +2743,7 @@ checksum = "206ca75c9c03ba3d4ace2460e57b189f39f43de612c2f85836e65c929701bb2d"

[[package]]
name = "index-scheduler"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"anyhow",
"arroy 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -2950,7 +2950,7 @@ dependencies = [

[[package]]
name = "json-depth-checker"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"criterion",
"serde_json",

@@ -3569,7 +3569,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"

[[package]]
name = "meili-snap"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"insta",
"md5",

@@ -3578,7 +3578,7 @@ dependencies = [

[[package]]
name = "meilisearch"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"actix-cors",
"actix-http",

@@ -3670,7 +3670,7 @@ dependencies = [

[[package]]
name = "meilisearch-auth"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"base64 0.22.1",
"enum-iterator",

@@ -3689,7 +3689,7 @@ dependencies = [

[[package]]
name = "meilisearch-types"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"actix-web",
"anyhow",

@@ -3723,7 +3723,7 @@ dependencies = [

[[package]]
name = "meilitool"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"anyhow",
"arroy 0.5.0 (git+https://github.com/meilisearch/arroy/?tag=DO-NOT-DELETE-upgrade-v04-to-v05)",

@@ -3758,7 +3758,7 @@ dependencies = [

[[package]]
name = "milli"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"allocator-api2",
"arroy 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -3776,6 +3776,7 @@ dependencies = [
"candle-transformers",
"charabia",
"concat-arrays",
"convert_case 0.6.0",
"crossbeam-channel",
"csv",
"deserr",

@@ -4269,7 +4270,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"

[[package]]
name = "permissive-json-pointer"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"big_s",
"serde_json",

@@ -6231,9 +6232,9 @@ dependencies = [

[[package]]
name = "utoipa-scalar"
version = "0.2.1"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "088e93bf19f6bd06e0aacb02ca432b3c5a449c4aec2e4aa9fc333a667f2b2c55"
checksum = "59559e1509172f6b26c1cdbc7247c4ddd1ac6560fe94b584f81ee489b141f719"
dependencies = [
"actix-web",
"serde",

@@ -6846,7 +6847,7 @@ dependencies = [

[[package]]
name = "xtask"
version = "1.12.2"
version = "1.13.0"
dependencies = [
"anyhow",
"build-info",
@@ -22,7 +22,7 @@ members = [
]

[workspace.package]
version = "1.12.2"
version = "1.13.0"
authors = [
"Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>",
@@ -38,7 +38,7 @@ fn setup_index() -> Index {
let mut options = EnvOpenOptions::new();
options.map_size(100 * 1024 * 1024 * 1024); // 100 GB
options.max_readers(100);
Index::new(options, path).unwrap()
Index::new(options, path, true).unwrap()
}

fn setup_settings<'t>(

@@ -68,7 +68,7 @@ pub fn base_setup(conf: &Conf) -> Index {
let mut options = EnvOpenOptions::new();
options.map_size(100 * 1024 * 1024 * 1024); // 100 GB
options.max_readers(100);
let index = Index::new(options, conf.database_name).unwrap();
let index = Index::new(options, conf.database_name, true).unwrap();

let config = IndexerConfig::default();
let mut wtxn = index.write_txn().unwrap();
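Both benchmark call sites above now pass a third argument to `Index::new`. A minimal sketch of the adapted caller, assuming (this is not stated in the diff) that the new trailing boolean tells milli whether the index may be created when it does not exist yet:

use milli::heed::EnvOpenOptions;
use milli::Index;

fn open_bench_index(path: &std::path::Path) -> Index {
    let mut options = EnvOpenOptions::new();
    options.map_size(100 * 1024 * 1024 * 1024); // 100 GB, as in the benchmarks above
    options.max_readers(100);
    // Assumption: `true` means "create the index if it is missing".
    Index::new(options, path, true).unwrap()
}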
@@ -141,6 +141,9 @@ pub enum KindDump {
instance_uid: Option<InstanceUid>,
},
SnapshotCreation,
UpgradeDatabase {
from: (u32, u32, u32),
},
}

impl From<Task> for TaskDump {

@@ -210,6 +213,9 @@ impl From<KindWithContent> for KindDump {
KindDump::DumpCreation { keys, instance_uid }
}
KindWithContent::SnapshotCreation => KindDump::SnapshotCreation,
KindWithContent::UpgradeDatabase { from: version } => {
KindDump::UpgradeDatabase { from: version }
}
}
}
}

@@ -458,7 +464,7 @@ pub(crate) mod test {
}

fn create_test_features() -> RuntimeTogglableFeatures {
RuntimeTogglableFeatures { vector_store: true, ..Default::default() }
RuntimeTogglableFeatures::default()
}

#[test]

@@ -327,10 +327,7 @@ pub(crate) mod test {
}
}

assert_eq!(
dump.features().unwrap().unwrap(),
RuntimeTogglableFeatures { vector_store: true, ..Default::default() }
);
assert_eq!(dump.features().unwrap().unwrap(), RuntimeTogglableFeatures::default());
}

#[test]

@@ -373,10 +370,7 @@ pub(crate) mod test {

assert_eq!(test.documents().unwrap().count(), 1);

assert_eq!(
dump.features().unwrap().unwrap(),
RuntimeTogglableFeatures { vector_store: true, ..Default::default() }
);
assert_eq!(dump.features().unwrap().unwrap(), RuntimeTogglableFeatures::default());
}

#[test]
@@ -63,7 +63,7 @@ fn main() {
Some(path) => TempDir::new_in(path).unwrap(),
None => TempDir::new().unwrap(),
};
let index = Index::new(options, tempdir.path()).unwrap();
let index = Index::new(options, tempdir.path(), true).unwrap();
let indexer_config = IndexerConfig::default();

std::thread::scope(|s| {
@@ -1,8 +1,9 @@
use std::collections::HashMap;
use std::io;

use dump::{KindDump, TaskDump, UpdateFile};
use meilisearch_types::heed::RwTxn;
use meilisearch_types::milli::documents::DocumentsBatchBuilder;
use meilisearch_types::milli;
use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use uuid::Uuid;

@@ -39,14 +40,19 @@ impl<'a> Dump<'a> {
task: TaskDump,
content_file: Option<Box<UpdateFile>>,
) -> Result<Task> {
let task_has_no_docs = matches!(task.kind, KindDump::DocumentImport { documents_count, .. } if documents_count == 0);

let content_uuid = match content_file {
Some(content_file) if task.status == Status::Enqueued => {
let (uuid, mut file) = self.index_scheduler.queue.create_update_file(false)?;
let mut builder = DocumentsBatchBuilder::new(&mut file);
let (uuid, file) = self.index_scheduler.queue.create_update_file(false)?;
let mut writer = io::BufWriter::new(file);
for doc in content_file {
builder.append_json_object(&doc?)?;
let doc = doc?;
serde_json::to_writer(&mut writer, &doc).map_err(|e| {
Error::from_milli(milli::InternalError::SerdeJson(e).into(), None)
})?;
}
builder.into_inner()?;
let file = writer.into_inner().map_err(|e| e.into_error())?;
file.persist()?;

Some(uuid)

@@ -54,6 +60,12 @@ impl<'a> Dump<'a> {
// If the task isn't `Enqueued` then just generate a recognisable `Uuid`
// in case we try to open it later.
_ if task.status != Status::Enqueued => Some(Uuid::nil()),
None if task.status == Status::Enqueued && task_has_no_docs => {
let (uuid, file) = self.index_scheduler.queue.create_update_file(false)?;
file.persist()?;

Some(uuid)
}
_ => None,
};

@@ -132,6 +144,7 @@ impl<'a> Dump<'a> {
KindWithContent::DumpCreation { keys, instance_uid }
}
KindDump::SnapshotCreation => KindWithContent::SnapshotCreation,
KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from },
},
};
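The dump-import path above drops `DocumentsBatchBuilder` and instead streams each dumped document straight into the update file with `serde_json`. A rough, self-contained sketch of that idea, detached from the scheduler types (the function and its signature are illustrative only):

use std::io::{self, Write};

fn write_update_file<W: Write>(file: W, docs: &[serde_json::Value]) -> io::Result<()> {
    let mut writer = io::BufWriter::new(file);
    for doc in docs {
        // One JSON object per document, written back to back.
        serde_json::to_writer(&mut writer, doc)?;
    }
    // `into_inner` flushes the buffer and surfaces any pending write error.
    writer.into_inner().map_err(|e| e.into_error())?;
    Ok(())
}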
@@ -147,7 +147,9 @@ pub enum Error {
#[error("Corrupted task queue.")]
CorruptedTaskQueue,
#[error(transparent)]
TaskDatabaseUpdate(Box<Self>),
DatabaseUpgrade(Box<Self>),
#[error(transparent)]
UnrecoverableError(Box<Self>),
#[error(transparent)]
HeedTransaction(heed::Error),

@@ -202,7 +204,8 @@ impl Error {
| Error::Anyhow(_) => true,
Error::CreateBatch(_)
| Error::CorruptedTaskQueue
| Error::TaskDatabaseUpdate(_)
| Error::DatabaseUpgrade(_)
| Error::UnrecoverableError(_)
| Error::HeedTransaction(_) => false,
#[cfg(test)]
Error::PlannedFailure => false,

@@ -266,7 +269,8 @@ impl ErrorCode for Error {
Error::Anyhow(_) => Code::Internal,
Error::CorruptedTaskQueue => Code::Internal,
Error::CorruptedDump => Code::Internal,
Error::TaskDatabaseUpdate(_) => Code::Internal,
Error::DatabaseUpgrade(_) => Code::Internal,
Error::UnrecoverableError(_) => Code::Internal,
Error::CreateBatch(_) => Code::Internal,

// This one should never be seen by the end user
@@ -7,7 +7,12 @@ use meilisearch_types::heed::{Database, Env, RwTxn};
use crate::error::FeatureNotEnabledError;
use crate::Result;

const EXPERIMENTAL_FEATURES: &str = "experimental-features";
/// The number of database used by features
const NUMBER_OF_DATABASES: u32 = 1;
/// Database const names for the `FeatureData`.
mod db_name {
pub const EXPERIMENTAL_FEATURES: &str = "experimental-features";
}

#[derive(Clone)]
pub(crate) struct FeatureData {

@@ -56,19 +61,6 @@ impl RoFeatures {
}
}

pub fn check_vector(&self, disabled_action: &'static str) -> Result<()> {
if self.runtime.vector_store {
Ok(())
} else {
Err(FeatureNotEnabledError {
disabled_action,
feature: "vector store",
issue_link: "https://github.com/meilisearch/product/discussions/677",
}
.into())
}
}

pub fn check_edit_documents_by_function(&self, disabled_action: &'static str) -> Result<()> {
if self.runtime.edit_documents_by_function {
Ok(())

@@ -97,14 +89,20 @@ impl RoFeatures {
}

impl FeatureData {
pub fn new(env: &Env, instance_features: InstanceTogglableFeatures) -> Result<Self> {
let mut wtxn = env.write_txn()?;
let runtime_features_db = env.create_database(&mut wtxn, Some(EXPERIMENTAL_FEATURES))?;
wtxn.commit()?;
pub(crate) const fn nb_db() -> u32 {
NUMBER_OF_DATABASES
}

pub fn new(
env: &Env,
wtxn: &mut RwTxn,
instance_features: InstanceTogglableFeatures,
) -> Result<Self> {
let runtime_features_db =
env.create_database(wtxn, Some(db_name::EXPERIMENTAL_FEATURES))?;

let txn = env.read_txn()?;
let persisted_features: RuntimeTogglableFeatures =
runtime_features_db.get(&txn, EXPERIMENTAL_FEATURES)?.unwrap_or_default();
runtime_features_db.get(wtxn, db_name::EXPERIMENTAL_FEATURES)?.unwrap_or_default();
let InstanceTogglableFeatures { metrics, logs_route, contains_filter } = instance_features;
let runtime = Arc::new(RwLock::new(RuntimeTogglableFeatures {
metrics: metrics || persisted_features.metrics,

@@ -121,7 +119,7 @@ impl FeatureData {
mut wtxn: RwTxn,
features: RuntimeTogglableFeatures,
) -> Result<()> {
self.persisted.put(&mut wtxn, EXPERIMENTAL_FEATURES, &features)?;
self.persisted.put(&mut wtxn, db_name::EXPERIMENTAL_FEATURES, &features)?;
wtxn.commit()?;

// safe to unwrap, the lock will only fail if:
@@ -102,7 +102,7 @@ impl ReopenableIndex {
return Ok(());
}
map.unavailable.remove(&self.uuid);
map.create(&self.uuid, path, None, self.enable_mdb_writemap, self.map_size)?;
map.create(&self.uuid, path, None, self.enable_mdb_writemap, self.map_size, false)?;
}
Ok(())
}

@@ -171,11 +171,12 @@ impl IndexMap {
date: Option<(OffsetDateTime, OffsetDateTime)>,
enable_mdb_writemap: bool,
map_size: usize,
creation: bool,
) -> Result<Index> {
if !matches!(self.get_unavailable(uuid), Missing) {
panic!("Attempt to open an index that was unavailable");
}
let index = create_or_open_index(path, date, enable_mdb_writemap, map_size)?;
let index = create_or_open_index(path, date, enable_mdb_writemap, map_size, creation)?;
match self.available.insert(*uuid, index.clone()) {
InsertionOutcome::InsertedNew => (),
InsertionOutcome::Evicted(evicted_uuid, evicted_index) => {

@@ -299,6 +300,7 @@ fn create_or_open_index(
date: Option<(OffsetDateTime, OffsetDateTime)>,
enable_mdb_writemap: bool,
map_size: usize,
creation: bool,
) -> Result<Index> {
let mut options = EnvOpenOptions::new();
options.map_size(clamp_to_page_size(map_size));

@@ -308,9 +310,9 @@ fn create_or_open_index(
}

if let Some((created, updated)) = date {
Ok(Index::new_with_creation_dates(options, path, created, updated)?)
Ok(Index::new_with_creation_dates(options, path, created, updated, creation)?)
} else {
Ok(Index::new(options, path)?)
Ok(Index::new(options, path, creation)?)
}
}
@@ -20,8 +20,13 @@ use crate::{Error, IndexBudget, IndexSchedulerOptions, Result};

mod index_map;

const INDEX_MAPPING: &str = "index-mapping";
const INDEX_STATS: &str = "index-stats";
/// The number of database used by index mapper
const NUMBER_OF_DATABASES: u32 = 2;
/// Database const names for the `IndexMapper`.
mod db_name {
pub const INDEX_MAPPING: &str = "index-mapping";
pub const INDEX_STATS: &str = "index-stats";
}

/// Structure managing meilisearch's indexes.
///

@@ -138,6 +143,10 @@ impl IndexStats {
}

impl IndexMapper {
pub(crate) const fn nb_db() -> u32 {
NUMBER_OF_DATABASES
}

pub fn new(
env: &Env,
wtxn: &mut RwTxn,

@@ -146,8 +155,8 @@ impl IndexMapper {
) -> Result<Self> {
Ok(Self {
index_map: Arc::new(RwLock::new(IndexMap::new(budget.index_count))),
index_mapping: env.create_database(wtxn, Some(INDEX_MAPPING))?,
index_stats: env.create_database(wtxn, Some(INDEX_STATS))?,
index_mapping: env.create_database(wtxn, Some(db_name::INDEX_MAPPING))?,
index_stats: env.create_database(wtxn, Some(db_name::INDEX_STATS))?,
base_path: options.indexes_path.clone(),
index_base_map_size: budget.map_size,
index_growth_amount: options.index_growth_amount,

@@ -189,6 +198,7 @@ impl IndexMapper {
date,
self.enable_mdb_writemap,
self.index_base_map_size,
true,
)
.map_err(|e| Error::from_milli(e, Some(uuid.to_string())))?;
let index_rtxn = index.read_txn()?;

@@ -387,6 +397,7 @@ impl IndexMapper {
None,
self.enable_mdb_writemap,
self.index_base_map_size,
false,
)
.map_err(|e| Error::from_milli(e, Some(uuid.to_string())))?;
}
@@ -6,6 +6,7 @@ use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
use meilisearch_types::heed::{Database, RoTxn};
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
use meilisearch_types::tasks::{Details, Kind, Status, Task};
use meilisearch_types::versioning;
use roaring::RoaringBitmap;

use crate::index_mapper::IndexMapper;

@@ -21,6 +22,7 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
cleanup_enabled: _,
processing_tasks,
env,
version,
queue,
scheduler,

@@ -38,6 +40,16 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {

let mut snap = String::new();

let indx_sched_version = version.get_version(&rtxn).unwrap();
let latest_version = (
versioning::VERSION_MAJOR.parse().unwrap(),
versioning::VERSION_MINOR.parse().unwrap(),
versioning::VERSION_PATCH.parse().unwrap(),
);
if indx_sched_version != Some(latest_version) {
snap.push_str(&format!("index scheduler running on version {indx_sched_version:?}\n"));
}

let processing = processing_tasks.read().unwrap().clone();
snap.push_str(&format!("### Autobatching Enabled = {}\n", scheduler.autobatching_enabled));
snap.push_str(&format!(

@@ -279,6 +291,9 @@ fn snapshot_details(d: &Details) -> String {
Details::IndexSwap { swaps } => {
format!("{{ swaps: {swaps:?} }}")
}
Details::UpgradeDatabase { from, to } => {
format!("{{ from: {from:?}, to: {to:?} }}")
}
}
}
@@ -30,8 +30,10 @@ mod queue;
mod scheduler;
#[cfg(test)]
mod test_utils;
pub mod upgrade;
mod utils;
pub mod uuid_codec;
mod versioning;

pub type Result<T, E = Error> = std::result::Result<T, E>;
pub type TaskId = u32;

@@ -65,6 +67,7 @@ use queue::Queue;
use roaring::RoaringBitmap;
use scheduler::Scheduler;
use time::OffsetDateTime;
use versioning::Versioning;

use crate::index_mapper::IndexMapper;
use crate::utils::clamp_to_page_size;

@@ -120,6 +123,8 @@ pub struct IndexSchedulerOptions {
pub batched_tasks_size_limit: u64,
/// The experimental features enabled for this instance.
pub instance_features: InstanceTogglableFeatures,
/// The experimental features enabled for this instance.
pub auto_upgrade: bool,
}

/// Structure which holds meilisearch's indexes and schedules the tasks

@@ -131,17 +136,18 @@ pub struct IndexScheduler {
/// The list of tasks currently processing
pub(crate) processing_tasks: Arc<RwLock<ProcessingTasks>>,

/// A database containing only the version of the index-scheduler
pub version: versioning::Versioning,
/// The queue containing both the tasks and the batches.
pub queue: queue::Queue,

pub scheduler: scheduler::Scheduler,

/// In charge of creating, opening, storing and returning indexes.
pub(crate) index_mapper: IndexMapper,

/// In charge of fetching and setting the status of experimental features.
features: features::FeatureData,

/// Everything related to the processing of the tasks
pub scheduler: scheduler::Scheduler,

/// Whether we should automatically cleanup the task queue or not.
pub(crate) cleanup_enabled: bool,

@@ -176,6 +182,7 @@ impl IndexScheduler {
IndexScheduler {
env: self.env.clone(),
processing_tasks: self.processing_tasks.clone(),
version: self.version.clone(),
queue: self.queue.private_clone(),
scheduler: self.scheduler.private_clone(),

@@ -194,10 +201,15 @@ impl IndexScheduler {
}
}

pub(crate) const fn nb_db() -> u32 {
Versioning::nb_db() + Queue::nb_db() + IndexMapper::nb_db() + features::FeatureData::nb_db()
}

/// Create an index scheduler and start its run loop.
#[allow(private_interfaces)] // because test_utils is private
pub fn new(
options: IndexSchedulerOptions,
from_db_version: (u32, u32, u32),
#[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
#[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
) -> Result<Self> {

@@ -229,14 +241,16 @@ impl IndexScheduler {

let env = unsafe {
heed::EnvOpenOptions::new()
.max_dbs(19)
.max_dbs(Self::nb_db())
.map_size(budget.task_db_size)
.open(&options.tasks_path)
}?;

let features = features::FeatureData::new(&env, options.instance_features)?;
// We **must** starts by upgrading the version because it'll also upgrade the required database before we can open them
let version = versioning::Versioning::new(&env, from_db_version)?;

let mut wtxn = env.write_txn()?;
let features = features::FeatureData::new(&env, &mut wtxn, options.instance_features)?;
let queue = Queue::new(&env, &mut wtxn, &options)?;
let index_mapper = IndexMapper::new(&env, &mut wtxn, &options, budget)?;
wtxn.commit()?;

@@ -244,6 +258,7 @@ impl IndexScheduler {
// allow unreachable_code to get rids of the warning in the case of a test build.
let this = Self {
processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
version,
queue,
scheduler: Scheduler::new(&options),

@@ -366,6 +381,7 @@ impl IndexScheduler {
match ret {
Ok(Ok(TickOutcome::TickAgain(_))) => (),
Ok(Ok(TickOutcome::WaitForSignal)) => run.scheduler.wake_up.wait(),
Ok(Ok(TickOutcome::StopProcessingForever)) => break,
Ok(Err(e)) => {
tracing::error!("{e}");
// Wait one second when an irrecoverable error occurs.

@@ -813,6 +829,8 @@ pub enum TickOutcome {
TickAgain(u64),
/// The scheduler should wait for an external signal before attempting another `tick`.
WaitForSignal,
/// The scheduler exits the run-loop and will never process tasks again
StopProcessingForever,
}

/// How many indexes we can afford to have open simultaneously.
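`IndexScheduler::nb_db()` above replaces the hard-coded `.max_dbs(19)` with a sum of per-component constants. A small stand-alone illustration of the pattern; the component names and counts below are placeholders, not necessarily the real values:

struct FeatureData;
struct IndexMapper;
struct Queue;

impl FeatureData { pub const fn nb_db() -> u32 { 1 } }
impl IndexMapper { pub const fn nb_db() -> u32 { 2 } }  // index-mapping + index-stats
impl Queue { pub const fn nb_db() -> u32 { 16 } }       // its own db + tasks + batches

// The scheduler derives the LMDB `max_dbs` budget from the sum, so adding a
// database to one component automatically grows the environment's limit.
pub const fn nb_db() -> u32 {
    FeatureData::nb_db() + IndexMapper::nb_db() + Queue::nb_db()
}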
@@ -1,8 +1,6 @@
use std::borrow::Cow;
use std::sync::Arc;

use enum_iterator::Sequence;
use meilisearch_types::milli::progress::{AtomicSubStep, NamedStep, Progress, ProgressView, Step};
use meilisearch_types::milli::progress::{AtomicSubStep, NamedStep, Progress, ProgressView};
use meilisearch_types::milli::{make_atomic_progress, make_enum_progress};
use roaring::RoaringBitmap;

@@ -173,32 +171,6 @@ make_atomic_progress!(Document alias AtomicDocumentStep => "document" );
make_atomic_progress!(Batch alias AtomicBatchStep => "batch" );
make_atomic_progress!(UpdateFile alias AtomicUpdateFileStep => "update file" );

pub struct VariableNameStep {
name: String,
current: u32,
total: u32,
}

impl VariableNameStep {
pub fn new(name: impl Into<String>, current: u32, total: u32) -> Self {
Self { name: name.into(), current, total }
}
}

impl Step for VariableNameStep {
fn name(&self) -> Cow<'static, str> {
self.name.clone().into()
}

fn current(&self) -> u32 {
self.current
}

fn total(&self) -> u32 {
self.total
}
}

#[cfg(test)]
mod test {
use std::sync::atomic::Ordering;
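The local `VariableNameStep` is deleted here because, as the later hunks show, it now lives in `meilisearch_types::milli::progress` and is parameterized by the step family it reports on (e.g. `VariableNameStep::<SwappingTheIndexes>::new(..)`). A hedged sketch of what such a generic step could look like; the actual definition is in milli and may differ:

use std::borrow::Cow;
use std::marker::PhantomData;

// Assumed shape: `U` only tags which step family the progress entry belongs to,
// so distinct call sites can share one struct without colliding.
pub struct VariableNameStep<U> {
    name: String,
    current: u32,
    total: u32,
    phantom: PhantomData<U>,
}

impl<U> VariableNameStep<U> {
    pub fn new(name: impl Into<String>, current: u32, total: u32) -> Self {
        Self { name: name.into(), current, total, phantom: PhantomData }
    }

    pub fn name(&self) -> Cow<'static, str> {
        self.name.clone().into()
    }
}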
@@ -1,3 +1,4 @@
use std::collections::HashSet;
use std::ops::{Bound, RangeBounds};

use meilisearch_types::batches::{Batch, BatchId};

@@ -10,9 +11,14 @@ use time::OffsetDateTime;

use super::{Query, Queue};
use crate::processing::ProcessingTasks;
use crate::utils::{insert_task_datetime, keep_ids_within_datetimes, map_bound, ProcessingBatch};
use crate::utils::{
insert_task_datetime, keep_ids_within_datetimes, map_bound, remove_task_datetime,
ProcessingBatch,
};
use crate::{Error, Result, BEI128};

/// The number of database used by the batch queue
const NUMBER_OF_DATABASES: u32 = 7;
/// Database const names for the `IndexScheduler`.
mod db_name {
pub const ALL_BATCHES: &str = "all-batches";

@@ -56,6 +62,10 @@ impl BatchQueue {
}
}

pub(crate) const fn nb_db() -> u32 {
NUMBER_OF_DATABASES
}

pub(super) fn new(env: &Env, wtxn: &mut RwTxn) -> Result<Self> {
Ok(Self {
all_batches: env.create_database(wtxn, Some(db_name::ALL_BATCHES))?,

@@ -159,6 +169,8 @@ impl BatchQueue {
}

pub(crate) fn write_batch(&self, wtxn: &mut RwTxn, batch: ProcessingBatch) -> Result<()> {
let old_batch = self.all_batches.get(wtxn, &batch.uid)?;

self.all_batches.put(
wtxn,
&batch.uid,

@@ -172,30 +184,92 @@ impl BatchQueue {
},
)?;

// Update the statuses
if let Some(ref old_batch) = old_batch {
for status in old_batch.stats.status.keys() {
self.update_status(wtxn, *status, |bitmap| {
bitmap.remove(batch.uid);
})?;
}
}
for status in batch.statuses {
self.update_status(wtxn, status, |bitmap| {
bitmap.insert(batch.uid);
})?;
}

// Update the kinds / types
if let Some(ref old_batch) = old_batch {
let kinds: HashSet<_> = old_batch.stats.types.keys().cloned().collect();
for kind in kinds.difference(&batch.kinds) {
self.update_kind(wtxn, *kind, |bitmap| {
bitmap.remove(batch.uid);
})?;
}
}
for kind in batch.kinds {
self.update_kind(wtxn, kind, |bitmap| {
bitmap.insert(batch.uid);
})?;
}

// Update the indexes
if let Some(ref old_batch) = old_batch {
let indexes: HashSet<_> = old_batch.stats.index_uids.keys().cloned().collect();
for index in indexes.difference(&batch.indexes) {
self.update_index(wtxn, index, |bitmap| {
bitmap.remove(batch.uid);
})?;
}
}
for index in batch.indexes {
self.update_index(wtxn, &index, |bitmap| {
bitmap.insert(batch.uid);
})?;
}

// Update the enqueued_at: we cannot retrieve the previous enqueued at from the previous batch, and
// must instead go through the db looking for it. We cannot look at the task contained in this batch either
// because they may have been removed.
// What we know, though, is that the task date is from before the enqueued_at, and max two timestamps have been written
// to the DB per batches.
if let Some(ref old_batch) = old_batch {
let started_at = old_batch.started_at.unix_timestamp_nanos();

// We have either one or two enqueued at to remove
let mut exit = old_batch.stats.total_nb_tasks.clamp(0, 2);
let mut iterator = self.enqueued_at.rev_iter_mut(wtxn)?;
while let Some(entry) = iterator.next() {
let (key, mut value) = entry?;
if key > started_at {
continue;
}
if value.remove(old_batch.uid) {
exit = exit.saturating_sub(1);
// Safe because the key and value are owned
unsafe {
iterator.put_current(&key, &value)?;
}
if exit == 0 {
break;
}
}
}
}
if let Some(enqueued_at) = batch.oldest_enqueued_at {
insert_task_datetime(wtxn, self.enqueued_at, enqueued_at, batch.uid)?;
}
if let Some(enqueued_at) = batch.earliest_enqueued_at {
insert_task_datetime(wtxn, self.enqueued_at, enqueued_at, batch.uid)?;
}

// Update the started at and finished at
if let Some(ref old_batch) = old_batch {
remove_task_datetime(wtxn, self.started_at, old_batch.started_at, old_batch.uid)?;
if let Some(finished_at) = old_batch.finished_at {
remove_task_datetime(wtxn, self.finished_at, finished_at, old_batch.uid)?;
}
}
insert_task_datetime(wtxn, self.started_at, batch.started_at, batch.uid)?;
insert_task_datetime(wtxn, self.finished_at, batch.finished_at.unwrap(), batch.uid)?;
@@ -20,14 +20,16 @@ use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use uuid::Uuid;

use self::batches::BatchQueue;
use self::tasks::TaskQueue;
pub(crate) use self::batches::BatchQueue;
pub(crate) use self::tasks::TaskQueue;
use crate::processing::ProcessingTasks;
use crate::utils::{
check_index_swap_validity, filter_out_references_to_newer_tasks, ProcessingBatch,
};
use crate::{Error, IndexSchedulerOptions, Result, TaskId};

/// The number of database used by queue itself
const NUMBER_OF_DATABASES: u32 = 1;
/// Database const names for the `IndexScheduler`.
mod db_name {
pub const BATCH_TO_TASKS_MAPPING: &str = "batch-to-tasks-mapping";

@@ -148,6 +150,10 @@ impl Queue {
}
}

pub(crate) const fn nb_db() -> u32 {
tasks::TaskQueue::nb_db() + batches::BatchQueue::nb_db() + NUMBER_OF_DATABASES
}

/// Create an index scheduler and start its run loop.
pub(crate) fn new(
env: &Env,
@@ -9,12 +9,17 @@ use time::OffsetDateTime;

use super::{Query, Queue};
use crate::processing::ProcessingTasks;
use crate::utils::{self, insert_task_datetime, keep_ids_within_datetimes, map_bound};
use crate::utils::{
self, insert_task_datetime, keep_ids_within_datetimes, map_bound, remove_task_datetime,
};
use crate::{Error, Result, TaskId, BEI128};

/// The number of database used by the task queue
const NUMBER_OF_DATABASES: u32 = 8;
/// Database const names for the `IndexScheduler`.
mod db_name {
pub const ALL_TASKS: &str = "all-tasks";

pub const STATUS: &str = "status";
pub const KIND: &str = "kind";
pub const INDEX_TASKS: &str = "index-tasks";

@@ -59,7 +64,11 @@ impl TaskQueue {
}
}

pub(super) fn new(env: &Env, wtxn: &mut RwTxn) -> Result<Self> {
pub(crate) const fn nb_db() -> u32 {
NUMBER_OF_DATABASES
}

pub(crate) fn new(env: &Env, wtxn: &mut RwTxn) -> Result<Self> {
Ok(Self {
all_tasks: env.create_database(wtxn, Some(db_name::ALL_TASKS))?,
status: env.create_database(wtxn, Some(db_name::STATUS))?,

@@ -90,12 +99,14 @@ impl TaskQueue {

pub(crate) fn update_task(&self, wtxn: &mut RwTxn, task: &Task) -> Result<()> {
let old_task = self.get_task(wtxn, task.uid)?.ok_or(Error::CorruptedTaskQueue)?;
let reprocessing = old_task.status != Status::Enqueued;

debug_assert!(old_task != *task);
debug_assert_eq!(old_task.uid, task.uid);
debug_assert!(old_task.batch_uid.is_none() && task.batch_uid.is_some());

// If we're processing a task that failed it may already contains a batch_uid
debug_assert!(
old_task.batch_uid.is_none() && task.batch_uid.is_some(),
reprocessing || (old_task.batch_uid.is_none() && task.batch_uid.is_some()),
"\n==> old: {old_task:?}\n==> new: {task:?}"
);

@@ -122,13 +133,25 @@ impl TaskQueue {
"Cannot update a task's enqueued_at time"
);
if old_task.started_at != task.started_at {
assert!(old_task.started_at.is_none(), "Cannot update a task's started_at time");
assert!(
reprocessing || old_task.started_at.is_none(),
"Cannot update a task's started_at time"
);
if let Some(started_at) = old_task.started_at {
remove_task_datetime(wtxn, self.started_at, started_at, task.uid)?;
}
if let Some(started_at) = task.started_at {
insert_task_datetime(wtxn, self.started_at, started_at, task.uid)?;
}
}
if old_task.finished_at != task.finished_at {
assert!(old_task.finished_at.is_none(), "Cannot update a task's finished_at time");
assert!(
reprocessing || old_task.finished_at.is_none(),
"Cannot update a task's finished_at time"
);
if let Some(finished_at) = old_task.finished_at {
remove_task_datetime(wtxn, self.finished_at, finished_at, task.uid)?;
}
if let Some(finished_at) = task.finished_at {
insert_task_datetime(wtxn, self.finished_at, finished_at, task.uid)?;
}
@@ -165,6 +165,7 @@ fn test_disable_auto_deletion_of_tasks() {
let (index_scheduler, mut handle) = IndexScheduler::test_with_custom_config(vec![], |config| {
config.cleanup_enabled = false;
config.max_number_of_tasks = 2;
None
});

index_scheduler

@@ -228,6 +229,7 @@ fn test_disable_auto_deletion_of_tasks() {
fn test_auto_deletion_of_tasks() {
let (index_scheduler, mut handle) = IndexScheduler::test_with_custom_config(vec![], |config| {
config.max_number_of_tasks = 2;
None
});

index_scheduler

@@ -325,6 +327,7 @@ fn test_task_queue_is_full() {
let (index_scheduler, mut handle) = IndexScheduler::test_with_custom_config(vec![], |config| {
// that's the minimum map size possible
config.task_db_size = 1048576;
None
});

index_scheduler
@@ -85,6 +85,7 @@ impl From<KindWithContent> for AutobatchKind {
KindWithContent::TaskCancelation { .. }
| KindWithContent::TaskDeletion { .. }
| KindWithContent::DumpCreation { .. }
| KindWithContent::UpgradeDatabase { .. }
| KindWithContent::SnapshotCreation => {
panic!("The autobatcher should never be called with tasks that don't apply to an index.")
}
@@ -47,6 +47,9 @@ pub(crate) enum Batch {
IndexSwap {
task: Task,
},
UpgradeDatabase {
tasks: Vec<Task>,
},
}

#[derive(Debug)]

@@ -105,6 +108,7 @@ impl Batch {
}
Batch::SnapshotCreation(tasks)
| Batch::TaskDeletions(tasks)
| Batch::UpgradeDatabase { tasks }
| Batch::IndexDeletion { tasks, .. } => {
RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
}

@@ -138,6 +142,7 @@ impl Batch {
| TaskDeletions(_)
| SnapshotCreation(_)
| Dump(_)
| UpgradeDatabase { .. }
| IndexSwap { .. } => None,
IndexOperation { op, .. } => Some(op.index_uid()),
IndexCreation { index_uid, .. }

@@ -162,6 +167,7 @@ impl fmt::Display for Batch {
Batch::IndexUpdate { .. } => f.write_str("IndexUpdate")?,
Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
Batch::UpgradeDatabase { .. } => f.write_str("UpgradeDatabase")?,
};
match index_uid {
Some(name) => f.write_fmt(format_args!(" on {name:?} from tasks: {tasks:?}")),

@@ -427,9 +433,24 @@ impl IndexScheduler {
let mut current_batch = ProcessingBatch::new(batch_id);

let enqueued = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
let to_cancel = self.queue.tasks.get_kind(rtxn, Kind::TaskCancelation)? & enqueued;
let failed = &self.queue.tasks.get_status(rtxn, Status::Failed)?;

// 0. The priority over everything is to upgrade the instance
// There shouldn't be multiple upgrade tasks but just in case we're going to batch all of them at the same time
let upgrade = self.queue.tasks.get_kind(rtxn, Kind::UpgradeDatabase)? & (enqueued | failed);
if !upgrade.is_empty() {
let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, upgrade)?;
// In the case of an upgrade database batch, we want to find back the original batch that tried processing it
// and re-use its id
if let Some(batch_uid) = tasks.last().unwrap().batch_uid {
current_batch.uid = batch_uid;
}
current_batch.processing(&mut tasks);
return Ok(Some((Batch::UpgradeDatabase { tasks }, current_batch)));
}

// 1. we get the last task to cancel.
let to_cancel = self.queue.tasks.get_kind(rtxn, Kind::TaskCancelation)? & enqueued;
if let Some(task_id) = to_cancel.max() {
let mut task =
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
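The new step 0 above batches every `UpgradeDatabase` task, whether enqueued or previously failed, before anything else. The selection itself is plain roaring-bitmap arithmetic; a tiny stand-alone illustration with made-up task ids:

use roaring::RoaringBitmap;

fn main() {
    let enqueued = RoaringBitmap::from_iter([1u32, 2, 3]);
    let failed = RoaringBitmap::from_iter([0u32]);
    let upgrade_kind = RoaringBitmap::from_iter([0u32]); // tasks of kind UpgradeDatabase

    // Pick upgrade tasks that are either enqueued or failed, ahead of everything else.
    let upgrade = &upgrade_kind & &(&enqueued | &failed);
    assert!(!upgrade.is_empty());
}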
@@ -6,6 +6,7 @@ mod process_batch;
mod process_dump_creation;
mod process_index_operation;
mod process_snapshot_creation;
mod process_upgrade;
#[cfg(test)]
mod test;
#[cfg(test)]

@@ -183,6 +184,7 @@ impl IndexScheduler {

progress.update_progress(BatchProgress::WritingTasksToDisk);
processing_batch.finished();
let mut stop_scheduler_forever = false;
let mut wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
let mut canceled = RoaringBitmap::new();

@@ -221,7 +223,7 @@ impl IndexScheduler {
self.queue
.tasks
.update_task(&mut wtxn, &task)
.map_err(|e| Error::TaskDatabaseUpdate(Box::new(e)))?;
.map_err(|e| Error::UnrecoverableError(Box::new(e)))?;
}
if let Some(canceled_by) = canceled_by {
self.queue.tasks.canceled_by.put(&mut wtxn, &canceled_by, &canceled)?;

@@ -272,6 +274,12 @@ impl IndexScheduler {
let (task_progress, task_progress_obj) = AtomicTaskStep::new(ids.len() as u32);
progress.update_progress(task_progress_obj);

if matches!(err, Error::DatabaseUpgrade(_)) {
tracing::error!(
"Upgrade task failed, tasks won't be processed until the following issue is fixed: {err}"
);
stop_scheduler_forever = true;
}
let error: ResponseError = err.into();
for id in ids.iter() {
task_progress.fetch_add(1, Ordering::Relaxed);

@@ -279,7 +287,7 @@ impl IndexScheduler {
.queue
.tasks
.get_task(&wtxn, id)
.map_err(|e| Error::TaskDatabaseUpdate(Box::new(e)))?
.map_err(|e| Error::UnrecoverableError(Box::new(e)))?
.ok_or(Error::CorruptedTaskQueue)?;
task.status = Status::Failed;
task.error = Some(error.clone());

@@ -296,7 +304,7 @@ impl IndexScheduler {
self.queue
.tasks
.update_task(&mut wtxn, &task)
.map_err(|e| Error::TaskDatabaseUpdate(Box::new(e)))?;
.map_err(|e| Error::UnrecoverableError(Box::new(e)))?;
}
}
}

@@ -326,7 +334,7 @@ impl IndexScheduler {
.queue
.tasks
.get_task(&rtxn, id)
.map_err(|e| Error::TaskDatabaseUpdate(Box::new(e)))?
.map_err(|e| Error::UnrecoverableError(Box::new(e)))?
.ok_or(Error::CorruptedTaskQueue)?;
if let Err(e) = self.queue.delete_persisted_task_data(&task) {
tracing::error!(

@@ -344,6 +352,10 @@ impl IndexScheduler {
#[cfg(test)]
self.breakpoint(crate::test_utils::Breakpoint::AfterProcessing);

Ok(TickOutcome::TickAgain(processed_tasks))
if stop_scheduler_forever {
Ok(TickOutcome::StopProcessingForever)
} else {
Ok(TickOutcome::TickAgain(processed_tasks))
}
}
}
@@ -1,9 +1,10 @@
use std::collections::{BTreeSet, HashMap, HashSet};
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::Ordering;

use meilisearch_types::batches::BatchId;
use meilisearch_types::heed::{RoTxn, RwTxn};
use meilisearch_types::milli::progress::Progress;
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
use meilisearch_types::milli::{self};
use meilisearch_types::tasks::{Details, IndexSwap, KindWithContent, Status, Task};
use milli::update::Settings as MilliSettings;

@@ -13,7 +14,7 @@ use super::create_batch::Batch;
use crate::processing::{
AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress,
InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, TaskDeletionProgress,
UpdateIndexProgress, VariableNameStep,
UpdateIndexProgress,
};
use crate::utils::{self, swap_index_uid_in_task, ProcessingBatch};
use crate::{Error, IndexScheduler, Result, TaskId};

@@ -297,7 +298,7 @@ impl IndexScheduler {
}
progress.update_progress(SwappingTheIndexes::SwappingTheIndexes);
for (step, swap) in swaps.iter().enumerate() {
progress.update_progress(VariableNameStep::new(
progress.update_progress(VariableNameStep::<SwappingTheIndexes>::new(
format!("swapping index {} and {}", swap.indexes.0, swap.indexes.1),
step as u32,
swaps.len() as u32,

@@ -314,6 +315,27 @@ impl IndexScheduler {
task.status = Status::Succeeded;
Ok(vec![task])
}
Batch::UpgradeDatabase { mut tasks } => {
let KindWithContent::UpgradeDatabase { from } = tasks.last().unwrap().kind else {
unreachable!();
};
let ret = catch_unwind(AssertUnwindSafe(|| self.process_upgrade(from, progress)));
match ret {
Ok(Ok(())) => (),
Ok(Err(e)) => return Err(Error::DatabaseUpgrade(Box::new(e))),
Err(_e) => {
return Err(Error::DatabaseUpgrade(Box::new(Error::ProcessBatchPanicked)));
}
}

for task in tasks.iter_mut() {
task.status = Status::Succeeded;
// Since this task can be retried we must reset its error status
task.error = None;
}

Ok(tasks)
}
}
}
@@ -4,17 +4,14 @@ use std::sync::atomic::Ordering;

use dump::IndexMetadata;
use meilisearch_types::milli::constants::RESERVED_VECTORS_FIELD_NAME;
use meilisearch_types::milli::documents::{obkv_to_object, DocumentsBatchReader};
use meilisearch_types::milli::progress::Progress;
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
use meilisearch_types::milli::vector::parsed_vectors::{ExplicitVectors, VectorOrArrayOfVectors};
use meilisearch_types::milli::{self};
use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
use time::macros::format_description;
use time::OffsetDateTime;

use crate::processing::{
AtomicDocumentStep, AtomicTaskStep, DumpCreationProgress, VariableNameStep,
};
use crate::processing::{AtomicDocumentStep, AtomicTaskStep, DumpCreationProgress};
use crate::{Error, IndexScheduler, Result};

impl IndexScheduler {

@@ -72,6 +69,13 @@ impl IndexScheduler {
t.started_at = Some(started_at);
t.finished_at = Some(finished_at);
}

// Patch the task to remove the batch uid, because as of v1.12.5 batches are not persisted.
// This prevent from referencing *future* batches not actually associated with the task.
//
// See <https://github.com/meilisearch/meilisearch/issues/5247> for details.
t.batch_uid = None;

let mut dump_content_file = dump_tasks.push_task(&t.into())?;

// 2.1. Dump the `content_file` associated with the task if there is one and the task is not finished yet.

@@ -82,19 +86,15 @@ impl IndexScheduler {
if status == Status::Enqueued {
let content_file = self.queue.file_store.get_update(content_file)?;

let reader = DocumentsBatchReader::from_reader(content_file)
.map_err(|e| Error::from_milli(e.into(), None))?;

let (mut cursor, documents_batch_index) = reader.into_cursor_and_fields_index();

while let Some(doc) =
cursor.next_document().map_err(|e| Error::from_milli(e.into(), None))?
for document in
serde_json::de::Deserializer::from_reader(content_file).into_iter()
{
dump_content_file.push_document(
&obkv_to_object(doc, &documents_batch_index)
.map_err(|e| Error::from_milli(e, None))?,
)?;
let document = document.map_err(|e| {
Error::from_milli(milli::InternalError::SerdeJson(e).into(), None)
})?;
dump_content_file.push_document(&document)?;
}

dump_content_file.flush()?;
}
}

@@ -107,7 +107,11 @@ impl IndexScheduler {
let nb_indexes = self.index_mapper.index_mapping.len(&rtxn)? as u32;
let mut count = 0;
let () = self.index_mapper.try_for_each_index(&rtxn, |uid, index| -> Result<()> {
progress.update_progress(VariableNameStep::new(uid.to_string(), count, nb_indexes));
progress.update_progress(VariableNameStep::<DumpCreationProgress>::new(
uid.to_string(),
count,
nb_indexes,
));
count += 1;

let rtxn = index.read_txn()?;
@@ -3,12 +3,12 @@ use std::fs;
use std::sync::atomic::Ordering;

use meilisearch_types::heed::CompactionOption;
use meilisearch_types::milli::progress::Progress;
use meilisearch_types::milli::progress::{Progress, VariableNameStep};
use meilisearch_types::milli::{self};
use meilisearch_types::tasks::{Status, Task};
use meilisearch_types::{compression, VERSION_FILE_NAME};

use crate::processing::{AtomicUpdateFileStep, SnapshotCreationProgress, VariableNameStep};
use crate::processing::{AtomicUpdateFileStep, SnapshotCreationProgress};
use crate::{Error, IndexScheduler, Result};

impl IndexScheduler {

@@ -74,7 +74,9 @@ impl IndexScheduler {

for (i, result) in index_mapping.iter(&rtxn)?.enumerate() {
let (name, uuid) = result?;
progress.update_progress(VariableNameStep::new(name, i as u32, nb_indexes));
progress.update_progress(VariableNameStep::<SnapshotCreationProgress>::new(
name, i as u32, nb_indexes,
));
let index = self.index_mapper.index(&rtxn, name)?;
let dst = temp_snapshot_dir.path().join("indexes").join(uuid.to_string());
fs::create_dir_all(&dst)?;
crates/index-scheduler/src/scheduler/process_upgrade/mod.rs (new file, 49 lines)
@@ -0,0 +1,49 @@
use meilisearch_types::milli;
use meilisearch_types::milli::progress::{Progress, VariableNameStep};

use crate::{Error, IndexScheduler, Result};

impl IndexScheduler {
pub(super) fn process_upgrade(
&self,
db_version: (u32, u32, u32),
progress: Progress,
) -> Result<()> {
#[cfg(test)]
self.maybe_fail(crate::test_utils::FailureLocation::ProcessUpgrade)?;

enum UpgradeIndex {}
let indexes = self.index_names()?;

for (i, uid) in indexes.iter().enumerate() {
progress.update_progress(VariableNameStep::<UpgradeIndex>::new(
format!("Upgrading index `{uid}`"),
i as u32,
indexes.len() as u32,
));
let index = self.index(uid)?;
let mut index_wtxn = index.write_txn()?;
let regen_stats = milli::update::upgrade::upgrade(
&mut index_wtxn,
&index,
db_version,
progress.clone(),
)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
if regen_stats {
let stats = crate::index_mapper::IndexStats::new(&index, &index_wtxn)
.map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
index_wtxn.commit()?;

// Release wtxn as soon as possible because it stops us from registering tasks
let mut index_schd_wtxn = self.env.write_txn()?;
self.index_mapper.store_stats_of(&mut index_schd_wtxn, uid, &stats)?;
index_schd_wtxn.commit()?;
} else {
index_wtxn.commit()?;
}
}

Ok(())
}
}
@@ -0,0 +1,110 @@
---
source: crates/index-scheduler/src/scheduler/test_failure.rs
snapshot_kind: text
---
### Autobatching Enabled = true
### Processing batch None:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 13, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
4 {uid: 4, batch_uid: 4, status: succeeded, details: { primary_key: Some("leaves") }, kind: IndexCreation { index_uid: "girafo", primary_key: Some("leaves") }}
----------------------------------------------------------------------
### Status:
enqueued []
succeeded [0,1,2,4,]
failed [3,]
----------------------------------------------------------------------
### Kind:
"indexCreation" [1,2,3,4,]
"upgradeDatabase" [0,]
----------------------------------------------------------------------
### Index Tasks:
catto [1,]
doggo [2,3,]
girafo [4,]
----------------------------------------------------------------------
### Index Mapper:
catto: { number_of_documents: 0, field_distribution: {} }
doggo: { number_of_documents: 0, field_distribution: {} }
girafo: { number_of_documents: 0, field_distribution: {} }

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
[timestamp] [2,]
[timestamp] [3,]
[timestamp] [4,]
----------------------------------------------------------------------
### Started At:
[timestamp] [0,]
[timestamp] [1,]
[timestamp] [2,]
[timestamp] [3,]
[timestamp] [4,]
----------------------------------------------------------------------
### Finished At:
[timestamp] [0,]
[timestamp] [1,]
[timestamp] [2,]
[timestamp] [3,]
[timestamp] [4,]
----------------------------------------------------------------------
### All Batches:
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.13.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, }
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
4 {uid: 4, details: {"primaryKey":"leaves"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"girafo":1}}, }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]
1 [1,]
2 [2,]
3 [3,]
4 [4,]
----------------------------------------------------------------------
### Batches Status:
succeeded [0,1,2,4,]
failed [3,]
----------------------------------------------------------------------
### Batches Kind:
"indexCreation" [1,2,3,4,]
"upgradeDatabase" [0,]
----------------------------------------------------------------------
### Batches Index Tasks:
catto [1,]
doggo [2,3,]
girafo [4,]
----------------------------------------------------------------------
### Batches Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
[timestamp] [2,]
[timestamp] [3,]
[timestamp] [4,]
----------------------------------------------------------------------
### Batches Started At:
[timestamp] [0,]
[timestamp] [1,]
[timestamp] [2,]
[timestamp] [3,]
[timestamp] [4,]
----------------------------------------------------------------------
### Batches Finished At:
[timestamp] [0,]
[timestamp] [1,]
[timestamp] [2,]
[timestamp] [3,]
[timestamp] [4,]
----------------------------------------------------------------------
### File Store:

----------------------------------------------------------------------
@@ -0,0 +1,115 @@
---
source: crates/index-scheduler/src/scheduler/test_failure.rs
snapshot_kind: text
---
### Autobatching Enabled = true
### Processing batch None:
[]
----------------------------------------------------------------------
### All Tasks:
1 {uid: 1, batch_uid: 1, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
2 {uid: 2, batch_uid: 2, status: succeeded, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
3 {uid: 3, batch_uid: 3, status: failed, error: ResponseError { code: 200, message: "Index `doggo` already exists.", error_code: "index_already_exists", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_already_exists" }, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
4 {uid: 4, batch_uid: 4, status: succeeded, details: { primary_key: Some("leaves") }, kind: IndexCreation { index_uid: "girafo", primary_key: Some("leaves") }}
5 {uid: 5, batch_uid: 5, status: succeeded, details: { matched_tasks: 1, deleted_tasks: Some(1), original_filter: "types=upgradeDatabase" }, kind: TaskDeletion { query: "types=upgradeDatabase", tasks: RoaringBitmap<[0]> }}
----------------------------------------------------------------------
### Status:
enqueued []
succeeded [1,2,4,5,]
failed [3,]
----------------------------------------------------------------------
### Kind:
"indexCreation" [1,2,3,4,]
"taskDeletion" [5,]
"upgradeDatabase" []
----------------------------------------------------------------------
### Index Tasks:
catto [1,]
doggo [2,3,]
girafo [4,]
----------------------------------------------------------------------
### Index Mapper:
catto: { number_of_documents: 0, field_distribution: {} }
doggo: { number_of_documents: 0, field_distribution: {} }
girafo: { number_of_documents: 0, field_distribution: {} }

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [1,]
[timestamp] [2,]
[timestamp] [3,]
[timestamp] [4,]
[timestamp] [5,]
----------------------------------------------------------------------
### Started At:
[timestamp] [1,]
[timestamp] [2,]
[timestamp] [3,]
[timestamp] [4,]
[timestamp] [5,]
----------------------------------------------------------------------
### Finished At:
[timestamp] [1,]
[timestamp] [2,]
[timestamp] [3,]
[timestamp] [4,]
[timestamp] [5,]
----------------------------------------------------------------------
### All Batches:
1 {uid: 1, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
2 {uid: 2, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
3 {uid: 3, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
4 {uid: 4, details: {"primaryKey":"leaves"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"girafo":1}}, }
5 {uid: 5, details: {"matchedTasks":1,"deletedTasks":1,"originalFilter":"types=upgradeDatabase"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"taskDeletion":1},"indexUids":{}}, }
----------------------------------------------------------------------
### Batch to tasks mapping:
1 [1,]
2 [2,]
3 [3,]
4 [4,]
5 [5,]
----------------------------------------------------------------------
### Batches Status:
succeeded [1,2,4,5,]
failed [3,]
----------------------------------------------------------------------
### Batches Kind:
"indexCreation" [1,2,3,4,]
"taskDeletion" [5,]
"upgradeDatabase" []
----------------------------------------------------------------------
### Batches Index Tasks:
catto [1,]
doggo [2,3,]
girafo [4,]
----------------------------------------------------------------------
### Batches Enqueued At:
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
[timestamp] [2,]
|
||||
[timestamp] [3,]
|
||||
[timestamp] [4,]
|
||||
[timestamp] [5,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Started At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
[timestamp] [2,]
|
||||
[timestamp] [3,]
|
||||
[timestamp] [4,]
|
||||
[timestamp] [5,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Finished At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
[timestamp] [2,]
|
||||
[timestamp] [3,]
|
||||
[timestamp] [4,]
|
||||
[timestamp] [5,]
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
@ -0,0 +1,51 @@
|
||||
---
|
||||
source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
snapshot_kind: text
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing batch None:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 13, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"upgradeDatabase" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Status:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Kind:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Index Tasks:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Enqueued At:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
@ -0,0 +1,55 @@
|
||||
---
|
||||
source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
snapshot_kind: text
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing batch None:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { from: (1, 12, 0), to: (1, 13, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"indexCreation" [1,]
|
||||
"upgradeDatabase" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
catto [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Status:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Kind:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Index Tasks:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Enqueued At:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
@ -0,0 +1,65 @@
|
||||
---
|
||||
source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
snapshot_kind: text
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing batch None:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 13, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [1,]
|
||||
failed [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"indexCreation" [1,]
|
||||
"upgradeDatabase" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
catto [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.13.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, }
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
0 [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Status:
|
||||
failed [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Kind:
|
||||
"upgradeDatabase" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Index Tasks:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Enqueued At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Started At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Finished At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
@ -0,0 +1,68 @@
|
||||
---
|
||||
source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
snapshot_kind: text
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing batch None:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: failed, error: ResponseError { code: 200, message: "Planned failure for tests.", error_code: "internal", error_type: "internal", error_link: "https://docs.meilisearch.com/errors#internal" }, details: { from: (1, 12, 0), to: (1, 13, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [1,2,]
|
||||
failed [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"indexCreation" [1,2,]
|
||||
"upgradeDatabase" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
catto [1,]
|
||||
doggo [2,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
[timestamp] [2,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.13.0"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"upgradeDatabase":1},"indexUids":{}}, }
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
0 [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Status:
|
||||
failed [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Kind:
|
||||
"upgradeDatabase" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Index Tasks:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Enqueued At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Started At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Finished At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
@ -0,0 +1,72 @@
|
||||
---
|
||||
source: crates/index-scheduler/src/scheduler/test_failure.rs
|
||||
snapshot_kind: text
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing batch None:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { from: (1, 12, 0), to: (1, 13, 0) }, kind: UpgradeDatabase { from: (1, 12, 0) }}
|
||||
1 {uid: 1, status: enqueued, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
|
||||
2 {uid: 2, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
3 {uid: 3, status: enqueued, details: { primary_key: Some("bone") }, kind: IndexCreation { index_uid: "doggo", primary_key: Some("bone") }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [1,2,3,]
|
||||
succeeded [0,]
|
||||
failed []
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"indexCreation" [1,2,3,]
|
||||
"upgradeDatabase" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
catto [1,]
|
||||
doggo [2,3,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
[timestamp] [2,]
|
||||
[timestamp] [3,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### All Batches:
|
||||
0 {uid: 0, details: {"upgradeFrom":"v1.12.0","upgradeTo":"v1.13.0"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"upgradeDatabase":1},"indexUids":{}}, }
|
||||
----------------------------------------------------------------------
|
||||
### Batch to tasks mapping:
|
||||
0 [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Status:
|
||||
succeeded [0,]
|
||||
failed []
|
||||
----------------------------------------------------------------------
|
||||
### Batches Kind:
|
||||
"upgradeDatabase" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Index Tasks:
|
||||
----------------------------------------------------------------------
|
||||
### Batches Enqueued At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Started At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Batches Finished At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
@ -713,68 +713,70 @@ fn basic_get_stats() {
|
||||
let kind = index_creation_task("whalo", "fish");
|
||||
let _task = index_scheduler.register(kind, None, false).unwrap();
|
||||
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
"doggo": 1,
|
||||
"whalo": 1
|
||||
},
|
||||
"statuses": {
|
||||
"canceled": 0,
|
||||
"enqueued": 3,
|
||||
"failed": 0,
|
||||
"processing": 0,
|
||||
"succeeded": 0
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 0,
|
||||
"documentDeletion": 0,
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
"indexUpdate": 0,
|
||||
"settingsUpdate": 0,
|
||||
"snapshotCreation": 0,
|
||||
"taskCancelation": 0,
|
||||
"taskDeletion": 0
|
||||
}
|
||||
}
|
||||
"###);
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
"doggo": 1,
|
||||
"whalo": 1
|
||||
},
|
||||
"statuses": {
|
||||
"canceled": 0,
|
||||
"enqueued": 3,
|
||||
"failed": 0,
|
||||
"processing": 0,
|
||||
"succeeded": 0
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 0,
|
||||
"documentDeletion": 0,
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
"indexUpdate": 0,
|
||||
"settingsUpdate": 0,
|
||||
"snapshotCreation": 0,
|
||||
"taskCancelation": 0,
|
||||
"taskDeletion": 0,
|
||||
"upgradeDatabase": 0
|
||||
}
|
||||
}
|
||||
"#);
|
||||
|
||||
handle.advance_till([Start, BatchCreated]);
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
"doggo": 1,
|
||||
"whalo": 1
|
||||
},
|
||||
"statuses": {
|
||||
"canceled": 0,
|
||||
"enqueued": 2,
|
||||
"failed": 0,
|
||||
"processing": 1,
|
||||
"succeeded": 0
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 0,
|
||||
"documentDeletion": 0,
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
"indexUpdate": 0,
|
||||
"settingsUpdate": 0,
|
||||
"snapshotCreation": 0,
|
||||
"taskCancelation": 0,
|
||||
"taskDeletion": 0
|
||||
}
|
||||
}
|
||||
"###);
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
"doggo": 1,
|
||||
"whalo": 1
|
||||
},
|
||||
"statuses": {
|
||||
"canceled": 0,
|
||||
"enqueued": 2,
|
||||
"failed": 0,
|
||||
"processing": 1,
|
||||
"succeeded": 0
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 0,
|
||||
"documentDeletion": 0,
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
"indexUpdate": 0,
|
||||
"settingsUpdate": 0,
|
||||
"snapshotCreation": 0,
|
||||
"taskCancelation": 0,
|
||||
"taskDeletion": 0,
|
||||
"upgradeDatabase": 0
|
||||
}
|
||||
}
|
||||
"#);
|
||||
|
||||
handle.advance_till([
|
||||
InsideProcessBatch,
|
||||
@ -784,36 +786,37 @@ fn basic_get_stats() {
|
||||
Start,
|
||||
BatchCreated,
|
||||
]);
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
"doggo": 1,
|
||||
"whalo": 1
|
||||
},
|
||||
"statuses": {
|
||||
"canceled": 0,
|
||||
"enqueued": 1,
|
||||
"failed": 0,
|
||||
"processing": 1,
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 0,
|
||||
"documentDeletion": 0,
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
"indexUpdate": 0,
|
||||
"settingsUpdate": 0,
|
||||
"snapshotCreation": 0,
|
||||
"taskCancelation": 0,
|
||||
"taskDeletion": 0
|
||||
}
|
||||
}
|
||||
"###);
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
"doggo": 1,
|
||||
"whalo": 1
|
||||
},
|
||||
"statuses": {
|
||||
"canceled": 0,
|
||||
"enqueued": 1,
|
||||
"failed": 0,
|
||||
"processing": 1,
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 0,
|
||||
"documentDeletion": 0,
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
"indexUpdate": 0,
|
||||
"settingsUpdate": 0,
|
||||
"snapshotCreation": 0,
|
||||
"taskCancelation": 0,
|
||||
"taskDeletion": 0,
|
||||
"upgradeDatabase": 0
|
||||
}
|
||||
}
|
||||
"#);
|
||||
|
||||
// now we make one more batch; the started_at field of the new tasks will be past `second_start_time`
|
||||
handle.advance_till([
|
||||
@ -824,36 +827,37 @@ fn basic_get_stats() {
|
||||
Start,
|
||||
BatchCreated,
|
||||
]);
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r###"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
"doggo": 1,
|
||||
"whalo": 1
|
||||
},
|
||||
"statuses": {
|
||||
"canceled": 0,
|
||||
"enqueued": 0,
|
||||
"failed": 0,
|
||||
"processing": 1,
|
||||
"succeeded": 2
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 0,
|
||||
"documentDeletion": 0,
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
"indexUpdate": 0,
|
||||
"settingsUpdate": 0,
|
||||
"snapshotCreation": 0,
|
||||
"taskCancelation": 0,
|
||||
"taskDeletion": 0
|
||||
}
|
||||
}
|
||||
"###);
|
||||
snapshot!(json_string!(index_scheduler.get_stats().unwrap()), @r#"
|
||||
{
|
||||
"indexes": {
|
||||
"catto": 1,
|
||||
"doggo": 1,
|
||||
"whalo": 1
|
||||
},
|
||||
"statuses": {
|
||||
"canceled": 0,
|
||||
"enqueued": 0,
|
||||
"failed": 0,
|
||||
"processing": 1,
|
||||
"succeeded": 2
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 0,
|
||||
"documentDeletion": 0,
|
||||
"documentEdition": 0,
|
||||
"dumpCreation": 0,
|
||||
"indexCreation": 3,
|
||||
"indexDeletion": 0,
|
||||
"indexSwap": 0,
|
||||
"indexUpdate": 0,
|
||||
"settingsUpdate": 0,
|
||||
"snapshotCreation": 0,
|
||||
"taskCancelation": 0,
|
||||
"taskDeletion": 0,
|
||||
"upgradeDatabase": 0
|
||||
}
|
||||
}
|
||||
"#);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -899,7 +903,7 @@ fn create_and_list_index() {
|
||||
|
||||
index_scheduler.index("kefir").unwrap();
|
||||
let list = index_scheduler.get_paginated_indexes_stats(&AuthFilter::default(), 0, 20).unwrap();
|
||||
snapshot!(json_string!(list, { "[1][0][1].created_at" => "[date]", "[1][0][1].updated_at" => "[date]", "[1][0][1].database_size" => "[bytes]", "[1][0][1].used_database_size" => "[bytes]" }), @r###"
|
||||
snapshot!(json_string!(list, { "[1][0][1].created_at" => "[date]", "[1][0][1].updated_at" => "[date]", "[1][0][1].used_database_size" => "[bytes]", "[1][0][1].database_size" => "[bytes]" }), @r#"
|
||||
[
|
||||
1,
|
||||
[
|
||||
@ -917,5 +921,5 @@ fn create_and_list_index() {
|
||||
]
|
||||
]
|
||||
]
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ use meili_snap::snapshot;
|
||||
use meilisearch_types::milli::obkv_to_json;
|
||||
use meilisearch_types::milli::update::IndexDocumentsMethod::*;
|
||||
use meilisearch_types::milli::update::Setting;
|
||||
use meilisearch_types::tasks::Kind;
|
||||
use meilisearch_types::tasks::KindWithContent;
|
||||
|
||||
use crate::insta_snapshot::snapshot_index_scheduler;
|
||||
@ -249,3 +250,78 @@ fn panic_in_process_batch_for_index_creation() {
|
||||
// No matter what happens in process_batch, the index_scheduler should be internally consistent
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "index_creation_failed");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn upgrade_failure() {
|
||||
// By starting the index-scheduler at v1.12.0, an upgrade task should be automatically enqueued
|
||||
let (index_scheduler, mut handle) =
|
||||
IndexScheduler::test_with_custom_config(vec![(1, FailureLocation::ProcessUpgrade)], |_| {
|
||||
Some((1, 12, 0))
|
||||
});
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "register_automatic_upgrade_task");
|
||||
|
||||
let kind = index_creation_task("catto", "mouse");
|
||||
let _task = index_scheduler.register(kind, None, false).unwrap();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_a_task_while_the_upgrade_task_is_enqueued");
|
||||
|
||||
handle.advance_one_failed_batch();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "upgrade_task_failed");
|
||||
|
||||
// We can still register tasks
|
||||
let kind = index_creation_task("doggo", "bone");
|
||||
let _task = index_scheduler.register(kind, None, false).unwrap();
|
||||
|
||||
// But the scheduler is down and won't process anything ever again
|
||||
handle.scheduler_is_down();
|
||||
|
||||
// =====> After a restart, is it still working as expected?
|
||||
let (index_scheduler, mut handle) =
|
||||
handle.restart(index_scheduler, true, vec![(1, FailureLocation::ProcessUpgrade)], |_| {
|
||||
Some((1, 12, 0)) // the upgrade task should be rerun automatically and nothing else should be enqueued
|
||||
});
|
||||
|
||||
handle.advance_one_failed_batch();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "upgrade_task_failed_again");
|
||||
// We can still register tasks
|
||||
let kind = index_creation_task("doggo", "bone");
|
||||
let _task = index_scheduler.register(kind, None, false).unwrap();
|
||||
// And the scheduler is still down and won't process anything ever again
|
||||
handle.scheduler_is_down();
|
||||
|
||||
// =====> After a second restart, and without planned failures, can we upgrade the indexes and process the tasks?
|
||||
let (index_scheduler, mut handle) =
|
||||
handle.restart(index_scheduler, true, vec![], |_| Some((1, 12, 0)));
|
||||
|
||||
handle.advance_one_successful_batch();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "upgrade_task_succeeded");
|
||||
// We can still register tasks
|
||||
let kind = index_creation_task("girafo", "leaves");
|
||||
let _task = index_scheduler.register(kind, None, false).unwrap();
|
||||
// The scheduler is up and running
|
||||
handle.advance_one_successful_batch();
|
||||
handle.advance_one_successful_batch();
|
||||
handle.advance_one_failed_batch(); // doggo already exists
|
||||
handle.advance_one_successful_batch();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "after_processing_everything");
|
||||
|
||||
let (upgrade_tasks_ids, _) = index_scheduler
|
||||
.get_task_ids_from_authorized_indexes(
|
||||
&crate::Query { types: Some(vec![Kind::UpgradeDatabase]), ..Default::default() },
|
||||
&Default::default(),
|
||||
)
|
||||
.unwrap();
|
||||
// When deleting the single upgrade task it should remove the associated batch
|
||||
let _task = index_scheduler
|
||||
.register(
|
||||
KindWithContent::TaskDeletion {
|
||||
query: String::from("types=upgradeDatabase"),
|
||||
tasks: upgrade_tasks_ids,
|
||||
},
|
||||
None,
|
||||
false,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
handle.advance_one_successful_batch();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "after_removing_the_upgrade_tasks");
|
||||
}
|
||||
|
@ -1,10 +1,18 @@
|
||||
use std::io::{BufWriter, Write};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use big_s::S;
|
||||
use crossbeam_channel::RecvTimeoutError;
|
||||
use file_store::File;
|
||||
use meilisearch_types::document_formats::DocumentFormatError;
|
||||
use meilisearch_types::milli::update::IndexDocumentsMethod::ReplaceDocuments;
|
||||
use meilisearch_types::milli::update::IndexerConfig;
|
||||
use meilisearch_types::tasks::KindWithContent;
|
||||
use meilisearch_types::{versioning, VERSION_FILE_NAME};
|
||||
use tempfile::{NamedTempFile, TempDir};
|
||||
use uuid::Uuid;
|
||||
use Breakpoint::*;
|
||||
|
||||
use crate::insta_snapshot::snapshot_index_scheduler;
|
||||
use crate::{Error, IndexScheduler, IndexSchedulerOptions};
|
||||
@ -28,20 +36,13 @@ pub(crate) enum FailureLocation {
|
||||
InsideCreateBatch,
|
||||
InsideProcessBatch,
|
||||
PanicInsideProcessBatch,
|
||||
ProcessUpgrade,
|
||||
AcquiringWtxn,
|
||||
UpdatingTaskAfterProcessBatchSuccess { task_uid: u32 },
|
||||
UpdatingTaskAfterProcessBatchFailure,
|
||||
CommittingWtxn,
|
||||
}
|
||||
|
||||
use big_s::S;
|
||||
use crossbeam_channel::RecvTimeoutError;
|
||||
use meilisearch_types::milli::update::IndexerConfig;
|
||||
use meilisearch_types::tasks::KindWithContent;
|
||||
use meilisearch_types::VERSION_FILE_NAME;
|
||||
use tempfile::{NamedTempFile, TempDir};
|
||||
use Breakpoint::*;
|
||||
|
||||
impl IndexScheduler {
|
||||
/// Blocks the thread until the test handle asks to progress to/through this breakpoint.
|
||||
///
|
||||
@ -55,7 +56,6 @@ impl IndexScheduler {
|
||||
/// As soon as we find it, the index scheduler is unblocked but then wait again on the call to
|
||||
/// `test_breakpoint_sdr.send(b, true)`. This message will only be able to send once the
|
||||
/// test asks to progress to the next `(b2, false)`.
|
||||
#[cfg(test)]
|
||||
pub(crate) fn breakpoint(&self, b: Breakpoint) {
|
||||
// We send two messages. The first one will sync with the call
|
||||
// to `handle.wait_until(b)`. The second one will block until the
|
||||
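The truncated comment above describes a two-message handshake between the scheduler and the test handle over the zero-capacity crossbeam channel created in `test_with_custom_config`. As a standalone, hedged sketch (not part of this diff; the `"Start"` label stands in for the real `Breakpoint` enum), the rendezvous pattern it relies on looks like this:

use std::thread;

fn main() {
    // With a capacity of 0, every `send` blocks until the other side calls `recv`.
    let (sender, receiver) = crossbeam_channel::bounded(0);
    let scheduler = thread::spawn(move || {
        // First message: signals that the breakpoint was reached (syncs with `wait_until`).
        sender.send(("Start", false)).unwrap();
        // Second message: blocks until the test asks to progress past the breakpoint.
        sender.send(("Start", true)).unwrap();
    });
    assert_eq!(receiver.recv().unwrap(), ("Start", false)); // the test's `wait_until(b)`
    assert_eq!(receiver.recv().unwrap(), ("Start", true)); // the test lets the scheduler continue
    scheduler.join().unwrap();
}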
@ -75,12 +75,13 @@ impl IndexScheduler {
|
||||
) -> (Self, IndexSchedulerHandle) {
|
||||
Self::test_with_custom_config(planned_failures, |config| {
|
||||
config.autobatching_enabled = autobatching_enabled;
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn test_with_custom_config(
|
||||
planned_failures: Vec<(usize, FailureLocation)>,
|
||||
configuration: impl Fn(&mut IndexSchedulerOptions),
|
||||
configuration: impl Fn(&mut IndexSchedulerOptions) -> Option<(u32, u32, u32)>,
|
||||
) -> (Self, IndexSchedulerHandle) {
|
||||
let tempdir = TempDir::new().unwrap();
|
||||
let (sender, receiver) = crossbeam_channel::bounded(0);
|
||||
@ -109,10 +110,17 @@ impl IndexScheduler {
|
||||
max_number_of_batched_tasks: usize::MAX,
|
||||
batched_tasks_size_limit: u64::MAX,
|
||||
instance_features: Default::default(),
|
||||
auto_upgrade: true, // Doesn't cost much and ensures the happy path works
|
||||
};
|
||||
configuration(&mut options);
|
||||
let version = configuration(&mut options).unwrap_or_else(|| {
|
||||
(
|
||||
versioning::VERSION_MAJOR.parse().unwrap(),
|
||||
versioning::VERSION_MINOR.parse().unwrap(),
|
||||
versioning::VERSION_PATCH.parse().unwrap(),
|
||||
)
|
||||
});
|
||||
|
||||
let index_scheduler = Self::new(options, sender, planned_failures).unwrap();
|
||||
let index_scheduler = Self::new(options, version, sender, planned_failures).unwrap();
|
||||
|
||||
// To be 100% consistent between all tests we're going to start the scheduler right now
|
||||
// and ensure it's in the expected starting state.
|
||||
@ -224,6 +232,55 @@ pub struct IndexSchedulerHandle {
|
||||
}
|
||||
|
||||
impl IndexSchedulerHandle {
|
||||
/// Restarts the index-scheduler on the same database.
/// To use this function, you must give back the index-scheduler that was given to you when
/// the handle was first created.
/// If the index-scheduler has been cloned in the meantime, you must drop all copies, otherwise
/// the function will panic.
|
||||
pub(crate) fn restart(
|
||||
self,
|
||||
index_scheduler: IndexScheduler,
|
||||
autobatching_enabled: bool,
|
||||
planned_failures: Vec<(usize, FailureLocation)>,
|
||||
configuration: impl Fn(&mut IndexSchedulerOptions) -> Option<(u32, u32, u32)>,
|
||||
) -> (IndexScheduler, Self) {
|
||||
drop(index_scheduler);
|
||||
let Self { _tempdir: tempdir, index_scheduler, test_breakpoint_rcv, last_breakpoint: _ } =
|
||||
self;
|
||||
let env = index_scheduler.env.clone();
|
||||
drop(index_scheduler);
|
||||
|
||||
// We must ensure that the `run` function has stopped running before restarting the index scheduler
|
||||
loop {
|
||||
match test_breakpoint_rcv.recv_timeout(Duration::from_secs(5)) {
|
||||
Ok((_, true)) => continue,
|
||||
Ok((b, false)) => {
|
||||
panic!("Scheduler is not stopped and passed {b:?}")
|
||||
}
|
||||
Err(RecvTimeoutError::Timeout) => panic!("The indexing loop is stuck somewhere"),
|
||||
Err(RecvTimeoutError::Disconnected) => break,
|
||||
}
|
||||
}
|
||||
let closed = env.prepare_for_closing().wait_timeout(Duration::from_secs(5));
|
||||
assert!(closed, "The index scheduler couldn't close itself, it seems like someone else is holding the env somewhere");
|
||||
|
||||
let (scheduler, mut handle) =
|
||||
IndexScheduler::test_with_custom_config(planned_failures, |config| {
|
||||
let version = configuration(config);
|
||||
config.autobatching_enabled = autobatching_enabled;
|
||||
config.version_file_path = tempdir.path().join(VERSION_FILE_NAME);
|
||||
config.auth_path = tempdir.path().join("auth");
|
||||
config.tasks_path = tempdir.path().join("db_path");
|
||||
config.update_file_path = tempdir.path().join("file_store");
|
||||
config.indexes_path = tempdir.path().join("indexes");
|
||||
config.snapshots_path = tempdir.path().join("snapshots");
|
||||
config.dumps_path = tempdir.path().join("dumps");
|
||||
version
|
||||
});
|
||||
handle._tempdir = tempdir;
|
||||
(scheduler, handle)
|
||||
}
|
||||
|
||||
/// Advance the scheduler to the next tick.
|
||||
/// Panic
|
||||
/// * If the scheduler is waiting for a task to be registered.
|
||||
@ -349,4 +406,18 @@ impl IndexSchedulerHandle {
|
||||
}
|
||||
self.advance_till([AfterProcessing]);
|
||||
}
|
||||
|
||||
// Asserts that the scheduler is down: it must not reach any new breakpoint.
|
||||
#[track_caller]
|
||||
pub(crate) fn scheduler_is_down(&mut self) {
|
||||
loop {
|
||||
match self
|
||||
.test_breakpoint_rcv
|
||||
.recv_timeout(std::time::Duration::from_secs(1)) {
|
||||
Ok((_, true)) => continue,
|
||||
Ok((b, false)) => panic!("The scheduler was supposed to be down but successfully moved to the next breakpoint: {b:?}"),
|
||||
Err(RecvTimeoutError::Timeout | RecvTimeoutError::Disconnected) => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
108
crates/index-scheduler/src/upgrade/mod.rs
Normal file
@ -0,0 +1,108 @@
|
||||
use anyhow::bail;
|
||||
use meilisearch_types::heed::{Env, RwTxn};
|
||||
use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
|
||||
use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
|
||||
use time::OffsetDateTime;
|
||||
use tracing::info;
|
||||
|
||||
use crate::queue::TaskQueue;
|
||||
use crate::versioning::Versioning;
|
||||
|
||||
trait UpgradeIndexScheduler {
|
||||
fn upgrade(&self, env: &Env, wtxn: &mut RwTxn, original: (u32, u32, u32))
|
||||
-> anyhow::Result<()>;
|
||||
fn target_version(&self) -> (u32, u32, u32);
|
||||
}
|
||||
|
||||
pub fn upgrade_index_scheduler(
|
||||
env: &Env,
|
||||
versioning: &Versioning,
|
||||
from: (u32, u32, u32),
|
||||
to: (u32, u32, u32),
|
||||
) -> anyhow::Result<()> {
|
||||
let current_major = to.0;
|
||||
let current_minor = to.1;
|
||||
let current_patch = to.2;
|
||||
|
||||
let upgrade_functions: &[&dyn UpgradeIndexScheduler] = &[&V1_12_ToCurrent {}];
|
||||
|
||||
let start = match from {
|
||||
(1, 12, _) => 0,
|
||||
(major, minor, patch) => {
|
||||
if major > current_major
|
||||
|| (major == current_major && minor > current_minor)
|
||||
|| (major == current_major && minor == current_minor && patch > current_patch)
|
||||
{
|
||||
bail!(
|
||||
"Database version {major}.{minor}.{patch} is higher than the Meilisearch version {current_major}.{current_minor}.{current_patch}. Downgrade is not supported",
|
||||
);
|
||||
} else if major < 1 || (major == current_major && minor < 12) {
|
||||
bail!(
|
||||
"Database version {major}.{minor}.{patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{major}.{minor}.{patch} and import it in the v{current_major}.{current_minor}.{current_patch}",
|
||||
);
|
||||
} else {
|
||||
bail!("Unknown database version: v{major}.{minor}.{patch}");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let mut current_version = from;
|
||||
|
||||
info!("Upgrading the task queue");
|
||||
for upgrade in upgrade_functions[start..].iter() {
|
||||
let target = upgrade.target_version();
|
||||
info!(
|
||||
"Upgrading from v{}.{}.{} to v{}.{}.{}",
|
||||
from.0, from.1, from.2, current_version.0, current_version.1, current_version.2
|
||||
);
|
||||
let mut wtxn = env.write_txn()?;
|
||||
upgrade.upgrade(env, &mut wtxn, from)?;
|
||||
versioning.set_version(&mut wtxn, target)?;
|
||||
wtxn.commit()?;
|
||||
current_version = target;
|
||||
}
|
||||
|
||||
let mut wtxn = env.write_txn()?;
|
||||
let queue = TaskQueue::new(env, &mut wtxn)?;
|
||||
let uid = queue.next_task_id(&wtxn)?;
|
||||
queue.register(
|
||||
&mut wtxn,
|
||||
&Task {
|
||||
uid,
|
||||
batch_uid: None,
|
||||
enqueued_at: OffsetDateTime::now_utc(),
|
||||
started_at: None,
|
||||
finished_at: None,
|
||||
error: None,
|
||||
canceled_by: None,
|
||||
details: Some(Details::UpgradeDatabase { from, to }),
|
||||
status: Status::Enqueued,
|
||||
kind: KindWithContent::UpgradeDatabase { from },
|
||||
},
|
||||
)?;
|
||||
wtxn.commit()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(non_camel_case_types)]
|
||||
struct V1_12_ToCurrent {}
|
||||
|
||||
impl UpgradeIndexScheduler for V1_12_ToCurrent {
|
||||
fn upgrade(
|
||||
&self,
|
||||
_env: &Env,
|
||||
_wtxn: &mut RwTxn,
|
||||
_original: (u32, u32, u32),
|
||||
) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn target_version(&self) -> (u32, u32, u32) {
|
||||
(
|
||||
VERSION_MAJOR.parse().unwrap(),
|
||||
VERSION_MINOR.parse().unwrap(),
|
||||
VERSION_PATCH.parse().unwrap(),
|
||||
)
|
||||
}
|
||||
}
|
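The chain above currently contains a single step, `V1_12_ToCurrent`. As a purely hypothetical sketch (not part of this commit; struct name and versions are illustrative only), a later migration could slot in by implementing the same trait and getting its own entry point in the `start` match:

// Hypothetical only: a future migration step, not part of this diff.
#[allow(non_camel_case_types)]
struct V1_13_To_V1_14 {}

impl UpgradeIndexScheduler for V1_13_To_V1_14 {
    fn upgrade(
        &self,
        _env: &Env,
        _wtxn: &mut RwTxn,
        _original: (u32, u32, u32),
    ) -> anyhow::Result<()> {
        // Rewrite whatever task-queue data changed between the two versions here.
        Ok(())
    }

    fn target_version(&self) -> (u32, u32, u32) {
        (1, 14, 0)
    }
}

// It would then be appended to the chain and matched in the `start` computation:
//     let upgrade_functions: &[&dyn UpgradeIndexScheduler] =
//         &[&V1_12_ToCurrent {}, &V1_13_To_V1_14 {}];
//     ...
//     (1, 13, _) => 1,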
@ -234,6 +234,7 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
|
||||
K::TaskCancelation { .. }
|
||||
| K::TaskDeletion { .. }
|
||||
| K::DumpCreation { .. }
|
||||
| K::UpgradeDatabase { .. }
|
||||
| K::SnapshotCreation => (),
|
||||
};
|
||||
if let Some(Details::IndexSwap { swaps }) = &mut task.details {
|
||||
@ -547,6 +548,9 @@ impl crate::IndexScheduler {
|
||||
Details::Dump { dump_uid: _ } => {
|
||||
assert_eq!(kind.as_kind(), Kind::DumpCreation);
|
||||
}
|
||||
Details::UpgradeDatabase { from: _, to: _ } => {
|
||||
assert_eq!(kind.as_kind(), Kind::UpgradeDatabase);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
75
crates/index-scheduler/src/versioning.rs
Normal file
@ -0,0 +1,75 @@
|
||||
use crate::{upgrade::upgrade_index_scheduler, Result};
|
||||
use meilisearch_types::{
|
||||
heed::{types::Str, Database, Env, RoTxn, RwTxn},
|
||||
milli::heed_codec::version::VersionCodec,
|
||||
versioning,
|
||||
};
|
||||
|
||||
/// The number of databases used by the versioning module itself
|
||||
const NUMBER_OF_DATABASES: u32 = 1;
|
||||
/// Database const names for the `IndexScheduler`.
|
||||
mod db_name {
|
||||
pub const VERSION: &str = "version";
|
||||
}
|
||||
mod entry_name {
|
||||
pub const MAIN: &str = "main";
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Versioning {
|
||||
pub version: Database<Str, VersionCodec>,
|
||||
}
|
||||
|
||||
impl Versioning {
|
||||
pub(crate) const fn nb_db() -> u32 {
|
||||
NUMBER_OF_DATABASES
|
||||
}
|
||||
|
||||
pub fn get_version(&self, rtxn: &RoTxn) -> Result<Option<(u32, u32, u32)>> {
|
||||
Ok(self.version.get(rtxn, entry_name::MAIN)?)
|
||||
}
|
||||
|
||||
pub fn set_version(&self, wtxn: &mut RwTxn, version: (u32, u32, u32)) -> Result<()> {
|
||||
Ok(self.version.put(wtxn, entry_name::MAIN, &version)?)
|
||||
}
|
||||
|
||||
pub fn set_current_version(&self, wtxn: &mut RwTxn) -> Result<()> {
|
||||
let major = versioning::VERSION_MAJOR.parse().unwrap();
|
||||
let minor = versioning::VERSION_MINOR.parse().unwrap();
|
||||
let patch = versioning::VERSION_PATCH.parse().unwrap();
|
||||
self.set_version(wtxn, (major, minor, patch))
|
||||
}
|
||||
|
||||
/// Create the versioning database and, if the stored version is outdated, run the upgrade process.
|
||||
pub(crate) fn new(env: &Env, db_version: (u32, u32, u32)) -> Result<Self> {
|
||||
let mut wtxn = env.write_txn()?;
|
||||
let version = env.create_database(&mut wtxn, Some(db_name::VERSION))?;
|
||||
let this = Self { version };
|
||||
let from = match this.get_version(&wtxn)? {
|
||||
Some(version) => version,
|
||||
// fresh DB: use the db version
|
||||
None => {
|
||||
this.set_version(&mut wtxn, db_version)?;
|
||||
db_version
|
||||
}
|
||||
};
|
||||
wtxn.commit()?;
|
||||
|
||||
let bin_major: u32 = versioning::VERSION_MAJOR.parse().unwrap();
|
||||
let bin_minor: u32 = versioning::VERSION_MINOR.parse().unwrap();
|
||||
let bin_patch: u32 = versioning::VERSION_PATCH.parse().unwrap();
|
||||
let to = (bin_major, bin_minor, bin_patch);
|
||||
|
||||
if from != to {
|
||||
upgrade_index_scheduler(env, &this, from, to)?;
|
||||
}
|
||||
|
||||
// Once we reach this point, the upgrade process, if there was one, is entirely finished;
// we can safely say we have reached the latest version of the index scheduler.
|
||||
let mut wtxn = env.write_txn()?;
|
||||
this.set_current_version(&mut wtxn)?;
|
||||
wtxn.commit()?;
|
||||
|
||||
Ok(this)
|
||||
}
|
||||
}
|
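A small standalone sketch (not from the diff) of the decision `Versioning::new` makes: a fresh database is stamped with the `db_version` argument, an existing one keeps its stored version, and the upgrade only runs when that resolved version differs from the binary's. The helper name and sample versions are illustrative.

fn resolve_from(stored: Option<(u32, u32, u32)>, db_version: (u32, u32, u32)) -> (u32, u32, u32) {
    // Mirrors the `match this.get_version(&wtxn)?` above.
    stored.unwrap_or(db_version)
}

fn main() {
    let binary = (1u32, 13u32, 0u32);
    assert_eq!(resolve_from(None, (1, 12, 0)), (1, 12, 0)); // fresh LMDB env, VERSION file said v1.12.0
    assert_eq!(resolve_from(Some((1, 12, 0)), binary), (1, 12, 0)); // existing v1.12.0 DB => upgrade runs
    assert_eq!(resolve_from(Some(binary), binary), binary); // already up to date => no upgrade
}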
@ -243,8 +243,9 @@ InvalidVectorsType , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentId , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidEmbedder , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidHybridQuery , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchEmbedder , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSimilarEmbedder , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchHybridQuery , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
|
||||
@ -370,7 +371,8 @@ VectorEmbeddingError , InvalidRequest , BAD_REQUEST ;
|
||||
NotFoundSimilarId , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentEditionContext , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentEditionFunctionFilter , InvalidRequest , BAD_REQUEST ;
|
||||
EditDocumentsByFunctionError , InvalidRequest , BAD_REQUEST
|
||||
EditDocumentsByFunctionError , InvalidRequest , BAD_REQUEST ;
|
||||
CouldNotUpgrade , InvalidRequest , BAD_REQUEST
|
||||
}
|
||||
|
||||
impl ErrorCode for JoinError {
|
||||
@ -443,7 +445,8 @@ impl ErrorCode for milli::Error {
|
||||
UserError::InvalidMinTypoWordLenSetting(_, _) => {
|
||||
Code::InvalidSettingsTypoTolerance
|
||||
}
|
||||
UserError::InvalidEmbedder(_) => Code::InvalidEmbedder,
|
||||
UserError::InvalidSearchEmbedder(_) => Code::InvalidSearchEmbedder,
|
||||
UserError::InvalidSimilarEmbedder(_) => Code::InvalidSimilarEmbedder,
|
||||
UserError::VectorEmbeddingError(_) | UserError::DocumentEmbeddingError(_) => {
|
||||
Code::VectorEmbeddingError
|
||||
}
|
||||
|
@ -3,7 +3,6 @@ use serde::{Deserialize, Serialize};
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
#[serde(rename_all = "camelCase", default)]
|
||||
pub struct RuntimeTogglableFeatures {
|
||||
pub vector_store: bool,
|
||||
pub metrics: bool,
|
||||
pub logs_route: bool,
|
||||
pub edit_documents_by_function: bool,
|
||||
|
@ -866,7 +866,7 @@ pub fn settings(
|
||||
(name, SettingEmbeddingSettings { inner: Setting::Set(config.into()) })
|
||||
})
|
||||
.collect();
|
||||
let embedders = if embedders.is_empty() { Setting::NotSet } else { Setting::Set(embedders) };
|
||||
let embedders = Setting::Set(embedders);
|
||||
|
||||
let search_cutoff_ms = index.search_cutoff(rtxn)?;
|
||||
|
||||
|
@ -114,6 +114,10 @@ pub struct DetailsView {
|
||||
pub settings: Option<Box<Settings<Unchecked>>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub swaps: Option<Vec<IndexSwap>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub upgrade_from: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub upgrade_to: Option<String>,
|
||||
}
|
||||
|
||||
impl DetailsView {
|
||||
@ -234,6 +238,18 @@ impl DetailsView {
|
||||
Some(left)
|
||||
}
|
||||
},
|
||||
// We want the earliest version
|
||||
upgrade_from: match (self.upgrade_from.clone(), other.upgrade_from.clone()) {
|
||||
(None, None) => None,
|
||||
(None, Some(from)) | (Some(from), None) => Some(from),
|
||||
(Some(from), Some(_)) => Some(from),
|
||||
},
|
||||
// And the latest
|
||||
upgrade_to: match (self.upgrade_to.clone(), other.upgrade_to.clone()) {
|
||||
(None, None) => None,
|
||||
(None, Some(to)) | (Some(to), None) => Some(to),
|
||||
(Some(_), Some(to)) => Some(to),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -311,6 +327,11 @@ impl From<Details> for DetailsView {
|
||||
Details::IndexSwap { swaps } => {
|
||||
DetailsView { swaps: Some(swaps), ..Default::default() }
|
||||
}
|
||||
Details::UpgradeDatabase { from, to } => DetailsView {
|
||||
upgrade_from: Some(format!("v{}.{}.{}", from.0, from.1, from.2)),
|
||||
upgrade_to: Some(format!("v{}.{}.{}", to.0, to.1, to.2)),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ use crate::batches::BatchId;
|
||||
use crate::error::ResponseError;
|
||||
use crate::keys::Key;
|
||||
use crate::settings::{Settings, Unchecked};
|
||||
use crate::InstanceUid;
|
||||
use crate::{versioning, InstanceUid};
|
||||
|
||||
pub type TaskId = u32;
|
||||
|
||||
@ -50,6 +50,7 @@ impl Task {
|
||||
| SnapshotCreation
|
||||
| TaskCancelation { .. }
|
||||
| TaskDeletion { .. }
|
||||
| UpgradeDatabase { .. }
|
||||
| IndexSwap { .. } => None,
|
||||
DocumentAdditionOrUpdate { index_uid, .. }
|
||||
| DocumentEdition { index_uid, .. }
|
||||
@ -84,7 +85,8 @@ impl Task {
|
||||
| KindWithContent::TaskCancelation { .. }
|
||||
| KindWithContent::TaskDeletion { .. }
|
||||
| KindWithContent::DumpCreation { .. }
|
||||
| KindWithContent::SnapshotCreation => None,
|
||||
| KindWithContent::SnapshotCreation
|
||||
| KindWithContent::UpgradeDatabase { .. } => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -150,6 +152,9 @@ pub enum KindWithContent {
|
||||
instance_uid: Option<InstanceUid>,
|
||||
},
|
||||
SnapshotCreation,
|
||||
UpgradeDatabase {
|
||||
from: (u32, u32, u32),
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
|
||||
@ -175,6 +180,7 @@ impl KindWithContent {
|
||||
KindWithContent::TaskDeletion { .. } => Kind::TaskDeletion,
|
||||
KindWithContent::DumpCreation { .. } => Kind::DumpCreation,
|
||||
KindWithContent::SnapshotCreation => Kind::SnapshotCreation,
|
||||
KindWithContent::UpgradeDatabase { .. } => Kind::UpgradeDatabase,
|
||||
}
|
||||
}
|
||||
|
||||
@ -185,7 +191,8 @@ impl KindWithContent {
|
||||
DumpCreation { .. }
|
||||
| SnapshotCreation
|
||||
| TaskCancelation { .. }
|
||||
| TaskDeletion { .. } => vec![],
|
||||
| TaskDeletion { .. }
|
||||
| UpgradeDatabase { .. } => vec![],
|
||||
DocumentAdditionOrUpdate { index_uid, .. }
|
||||
| DocumentEdition { index_uid, .. }
|
||||
| DocumentDeletion { index_uid, .. }
|
||||
@ -262,6 +269,14 @@ impl KindWithContent {
|
||||
}),
|
||||
KindWithContent::DumpCreation { .. } => Some(Details::Dump { dump_uid: None }),
|
||||
KindWithContent::SnapshotCreation => None,
|
||||
KindWithContent::UpgradeDatabase { from } => Some(Details::UpgradeDatabase {
|
||||
from: (from.0, from.1, from.2),
|
||||
to: (
|
||||
versioning::VERSION_MAJOR.parse().unwrap(),
|
||||
versioning::VERSION_MINOR.parse().unwrap(),
|
||||
versioning::VERSION_PATCH.parse().unwrap(),
|
||||
),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
@ -320,6 +335,14 @@ impl KindWithContent {
|
||||
}),
|
||||
KindWithContent::DumpCreation { .. } => Some(Details::Dump { dump_uid: None }),
|
||||
KindWithContent::SnapshotCreation => None,
|
||||
KindWithContent::UpgradeDatabase { from } => Some(Details::UpgradeDatabase {
|
||||
from: *from,
|
||||
to: (
|
||||
versioning::VERSION_MAJOR.parse().unwrap(),
|
||||
versioning::VERSION_MINOR.parse().unwrap(),
|
||||
versioning::VERSION_PATCH.parse().unwrap(),
|
||||
),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -360,6 +383,14 @@ impl From<&KindWithContent> for Option<Details> {
|
||||
}),
|
||||
KindWithContent::DumpCreation { .. } => Some(Details::Dump { dump_uid: None }),
|
||||
KindWithContent::SnapshotCreation => None,
|
||||
KindWithContent::UpgradeDatabase { from } => Some(Details::UpgradeDatabase {
|
||||
from: *from,
|
||||
to: (
|
||||
versioning::VERSION_MAJOR.parse().unwrap(),
|
||||
versioning::VERSION_MINOR.parse().unwrap(),
|
||||
versioning::VERSION_PATCH.parse().unwrap(),
|
||||
),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -468,6 +499,7 @@ pub enum Kind {
|
||||
TaskDeletion,
|
||||
DumpCreation,
|
||||
SnapshotCreation,
|
||||
UpgradeDatabase,
|
||||
}
|
||||
|
||||
impl Kind {
|
||||
@ -484,6 +516,7 @@ impl Kind {
|
||||
| Kind::TaskCancelation
|
||||
| Kind::TaskDeletion
|
||||
| Kind::DumpCreation
|
||||
| Kind::UpgradeDatabase
|
||||
| Kind::SnapshotCreation => false,
|
||||
}
|
||||
}
|
||||
@ -503,6 +536,7 @@ impl Display for Kind {
|
||||
Kind::TaskDeletion => write!(f, "taskDeletion"),
|
||||
Kind::DumpCreation => write!(f, "dumpCreation"),
|
||||
Kind::SnapshotCreation => write!(f, "snapshotCreation"),
|
||||
Kind::UpgradeDatabase => write!(f, "upgradeDatabase"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -534,6 +568,8 @@ impl FromStr for Kind {
|
||||
Ok(Kind::DumpCreation)
|
||||
} else if kind.eq_ignore_ascii_case("snapshotCreation") {
|
||||
Ok(Kind::SnapshotCreation)
|
||||
} else if kind.eq_ignore_ascii_case("upgradeDatabase") {
|
||||
Ok(Kind::UpgradeDatabase)
|
||||
} else {
|
||||
Err(ParseTaskKindError(kind.to_owned()))
|
||||
}
|
||||
@ -607,6 +643,10 @@ pub enum Details {
|
||||
IndexSwap {
|
||||
swaps: Vec<IndexSwap>,
|
||||
},
|
||||
UpgradeDatabase {
|
||||
from: (u32, u32, u32),
|
||||
to: (u32, u32, u32),
|
||||
},
|
||||
}
|
||||
|
||||
impl Details {
|
||||
@ -627,6 +667,7 @@ impl Details {
|
||||
Self::SettingsUpdate { .. }
|
||||
| Self::IndexInfo { .. }
|
||||
| Self::Dump { .. }
|
||||
| Self::UpgradeDatabase { .. }
|
||||
| Self::IndexSwap { .. } => (),
|
||||
}
|
||||
|
||||
@ -687,7 +728,9 @@ pub fn serialize_duration<S: Serializer>(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Details;
|
||||
use std::str::FromStr;
|
||||
|
||||
use super::{Details, Kind};
|
||||
use crate::heed::types::SerdeJson;
|
||||
use crate::heed::{BytesDecode, BytesEncode};
|
||||
|
||||
@ -703,4 +746,13 @@ mod tests {
|
||||
meili_snap::snapshot!(format!("{:?}", details), @r###"TaskDeletion { matched_tasks: 1, deleted_tasks: None, original_filter: "hello" }"###);
|
||||
meili_snap::snapshot!(format!("{:?}", deserialised), @r###"TaskDeletion { matched_tasks: 1, deleted_tasks: None, original_filter: "hello" }"###);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn all_kind_can_be_from_str() {
|
||||
for kind in enum_iterator::all::<Kind>() {
|
||||
let s = kind.to_string();
|
||||
let k = Kind::from_str(&s).map_err(|e| format!("Could not from_str {s}: {e}")).unwrap();
|
||||
assert_eq!(kind, k, "{kind}.to_string() returned {s} which was parsed as {k}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -5,9 +5,39 @@ use std::path::Path;
|
||||
/// The name of the file that contains the version of the database.
|
||||
pub const VERSION_FILE_NAME: &str = "VERSION";
|
||||
|
||||
static VERSION_MAJOR: &str = env!("CARGO_PKG_VERSION_MAJOR");
|
||||
static VERSION_MINOR: &str = env!("CARGO_PKG_VERSION_MINOR");
|
||||
static VERSION_PATCH: &str = env!("CARGO_PKG_VERSION_PATCH");
|
||||
pub static VERSION_MAJOR: &str = env!("CARGO_PKG_VERSION_MAJOR");
|
||||
pub static VERSION_MINOR: &str = env!("CARGO_PKG_VERSION_MINOR");
|
||||
pub static VERSION_PATCH: &str = env!("CARGO_PKG_VERSION_PATCH");
|
||||
|
||||
/// Persists the version of the current Meilisearch binary to a VERSION file
|
||||
pub fn update_version_file_for_dumpless_upgrade(
|
||||
db_path: &Path,
|
||||
from: (u32, u32, u32),
|
||||
to: (u32, u32, u32),
|
||||
) -> Result<(), VersionFileError> {
|
||||
let (from_major, from_minor, from_patch) = from;
|
||||
let (to_major, to_minor, to_patch) = to;
|
||||
|
||||
if from_major > to_major
|
||||
|| (from_major == to_major && from_minor > to_minor)
|
||||
|| (from_major == to_major && from_minor == to_minor && from_patch > to_patch)
|
||||
{
|
||||
Err(VersionFileError::DowngradeNotSupported {
|
||||
major: from_major,
|
||||
minor: from_minor,
|
||||
patch: from_patch,
|
||||
})
|
||||
} else if from_major < 1 || (from_major == to_major && from_minor < 12) {
|
||||
Err(VersionFileError::TooOldForAutomaticUpgrade {
|
||||
major: from_major,
|
||||
minor: from_minor,
|
||||
patch: from_patch,
|
||||
})
|
||||
} else {
|
||||
create_current_version_file(db_path)?;
|
||||
Ok(())
|
||||
}
|
||||
}
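The three-way comparison above is the usual lexicographic ordering on version triples. A standalone sketch (not from the diff) showing that the explicit check agrees with Rust's built-in tuple ordering, which could express the same downgrade test:

fn main() {
    let from = (1u32, 13u32, 0u32);
    let to = (1u32, 12u32, 0u32);
    // Same condition as in `update_version_file_for_dumpless_upgrade` above.
    let downgrade = from.0 > to.0
        || (from.0 == to.0 && from.1 > to.1)
        || (from.0 == to.0 && from.1 == to.1 && from.2 > to.2);
    // Tuples compare field by field, left to right, so `from > to` says the same thing.
    assert_eq!(downgrade, from > to);
    println!("downgrade detected: {downgrade}");
}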
|
||||
|
||||
/// Persists the version of the current Meilisearch binary to a VERSION file
|
||||
pub fn create_current_version_file(db_path: &Path) -> io::Result<()> {
|
||||
@ -24,18 +54,7 @@ pub fn create_version_file(
|
||||
fs::write(version_path, format!("{}.{}.{}", major, minor, patch))
|
||||
}
|
||||
|
||||
/// Ensures the Meilisearch version is compatible with the database; returns an error if the versions mismatch.
|
||||
pub fn check_version_file(db_path: &Path) -> anyhow::Result<()> {
|
||||
let (major, minor, patch) = get_version(db_path)?;
|
||||
|
||||
if major != VERSION_MAJOR || minor != VERSION_MINOR {
|
||||
return Err(VersionFileError::VersionMismatch { major, minor, patch }.into());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_version(db_path: &Path) -> Result<(String, String, String), VersionFileError> {
|
||||
pub fn get_version(db_path: &Path) -> Result<(u32, u32, u32), VersionFileError> {
|
||||
let version_path = db_path.join(VERSION_FILE_NAME);
|
||||
|
||||
match fs::read_to_string(version_path) {
|
||||
@ -47,11 +66,28 @@ pub fn get_version(db_path: &Path) -> Result<(String, String, String), VersionFi
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_version(version: &str) -> Result<(String, String, String), VersionFileError> {
|
||||
let version_components = version.split('.').collect::<Vec<_>>();
|
||||
pub fn parse_version(version: &str) -> Result<(u32, u32, u32), VersionFileError> {
|
||||
let version_components = version.trim().split('.').collect::<Vec<_>>();
|
||||
let (major, minor, patch) = match &version_components[..] {
|
||||
[major, minor, patch] => (major.to_string(), minor.to_string(), patch.to_string()),
|
||||
_ => return Err(VersionFileError::MalformedVersionFile),
|
||||
[major, minor, patch] => (
|
||||
major.parse().map_err(|e| VersionFileError::MalformedVersionFile {
|
||||
context: format!("Could not parse the major: {e}"),
|
||||
})?,
|
||||
minor.parse().map_err(|e| VersionFileError::MalformedVersionFile {
|
||||
context: format!("Could not parse the minor: {e}"),
|
||||
})?,
|
||||
patch.parse().map_err(|e| VersionFileError::MalformedVersionFile {
|
||||
context: format!("Could not parse the patch: {e}"),
|
||||
})?,
|
||||
),
|
||||
_ => {
|
||||
return Err(VersionFileError::MalformedVersionFile {
|
||||
context: format!(
|
||||
"The version contains {} parts instead of 3 (major, minor and patch)",
|
||||
version_components.len()
|
||||
),
|
||||
})
|
||||
}
|
||||
};
|
||||
Ok((major, minor, patch))
|
||||
}
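A quick sketch of the new `parse_version` contract as implied by the hunk above (numeric components, surrounding whitespace trimmed, descriptive `MalformedVersionFile` contexts otherwise); this test is illustrative and not part of the patch:

#[test]
fn parse_version_contract() {
    assert_eq!(parse_version("1.13.0").unwrap(), (1, 13, 0));
    assert_eq!(parse_version("1.13.0\n").unwrap(), (1, 13, 0)); // a VERSION file ending in a newline now parses
    assert!(parse_version("1.13").is_err());  // only two components
    assert!(parse_version("1.x.0").is_err()); // non-numeric minor
}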
@ -64,14 +100,18 @@ pub enum VersionFileError {
env!("CARGO_PKG_VERSION").to_string()
)]
MissingVersionFile,
#[error("Version file is corrupted and thus Meilisearch is unable to determine the version of the database.")]
MalformedVersionFile,
#[error("Version file is corrupted and thus Meilisearch is unable to determine the version of the database. {context}")]
MalformedVersionFile { context: String },
#[error(
"Your database version ({major}.{minor}.{patch}) is incompatible with your current engine version ({}).\n\
To migrate data between Meilisearch versions, please follow our guide on https://www.meilisearch.com/docs/learn/update_and_migration/updating.",
env!("CARGO_PKG_VERSION").to_string()
)]
VersionMismatch { major: String, minor: String, patch: String },
VersionMismatch { major: u32, minor: u32, patch: u32 },
#[error("Database version {major}.{minor}.{patch} is higher than the Meilisearch version {VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}. Downgrade is not supported")]
DowngradeNotSupported { major: u32, minor: u32, patch: u32 },
#[error("Database version {major}.{minor}.{patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{major}.{minor}.{patch} and import it in the v{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}")]
TooOldForAutomaticUpgrade { major: u32, minor: u32, patch: u32 },

#[error(transparent)]
IoError(#[from] std::io::Error),
@ -105,8 +105,16 @@ tracing-actix-web = "0.7.15"
build-info = { version = "1.7.0", path = "../build-info" }
roaring = "0.10.10"
mopa-maintained = "0.2.3"
utoipa = { version = "5.3.1", features = ["actix_extras", "macros", "non_strict_integers", "preserve_order", "uuid", "time", "openapi_extensions"] }
utoipa-scalar = { version = "0.2.1", optional = true, features = ["actix-web"] }
utoipa = { version = "5.3.1", features = [
"actix_extras",
"macros",
"non_strict_integers",
"preserve_order",
"uuid",
"time",
"openapi_extensions",
] }
utoipa-scalar = { version = "0.3.0", optional = true, features = ["actix-web"] }

[dev-dependencies]
actix-rt = "2.10.0"

BIN crates/meilisearch/db.snapshot (new file, binary file not shown)
@ -177,19 +177,19 @@ impl SegmentAnalytics {
|
||||
/// This structure represent the `infos` field we send in the analytics.
|
||||
/// It's quite close to the `Opt` structure except all sensitive informations
|
||||
/// have been simplified to a boolean.
|
||||
/// It's send as-is in amplitude thus you should never update a name of the
|
||||
/// It's sent as-is in amplitude thus you should never update a name of the
|
||||
/// struct without the approval of the PM.
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
struct Infos {
|
||||
env: String,
|
||||
experimental_contains_filter: bool,
|
||||
experimental_vector_store: bool,
|
||||
experimental_enable_metrics: bool,
|
||||
experimental_edit_documents_by_function: bool,
|
||||
experimental_search_queue_size: usize,
|
||||
experimental_drop_search_after: usize,
|
||||
experimental_nb_searches_per_core: usize,
|
||||
experimental_logs_mode: LogMode,
|
||||
experimental_dumpless_upgrade: bool,
|
||||
experimental_replication_parameters: bool,
|
||||
experimental_enable_logs_route: bool,
|
||||
experimental_reduce_indexing_memory_usage: bool,
|
||||
@ -236,6 +236,7 @@ impl Infos {
|
||||
experimental_drop_search_after,
|
||||
experimental_nb_searches_per_core,
|
||||
experimental_logs_mode,
|
||||
experimental_dumpless_upgrade,
|
||||
experimental_replication_parameters,
|
||||
experimental_enable_logs_route,
|
||||
experimental_reduce_indexing_memory_usage,
|
||||
@ -280,7 +281,6 @@ impl Infos {
|
||||
indexer_options;
|
||||
|
||||
let RuntimeTogglableFeatures {
|
||||
vector_store,
|
||||
metrics,
|
||||
logs_route,
|
||||
edit_documents_by_function,
|
||||
@ -292,13 +292,13 @@ impl Infos {
|
||||
Self {
|
||||
env,
|
||||
experimental_contains_filter: experimental_contains_filter | contains_filter,
|
||||
experimental_vector_store: vector_store,
|
||||
experimental_edit_documents_by_function: edit_documents_by_function,
|
||||
experimental_enable_metrics: experimental_enable_metrics | metrics,
|
||||
experimental_search_queue_size,
|
||||
experimental_drop_search_after: experimental_drop_search_after.into(),
|
||||
experimental_nb_searches_per_core: experimental_nb_searches_per_core.into(),
|
||||
experimental_logs_mode,
|
||||
experimental_dumpless_upgrade,
|
||||
experimental_replication_parameters,
|
||||
experimental_enable_logs_route: experimental_enable_logs_route | logs_route,
|
||||
experimental_reduce_indexing_memory_usage,
|
||||
|
@ -34,11 +34,15 @@ use error::PayloadError;
|
||||
use extractors::payload::PayloadConfig;
|
||||
use index_scheduler::{IndexScheduler, IndexSchedulerOptions};
|
||||
use meilisearch_auth::AuthController;
|
||||
use meilisearch_types::milli::constants::VERSION_MAJOR;
|
||||
use meilisearch_types::milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
|
||||
use meilisearch_types::milli::update::{IndexDocumentsConfig, IndexDocumentsMethod};
|
||||
use meilisearch_types::settings::apply_settings_to_builder;
|
||||
use meilisearch_types::tasks::KindWithContent;
|
||||
use meilisearch_types::versioning::{check_version_file, create_current_version_file};
|
||||
use meilisearch_types::versioning::{
|
||||
create_current_version_file, get_version, update_version_file_for_dumpless_upgrade,
|
||||
VersionFileError, VERSION_MINOR, VERSION_PATCH,
|
||||
};
|
||||
use meilisearch_types::{compression, milli, VERSION_FILE_NAME};
|
||||
pub use option::Opt;
|
||||
use option::ScheduleSnapshot;
|
||||
@ -206,13 +210,47 @@ enum OnFailure {
|
||||
}
|
||||
|
||||
pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
|
||||
let index_scheduler_opt = IndexSchedulerOptions {
|
||||
version_file_path: opt.db_path.join(VERSION_FILE_NAME),
|
||||
auth_path: opt.db_path.join("auth"),
|
||||
tasks_path: opt.db_path.join("tasks"),
|
||||
update_file_path: opt.db_path.join("update_files"),
|
||||
indexes_path: opt.db_path.join("indexes"),
|
||||
snapshots_path: opt.snapshot_dir.clone(),
|
||||
dumps_path: opt.dump_dir.clone(),
|
||||
webhook_url: opt.task_webhook_url.as_ref().map(|url| url.to_string()),
|
||||
webhook_authorization_header: opt.task_webhook_authorization_header.clone(),
|
||||
task_db_size: opt.max_task_db_size.as_u64() as usize,
|
||||
index_base_map_size: opt.max_index_size.as_u64() as usize,
|
||||
enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
|
||||
indexer_config: Arc::new((&opt.indexer_options).try_into()?),
|
||||
autobatching_enabled: true,
|
||||
cleanup_enabled: !opt.experimental_replication_parameters,
|
||||
max_number_of_tasks: 1_000_000,
|
||||
max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks,
|
||||
batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size,
|
||||
index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().as_u64() as usize,
|
||||
index_count: DEFAULT_INDEX_COUNT,
|
||||
instance_features: opt.to_instance_features(),
|
||||
auto_upgrade: opt.experimental_dumpless_upgrade,
|
||||
};
|
||||
let bin_major: u32 = VERSION_MAJOR.parse().unwrap();
|
||||
let bin_minor: u32 = VERSION_MINOR.parse().unwrap();
|
||||
let bin_patch: u32 = VERSION_PATCH.parse().unwrap();
|
||||
let binary_version = (bin_major, bin_minor, bin_patch);
|
||||
|
||||
let empty_db = is_empty_db(&opt.db_path);
|
||||
let (index_scheduler, auth_controller) = if let Some(ref snapshot_path) = opt.import_snapshot {
|
||||
let snapshot_path_exists = snapshot_path.exists();
|
||||
// the db is empty and the snapshot exists, import it
|
||||
if empty_db && snapshot_path_exists {
|
||||
match compression::from_tar_gz(snapshot_path, &opt.db_path) {
|
||||
Ok(()) => open_or_create_database_unchecked(opt, OnFailure::RemoveDb)?,
|
||||
Ok(()) => open_or_create_database_unchecked(
|
||||
opt,
|
||||
index_scheduler_opt,
|
||||
OnFailure::RemoveDb,
|
||||
binary_version, // the db is empty
|
||||
)?,
|
||||
Err(e) => {
|
||||
std::fs::remove_dir_all(&opt.db_path)?;
|
||||
return Err(e);
|
||||
@ -229,14 +267,18 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
|
||||
bail!("snapshot doesn't exist at {}", snapshot_path.display())
|
||||
// the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
|
||||
} else {
|
||||
open_or_create_database(opt, empty_db)?
|
||||
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
|
||||
}
|
||||
} else if let Some(ref path) = opt.import_dump {
|
||||
let src_path_exists = path.exists();
|
||||
// the db is empty and the dump exists, import it
|
||||
if empty_db && src_path_exists {
|
||||
let (mut index_scheduler, mut auth_controller) =
|
||||
open_or_create_database_unchecked(opt, OnFailure::RemoveDb)?;
|
||||
let (mut index_scheduler, mut auth_controller) = open_or_create_database_unchecked(
|
||||
opt,
|
||||
index_scheduler_opt,
|
||||
OnFailure::RemoveDb,
|
||||
binary_version, // the db is empty
|
||||
)?;
|
||||
match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
|
||||
Ok(()) => (index_scheduler, auth_controller),
|
||||
Err(e) => {
|
||||
@ -256,10 +298,10 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
|
||||
// the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
|
||||
// or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
|
||||
} else {
|
||||
open_or_create_database(opt, empty_db)?
|
||||
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
|
||||
}
|
||||
} else {
|
||||
open_or_create_database(opt, empty_db)?
|
||||
open_or_create_database(opt, index_scheduler_opt, empty_db, binary_version)?
|
||||
};
|
||||
|
||||
// We create a loop in a thread that registers snapshotCreation tasks
|
||||
@ -287,37 +329,15 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
|
||||
/// Try to start the IndexScheduler and AuthController without checking the VERSION file or anything.
|
||||
fn open_or_create_database_unchecked(
|
||||
opt: &Opt,
|
||||
index_scheduler_opt: IndexSchedulerOptions,
|
||||
on_failure: OnFailure,
|
||||
version: (u32, u32, u32),
|
||||
) -> anyhow::Result<(IndexScheduler, AuthController)> {
|
||||
// we don't want to create anything in the data.ms yet, thus we
|
||||
// wrap our two builders in a closure that'll be executed later.
|
||||
let auth_controller = AuthController::new(&opt.db_path, &opt.master_key);
|
||||
let instance_features = opt.to_instance_features();
|
||||
let index_scheduler_builder = || -> anyhow::Result<_> {
|
||||
Ok(IndexScheduler::new(IndexSchedulerOptions {
|
||||
version_file_path: opt.db_path.join(VERSION_FILE_NAME),
|
||||
auth_path: opt.db_path.join("auth"),
|
||||
tasks_path: opt.db_path.join("tasks"),
|
||||
update_file_path: opt.db_path.join("update_files"),
|
||||
indexes_path: opt.db_path.join("indexes"),
|
||||
snapshots_path: opt.snapshot_dir.clone(),
|
||||
dumps_path: opt.dump_dir.clone(),
|
||||
webhook_url: opt.task_webhook_url.as_ref().map(|url| url.to_string()),
|
||||
webhook_authorization_header: opt.task_webhook_authorization_header.clone(),
|
||||
task_db_size: opt.max_task_db_size.as_u64() as usize,
|
||||
index_base_map_size: opt.max_index_size.as_u64() as usize,
|
||||
enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
|
||||
indexer_config: Arc::new((&opt.indexer_options).try_into()?),
|
||||
autobatching_enabled: true,
|
||||
cleanup_enabled: !opt.experimental_replication_parameters,
|
||||
max_number_of_tasks: 1_000_000,
|
||||
max_number_of_batched_tasks: opt.experimental_max_number_of_batched_tasks,
|
||||
batched_tasks_size_limit: opt.experimental_limit_batched_tasks_total_size,
|
||||
index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().as_u64() as usize,
|
||||
index_count: DEFAULT_INDEX_COUNT,
|
||||
instance_features,
|
||||
})?)
|
||||
};
|
||||
let index_scheduler_builder =
|| -> anyhow::Result<_> { Ok(IndexScheduler::new(index_scheduler_opt, version)?) };

match (
index_scheduler_builder(),
@ -334,16 +354,42 @@ fn open_or_create_database_unchecked(
}
}

/// Ensures Meilisearch version is compatible with the database, returns an error in case of version mismatch.
/// Returns the version that was contained in the version file
fn check_version(opt: &Opt, binary_version: (u32, u32, u32)) -> anyhow::Result<(u32, u32, u32)> {
let (bin_major, bin_minor, bin_patch) = binary_version;
let (db_major, db_minor, db_patch) = get_version(&opt.db_path)?;

if db_major != bin_major || db_minor != bin_minor || db_patch > bin_patch {
if opt.experimental_dumpless_upgrade {
update_version_file_for_dumpless_upgrade(
&opt.db_path,
(db_major, db_minor, db_patch),
(bin_major, bin_minor, bin_patch),
)?;
} else {
return Err(VersionFileError::VersionMismatch {
major: db_major,
minor: db_minor,
patch: db_patch,
}
.into());
}
}

Ok((db_major, db_minor, db_patch))
}
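A caller-side sketch of the gate above, with illustrative version numbers (not from the patch): a database written by 1.12.2 opened by a 1.13.0 binary.

// With --experimental-dumpless-upgrade set, the VERSION file is rewritten to the
// binary's version and the old version is returned, so the caller still knows
// an upgrade has to be driven for this database.
let db_version = check_version(&opt, (1, 13, 0))?; // -> Ok((1, 12, 2))

// Without the flag, the call returns Err(VersionFileError::VersionMismatch { .. }),
// pointing users to the dump-based migration guide instead.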

/// Ensure you're in a valid state and open the IndexScheduler + AuthController for you.
fn open_or_create_database(
opt: &Opt,
index_scheduler_opt: IndexSchedulerOptions,
empty_db: bool,
binary_version: (u32, u32, u32),
) -> anyhow::Result<(IndexScheduler, AuthController)> {
if !empty_db {
check_version_file(&opt.db_path)?;
}
let version = if !empty_db { check_version(opt, binary_version)? } else { binary_version };

open_or_create_database_unchecked(opt, OnFailure::KeepDb)
open_or_create_database_unchecked(opt, index_scheduler_opt, OnFailure::KeepDb, version)
}

fn import_dump(
@ -49,6 +49,7 @@ const MEILI_IGNORE_DUMP_IF_DB_EXISTS: &str = "MEILI_IGNORE_DUMP_IF_DB_EXISTS";
const MEILI_DUMP_DIR: &str = "MEILI_DUMP_DIR";
const MEILI_LOG_LEVEL: &str = "MEILI_LOG_LEVEL";
const MEILI_EXPERIMENTAL_LOGS_MODE: &str = "MEILI_EXPERIMENTAL_LOGS_MODE";
const MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE: &str = "MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE";
const MEILI_EXPERIMENTAL_REPLICATION_PARAMETERS: &str = "MEILI_EXPERIMENTAL_REPLICATION_PARAMETERS";
const MEILI_EXPERIMENTAL_ENABLE_LOGS_ROUTE: &str = "MEILI_EXPERIMENTAL_ENABLE_LOGS_ROUTE";
const MEILI_EXPERIMENTAL_CONTAINS_FILTER: &str = "MEILI_EXPERIMENTAL_CONTAINS_FILTER";
@ -400,6 +401,13 @@ pub struct Opt {
#[serde(default)]
pub experimental_logs_mode: LogMode,

/// Experimental dumpless upgrade. For more information, see: <https://github.com/orgs/meilisearch/discussions/804>
///
/// When set, Meilisearch will auto-update its database without using a dump.
#[clap(long, env = MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE, default_value_t)]
#[serde(default)]
pub experimental_dumpless_upgrade: bool,

/// Experimental logs route feature. For more information,
/// see: <https://github.com/orgs/meilisearch/discussions/721>
///
@ -535,6 +543,7 @@ impl Opt {
experimental_drop_search_after,
experimental_nb_searches_per_core,
experimental_logs_mode,
experimental_dumpless_upgrade,
experimental_enable_logs_route,
experimental_replication_parameters,
experimental_reduce_indexing_memory_usage,
@ -608,6 +617,10 @@ impl Opt {
MEILI_EXPERIMENTAL_LOGS_MODE,
experimental_logs_mode.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE,
experimental_dumpless_upgrade.to_string(),
);
export_to_env_if_not_present(
MEILI_EXPERIMENTAL_REPLICATION_PARAMETERS,
experimental_replication_parameters.to_string(),
@ -46,7 +46,6 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
|
||||
security(("Bearer" = ["experimental_features.get", "experimental_features.*", "*"])),
|
||||
responses(
|
||||
(status = OK, description = "Experimental features are returned", body = RuntimeTogglableFeatures, content_type = "application/json", example = json!(RuntimeTogglableFeatures {
|
||||
vector_store: Some(true),
|
||||
metrics: Some(true),
|
||||
logs_route: Some(false),
|
||||
edit_documents_by_function: Some(false),
|
||||
@ -71,6 +70,7 @@ async fn get_features(
|
||||
let features = index_scheduler.features();
|
||||
|
||||
let features = features.runtime_features();
|
||||
let features: RuntimeTogglableFeatures = features.into();
|
||||
debug!(returns = ?features, "Get features");
|
||||
HttpResponse::Ok().json(features)
|
||||
}
|
||||
@ -80,8 +80,6 @@ async fn get_features(
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[schema(rename_all = "camelCase")]
|
||||
pub struct RuntimeTogglableFeatures {
|
||||
#[deserr(default)]
|
||||
pub vector_store: Option<bool>,
|
||||
#[deserr(default)]
|
||||
pub metrics: Option<bool>,
|
||||
#[deserr(default)]
|
||||
@ -92,9 +90,26 @@ pub struct RuntimeTogglableFeatures {
|
||||
pub contains_filter: Option<bool>,
|
||||
}
|
||||
|
||||
impl From<meilisearch_types::features::RuntimeTogglableFeatures> for RuntimeTogglableFeatures {
|
||||
fn from(value: meilisearch_types::features::RuntimeTogglableFeatures) -> Self {
|
||||
let meilisearch_types::features::RuntimeTogglableFeatures {
|
||||
metrics,
|
||||
logs_route,
|
||||
edit_documents_by_function,
|
||||
contains_filter,
|
||||
} = value;
|
||||
|
||||
Self {
|
||||
metrics: Some(metrics),
|
||||
logs_route: Some(logs_route),
|
||||
edit_documents_by_function: Some(edit_documents_by_function),
|
||||
contains_filter: Some(contains_filter),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct PatchExperimentalFeatureAnalytics {
|
||||
vector_store: bool,
|
||||
metrics: bool,
|
||||
logs_route: bool,
|
||||
edit_documents_by_function: bool,
|
||||
@ -108,7 +123,6 @@ impl Aggregate for PatchExperimentalFeatureAnalytics {
|
||||
|
||||
fn aggregate(self: Box<Self>, new: Box<Self>) -> Box<Self> {
|
||||
Box::new(Self {
|
||||
vector_store: new.vector_store,
|
||||
metrics: new.metrics,
|
||||
logs_route: new.logs_route,
|
||||
edit_documents_by_function: new.edit_documents_by_function,
|
||||
@ -131,7 +145,6 @@ impl Aggregate for PatchExperimentalFeatureAnalytics {
|
||||
security(("Bearer" = ["experimental_features.update", "experimental_features.*", "*"])),
|
||||
responses(
|
||||
(status = OK, description = "Experimental features are returned", body = RuntimeTogglableFeatures, content_type = "application/json", example = json!(RuntimeTogglableFeatures {
|
||||
vector_store: Some(true),
|
||||
metrics: Some(true),
|
||||
logs_route: Some(false),
|
||||
edit_documents_by_function: Some(false),
|
||||
@ -161,7 +174,6 @@ async fn patch_features(
|
||||
|
||||
let old_features = features.runtime_features();
|
||||
let new_features = meilisearch_types::features::RuntimeTogglableFeatures {
|
||||
vector_store: new_features.0.vector_store.unwrap_or(old_features.vector_store),
|
||||
metrics: new_features.0.metrics.unwrap_or(old_features.metrics),
|
||||
logs_route: new_features.0.logs_route.unwrap_or(old_features.logs_route),
|
||||
edit_documents_by_function: new_features
|
||||
@ -175,7 +187,6 @@ async fn patch_features(
|
||||
// it renames to camelCase, which we don't want for analytics.
|
||||
// **Do not** ignore fields with `..` or `_` here, because we want to add them in the future.
|
||||
let meilisearch_types::features::RuntimeTogglableFeatures {
|
||||
vector_store,
|
||||
metrics,
|
||||
logs_route,
|
||||
edit_documents_by_function,
|
||||
@ -184,7 +195,6 @@ async fn patch_features(
|
||||
|
||||
analytics.publish(
|
||||
PatchExperimentalFeatureAnalytics {
|
||||
vector_store,
|
||||
metrics,
|
||||
logs_route,
|
||||
edit_documents_by_function,
|
||||
@ -193,6 +203,7 @@ async fn patch_features(
|
||||
&req,
|
||||
);
|
||||
index_scheduler.put_runtime_features(new_features)?;
|
||||
let new_features: RuntimeTogglableFeatures = new_features.into();
|
||||
debug!(returns = ?new_features, "Patch features");
|
||||
Ok(HttpResponse::Ok().json(new_features))
|
||||
}
|
||||
|
@ -257,8 +257,7 @@ pub async fn get_document(
|
||||
let GetDocument { fields, retrieve_vectors: param_retrieve_vectors } = params.into_inner();
|
||||
let attributes_to_retrieve = fields.merge_star_and_none();
|
||||
|
||||
let features = index_scheduler.features();
|
||||
let retrieve_vectors = RetrieveVectors::new(param_retrieve_vectors.0, features)?;
|
||||
let retrieve_vectors = RetrieveVectors::new(param_retrieve_vectors.0);
|
||||
|
||||
analytics.publish(
|
||||
DocumentsFetchAggregator::<DocumentsGET> {
|
||||
@ -593,8 +592,7 @@ fn documents_by_query(
|
||||
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
|
||||
let BrowseQuery { offset, limit, fields, retrieve_vectors, filter } = query;
|
||||
|
||||
let features = index_scheduler.features();
|
||||
let retrieve_vectors = RetrieveVectors::new(retrieve_vectors, features)?;
|
||||
let retrieve_vectors = RetrieveVectors::new(retrieve_vectors);
|
||||
|
||||
let index = index_scheduler.index(&index_uid)?;
|
||||
let (total, documents) = retrieve_documents(
|
||||
@ -1420,7 +1418,6 @@ fn some_documents<'a, 't: 'a>(
|
||||
ret.map_err(ResponseError::from).and_then(|(key, document)| -> Result<_, ResponseError> {
|
||||
let mut document = milli::obkv_to_json(&all_fields, &fields_ids_map, document)?;
|
||||
match retrieve_vectors {
|
||||
RetrieveVectors::Ignore => {}
|
||||
RetrieveVectors::Hide => {
|
||||
document.remove("_vectors");
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ pub struct FacetSearchQuery {
|
||||
pub q: Option<String>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchVector>)]
|
||||
pub vector: Option<Vec<f32>>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidHybridQuery>)]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchHybridQuery>)]
|
||||
pub hybrid: Option<HybridQuery>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchFilter>)]
|
||||
pub filter: Option<Value>,
|
||||
@ -252,9 +252,7 @@ pub async fn search(
|
||||
}
|
||||
|
||||
let index = index_scheduler.index(&index_uid)?;
|
||||
let features = index_scheduler.features();
|
||||
let search_kind =
|
||||
search_kind(&search_query, &index_scheduler, index_uid.to_string(), &index, features)?;
|
||||
let search_kind = search_kind(&search_query, &index_scheduler, index_uid.to_string(), &index)?;
|
||||
let permit = search_queue.try_get_search_permit().await?;
|
||||
let search_result = tokio::task::spawn_blocking(move || {
|
||||
perform_facet_search(
|
||||
|
@ -1,7 +1,7 @@
|
||||
use actix_web::web::Data;
|
||||
use actix_web::{web, HttpRequest, HttpResponse};
|
||||
use deserr::actix_web::{AwebJson, AwebQueryParameter};
|
||||
use index_scheduler::{IndexScheduler, RoFeatures};
|
||||
use index_scheduler::IndexScheduler;
|
||||
use meilisearch_types::deserr::query_params::Param;
|
||||
use meilisearch_types::deserr::{DeserrJsonError, DeserrQueryParamError};
|
||||
use meilisearch_types::error::deserr_codes::*;
|
||||
@ -121,7 +121,7 @@ pub struct SearchQueryGet {
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidSearchAttributesToSearchOn>)]
|
||||
#[param(value_type = Vec<String>, explode = false)]
|
||||
pub attributes_to_search_on: Option<CS<String>>,
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidEmbedder>)]
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidSearchEmbedder>)]
|
||||
pub hybrid_embedder: Option<String>,
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidSearchSemanticRatio>)]
|
||||
#[param(value_type = f32)]
|
||||
@ -185,7 +185,7 @@ impl TryFrom<SearchQueryGet> for SearchQuery {
|
||||
(None, Some(_)) => {
|
||||
return Err(ResponseError::from_msg(
|
||||
"`hybridEmbedder` is mandatory when `hybridSemanticRatio` is present".into(),
|
||||
meilisearch_types::error::Code::InvalidHybridQuery,
|
||||
meilisearch_types::error::Code::InvalidSearchHybridQuery,
|
||||
));
|
||||
}
|
||||
(Some(embedder), None) => {
|
||||
@ -336,11 +336,10 @@ pub async fn search_with_url_query(
|
||||
let mut aggregate = SearchAggregator::<SearchGET>::from_query(&query);
|
||||
|
||||
let index = index_scheduler.index(&index_uid)?;
|
||||
let features = index_scheduler.features();
|
||||
|
||||
let search_kind =
|
||||
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index, features)?;
|
||||
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors, features)?;
|
||||
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
|
||||
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
|
||||
let permit = search_queue.try_get_search_permit().await?;
|
||||
let search_result = tokio::task::spawn_blocking(move || {
|
||||
perform_search(
|
||||
@ -444,11 +443,9 @@ pub async fn search_with_post(
|
||||
|
||||
let index = index_scheduler.index(&index_uid)?;
|
||||
|
||||
let features = index_scheduler.features();
|
||||
|
||||
let search_kind =
|
||||
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index, features)?;
|
||||
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors, features)?;
|
||||
search_kind(&query, index_scheduler.get_ref(), index_uid.to_string(), &index)?;
|
||||
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
|
||||
|
||||
let permit = search_queue.try_get_search_permit().await?;
|
||||
let search_result = tokio::task::spawn_blocking(move || {
|
||||
@ -483,15 +480,7 @@ pub fn search_kind(
|
||||
index_scheduler: &IndexScheduler,
|
||||
index_uid: String,
|
||||
index: &milli::Index,
|
||||
features: RoFeatures,
|
||||
) -> Result<SearchKind, ResponseError> {
|
||||
if query.vector.is_some() {
|
||||
features.check_vector("Passing `vector` as a parameter")?;
|
||||
}
|
||||
if query.hybrid.is_some() {
|
||||
features.check_vector("Passing `hybrid` as a parameter")?;
|
||||
}
|
||||
|
||||
// handle with care, the order of cases matters, the semantics is subtle
|
||||
match (query.q.as_deref(), &query.hybrid, query.vector.as_deref()) {
|
||||
// empty query, no vector => placeholder search
|
||||
|
@ -5,7 +5,6 @@ use index_scheduler::IndexScheduler;
|
||||
use meilisearch_types::deserr::DeserrJsonError;
|
||||
use meilisearch_types::error::ResponseError;
|
||||
use meilisearch_types::index_uid::IndexUid;
|
||||
use meilisearch_types::milli::update::Setting;
|
||||
use meilisearch_types::settings::{
|
||||
settings, SecretPolicy, SettingEmbeddingSettings, Settings, Unchecked,
|
||||
};
|
||||
@ -711,10 +710,7 @@ pub async fn delete_all(
|
||||
|
||||
fn validate_settings(
|
||||
settings: Settings<Unchecked>,
|
||||
index_scheduler: &IndexScheduler,
|
||||
_index_scheduler: &IndexScheduler,
|
||||
) -> Result<Settings<Unchecked>, ResponseError> {
|
||||
if matches!(settings.embedders, Setting::Set(_)) {
|
||||
index_scheduler.features().check_vector("Passing `embedders` in settings")?
|
||||
}
|
||||
Ok(settings.validate()?)
|
||||
}
|
||||
|
@ -19,8 +19,8 @@ use crate::extractors::authentication::GuardedData;
|
||||
use crate::extractors::sequential_extractor::SeqHandler;
|
||||
use crate::routes::indexes::similar_analytics::{SimilarAggregator, SimilarGET, SimilarPOST};
|
||||
use crate::search::{
|
||||
add_search_rules, perform_similar, RankingScoreThresholdSimilar, RetrieveVectors, SearchKind,
|
||||
SimilarQuery, SimilarResult, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET,
|
||||
add_search_rules, perform_similar, RankingScoreThresholdSimilar, RetrieveVectors, Route,
|
||||
SearchKind, SimilarQuery, SimilarResult, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET,
|
||||
};
|
||||
|
||||
#[derive(OpenApi)]
|
||||
@ -216,11 +216,7 @@ async fn similar(
|
||||
index_uid: IndexUid,
|
||||
mut query: SimilarQuery,
|
||||
) -> Result<SimilarResult, ResponseError> {
|
||||
let features = index_scheduler.features();
|
||||
|
||||
features.check_vector("Using the similar API")?;
|
||||
|
||||
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors, features)?;
|
||||
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
|
||||
|
||||
// Tenant token search_rules.
|
||||
if let Some(search_rules) = index_scheduler.filters().get_index_search_rules(&index_uid) {
|
||||
@ -235,6 +231,7 @@ async fn similar(
|
||||
&index,
|
||||
&query.embedder,
|
||||
None,
|
||||
Route::Similar,
|
||||
)?;
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
@ -281,7 +278,7 @@ pub struct SimilarQueryGet {
|
||||
#[deserr(default, error = DeserrQueryParamError<InvalidSimilarRankingScoreThreshold>, default)]
|
||||
#[param(value_type = Option<f32>)]
|
||||
pub ranking_score_threshold: Option<RankingScoreThresholdGet>,
|
||||
#[deserr(error = DeserrQueryParamError<InvalidEmbedder>)]
|
||||
#[deserr(error = DeserrQueryParamError<InvalidSimilarEmbedder>)]
|
||||
pub embedder: String,
|
||||
}
|
||||
|
||||
|
@ -240,11 +240,9 @@ pub async fn multi_search_with_post(
|
||||
index_scheduler.get_ref(),
|
||||
index_uid_str.clone(),
|
||||
&index,
|
||||
features,
|
||||
)
|
||||
.with_index(query_index)?;
|
||||
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors, features)
|
||||
.with_index(query_index)?;
|
||||
let retrieve_vector = RetrieveVectors::new(query.retrieve_vectors);
|
||||
|
||||
let search_result = tokio::task::spawn_blocking(move || {
|
||||
perform_search(
|
||||
|
@ -912,14 +912,14 @@ mod tests {
|
||||
{
|
||||
let params = "types=createIndex";
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
snapshot!(meili_snap::json_string!(err), @r#"
|
||||
{
|
||||
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
|
||||
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `upgradeDatabase`.",
|
||||
"code": "invalid_task_types",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||
}
|
||||
"###);
|
||||
"#);
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
|
@ -569,7 +569,7 @@ pub fn perform_federated_search(
|
||||
|
||||
let res: Result<(), ResponseError> = (|| {
|
||||
let search_kind =
|
||||
search_kind(&query, index_scheduler, index_uid.to_string(), &index, features)?;
|
||||
search_kind(&query, index_scheduler, index_uid.to_string(), &index)?;
|
||||
|
||||
let canonicalization_kind = match (&search_kind, &query.q) {
|
||||
(SearchKind::SemanticOnly { .. }, _) => {
|
||||
@ -631,7 +631,7 @@ pub fn perform_federated_search(
|
||||
_ => semantic_hit_count = Some(0),
|
||||
}
|
||||
|
||||
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors, features)?;
|
||||
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
|
||||
|
||||
let time_budget = match cutoff {
|
||||
Some(cutoff) => TimeBudget::new(Duration::from_millis(cutoff)),
|
||||
|
@ -63,7 +63,7 @@ pub struct SearchQuery {
|
||||
pub q: Option<String>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchVector>)]
|
||||
pub vector: Option<Vec<f32>>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidHybridQuery>)]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchHybridQuery>)]
|
||||
pub hybrid: Option<HybridQuery>,
|
||||
#[deserr(default = DEFAULT_SEARCH_OFFSET(), error = DeserrJsonError<InvalidSearchOffset>)]
|
||||
#[schema(default = DEFAULT_SEARCH_OFFSET)]
|
||||
@ -276,12 +276,12 @@ impl fmt::Debug for SearchQuery {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Deserr, ToSchema)]
|
||||
#[deserr(error = DeserrJsonError<InvalidHybridQuery>, rename_all = camelCase, deny_unknown_fields)]
|
||||
#[deserr(error = DeserrJsonError<InvalidSearchHybridQuery>, rename_all = camelCase, deny_unknown_fields)]
|
||||
pub struct HybridQuery {
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchSemanticRatio>, default)]
|
||||
#[schema(value_type = f32, default)]
|
||||
pub semantic_ratio: SemanticRatio,
|
||||
#[deserr(error = DeserrJsonError<InvalidEmbedder>)]
|
||||
#[deserr(error = DeserrJsonError<InvalidSearchEmbedder>)]
|
||||
pub embedder: String,
|
||||
}
|
||||
|
||||
@ -300,8 +300,14 @@ impl SearchKind {
|
||||
embedder_name: &str,
|
||||
vector_len: Option<usize>,
|
||||
) -> Result<Self, ResponseError> {
|
||||
let (embedder_name, embedder, quantized) =
|
||||
Self::embedder(index_scheduler, index_uid, index, embedder_name, vector_len)?;
|
||||
let (embedder_name, embedder, quantized) = Self::embedder(
|
||||
index_scheduler,
|
||||
index_uid,
|
||||
index,
|
||||
embedder_name,
|
||||
vector_len,
|
||||
Route::Search,
|
||||
)?;
|
||||
Ok(Self::SemanticOnly { embedder_name, embedder, quantized })
|
||||
}
|
||||
|
||||
@ -313,8 +319,14 @@ impl SearchKind {
|
||||
semantic_ratio: f32,
|
||||
vector_len: Option<usize>,
|
||||
) -> Result<Self, ResponseError> {
|
||||
let (embedder_name, embedder, quantized) =
|
||||
Self::embedder(index_scheduler, index_uid, index, embedder_name, vector_len)?;
|
||||
let (embedder_name, embedder, quantized) = Self::embedder(
|
||||
index_scheduler,
|
||||
index_uid,
|
||||
index,
|
||||
embedder_name,
|
||||
vector_len,
|
||||
Route::Search,
|
||||
)?;
|
||||
Ok(Self::Hybrid { embedder_name, embedder, quantized, semantic_ratio })
|
||||
}
|
||||
|
||||
@ -324,13 +336,21 @@ impl SearchKind {
|
||||
index: &Index,
|
||||
embedder_name: &str,
|
||||
vector_len: Option<usize>,
|
||||
route: Route,
|
||||
) -> Result<(String, Arc<Embedder>, bool), ResponseError> {
|
||||
let embedder_configs = index.embedding_configs(&index.read_txn()?)?;
|
||||
let embedders = index_scheduler.embedders(index_uid, embedder_configs)?;
|
||||
|
||||
let (embedder, _, quantized) = embedders
|
||||
.get(embedder_name)
|
||||
.ok_or(milli::UserError::InvalidEmbedder(embedder_name.to_owned()))
|
||||
.ok_or(match route {
|
||||
Route::Search | Route::MultiSearch => {
|
||||
milli::UserError::InvalidSearchEmbedder(embedder_name.to_owned())
|
||||
}
|
||||
Route::Similar => {
|
||||
milli::UserError::InvalidSimilarEmbedder(embedder_name.to_owned())
|
||||
}
|
||||
})
|
||||
.map_err(milli::Error::from)?;
|
||||
|
||||
if let Some(vector_len) = vector_len {
|
||||
@ -401,7 +421,7 @@ pub struct SearchQueryWithIndex {
|
||||
pub q: Option<String>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchQ>)]
|
||||
pub vector: Option<Vec<f32>>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidHybridQuery>)]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchHybridQuery>)]
|
||||
pub hybrid: Option<HybridQuery>,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSearchOffset>)]
|
||||
pub offset: Option<usize>,
|
||||
@ -553,7 +573,7 @@ pub struct SimilarQuery {
|
||||
pub limit: usize,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSimilarFilter>)]
|
||||
pub filter: Option<Value>,
|
||||
#[deserr(error = DeserrJsonError<InvalidEmbedder>)]
|
||||
#[deserr(error = DeserrJsonError<InvalidSimilarEmbedder>)]
|
||||
pub embedder: String,
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSimilarAttributesToRetrieve>)]
|
||||
pub attributes_to_retrieve: Option<BTreeSet<String>>,
|
||||
@ -1048,9 +1068,10 @@ pub struct ComputedFacets {
pub stats: BTreeMap<String, FacetStats>,
}

enum Route {
pub enum Route {
Search,
MultiSearch,
Similar,
}

fn compute_facet_distribution_stats<S: AsRef<str>>(
@ -1141,10 +1162,6 @@ struct AttributesFormat {

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RetrieveVectors {
/// Do not touch the `_vectors` field
///
/// this is the behavior when the vectorStore feature is disabled
Ignore,
/// Remove the `_vectors` field
///
/// this is the behavior when the vectorStore feature is enabled, and `retrieveVectors` is `false`
@ -1156,15 +1173,11 @@ pub enum RetrieveVectors {
}

impl RetrieveVectors {
pub fn new(
retrieve_vector: bool,
features: index_scheduler::RoFeatures,
) -> Result<Self, index_scheduler::Error> {
match (retrieve_vector, features.check_vector("Passing `retrieveVectors` as a parameter")) {
(true, Ok(())) => Ok(Self::Retrieve),
(true, Err(error)) => Err(error),
(false, Ok(())) => Ok(Self::Hide),
(false, Err(_)) => Ok(Self::Ignore),
pub fn new(retrieve_vector: bool) -> Self {
if retrieve_vector {
Self::Retrieve
} else {
Self::Hide
}
}
}
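With the vector-store feature gate removed, `RetrieveVectors::new` becomes infallible and the `Ignore` variant is no longer produced from user input. A minimal before/after sketch of a call site, using the parameter name from the routes shown earlier in this diff:

// Before: feature-gated and fallible.
// let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors, features)?;

// After: a plain mapping from the request flag to the enum.
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
assert!(matches!(RetrieveVectors::new(true), RetrieveVectors::Retrieve));
assert!(matches!(RetrieveVectors::new(false), RetrieveVectors::Hide));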
|
||||
|
1
crates/meilisearch/src/upgrade/mod.rs
Normal file
@ -0,0 +1 @@
|
||||
|
@ -42,7 +42,7 @@ async fn batch_bad_types() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r#"
|
||||
{
|
||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
|
||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `upgradeDatabase`.",
|
||||
"code": "invalid_task_types",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||
|
@ -760,15 +760,6 @@ async fn retrieve_vectors() {
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_document_retrieve_vectors"
|
||||
}
|
||||
"###);
|
||||
let (response, _code) = index.get_all_documents_raw("?retrieveVectors=true").await;
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"message": "Passing `retrieveVectors` as a parameter requires enabling the `vector store` experimental feature. See https://github.com/meilisearch/product/discussions/677",
|
||||
"code": "feature_not_enabled",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
|
||||
}
|
||||
"###);
|
||||
|
||||
// FETCH ALL DOCUMENTS BY POST
|
||||
let (response, _code) =
|
||||
@ -781,15 +772,6 @@ async fn retrieve_vectors() {
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_document_retrieve_vectors"
|
||||
}
|
||||
"###);
|
||||
let (response, _code) = index.get_document_by_filter(json!({ "retrieveVectors": true })).await;
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"message": "Passing `retrieveVectors` as a parameter requires enabling the `vector store` experimental feature. See https://github.com/meilisearch/product/discussions/677",
|
||||
"code": "feature_not_enabled",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
|
||||
}
|
||||
"###);
|
||||
|
||||
// GET A SINGLE DOCUMENT
|
||||
let (response, _code) = index.get_document(0, Some(json!({"retrieveVectors": "tamo"}))).await;
|
||||
@ -801,13 +783,4 @@ async fn retrieve_vectors() {
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_document_retrieve_vectors"
|
||||
}
|
||||
"###);
|
||||
let (response, _code) = index.get_document(0, Some(json!({"retrieveVectors": true}))).await;
|
||||
snapshot!(response, @r###"
|
||||
{
|
||||
"message": "Passing `retrieveVectors` as a parameter requires enabling the `vector store` experimental feature. See https://github.com/meilisearch/product/discussions/677",
|
||||
"code": "feature_not_enabled",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
@ -518,17 +518,6 @@ async fn get_document_by_filter() {
|
||||
async fn get_document_with_vectors() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("doggo");
|
||||
let (value, code) = server.set_features(json!({"vectorStore": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(value, @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
}
|
||||
"###);
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
|
@ -78,6 +78,7 @@ async fn import_dump_v1_movie_raw() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -243,6 +244,7 @@ async fn import_dump_v1_movie_with_settings() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -394,6 +396,7 @@ async fn import_dump_v1_rubygems_with_settings() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -531,6 +534,7 @@ async fn import_dump_v2_movie_raw() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -680,6 +684,7 @@ async fn import_dump_v2_movie_with_settings() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -828,6 +833,7 @@ async fn import_dump_v2_rubygems_with_settings() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -965,6 +971,7 @@ async fn import_dump_v3_movie_raw() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -1114,6 +1121,7 @@ async fn import_dump_v3_movie_with_settings() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -1262,6 +1270,7 @@ async fn import_dump_v3_rubygems_with_settings() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -1399,6 +1408,7 @@ async fn import_dump_v4_movie_raw() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -1548,6 +1558,7 @@ async fn import_dump_v4_movie_with_settings() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -1696,6 +1707,7 @@ async fn import_dump_v4_rubygems_with_settings() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -1893,7 +1905,6 @@ async fn import_dump_v6_containing_experimental_features() {
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"vectorStore": false,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
@ -1945,6 +1956,7 @@ async fn import_dump_v6_containing_experimental_features() {
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": null,
|
||||
"localizedAttributes": null,
|
||||
"facetSearch": true,
|
||||
@ -1988,16 +2000,7 @@ async fn generate_and_import_dump_containing_vectors() {
|
||||
let temp = tempfile::tempdir().unwrap();
|
||||
let mut opt = default_settings(temp.path());
|
||||
let server = Server::new_with_options(opt.clone()).await.unwrap();
|
||||
let (code, _) = server.set_features(json!({"vectorStore": true})).await;
|
||||
snapshot!(code, @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
}
|
||||
"###);
|
||||
|
||||
let index = server.index("pets");
|
||||
let (response, code) = index
|
||||
.update_settings(json!(
|
||||
@ -2063,7 +2066,6 @@ async fn generate_and_import_dump_containing_vectors() {
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
|
@ -7,7 +7,7 @@ use crate::json;
|
||||
/// Feature name to test against.
|
||||
/// This will have to be changed by a different one when that feature is stabilized.
|
||||
/// All tests that need to set a feature can make use of this constant.
|
||||
const FEATURE_NAME: &str = "vectorStore";
|
||||
const FEATURE_NAME: &str = "metrics";
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn experimental_features() {
|
||||
@ -18,7 +18,6 @@ async fn experimental_features() {
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"vectorStore": false,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
@ -31,8 +30,7 @@ async fn experimental_features() {
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"metrics": true,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
@ -44,8 +42,7 @@ async fn experimental_features() {
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"metrics": true,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
@ -58,8 +55,7 @@ async fn experimental_features() {
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"metrics": true,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
@ -72,8 +68,7 @@ async fn experimental_features() {
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"metrics": true,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
@ -93,7 +88,6 @@ async fn experimental_feature_metrics() {
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"vectorStore": false,
|
||||
"metrics": true,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
@ -152,7 +146,7 @@ async fn errors() {
|
||||
meili_snap::snapshot!(code, @"400 Bad Request");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"message": "Unknown field `NotAFeature`: expected one of `vectorStore`, `metrics`, `logsRoute`, `editDocumentsByFunction`, `containsFilter`",
|
||||
"message": "Unknown field `NotAFeature`: expected one of `metrics`, `logsRoute`, `editDocumentsByFunction`, `containsFilter`",
|
||||
"code": "bad_request",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#bad_request"
|
||||
@ -165,7 +159,7 @@ async fn errors() {
|
||||
meili_snap::snapshot!(code, @"400 Bad Request");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid value type at `.vectorStore`: expected a boolean, but found a positive integer: `42`",
|
||||
"message": "Invalid value type at `.metrics`: expected a boolean, but found a positive integer: `42`",
|
||||
"code": "bad_request",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#bad_request"
|
||||
@ -178,7 +172,7 @@ async fn errors() {
|
||||
meili_snap::snapshot!(code, @"400 Bad Request");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid value type at `.vectorStore`: expected a boolean, but found a string: `\"true\"`",
|
||||
"message": "Invalid value type at `.metrics`: expected a boolean, but found a string: `\"true\"`",
|
||||
"code": "bad_request",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#bad_request"
|
||||
|
@ -14,6 +14,7 @@ mod snapshot;
|
||||
mod stats;
|
||||
mod swap_indexes;
|
||||
mod tasks;
|
||||
mod upgrade;
|
||||
mod vector;
|
||||
|
||||
// Tests are isolated by features in different modules to allow better readability, test
|
||||
|
@ -11,19 +11,6 @@ async fn index_with_documents_user_provided<'a>(
|
||||
) -> Index<'a> {
|
||||
let index = server.index("test");
|
||||
|
||||
let (response, code) = server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
}
|
||||
"###);
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({ "embedders": {"default": {
|
||||
"source": "userProvided",
|
||||
@ -41,19 +28,6 @@ async fn index_with_documents_user_provided<'a>(
|
||||
async fn index_with_documents_hf<'a>(server: &'a Server, documents: &Value) -> Index<'a> {
|
||||
let index = server.index("test");
|
||||
|
||||
let (response, code) = server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
}
|
||||
"###);
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({ "embedders": {"default": {
|
||||
"source": "huggingFace",
|
||||
|
@ -818,13 +818,6 @@ async fn test_score_details() {
|
||||
"green",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
231,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_rankingScoreDetails": {
|
||||
"words": {
|
||||
"order": 0,
|
||||
@ -1159,206 +1152,6 @@ async fn test_degraded_score_details() {
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn experimental_feature_vector_store() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
|
||||
let documents = DOCUMENTS.clone();
|
||||
|
||||
let (task, _status_code) = index.add_documents(json!(documents), None).await;
|
||||
index.wait_task(task.uid()).await.succeeded();
|
||||
|
||||
let (response, code) = index
|
||||
.search_post(json!({
|
||||
"vector": [1.0, 2.0, 3.0],
|
||||
"hybrid": {
|
||||
"embedder": "manual",
|
||||
},
|
||||
"showRankingScore": true
|
||||
}))
|
||||
.await;
|
||||
|
||||
{
|
||||
meili_snap::snapshot!(code, @"400 Bad Request");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"message": "Passing `vector` as a parameter requires enabling the `vector store` experimental feature. See https://github.com/meilisearch/product/discussions/677",
|
||||
"code": "feature_not_enabled",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
index
|
||||
.search(json!({
|
||||
"retrieveVectors": true,
|
||||
"showRankingScore": true
|
||||
}), |response, code|{
|
||||
meili_snap::snapshot!(code, @"400 Bad Request");
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
|
||||
{
|
||||
"message": "Passing `retrieveVectors` as a parameter requires enabling the `vector store` experimental feature. See https://github.com/meilisearch/product/discussions/677",
|
||||
"code": "feature_not_enabled",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
|
||||
}
|
||||
"###);
|
||||
})
|
||||
.await;
|
||||
|
||||
let (response, code) = server.set_features(json!({"vectorStore": true})).await;
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
meili_snap::snapshot!(response["vectorStore"], @"true");
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({"embedders": {
|
||||
"manual": {
|
||||
"source": "userProvided",
|
||||
"dimensions": 3,
|
||||
}
|
||||
}}))
|
||||
.await;
|
||||
|
||||
meili_snap::snapshot!(response, @r###"
|
||||
{
|
||||
"taskUid": 1,
|
||||
"indexUid": "test",
|
||||
"status": "enqueued",
|
||||
"type": "settingsUpdate",
|
||||
"enqueuedAt": "[date]"
|
||||
}
|
||||
"###);
|
||||
meili_snap::snapshot!(code, @"202 Accepted");
|
||||
let response = index.wait_task(response.uid()).await;
|
||||
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response["status"]), @"\"succeeded\"");
|
||||
|
||||
let (response, code) = index
|
||||
.search_post(json!({
|
||||
"vector": [1.0, 2.0, 3.0],
|
||||
"hybrid": {
|
||||
"embedder": "manual",
|
||||
},
|
||||
"showRankingScore": true,
|
||||
"retrieveVectors": true,
|
||||
}))
|
||||
.await;
|
||||
|
||||
meili_snap::snapshot!(code, @"200 OK");
|
||||
// vector search returns all documents that don't have vectors in the last bucket, like all sorts
|
||||
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
|
||||
[
|
||||
{
|
||||
"title": "Shazam!",
|
||||
"id": "287947",
|
||||
"color": [
|
||||
"green",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": {
|
||||
"embeddings": [
|
||||
[
|
||||
1.0,
|
||||
2.0,
|
||||
3.0
|
||||
]
|
||||
],
|
||||
"regenerate": false
|
||||
}
|
||||
},
|
||||
"_rankingScore": 1.0
|
||||
},
|
||||
{
|
||||
"title": "Captain Marvel",
|
||||
"id": "299537",
|
||||
"color": [
|
||||
"yellow",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": {
|
||||
"embeddings": [
|
||||
[
|
||||
1.0,
|
||||
2.0,
|
||||
54.0
|
||||
]
|
||||
],
|
||||
"regenerate": false
|
||||
}
|
||||
},
|
||||
"_rankingScore": 0.9129111766815186
|
||||
},
|
||||
{
|
||||
"title": "Gläss",
|
||||
"id": "450465",
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": {
|
||||
"embeddings": [
|
||||
[
|
||||
-100.0,
|
||||
340.0,
|
||||
90.0
|
||||
]
|
||||
],
|
||||
"regenerate": false
|
||||
}
|
||||
},
|
||||
"_rankingScore": 0.8106412887573242
|
||||
},
|
||||
{
|
||||
"title": "How to Train Your Dragon: The Hidden World",
|
||||
"id": "166428",
|
||||
"color": [
|
||||
"green",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": {
|
||||
"embeddings": [
|
||||
[
|
||||
-100.0,
|
||||
231.0,
|
||||
32.0
|
||||
]
|
||||
],
|
||||
"regenerate": false
|
||||
}
|
||||
},
|
||||
"_rankingScore": 0.7412010431289673
|
||||
},
|
||||
{
|
||||
"title": "Escape Room",
|
||||
"id": "522681",
|
||||
"color": [
|
||||
"yellow",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": {
|
||||
"embeddings": [
|
||||
[
|
||||
10.0,
|
||||
-23.0,
|
||||
32.0
|
||||
]
|
||||
],
|
||||
"regenerate": false
|
||||
}
|
||||
},
|
||||
"_rankingScore": 0.6972063183784485
|
||||
}
|
||||
]
|
||||
"###);
|
||||
}
|
||||
|
||||
#[cfg(feature = "default")]
|
||||
#[actix_rt::test]
|
||||
async fn camelcased_words() {
|
||||
@ -1611,14 +1404,7 @@ async fn simple_search_with_strange_synonyms() {
|
||||
"color": [
|
||||
"green",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
231,
|
||||
32
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"###);
|
||||
@ -1636,14 +1422,7 @@ async fn simple_search_with_strange_synonyms() {
|
||||
"color": [
|
||||
"green",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
231,
|
||||
32
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"###);
|
||||
@ -1661,14 +1440,7 @@ async fn simple_search_with_strange_synonyms() {
|
||||
"color": [
|
||||
"green",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
231,
|
||||
32
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"###);
|
||||
@@ -1752,3 +1524,57 @@ async fn change_attributes_settings() {
        )
        .await;
}

/// Modifying facets with different casing should work correctly
#[actix_rt::test]
async fn change_facet_casing() {
    let server = Server::new().await;
    let index = server.index("test");

    let (response, code) = index
        .update_settings(json!({
            "filterableAttributes": ["dog"],
        }))
        .await;
    assert_eq!("202", code.as_str(), "{:?}", response);
    index.wait_task(response.uid()).await;

    let (response, _code) = index
        .add_documents(
            json!([
                {
                    "id": 1,
                    "dog": "Bouvier Bernois"
                }
            ]),
            None,
        )
        .await;
    index.wait_task(response.uid()).await;

    let (response, _code) = index
        .add_documents(
            json!([
                {
                    "id": 1,
                    "dog": "bouvier bernois"
                }
            ]),
            None,
        )
        .await;
    index.wait_task(response.uid()).await;

    index
        .search(json!({ "facets": ["dog"] }), |response, code| {
            meili_snap::snapshot!(code, @"200 OK");
            meili_snap::snapshot!(meili_snap::json_string!(response["facetDistribution"]), @r###"
            {
              "dog": {
                "bouvier bernois": 1
              }
            }
            "###);
        })
        .await;
}

@ -110,14 +110,7 @@ async fn simple_search_single_index() {
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
340,
|
||||
90
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "glass",
|
||||
@ -135,14 +128,7 @@ async fn simple_search_single_index() {
|
||||
"color": [
|
||||
"yellow",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "captain",
|
||||
@ -180,13 +166,6 @@ async fn federation_single_search_single_index() {
|
||||
"blue",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
340,
|
||||
90
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 0,
|
||||
@ -303,13 +282,6 @@ async fn federation_two_search_single_index() {
|
||||
"blue",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
340,
|
||||
90
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 0,
|
||||
@ -323,13 +295,6 @@ async fn federation_two_search_single_index() {
|
||||
"yellow",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 1,
|
||||
@ -477,14 +442,7 @@ async fn simple_search_two_indexes() {
|
||||
"color": [
|
||||
"blue",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
340,
|
||||
90
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "glass",
|
||||
@ -510,14 +468,7 @@ async fn simple_search_two_indexes() {
|
||||
"age": 4
|
||||
}
|
||||
],
|
||||
"cattos": "pésti",
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
}
|
||||
"cattos": "pésti"
|
||||
},
|
||||
{
|
||||
"id": 654,
|
||||
@ -532,14 +483,7 @@ async fn simple_search_two_indexes() {
|
||||
"cattos": [
|
||||
"simba",
|
||||
"pestiféré"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"query": "pésti",
|
||||
@ -583,13 +527,6 @@ async fn federation_two_search_two_indexes() {
|
||||
"blue",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
340,
|
||||
90
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 0,
|
||||
@ -611,13 +548,6 @@ async fn federation_two_search_two_indexes() {
|
||||
}
|
||||
],
|
||||
"cattos": "pésti",
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 1,
|
||||
@ -638,13 +568,6 @@ async fn federation_two_search_two_indexes() {
|
||||
"simba",
|
||||
"pestiféré"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 1,
|
||||
@ -705,13 +628,6 @@ async fn federation_multiple_search_multiple_indexes() {
|
||||
"blue",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
340,
|
||||
90
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 0,
|
||||
@ -733,13 +649,6 @@ async fn federation_multiple_search_multiple_indexes() {
|
||||
}
|
||||
],
|
||||
"cattos": "pésti",
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 2,
|
||||
@ -771,13 +680,6 @@ async fn federation_multiple_search_multiple_indexes() {
|
||||
"yellow",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 1,
|
||||
@ -791,13 +693,6 @@ async fn federation_multiple_search_multiple_indexes() {
|
||||
"yellow",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
-23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 3,
|
||||
@ -822,13 +717,6 @@ async fn federation_multiple_search_multiple_indexes() {
|
||||
"moumoute",
|
||||
"gomez"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 4,
|
||||
@ -867,13 +755,6 @@ async fn federation_multiple_search_multiple_indexes() {
|
||||
"simba",
|
||||
"pestiféré"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 2,
|
||||
@ -896,13 +777,6 @@ async fn federation_multiple_search_multiple_indexes() {
|
||||
"green",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
231,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "test",
|
||||
"queriesPosition": 6,
|
||||
@ -1391,13 +1265,6 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
|
||||
}
|
||||
],
|
||||
"cattos": "pésti",
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 0,
|
||||
@ -1412,13 +1279,6 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
|
||||
"cattos": [
|
||||
"enigma"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 0,
|
||||
@ -1440,13 +1300,6 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
|
||||
"simba",
|
||||
"pestiféré"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 0,
|
||||
@ -1472,13 +1325,6 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
|
||||
"moumoute",
|
||||
"gomez"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 0,
|
||||
@ -1520,13 +1366,6 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
|
||||
}
|
||||
],
|
||||
"cattos": "pésti",
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 0,
|
||||
@ -1548,13 +1387,6 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
|
||||
"simba",
|
||||
"pestiféré"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 0,
|
||||
@ -1580,13 +1412,6 @@ async fn federation_sort_same_indexes_same_criterion_same_direction() {
|
||||
"moumoute",
|
||||
"gomez"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 1,
|
||||
@ -1714,13 +1539,6 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
|
||||
}
|
||||
],
|
||||
"cattos": "pésti",
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 1,
|
||||
@ -1746,13 +1564,6 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
|
||||
"moumoute",
|
||||
"gomez"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 1,
|
||||
@ -1767,13 +1578,6 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
|
||||
"cattos": [
|
||||
"enigma"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 0,
|
||||
@ -1795,13 +1599,6 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
|
||||
"simba",
|
||||
"pestiféré"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 1,
|
||||
@ -1843,13 +1640,6 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
|
||||
"simba",
|
||||
"pestiféré"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 0,
|
||||
@ -1872,13 +1662,6 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
|
||||
}
|
||||
],
|
||||
"cattos": "pésti",
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 0,
|
||||
@ -1904,13 +1687,6 @@ async fn federation_sort_same_indexes_different_criterion_same_direction() {
|
||||
"moumoute",
|
||||
"gomez"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "nested",
|
||||
"queriesPosition": 1,
|
||||
@ -2101,13 +1877,6 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
|
||||
"yellow",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2122,13 +1891,6 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
|
||||
"yellow",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
-23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2143,13 +1905,6 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
|
||||
"blue",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
340,
|
||||
90
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2164,13 +1919,6 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
|
||||
"green",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
231,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2185,13 +1933,6 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
|
||||
"green",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2226,13 +1967,6 @@ async fn federation_sort_different_indexes_same_criterion_same_direction() {
|
||||
"yellow",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 1,
|
||||
@ -2413,13 +2147,6 @@ async fn federation_sort_different_ranking_rules() {
|
||||
"yellow",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2434,13 +2161,6 @@ async fn federation_sort_different_ranking_rules() {
|
||||
"yellow",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
-23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2455,13 +2175,6 @@ async fn federation_sort_different_ranking_rules() {
|
||||
"blue",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
340,
|
||||
90
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2476,13 +2189,6 @@ async fn federation_sort_different_ranking_rules() {
|
||||
"green",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
231,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2497,13 +2203,6 @@ async fn federation_sort_different_ranking_rules() {
|
||||
"green",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2714,13 +2413,6 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
|
||||
"yellow",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2755,13 +2447,6 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
|
||||
"yellow",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
10,
|
||||
-23,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2776,13 +2461,6 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
|
||||
"blue",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
340,
|
||||
90
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2797,13 +2475,6 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
|
||||
"green",
|
||||
"red"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
-100,
|
||||
231,
|
||||
32
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2818,13 +2489,6 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
|
||||
"green",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 0,
|
||||
@ -2879,13 +2543,6 @@ async fn federation_sort_different_indexes_different_criterion_same_direction()
|
||||
"yellow",
|
||||
"blue"
|
||||
],
|
||||
"_vectors": {
|
||||
"manual": [
|
||||
1,
|
||||
2,
|
||||
54
|
||||
]
|
||||
},
|
||||
"_federation": {
|
||||
"indexUid": "movies",
|
||||
"queriesPosition": 1,
|
||||
@ -4094,13 +3751,6 @@ async fn federation_non_federated_contains_federation_option() {
|
||||
#[actix_rt::test]
|
||||
async fn federation_vector_single_index() {
|
||||
let server = Server::new().await;
|
||||
let (_, code) = server
|
||||
.set_features(json!({
|
||||
"vectorStore": true
|
||||
}))
|
||||
.await;
|
||||
|
||||
snapshot!(code, @"200 OK");
|
||||
|
||||
let index = server.index("vectors");
|
||||
|
||||
@ -4302,13 +3952,6 @@ async fn federation_vector_single_index() {
|
||||
#[actix_rt::test]
|
||||
async fn federation_vector_two_indexes() {
|
||||
let server = Server::new().await;
|
||||
let (_, code) = server
|
||||
.set_features(json!({
|
||||
"vectorStore": true
|
||||
}))
|
||||
.await;
|
||||
|
||||
snapshot!(code, @"200 OK");
|
||||
|
||||
let index = server.index("vectors-animal");
|
||||
|
||||
|
@@ -0,0 +1,9 @@
{"run_id":"1737725573-949900000","line":48,"new":{"module_name":"integration__settings__prefix_search_settings","snapshot_name":"add_docs_and_disable","metadata":{"source":"crates/meilisearch/tests/settings/prefix_search_settings.rs","assertion_line":48},"snapshot":"[\n {\n \"id\": 2,\n \"a\": \"Soup of day so\",\n \"b\": \"manythe manythelazyfish\",\n \"_formatted\": {\n \"id\": \"2\",\n \"a\": \"<em>So</em>up of day <em>so</em>\",\n \"b\": \"manythe manythelazyfish\"\n }\n },\n {\n \"id\": 1,\n \"a\": \"Soup of the day\",\n \"b\": \"manythefishou\",\n \"_formatted\": {\n \"id\": \"1\",\n \"a\": \"<em>So</em>up of the day\",\n \"b\": \"manythefishou\"\n }\n },\n {\n \"id\": 3,\n \"a\": \"the Soup of day\",\n \"b\": \"manythelazyfish\",\n \"_formatted\": {\n \"id\": \"3\",\n \"a\": \"the <em>So</em>up of day\",\n \"b\": \"manythelazyfish\"\n }\n }\n]"},"old":{"module_name":"integration__settings__prefix_search_settings","metadata":{},"snapshot":"[\n {\n \"id\": 2,\n \"a\": \"Soup of day so\",\n \"b\": \"manythe manythelazyfish\",\n \"_formatted\": {\n \"id\": \"2\",\n \"a\": \"Soup of day <em>so</em>\",\n \"b\": \"manythe manythelazyfish\"\n }\n }\n]"}}
{"run_id":"1737725573-949900000","line":381,"new":null,"old":null}
{"run_id":"1737725573-949900000","line":382,"new":null,"old":null}
{"run_id":"1737725573-949900000","line":381,"new":null,"old":null}
{"run_id":"1737725573-949900000","line":382,"new":null,"old":null}
{"run_id":"1737725573-949900000","line":421,"new":null,"old":null}
{"run_id":"1737725573-949900000","line":422,"new":null,"old":null}
{"run_id":"1737725573-949900000","line":421,"new":null,"old":null}
{"run_id":"1737725573-949900000","line":422,"new":null,"old":null}
@@ -152,7 +152,7 @@ test_setting_routes!(
    {
        setting: embedders,
        update_verb: patch,
-       default_value: null
+       default_value: {}
    },
    {
        setting: facet_search,
@@ -197,7 +197,7 @@ async fn get_settings() {
    let (response, code) = index.settings().await;
    assert_eq!(code, 200);
    let settings = response.as_object().unwrap();
-   assert_eq!(settings.keys().len(), 19);
+   assert_eq!(settings.keys().len(), 20);
    assert_eq!(settings["displayedAttributes"], json!(["*"]));
    assert_eq!(settings["searchableAttributes"], json!(["*"]));
    assert_eq!(settings["filterableAttributes"], json!([]));
@@ -230,23 +230,12 @@ async fn get_settings() {
    assert_eq!(settings["searchCutoffMs"], json!(null));
    assert_eq!(settings["prefixSearch"], json!("indexingTime"));
    assert_eq!(settings["facetSearch"], json!(true));
    assert_eq!(settings["embedders"], json!({}));
}

#[actix_rt::test]
async fn secrets_are_hidden_in_settings() {
    let server = Server::new().await;
    let (response, code) = server.set_features(json!({"vectorStore": true})).await;

    meili_snap::snapshot!(code, @"200 OK");
    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
    {
      "vectorStore": true,
      "metrics": false,
      "logsRoute": false,
      "editDocumentsByFunction": false,
      "containsFilter": false
    }
    "###);

    let index = server.index("test");
    let (response, _code) = index.create(None).await;

1
crates/meilisearch/tests/similar/.errors.rs.pending-snap
Normal file
@@ -0,0 +1 @@
{"run_id":"1737725573-949900000","line":492,"new":{"module_name":"integration__similar__errors","snapshot_name":"filter_invalid_attribute_string","metadata":{"source":"crates/meilisearch/tests/similar/errors.rs","assertion_line":492},"snapshot":"500 Internal Server Error"},"old":{"module_name":"integration__similar__errors","metadata":{},"snapshot":"202 Accepted"}}
@ -8,7 +8,6 @@ use crate::json;
|
||||
async fn similar_unexisting_index() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let expected_response = json!({
|
||||
"message": "Index `test` not found.",
|
||||
@ -29,7 +28,6 @@ async fn similar_unexisting_index() {
|
||||
async fn similar_unexisting_parameter() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
index
|
||||
.similar(json!({"id": 287947, "marin": "hello"}), |response, code| {
|
||||
@ -39,28 +37,10 @@ async fn similar_unexisting_parameter() {
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn similar_feature_not_enabled() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
|
||||
let (response, code) = index.similar_post(json!({"id": 287947, "embedder": "manual"})).await;
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Using the similar API requires enabling the `vector store` experimental feature. See https://github.com/meilisearch/product/discussions/677",
|
||||
"code": "feature_not_enabled",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#feature_not_enabled"
|
||||
}
|
||||
"###);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn similar_bad_id() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -91,7 +71,6 @@ async fn similar_bad_id() {
|
||||
async fn similar_bad_ranking_score_threshold() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -122,7 +101,6 @@ async fn similar_bad_ranking_score_threshold() {
|
||||
async fn similar_invalid_ranking_score_threshold() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -153,7 +131,6 @@ async fn similar_invalid_ranking_score_threshold() {
|
||||
async fn similar_invalid_id() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -184,7 +161,6 @@ async fn similar_invalid_id() {
|
||||
async fn similar_not_found_id() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -216,7 +192,6 @@ async fn similar_not_found_id() {
|
||||
async fn similar_bad_offset() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -259,7 +234,6 @@ async fn similar_bad_offset() {
|
||||
async fn similar_bad_limit() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -304,7 +278,6 @@ async fn similar_bad_filter() {
|
||||
// Thus the error message is not generated by deserr but written by us.
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -344,7 +317,6 @@ async fn similar_bad_filter() {
|
||||
async fn filter_invalid_syntax_object() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -383,7 +355,6 @@ async fn filter_invalid_syntax_object() {
|
||||
async fn filter_invalid_syntax_array() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -422,7 +393,6 @@ async fn filter_invalid_syntax_array() {
|
||||
async fn filter_invalid_syntax_string() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -463,7 +433,6 @@ async fn filter_invalid_syntax_string() {
|
||||
async fn filter_invalid_attribute_array() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -504,7 +473,6 @@ async fn filter_invalid_attribute_array() {
|
||||
async fn filter_invalid_attribute_string() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -545,7 +513,6 @@ async fn filter_invalid_attribute_string() {
|
||||
async fn filter_reserved_geo_attribute_array() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -586,7 +553,6 @@ async fn filter_reserved_geo_attribute_array() {
|
||||
async fn filter_reserved_geo_attribute_string() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -627,7 +593,6 @@ async fn filter_reserved_geo_attribute_string() {
|
||||
async fn filter_reserved_attribute_array() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -668,7 +633,6 @@ async fn filter_reserved_attribute_array() {
|
||||
async fn filter_reserved_attribute_string() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -709,7 +673,6 @@ async fn filter_reserved_attribute_string() {
|
||||
async fn filter_reserved_geo_point_array() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -750,7 +713,6 @@ async fn filter_reserved_geo_point_array() {
|
||||
async fn filter_reserved_geo_point_string() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -790,7 +752,6 @@ async fn filter_reserved_geo_point_string() {
|
||||
#[actix_rt::test]
|
||||
async fn similar_bad_retrieve_vectors() {
|
||||
let server = Server::new().await;
|
||||
server.set_features(json!({"vectorStore": true})).await;
|
||||
let index = server.index("test");
|
||||
|
||||
let (response, code) =
|
||||
@@ -839,3 +800,86 @@ async fn similar_bad_retrieve_vectors() {
    }
    "###);
}

#[actix_rt::test]
async fn similar_bad_embedder() {
    let server = Server::new().await;
    let index = server.index("test");

    let (response, code) = index
        .update_settings(json!({
            "embedders": {
                "manual": {
                    "source": "userProvided",
                    "dimensions": 3,
                }
            },
            "filterableAttributes": ["title"]}))
        .await;
    snapshot!(code, @"202 Accepted");
    server.wait_task(response.uid()).await;

    let documents = DOCUMENTS.clone();
    let (value, code) = index.add_documents(documents, None).await;
    snapshot!(code, @"202 Accepted");
    index.wait_task(value.uid()).await;

    let expected_response = json!({
        "message": "Cannot find embedder with name `auto`.",
        "code": "invalid_similar_embedder",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#invalid_similar_embedder"
    });

    index
        .similar(json!({"id": 287947, "embedder": "auto"}), |response, code| {
            assert_eq!(response, expected_response);
            assert_eq!(code, 400);
        })
        .await;

    let expected_response = json!({
        "message": "Invalid value type at `.embedder`: expected a string, but found a positive integer: `42`",
        "code": "invalid_similar_embedder",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#invalid_similar_embedder"
    });

    let (response, code) = index.similar_post(json!({"id": 287947, "embedder": 42})).await;

    assert_eq!(response, expected_response);
    assert_eq!(code, 400);

    let expected_response = json!({
        "message": "Invalid value type at `.embedder`: expected a string, but found null",
        "code": "invalid_similar_embedder",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#invalid_similar_embedder"
    });

    let (response, code) = index.similar_post(json!({"id": 287947, "embedder": null})).await;

    assert_eq!(response, expected_response);
    assert_eq!(code, 400);

    let expected_response = json!({
        "message": "Missing field `embedder`",
        "code": "bad_request",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#bad_request"
    });

    let (response, code) = index.similar_post(json!({"id": 287947})).await;
    assert_eq!(response, expected_response);
    assert_eq!(code, 400);

    let expected_response = json!({
        "message": "Missing parameter `embedder`",
        "code": "bad_request",
        "type": "invalid_request",
        "link": "https://docs.meilisearch.com/errors#bad_request"
    });
    let (response, code) = index.similar_get("?id=287947").await;
    assert_eq!(response, expected_response);
    assert_eq!(code, 400);
}

@ -49,17 +49,6 @@ static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
|
||||
async fn basic() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
let (value, code) = server.set_features(json!({"vectorStore": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(value, @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
}
|
||||
"###);
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -246,17 +235,6 @@ async fn basic() {
|
||||
async fn ranking_score_threshold() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
let (value, code) = server.set_features(json!({"vectorStore": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(value, @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
}
|
||||
"###);
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -527,17 +505,6 @@ async fn ranking_score_threshold() {
|
||||
async fn filter() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
let (value, code) = server.set_features(json!({"vectorStore": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(value, @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
}
|
||||
"###);
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
@ -656,17 +623,6 @@ async fn filter() {
|
||||
async fn limit_and_offset() {
|
||||
let server = Server::new().await;
|
||||
let index = server.index("test");
|
||||
let (value, code) = server.set_features(json!({"vectorStore": true})).await;
|
||||
snapshot!(code, @"200 OK");
|
||||
snapshot!(value, @r###"
|
||||
{
|
||||
"vectorStore": true,
|
||||
"metrics": false,
|
||||
"logsRoute": false,
|
||||
"editDocumentsByFunction": false,
|
||||
"containsFilter": false
|
||||
}
|
||||
"###);
|
||||
|
||||
let (response, code) = index
|
||||
.update_settings(json!({
|
||||
|
@@ -95,36 +95,36 @@ async fn task_bad_types() {

    let (response, code) = server.tasks_filter("types=doggo").await;
    snapshot!(code, @"400 Bad Request");
-   snapshot!(json_string!(response), @r###"
+   snapshot!(json_string!(response), @r#"
    {
-     "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+     "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `upgradeDatabase`.",
      "code": "invalid_task_types",
      "type": "invalid_request",
      "link": "https://docs.meilisearch.com/errors#invalid_task_types"
    }
-   "###);
+   "#);

    let (response, code) = server.cancel_tasks("types=doggo").await;
    snapshot!(code, @"400 Bad Request");
-   snapshot!(json_string!(response), @r###"
+   snapshot!(json_string!(response), @r#"
    {
-     "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+     "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `upgradeDatabase`.",
      "code": "invalid_task_types",
      "type": "invalid_request",
      "link": "https://docs.meilisearch.com/errors#invalid_task_types"
    }
-   "###);
+   "#);

    let (response, code) = server.delete_tasks("types=doggo").await;
    snapshot!(code, @"400 Bad Request");
-   snapshot!(json_string!(response), @r###"
+   snapshot!(json_string!(response), @r#"
    {
-     "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+     "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentEdition`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`, `upgradeDatabase`.",
      "code": "invalid_task_types",
      "type": "invalid_request",
      "link": "https://docs.meilisearch.com/errors#invalid_task_types"
    }
-   "###);
+   "#);
}

#[actix_rt::test]

87
crates/meilisearch/tests/upgrade/mod.rs
Normal file
@@ -0,0 +1,87 @@
mod v1_12;

use std::path::Path;
use std::{fs, io};

use meili_snap::snapshot;
use meilisearch::Opt;

use crate::common::{default_settings, Server};

fn copy_dir_all(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> {
    fs::create_dir_all(&dst)?;
    for entry in fs::read_dir(src)? {
        let entry = entry?;
        let ty = entry.file_type()?;
        if ty.is_dir() {
            copy_dir_all(entry.path(), dst.as_ref().join(entry.file_name()))?;
        } else {
            fs::copy(entry.path(), dst.as_ref().join(entry.file_name()))?;
        }
    }
    Ok(())
}

#[actix_rt::test]
async fn malformed_version_file() {
    let temp = tempfile::tempdir().unwrap();
    let default_settings = default_settings(temp.path());
    let db_path = default_settings.db_path.clone();
    std::fs::create_dir_all(&db_path).unwrap();
    std::fs::write(db_path.join("VERSION"), "kefir").unwrap();
    let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
    let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
    snapshot!(err, @"Version file is corrupted and thus Meilisearch is unable to determine the version of the database. The version contains 1 parts instead of 3 (major, minor and patch)");
}

#[actix_rt::test]
async fn version_too_old() {
    let temp = tempfile::tempdir().unwrap();
    let default_settings = default_settings(temp.path());
    let db_path = default_settings.db_path.clone();
    std::fs::create_dir_all(&db_path).unwrap();
    std::fs::write(db_path.join("VERSION"), "1.11.9999").unwrap();
    let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
    let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
    snapshot!(err, @"Database version 1.11.9999 is too old for the experimental dumpless upgrade feature. Please generate a dump using the v1.11.9999 and import it in the v1.13.0");
}

#[actix_rt::test]
async fn version_requires_downgrade() {
    let temp = tempfile::tempdir().unwrap();
    let default_settings = default_settings(temp.path());
    let db_path = default_settings.db_path.clone();
    std::fs::create_dir_all(&db_path).unwrap();
    let major = meilisearch_types::versioning::VERSION_MAJOR;
    let minor = meilisearch_types::versioning::VERSION_MINOR;
    let patch = meilisearch_types::versioning::VERSION_PATCH.parse::<u32>().unwrap() + 1;
    std::fs::write(db_path.join("VERSION"), format!("{major}.{minor}.{patch}")).unwrap();
    let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
    let err = Server::new_with_options(options).await.map(|_| ()).unwrap_err();
    snapshot!(err, @"Database version 1.13.1 is higher than the Meilisearch version 1.13.0. Downgrade is not supported");
}

#[actix_rt::test]
async fn upgrade_to_the_current_version() {
    let temp = tempfile::tempdir().unwrap();
    let server = Server::new_with_options(default_settings(temp.path())).await.unwrap();
    drop(server);

    let server = Server::new_with_options(Opt {
        experimental_dumpless_upgrade: true,
        ..default_settings(temp.path())
    })
    .await
    .unwrap();
    // The upgrade tasks should NOT be spawned => task queue is empty
    let (tasks, _status) = server.tasks().await;
    snapshot!(tasks, @r#"
    {
      "results": [],
      "total": 0,
      "limit": 20,
      "from": null,
      "next": null
    }
    "#);
}
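
Not part of the diff: a minimal, hypothetical sketch of how a pre-built fixture database could be wired into these upgrade tests, reusing only the helpers shown above (`copy_dir_all`, `default_settings`, `Server::new_with_options`, `Opt`). The `tests/assets/db_from_v1_12` path is an assumption, not something this commit defines.

// Hypothetical sketch, not part of this commit: boot a server on a copied
// v1.12 database with the dumpless-upgrade flag enabled.
#[actix_rt::test]
async fn upgrade_from_v1_12_fixture_sketch() {
    let temp = tempfile::tempdir().unwrap();
    let default_settings = default_settings(temp.path());
    // Copy a pre-built v1.12 database into the temporary db path
    // ("tests/assets/db_from_v1_12" is an assumed fixture location).
    copy_dir_all("tests/assets/db_from_v1_12", &default_settings.db_path).unwrap();
    let options = Opt { experimental_dumpless_upgrade: true, ..default_settings };
    // Starting the server on the old database enqueues the upgradeDatabase task.
    let server = Server::new_with_options(options).await.unwrap();
    let (_tasks, _status) = server.tasks().await;
    // Assert on the task list / index contents as in the tests above.
}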
1
crates/meilisearch/tests/upgrade/v1_12/mod.rs
Normal file
@@ -0,0 +1 @@
mod v1_12_0;
@ -0,0 +1,78 @@
|
||||
---
|
||||
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
|
||||
snapshot_kind: text
|
||||
---
|
||||
{
|
||||
"displayedAttributes": [
|
||||
"*"
|
||||
],
|
||||
"searchableAttributes": [
|
||||
"*"
|
||||
],
|
||||
"filterableAttributes": [
|
||||
"age",
|
||||
"surname"
|
||||
],
|
||||
"sortableAttributes": [
|
||||
"age"
|
||||
],
|
||||
"rankingRules": [
|
||||
"words",
|
||||
"typo",
|
||||
"proximity",
|
||||
"attribute",
|
||||
"sort",
|
||||
"exactness"
|
||||
],
|
||||
"stopWords": [
|
||||
"le",
|
||||
"un"
|
||||
],
|
||||
"nonSeparatorTokens": [],
|
||||
"separatorTokens": [],
|
||||
"dictionary": [],
|
||||
"synonyms": {
|
||||
"boubou": [
|
||||
"kefir"
|
||||
]
|
||||
},
|
||||
"distinctAttribute": null,
|
||||
"proximityPrecision": "byWord",
|
||||
"typoTolerance": {
|
||||
"enabled": true,
|
||||
"minWordSizeForTypos": {
|
||||
"oneTypo": 4,
|
||||
"twoTypos": 9
|
||||
},
|
||||
"disableOnWords": [
|
||||
"kefir"
|
||||
],
|
||||
"disableOnAttributes": [
|
||||
"surname"
|
||||
]
|
||||
},
|
||||
"faceting": {
|
||||
"maxValuesPerFacet": 99,
|
||||
"sortFacetValuesBy": {
|
||||
"*": "alpha",
|
||||
"age": "count"
|
||||
}
|
||||
},
|
||||
"pagination": {
|
||||
"maxTotalHits": 15
|
||||
},
|
||||
"embedders": {},
|
||||
"searchCutoffMs": 8000,
|
||||
"localizedAttributes": [
|
||||
{
|
||||
"attributePatterns": [
|
||||
"description"
|
||||
],
|
||||
"locales": [
|
||||
"fra"
|
||||
]
|
||||
}
|
||||
],
|
||||
"facetSearch": true,
|
||||
"prefixSearch": "indexingTime"
|
||||
}
|
@@ -0,0 +1,25 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
snapshot_kind: text
---
{
  "hits": [
    {
      "id": 1,
      "name": "kefir",
      "surname": [
        "kef",
        "kefkef",
        "kefirounet",
        "boubou"
      ],
      "age": 1.4,
      "description": "kefir est un petit chien blanc très mignon"
    }
  ],
  "query": "",
  "processingTimeMs": "[duration]",
  "limit": 20,
  "offset": 0,
  "estimatedTotalHits": 1
}
@@ -0,0 +1,11 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
snapshot_kind: text
---
{
  "results": [],
  "total": 0,
  "limit": 20,
  "from": null,
  "next": null
}
@ -0,0 +1,505 @@
|
||||
---
|
||||
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
|
||||
---
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"uid": 24,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"upgradeFrom": "v1.12.0",
|
||||
"upgradeTo": "v1.13.0"
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"upgradeDatabase": 1
|
||||
},
|
||||
"indexUids": {}
|
||||
},
|
||||
"duration": "[duration]",
|
||||
"startedAt": "[date]",
|
||||
"finishedAt": "[date]"
|
||||
},
|
||||
{
|
||||
"uid": 23,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"deletedDocuments": 0
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"indexDeletion": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"mieli": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.004146631S",
|
||||
"startedAt": "2025-01-23T11:38:57.012591321Z",
|
||||
"finishedAt": "2025-01-23T11:38:57.016737952Z"
|
||||
},
|
||||
{
|
||||
"uid": 22,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"receivedDocuments": 1,
|
||||
"indexedDocuments": 1
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.102738497S",
|
||||
"startedAt": "2025-01-23T11:36:22.551906856Z",
|
||||
"finishedAt": "2025-01-23T11:36:22.654645353Z"
|
||||
},
|
||||
{
|
||||
"uid": 21,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"receivedDocuments": 1,
|
||||
"indexedDocuments": 0
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"failed": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.005108474S",
|
||||
"startedAt": "2025-01-23T11:36:04.132670526Z",
|
||||
"finishedAt": "2025-01-23T11:36:04.137779Z"
|
||||
},
|
||||
{
|
||||
"uid": 20,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"receivedDocuments": 1,
|
||||
"indexedDocuments": 0
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"failed": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"mieli": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.027954894S",
|
||||
"startedAt": "2025-01-23T11:35:53.631082795Z",
|
||||
"finishedAt": "2025-01-23T11:35:53.659037689Z"
|
||||
},
|
||||
{
|
||||
"uid": 19,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"deletedDocuments": 19546
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"indexDeletion": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"mieli": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.006903297S",
|
||||
"startedAt": "2025-01-20T11:50:52.874106134Z",
|
||||
"finishedAt": "2025-01-20T11:50:52.881009431Z"
|
||||
},
|
||||
{
|
||||
"uid": 18,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"receivedDocuments": 19547,
|
||||
"indexedDocuments": 0,
|
||||
"matchedTasks": 1,
|
||||
"canceledTasks": 1,
|
||||
"originalFilter": "?statuses=processing%2Cenqueued"
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 2,
|
||||
"status": {
|
||||
"succeeded": 1,
|
||||
"canceled": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 1,
|
||||
"taskCancelation": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"mieli": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.000481257S",
|
||||
"startedAt": "2025-01-20T11:48:04.92820416Z",
|
||||
"finishedAt": "2025-01-20T11:48:04.928685417Z"
|
||||
},
|
||||
{
|
||||
"uid": 17,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"matchedTasks": 0,
|
||||
"canceledTasks": 0,
|
||||
"originalFilter": "?statuses=processing%2Cenqueued"
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"taskCancelation": 1
|
||||
},
|
||||
"indexUids": {}
|
||||
},
|
||||
"duration": "PT0.000407005S",
|
||||
"startedAt": "2025-01-20T11:47:53.509403957Z",
|
||||
"finishedAt": "2025-01-20T11:47:53.509810962Z"
|
||||
},
|
||||
{
|
||||
"uid": 16,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"matchedTasks": 0,
|
||||
"canceledTasks": 0,
|
||||
"originalFilter": "?statuses=processing%2Cenqueued"
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"taskCancelation": 1
|
||||
},
|
||||
"indexUids": {}
|
||||
},
|
||||
"duration": "PT0.000403716S",
|
||||
"startedAt": "2025-01-20T11:47:48.430653005Z",
|
||||
"finishedAt": "2025-01-20T11:47:48.431056721Z"
|
||||
},
|
||||
{
|
||||
"uid": 15,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"matchedTasks": 0,
|
||||
"canceledTasks": 0,
|
||||
"originalFilter": "?statuses=processing"
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"taskCancelation": 1
|
||||
},
|
||||
"indexUids": {}
|
||||
},
|
||||
"duration": "PT0.000417016S",
|
||||
"startedAt": "2025-01-20T11:47:42.429678617Z",
|
||||
"finishedAt": "2025-01-20T11:47:42.430095633Z"
|
||||
},
|
||||
{
|
||||
"uid": 14,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"receivedDocuments": 19547,
|
||||
"indexedDocuments": 19546
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"mieli": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT12.086284842S",
|
||||
"startedAt": "2025-01-20T11:47:03.092181576Z",
|
||||
"finishedAt": "2025-01-20T11:47:15.178466418Z"
|
||||
},
|
||||
{
|
||||
"uid": 13,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"localizedAttributes": [
|
||||
{
|
||||
"attributePatterns": [
|
||||
"description"
|
||||
],
|
||||
"locales": [
|
||||
"fr"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.011506614S",
|
||||
"startedAt": "2025-01-16T17:18:43.29334923Z",
|
||||
"finishedAt": "2025-01-16T17:18:43.304855844Z"
|
||||
},
|
||||
{
|
||||
"uid": 12,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"faceting": {
|
||||
"sortFacetValuesBy": {
|
||||
"*": "alpha",
|
||||
"age": "count"
|
||||
}
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007640163S",
|
||||
"startedAt": "2025-01-16T17:02:52.539749853Z",
|
||||
"finishedAt": "2025-01-16T17:02:52.547390016Z"
|
||||
},
|
||||
{
|
||||
"uid": 11,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"searchCutoffMs": 8000
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007307840S",
|
||||
"startedAt": "2025-01-16T17:01:14.112756687Z",
|
||||
"finishedAt": "2025-01-16T17:01:14.120064527Z"
|
||||
},
|
||||
{
|
||||
"uid": 10,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"faceting": {
|
||||
"maxValuesPerFacet": 99
|
||||
},
|
||||
"pagination": {
|
||||
"maxTotalHits": 15
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007391353S",
|
||||
"startedAt": "2025-01-16T17:00:29.201180268Z",
|
||||
"finishedAt": "2025-01-16T17:00:29.208571621Z"
|
||||
},
|
||||
{
|
||||
"uid": 9,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"faceting": {
|
||||
"maxValuesPerFacet": 100
|
||||
},
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007445825S",
|
||||
"startedAt": "2025-01-16T17:00:15.77629445Z",
|
||||
"finishedAt": "2025-01-16T17:00:15.783740275Z"
|
||||
},
|
||||
{
|
||||
"uid": 8,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"typoTolerance": {
|
||||
"minWordSizeForTypos": {
|
||||
"oneTypo": 4
|
||||
},
|
||||
"disableOnWords": [
|
||||
"kefir"
|
||||
],
|
||||
"disableOnAttributes": [
|
||||
"surname"
|
||||
]
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.012020083S",
|
||||
"startedAt": "2025-01-16T16:59:42.744086671Z",
|
||||
"finishedAt": "2025-01-16T16:59:42.756106754Z"
|
||||
},
|
||||
{
|
||||
"uid": 7,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"typoTolerance": {
|
||||
"minWordSizeForTypos": {
|
||||
"oneTypo": 4
|
||||
}
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007440092S",
|
||||
"startedAt": "2025-01-16T16:58:41.2155771Z",
|
||||
"finishedAt": "2025-01-16T16:58:41.223017192Z"
|
||||
},
|
||||
{
|
||||
"uid": 6,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"synonyms": {
|
||||
"boubou": [
|
||||
"kefir"
|
||||
]
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007565161S",
|
||||
"startedAt": "2025-01-16T16:54:51.940332781Z",
|
||||
"finishedAt": "2025-01-16T16:54:51.947897942Z"
|
||||
},
|
||||
{
|
||||
"uid": 5,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"stopWords": [
|
||||
"le",
|
||||
"un"
|
||||
]
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.016307263S",
|
||||
"startedAt": "2025-01-16T16:53:19.913351957Z",
|
||||
"finishedAt": "2025-01-16T16:53:19.92965922Z"
|
||||
}
|
||||
],
|
||||
"total": 23,
|
||||
"limit": 20,
|
||||
"from": 24,
|
||||
"next": 4
|
||||
}
|
@ -0,0 +1,505 @@
|
||||
---
|
||||
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
|
||||
---
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"uid": 24,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"upgradeFrom": "v1.12.0",
|
||||
"upgradeTo": "v1.13.0"
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"upgradeDatabase": 1
|
||||
},
|
||||
"indexUids": {}
|
||||
},
|
||||
"duration": "[duration]",
|
||||
"startedAt": "[date]",
|
||||
"finishedAt": "[date]"
|
||||
},
|
||||
{
|
||||
"uid": 23,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"deletedDocuments": 0
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"indexDeletion": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"mieli": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.004146631S",
|
||||
"startedAt": "2025-01-23T11:38:57.012591321Z",
|
||||
"finishedAt": "2025-01-23T11:38:57.016737952Z"
|
||||
},
|
||||
{
|
||||
"uid": 22,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"receivedDocuments": 1,
|
||||
"indexedDocuments": 1
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.102738497S",
|
||||
"startedAt": "2025-01-23T11:36:22.551906856Z",
|
||||
"finishedAt": "2025-01-23T11:36:22.654645353Z"
|
||||
},
|
||||
{
|
||||
"uid": 21,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"receivedDocuments": 1,
|
||||
"indexedDocuments": 0
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"failed": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.005108474S",
|
||||
"startedAt": "2025-01-23T11:36:04.132670526Z",
|
||||
"finishedAt": "2025-01-23T11:36:04.137779Z"
|
||||
},
|
||||
{
|
||||
"uid": 20,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"receivedDocuments": 1,
|
||||
"indexedDocuments": 0
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"failed": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"mieli": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.027954894S",
|
||||
"startedAt": "2025-01-23T11:35:53.631082795Z",
|
||||
"finishedAt": "2025-01-23T11:35:53.659037689Z"
|
||||
},
|
||||
{
|
||||
"uid": 19,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"deletedDocuments": 19546
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"indexDeletion": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"mieli": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.006903297S",
|
||||
"startedAt": "2025-01-20T11:50:52.874106134Z",
|
||||
"finishedAt": "2025-01-20T11:50:52.881009431Z"
|
||||
},
|
||||
{
|
||||
"uid": 18,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"receivedDocuments": 19547,
|
||||
"indexedDocuments": 0,
|
||||
"matchedTasks": 1,
|
||||
"canceledTasks": 1,
|
||||
"originalFilter": "?statuses=processing%2Cenqueued"
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 2,
|
||||
"status": {
|
||||
"succeeded": 1,
|
||||
"canceled": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 1,
|
||||
"taskCancelation": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"mieli": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.000481257S",
|
||||
"startedAt": "2025-01-20T11:48:04.92820416Z",
|
||||
"finishedAt": "2025-01-20T11:48:04.928685417Z"
|
||||
},
|
||||
{
|
||||
"uid": 17,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"matchedTasks": 0,
|
||||
"canceledTasks": 0,
|
||||
"originalFilter": "?statuses=processing%2Cenqueued"
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"taskCancelation": 1
|
||||
},
|
||||
"indexUids": {}
|
||||
},
|
||||
"duration": "PT0.000407005S",
|
||||
"startedAt": "2025-01-20T11:47:53.509403957Z",
|
||||
"finishedAt": "2025-01-20T11:47:53.509810962Z"
|
||||
},
|
||||
{
|
||||
"uid": 16,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"matchedTasks": 0,
|
||||
"canceledTasks": 0,
|
||||
"originalFilter": "?statuses=processing%2Cenqueued"
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"taskCancelation": 1
|
||||
},
|
||||
"indexUids": {}
|
||||
},
|
||||
"duration": "PT0.000403716S",
|
||||
"startedAt": "2025-01-20T11:47:48.430653005Z",
|
||||
"finishedAt": "2025-01-20T11:47:48.431056721Z"
|
||||
},
|
||||
{
|
||||
"uid": 15,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"matchedTasks": 0,
|
||||
"canceledTasks": 0,
|
||||
"originalFilter": "?statuses=processing"
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"taskCancelation": 1
|
||||
},
|
||||
"indexUids": {}
|
||||
},
|
||||
"duration": "PT0.000417016S",
|
||||
"startedAt": "2025-01-20T11:47:42.429678617Z",
|
||||
"finishedAt": "2025-01-20T11:47:42.430095633Z"
|
||||
},
|
||||
{
|
||||
"uid": 14,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"receivedDocuments": 19547,
|
||||
"indexedDocuments": 19546
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"documentAdditionOrUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"mieli": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT12.086284842S",
|
||||
"startedAt": "2025-01-20T11:47:03.092181576Z",
|
||||
"finishedAt": "2025-01-20T11:47:15.178466418Z"
|
||||
},
|
||||
{
|
||||
"uid": 13,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"localizedAttributes": [
|
||||
{
|
||||
"attributePatterns": [
|
||||
"description"
|
||||
],
|
||||
"locales": [
|
||||
"fr"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.011506614S",
|
||||
"startedAt": "2025-01-16T17:18:43.29334923Z",
|
||||
"finishedAt": "2025-01-16T17:18:43.304855844Z"
|
||||
},
|
||||
{
|
||||
"uid": 12,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"faceting": {
|
||||
"sortFacetValuesBy": {
|
||||
"*": "alpha",
|
||||
"age": "count"
|
||||
}
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007640163S",
|
||||
"startedAt": "2025-01-16T17:02:52.539749853Z",
|
||||
"finishedAt": "2025-01-16T17:02:52.547390016Z"
|
||||
},
|
||||
{
|
||||
"uid": 11,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"searchCutoffMs": 8000
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007307840S",
|
||||
"startedAt": "2025-01-16T17:01:14.112756687Z",
|
||||
"finishedAt": "2025-01-16T17:01:14.120064527Z"
|
||||
},
|
||||
{
|
||||
"uid": 10,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"faceting": {
|
||||
"maxValuesPerFacet": 99
|
||||
},
|
||||
"pagination": {
|
||||
"maxTotalHits": 15
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007391353S",
|
||||
"startedAt": "2025-01-16T17:00:29.201180268Z",
|
||||
"finishedAt": "2025-01-16T17:00:29.208571621Z"
|
||||
},
|
||||
{
|
||||
"uid": 9,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"faceting": {
|
||||
"maxValuesPerFacet": 100
|
||||
},
|
||||
"pagination": {
|
||||
"maxTotalHits": 1000
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007445825S",
|
||||
"startedAt": "2025-01-16T17:00:15.77629445Z",
|
||||
"finishedAt": "2025-01-16T17:00:15.783740275Z"
|
||||
},
|
||||
{
|
||||
"uid": 8,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"typoTolerance": {
|
||||
"minWordSizeForTypos": {
|
||||
"oneTypo": 4
|
||||
},
|
||||
"disableOnWords": [
|
||||
"kefir"
|
||||
],
|
||||
"disableOnAttributes": [
|
||||
"surname"
|
||||
]
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.012020083S",
|
||||
"startedAt": "2025-01-16T16:59:42.744086671Z",
|
||||
"finishedAt": "2025-01-16T16:59:42.756106754Z"
|
||||
},
|
||||
{
|
||||
"uid": 7,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"typoTolerance": {
|
||||
"minWordSizeForTypos": {
|
||||
"oneTypo": 4
|
||||
}
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007440092S",
|
||||
"startedAt": "2025-01-16T16:58:41.2155771Z",
|
||||
"finishedAt": "2025-01-16T16:58:41.223017192Z"
|
||||
},
|
||||
{
|
||||
"uid": 6,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"synonyms": {
|
||||
"boubou": [
|
||||
"kefir"
|
||||
]
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.007565161S",
|
||||
"startedAt": "2025-01-16T16:54:51.940332781Z",
|
||||
"finishedAt": "2025-01-16T16:54:51.947897942Z"
|
||||
},
|
||||
{
|
||||
"uid": 5,
|
||||
"progress": null,
|
||||
"details": {
|
||||
"stopWords": [
|
||||
"le",
|
||||
"un"
|
||||
]
|
||||
},
|
||||
"stats": {
|
||||
"totalNbTasks": 1,
|
||||
"status": {
|
||||
"succeeded": 1
|
||||
},
|
||||
"types": {
|
||||
"settingsUpdate": 1
|
||||
},
|
||||
"indexUids": {
|
||||
"kefir": 1
|
||||
}
|
||||
},
|
||||
"duration": "PT0.016307263S",
|
||||
"startedAt": "2025-01-16T16:53:19.913351957Z",
|
||||
"finishedAt": "2025-01-16T16:53:19.92965922Z"
|
||||
}
|
||||
],
|
||||
"total": 23,
|
||||
"limit": 20,
|
||||
"from": 24,
|
||||
"next": 4
|
||||
}
|
@ -0,0 +1,39 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
snapshot_kind: text
---
{
"results": [
{
"uid": 10,
"progress": null,
"details": {
"faceting": {
"maxValuesPerFacet": 99
},
"pagination": {
"maxTotalHits": 15
}
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"settingsUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "[duration]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
],
"total": 1,
"limit": 20,
"from": 10,
"next": null
}
@ -0,0 +1,57 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
---
{
"results": [
{
"uid": 1,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "[duration]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 0,
"progress": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"stats": {
"totalNbTasks": 1,
"status": {
"succeeded": 1
},
"types": {
"documentAdditionOrUpdate": 1
},
"indexUids": {
"kefir": 1
}
},
"duration": "PT0.111055654S",
"startedAt": "2025-01-16T16:45:16.020248085Z",
"finishedAt": "2025-01-16T16:45:16.131303739Z"
}
],
"total": 2,
"limit": 20,
"from": 1,
"next": null
}
@ -0,0 +1,40 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
snapshot_kind: text
---
{
"results": [
{
"uid": 18,
"progress": null,
"details": {
"receivedDocuments": 19547,
"indexedDocuments": 0,
"matchedTasks": 1,
"canceledTasks": 1,
"originalFilter": "?statuses=processing%2Cenqueued"
},
"stats": {
"totalNbTasks": 2,
"status": {
"succeeded": 1,
"canceled": 1
},
"types": {
"documentAdditionOrUpdate": 1,
"taskCancelation": 1
},
"indexUids": {
"mieli": 1
}
},
"duration": "[duration]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
],
"total": 1,
"limit": 20,
"from": 18,
"next": null
}
@ -0,0 +1,11 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
snapshot_kind: text
---
{
"results": [],
"total": 0,
"limit": 20,
"from": null,
"next": null
}
@ -0,0 +1,397 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
---
{
"results": [
{
"uid": 25,
"batchUid": 24,
"indexUid": null,
"status": "succeeded",
"type": "upgradeDatabase",
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.13.0"
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 24,
"batchUid": 23,
"indexUid": "mieli",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
"details": {
"deletedDocuments": 0
},
"error": null,
"duration": "PT0.004146631S",
"enqueuedAt": "2025-01-23T11:38:57.000009177Z",
"startedAt": "2025-01-23T11:38:57.012591321Z",
"finishedAt": "2025-01-23T11:38:57.016737952Z"
},
{
"uid": 23,
"batchUid": 22,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.102738497S",
"enqueuedAt": "2025-01-23T11:36:22.53917994Z",
"startedAt": "2025-01-23T11:36:22.551906856Z",
"finishedAt": "2025-01-23T11:36:22.654645353Z"
},
{
"uid": 22,
"batchUid": 21,
"indexUid": "kefir",
"status": "failed",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"error": {
"message": "Document doesn't have a `id` attribute: `{\"age\":1.4}`.",
"code": "missing_document_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#missing_document_id"
},
"duration": "PT0.005108474S",
"enqueuedAt": "2025-01-23T11:36:04.115475071Z",
"startedAt": "2025-01-23T11:36:04.132670526Z",
"finishedAt": "2025-01-23T11:36:04.137779Z"
},
{
"uid": 21,
"batchUid": 20,
"indexUid": "mieli",
"status": "failed",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"error": {
"message": "The primary key inference failed as the engine did not find any field ending with `id` in its name. Please specify the primary key manually using the `primaryKey` query parameter.",
"code": "index_primary_key_no_candidate_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_primary_key_no_candidate_found"
},
"duration": "PT0.027954894S",
"enqueuedAt": "2025-01-23T11:35:53.625718309Z",
"startedAt": "2025-01-23T11:35:53.631082795Z",
"finishedAt": "2025-01-23T11:35:53.659037689Z"
},
{
"uid": 20,
"batchUid": 19,
"indexUid": "mieli",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
"details": {
"deletedDocuments": 19546
},
"error": null,
"duration": "PT0.006903297S",
"enqueuedAt": "2025-01-20T11:50:52.862223877Z",
"startedAt": "2025-01-20T11:50:52.874106134Z",
"finishedAt": "2025-01-20T11:50:52.881009431Z"
},
{
"uid": 19,
"batchUid": 18,
"indexUid": null,
"status": "succeeded",
"type": "taskCancelation",
"canceledBy": null,
"details": {
"matchedTasks": 1,
"canceledTasks": 1,
"originalFilter": "?statuses=processing%2Cenqueued"
},
"error": null,
"duration": "PT0.000481257S",
"enqueuedAt": "2025-01-20T11:48:04.618121963Z",
"startedAt": "2025-01-20T11:48:04.92820416Z",
"finishedAt": "2025-01-20T11:48:04.928685417Z"
},
{
"uid": 18,
"batchUid": 18,
"indexUid": "mieli",
"status": "canceled",
"type": "documentAdditionOrUpdate",
"canceledBy": 19,
"details": {
"receivedDocuments": 19547,
"indexedDocuments": 0
},
"error": null,
"duration": "PT0.000481257S",
"enqueuedAt": "2025-01-20T11:48:04.596815611Z",
"startedAt": "2025-01-20T11:48:04.92820416Z",
"finishedAt": "2025-01-20T11:48:04.928685417Z"
},
{
"uid": 17,
"batchUid": 17,
"indexUid": null,
"status": "succeeded",
"type": "taskCancelation",
"canceledBy": null,
"details": {
"matchedTasks": 0,
"canceledTasks": 0,
"originalFilter": "?statuses=processing%2Cenqueued"
},
"error": null,
"duration": "PT0.000407005S",
"enqueuedAt": "2025-01-20T11:47:53.498618093Z",
"startedAt": "2025-01-20T11:47:53.509403957Z",
"finishedAt": "2025-01-20T11:47:53.509810962Z"
},
{
"uid": 16,
"batchUid": 16,
"indexUid": null,
"status": "succeeded",
"type": "taskCancelation",
"canceledBy": null,
"details": {
"matchedTasks": 0,
"canceledTasks": 0,
"originalFilter": "?statuses=processing%2Cenqueued"
},
"error": null,
"duration": "PT0.000403716S",
"enqueuedAt": "2025-01-20T11:47:48.426597451Z",
"startedAt": "2025-01-20T11:47:48.430653005Z",
"finishedAt": "2025-01-20T11:47:48.431056721Z"
},
{
"uid": 15,
"batchUid": 15,
"indexUid": null,
"status": "succeeded",
"type": "taskCancelation",
"canceledBy": null,
"details": {
"matchedTasks": 0,
"canceledTasks": 0,
"originalFilter": "?statuses=processing"
},
"error": null,
"duration": "PT0.000417016S",
"enqueuedAt": "2025-01-20T11:47:42.414346511Z",
"startedAt": "2025-01-20T11:47:42.429678617Z",
"finishedAt": "2025-01-20T11:47:42.430095633Z"
},
{
"uid": 14,
"batchUid": 14,
"indexUid": "mieli",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 19547,
"indexedDocuments": 19546
},
"error": null,
"duration": "PT12.086284842S",
"enqueuedAt": "2025-01-20T11:47:03.079292487Z",
"startedAt": "2025-01-20T11:47:03.092181576Z",
"finishedAt": "2025-01-20T11:47:15.178466418Z"
},
{
"uid": 13,
"batchUid": 13,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"localizedAttributes": [
{
"attributePatterns": [
"description"
],
"locales": [
"fr"
]
}
]
},
"error": null,
"duration": "PT0.011506614S",
"enqueuedAt": "2025-01-16T17:18:43.280901282Z",
"startedAt": "2025-01-16T17:18:43.29334923Z",
"finishedAt": "2025-01-16T17:18:43.304855844Z"
},
{
"uid": 12,
"batchUid": 12,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"sortFacetValuesBy": {
"*": "alpha",
"age": "count"
}
}
},
"error": null,
"duration": "PT0.007640163S",
"enqueuedAt": "2025-01-16T17:02:52.527382964Z",
"startedAt": "2025-01-16T17:02:52.539749853Z",
"finishedAt": "2025-01-16T17:02:52.547390016Z"
},
{
"uid": 11,
"batchUid": 11,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"searchCutoffMs": 8000
},
"error": null,
"duration": "PT0.007307840S",
"enqueuedAt": "2025-01-16T17:01:14.100316617Z",
"startedAt": "2025-01-16T17:01:14.112756687Z",
"finishedAt": "2025-01-16T17:01:14.120064527Z"
},
{
"uid": 10,
"batchUid": 10,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 99
},
"pagination": {
"maxTotalHits": 15
}
},
"error": null,
"duration": "PT0.007391353S",
"enqueuedAt": "2025-01-16T17:00:29.188815062Z",
"startedAt": "2025-01-16T17:00:29.201180268Z",
"finishedAt": "2025-01-16T17:00:29.208571621Z"
},
{
"uid": 9,
"batchUid": 9,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 100
},
"pagination": {
"maxTotalHits": 1000
}
},
"error": null,
"duration": "PT0.007445825S",
"enqueuedAt": "2025-01-16T17:00:15.759501709Z",
"startedAt": "2025-01-16T17:00:15.77629445Z",
"finishedAt": "2025-01-16T17:00:15.783740275Z"
},
{
"uid": 8,
"batchUid": 8,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
},
"disableOnWords": [
"kefir"
],
"disableOnAttributes": [
"surname"
]
}
},
"error": null,
"duration": "PT0.012020083S",
"enqueuedAt": "2025-01-16T16:59:42.727292501Z",
"startedAt": "2025-01-16T16:59:42.744086671Z",
"finishedAt": "2025-01-16T16:59:42.756106754Z"
},
{
"uid": 7,
"batchUid": 7,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
}
}
},
"error": null,
"duration": "PT0.007440092S",
"enqueuedAt": "2025-01-16T16:58:41.203145044Z",
"startedAt": "2025-01-16T16:58:41.2155771Z",
"finishedAt": "2025-01-16T16:58:41.223017192Z"
},
{
"uid": 6,
"batchUid": 6,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"synonyms": {
"boubou": [
"kefir"
]
}
},
"error": null,
"duration": "PT0.007565161S",
"enqueuedAt": "2025-01-16T16:54:51.927866243Z",
"startedAt": "2025-01-16T16:54:51.940332781Z",
"finishedAt": "2025-01-16T16:54:51.947897942Z"
}
],
"total": 24,
"limit": 20,
"from": 25,
"next": 5
}
@ -0,0 +1,397 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
---
{
"results": [
{
"uid": 25,
"batchUid": 24,
"indexUid": null,
"status": "succeeded",
"type": "upgradeDatabase",
"canceledBy": null,
"details": {
"upgradeFrom": "v1.12.0",
"upgradeTo": "v1.13.0"
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 24,
"batchUid": 23,
"indexUid": "mieli",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
"details": {
"deletedDocuments": 0
},
"error": null,
"duration": "PT0.004146631S",
"enqueuedAt": "2025-01-23T11:38:57.000009177Z",
"startedAt": "2025-01-23T11:38:57.012591321Z",
"finishedAt": "2025-01-23T11:38:57.016737952Z"
},
{
"uid": 23,
"batchUid": 22,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.102738497S",
"enqueuedAt": "2025-01-23T11:36:22.53917994Z",
"startedAt": "2025-01-23T11:36:22.551906856Z",
"finishedAt": "2025-01-23T11:36:22.654645353Z"
},
{
"uid": 22,
"batchUid": 21,
"indexUid": "kefir",
"status": "failed",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"error": {
"message": "Document doesn't have a `id` attribute: `{\"age\":1.4}`.",
"code": "missing_document_id",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#missing_document_id"
},
"duration": "PT0.005108474S",
"enqueuedAt": "2025-01-23T11:36:04.115475071Z",
"startedAt": "2025-01-23T11:36:04.132670526Z",
"finishedAt": "2025-01-23T11:36:04.137779Z"
},
{
"uid": 21,
"batchUid": 20,
"indexUid": "mieli",
"status": "failed",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 0
},
"error": {
"message": "The primary key inference failed as the engine did not find any field ending with `id` in its name. Please specify the primary key manually using the `primaryKey` query parameter.",
"code": "index_primary_key_no_candidate_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_primary_key_no_candidate_found"
},
"duration": "PT0.027954894S",
"enqueuedAt": "2025-01-23T11:35:53.625718309Z",
"startedAt": "2025-01-23T11:35:53.631082795Z",
"finishedAt": "2025-01-23T11:35:53.659037689Z"
},
{
"uid": 20,
"batchUid": 19,
"indexUid": "mieli",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
"details": {
"deletedDocuments": 19546
},
"error": null,
"duration": "PT0.006903297S",
"enqueuedAt": "2025-01-20T11:50:52.862223877Z",
"startedAt": "2025-01-20T11:50:52.874106134Z",
"finishedAt": "2025-01-20T11:50:52.881009431Z"
},
{
"uid": 19,
"batchUid": 18,
"indexUid": null,
"status": "succeeded",
"type": "taskCancelation",
"canceledBy": null,
"details": {
"matchedTasks": 1,
"canceledTasks": 1,
"originalFilter": "?statuses=processing%2Cenqueued"
},
"error": null,
"duration": "PT0.000481257S",
"enqueuedAt": "2025-01-20T11:48:04.618121963Z",
"startedAt": "2025-01-20T11:48:04.92820416Z",
"finishedAt": "2025-01-20T11:48:04.928685417Z"
},
{
"uid": 18,
"batchUid": 18,
"indexUid": "mieli",
"status": "canceled",
"type": "documentAdditionOrUpdate",
"canceledBy": 19,
"details": {
"receivedDocuments": 19547,
"indexedDocuments": 0
},
"error": null,
"duration": "PT0.000481257S",
"enqueuedAt": "2025-01-20T11:48:04.596815611Z",
"startedAt": "2025-01-20T11:48:04.92820416Z",
"finishedAt": "2025-01-20T11:48:04.928685417Z"
},
{
"uid": 17,
"batchUid": 17,
"indexUid": null,
"status": "succeeded",
"type": "taskCancelation",
"canceledBy": null,
"details": {
"matchedTasks": 0,
"canceledTasks": 0,
"originalFilter": "?statuses=processing%2Cenqueued"
},
"error": null,
"duration": "PT0.000407005S",
"enqueuedAt": "2025-01-20T11:47:53.498618093Z",
"startedAt": "2025-01-20T11:47:53.509403957Z",
"finishedAt": "2025-01-20T11:47:53.509810962Z"
},
{
"uid": 16,
"batchUid": 16,
"indexUid": null,
"status": "succeeded",
"type": "taskCancelation",
"canceledBy": null,
"details": {
"matchedTasks": 0,
"canceledTasks": 0,
"originalFilter": "?statuses=processing%2Cenqueued"
},
"error": null,
"duration": "PT0.000403716S",
"enqueuedAt": "2025-01-20T11:47:48.426597451Z",
"startedAt": "2025-01-20T11:47:48.430653005Z",
"finishedAt": "2025-01-20T11:47:48.431056721Z"
},
{
"uid": 15,
"batchUid": 15,
"indexUid": null,
"status": "succeeded",
"type": "taskCancelation",
"canceledBy": null,
"details": {
"matchedTasks": 0,
"canceledTasks": 0,
"originalFilter": "?statuses=processing"
},
"error": null,
"duration": "PT0.000417016S",
"enqueuedAt": "2025-01-20T11:47:42.414346511Z",
"startedAt": "2025-01-20T11:47:42.429678617Z",
"finishedAt": "2025-01-20T11:47:42.430095633Z"
},
{
"uid": 14,
"batchUid": 14,
"indexUid": "mieli",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 19547,
"indexedDocuments": 19546
},
"error": null,
"duration": "PT12.086284842S",
"enqueuedAt": "2025-01-20T11:47:03.079292487Z",
"startedAt": "2025-01-20T11:47:03.092181576Z",
"finishedAt": "2025-01-20T11:47:15.178466418Z"
},
{
"uid": 13,
"batchUid": 13,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"localizedAttributes": [
{
"attributePatterns": [
"description"
],
"locales": [
"fr"
]
}
]
},
"error": null,
"duration": "PT0.011506614S",
"enqueuedAt": "2025-01-16T17:18:43.280901282Z",
"startedAt": "2025-01-16T17:18:43.29334923Z",
"finishedAt": "2025-01-16T17:18:43.304855844Z"
},
{
"uid": 12,
"batchUid": 12,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"sortFacetValuesBy": {
"*": "alpha",
"age": "count"
}
}
},
"error": null,
"duration": "PT0.007640163S",
"enqueuedAt": "2025-01-16T17:02:52.527382964Z",
"startedAt": "2025-01-16T17:02:52.539749853Z",
"finishedAt": "2025-01-16T17:02:52.547390016Z"
},
{
"uid": 11,
"batchUid": 11,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"searchCutoffMs": 8000
},
"error": null,
"duration": "PT0.007307840S",
"enqueuedAt": "2025-01-16T17:01:14.100316617Z",
"startedAt": "2025-01-16T17:01:14.112756687Z",
"finishedAt": "2025-01-16T17:01:14.120064527Z"
},
{
"uid": 10,
"batchUid": 10,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 99
},
"pagination": {
"maxTotalHits": 15
}
},
"error": null,
"duration": "PT0.007391353S",
"enqueuedAt": "2025-01-16T17:00:29.188815062Z",
"startedAt": "2025-01-16T17:00:29.201180268Z",
"finishedAt": "2025-01-16T17:00:29.208571621Z"
},
{
"uid": 9,
"batchUid": 9,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 100
},
"pagination": {
"maxTotalHits": 1000
}
},
"error": null,
"duration": "PT0.007445825S",
"enqueuedAt": "2025-01-16T17:00:15.759501709Z",
"startedAt": "2025-01-16T17:00:15.77629445Z",
"finishedAt": "2025-01-16T17:00:15.783740275Z"
},
{
"uid": 8,
"batchUid": 8,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
},
"disableOnWords": [
"kefir"
],
"disableOnAttributes": [
"surname"
]
}
},
"error": null,
"duration": "PT0.012020083S",
"enqueuedAt": "2025-01-16T16:59:42.727292501Z",
"startedAt": "2025-01-16T16:59:42.744086671Z",
"finishedAt": "2025-01-16T16:59:42.756106754Z"
},
{
"uid": 7,
"batchUid": 7,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"typoTolerance": {
"minWordSizeForTypos": {
"oneTypo": 4
}
}
},
"error": null,
"duration": "PT0.007440092S",
"enqueuedAt": "2025-01-16T16:58:41.203145044Z",
"startedAt": "2025-01-16T16:58:41.2155771Z",
"finishedAt": "2025-01-16T16:58:41.223017192Z"
},
{
"uid": 6,
"batchUid": 6,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"synonyms": {
"boubou": [
"kefir"
]
}
},
"error": null,
"duration": "PT0.007565161S",
"enqueuedAt": "2025-01-16T16:54:51.927866243Z",
"startedAt": "2025-01-16T16:54:51.940332781Z",
"finishedAt": "2025-01-16T16:54:51.947897942Z"
}
],
"total": 24,
"limit": 20,
"from": 25,
"next": 5
}
@ -0,0 +1,33 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
snapshot_kind: text
---
{
"results": [
{
"uid": 10,
"batchUid": 10,
"indexUid": "kefir",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
"details": {
"faceting": {
"maxValuesPerFacet": 99
},
"pagination": {
"maxTotalHits": 15
}
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
],
"total": 1,
"limit": 20,
"from": 10,
"next": null
}
@ -0,0 +1,45 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
---
{
"results": [
{
"uid": 1,
"batchUid": 1,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 0,
"batchUid": 0,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.111055654S",
"enqueuedAt": "2025-01-16T16:45:16.003570092Z",
"startedAt": "2025-01-16T16:45:16.020248085Z",
"finishedAt": "2025-01-16T16:45:16.131303739Z"
}
],
"total": 2,
"limit": 20,
"from": 1,
"next": null
}
@ -0,0 +1,45 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
---
{
"results": [
{
"uid": 1,
"batchUid": 1,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 0,
"batchUid": 0,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.111055654S",
"enqueuedAt": "2025-01-16T16:45:16.003570092Z",
"startedAt": "2025-01-16T16:45:16.020248085Z",
"finishedAt": "2025-01-16T16:45:16.131303739Z"
}
],
"total": 2,
"limit": 20,
"from": 1,
"next": null
}
@ -0,0 +1,45 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
---
{
"results": [
{
"uid": 1,
"batchUid": 1,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
},
{
"uid": 0,
"batchUid": 0,
"indexUid": "kefir",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
"details": {
"receivedDocuments": 1,
"indexedDocuments": 1
},
"error": null,
"duration": "PT0.111055654S",
"enqueuedAt": "2025-01-16T16:45:16.003570092Z",
"startedAt": "2025-01-16T16:45:16.020248085Z",
"finishedAt": "2025-01-16T16:45:16.131303739Z"
}
],
"total": 2,
"limit": 20,
"from": 1,
"next": null
}
@ -0,0 +1,29 @@
---
source: crates/meilisearch/tests/upgrade/v1_12/v1_12_0.rs
snapshot_kind: text
---
{
"results": [
{
"uid": 18,
"batchUid": 18,
"indexUid": "mieli",
"status": "canceled",
"type": "documentAdditionOrUpdate",
"canceledBy": 19,
"details": {
"receivedDocuments": 19547,
"indexedDocuments": 0
},
"error": null,
"duration": "[duration]",
"enqueuedAt": "[date]",
"startedAt": "[date]",
"finishedAt": "[date]"
}
],
"total": 1,
"limit": 20,
"from": 18,
"next": null
}
Some files were not shown because too many files have changed in this diff