meilisearch/index-scheduler/src/batch.rs

use crate::{
autobatcher::BatchKind,
task::{Details, Kind, KindWithContent, Status, Task},
Error, IndexScheduler, Result, TaskId,
};
use index::{Settings, Unchecked};
use milli::heed::RoTxn;
use milli::update::{DocumentAdditionResult, DocumentDeletionResult, IndexDocumentsMethod};
use uuid::Uuid;
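/// A batch of tasks grouped together by the autobatcher: either one or several
/// tasks targeting the same index, or a scheduler-level operation such as a
/// cancel, snapshot or dump. A whole batch is applied by a single call to
/// `IndexScheduler::process_batch`.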
pub(crate) enum Batch {
Cancel(Task),
Snapshot(Vec<Task>),
Dump(Vec<Task>),
DocumentAddition {
index_uid: String,
primary_key: Option<String>,
content_files: Vec<Uuid>,
tasks: Vec<Task>,
},
DocumentUpdate {
index_uid: String,
primary_key: Option<String>,
content_files: Vec<Uuid>,
tasks: Vec<Task>,
},
DocumentDeletion {
index_uid: String,
documents: Vec<String>,
tasks: Vec<Task>,
},
DocumentClear {
index_uid: String,
tasks: Vec<Task>,
},
Settings {
index_uid: String,
// TODO what's that boolean, does it mean that it removes things or what?
settings: Vec<(bool, Settings<Unchecked>)>,
tasks: Vec<Task>,
},
DocumentClearAndSetting {
index_uid: String,
cleared_tasks: Vec<Task>,
// TODO what's that boolean, does it mean that it removes things or what?
settings: Vec<(bool, Settings<Unchecked>)>,
settings_tasks: Vec<Task>,
},
SettingsAndDocumentAddition {
index_uid: String,
primary_key: Option<String>,
content_files: Vec<Uuid>,
document_addition_tasks: Vec<Task>,
// TODO what's that boolean, does it mean that it removes things or what?
settings: Vec<(bool, Settings<Unchecked>)>,
settings_tasks: Vec<Task>,
},
SettingsAndDocumentUpdate {
index_uid: String,
primary_key: Option<String>,
content_files: Vec<Uuid>,
document_update_tasks: Vec<Task>,
// TODO what's that boolean, does it mean that it removes things or what?
settings: Vec<(bool, Settings<Unchecked>)>,
settings_tasks: Vec<Task>,
},
IndexCreation {
index_uid: String,
primary_key: Option<String>,
task: Task,
},
IndexUpdate {
index_uid: String,
primary_key: Option<String>,
task: Task,
},
IndexDeletion {
index_uid: String,
tasks: Vec<Task>,
},
}
impl Batch {
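/// Return the ids of every task contained in this batch.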
pub fn ids(&self) -> Vec<TaskId> {
match self {
Batch::Cancel(task)
| Batch::IndexCreation { task, .. }
| Batch::IndexUpdate { task, .. } => vec![task.uid],
Batch::Snapshot(tasks)
| Batch::Dump(tasks)
| Batch::DocumentAddition { tasks, .. }
| Batch::DocumentUpdate { tasks, .. }
| Batch::DocumentDeletion { tasks, .. }
| Batch::Settings { tasks, .. }
| Batch::DocumentClear { tasks, .. }
| Batch::IndexDeletion { tasks, .. } => tasks.iter().map(|task| task.uid).collect(),
Batch::SettingsAndDocumentAddition {
document_addition_tasks: tasks,
settings_tasks: other,
..
}
| Batch::DocumentClearAndSetting {
cleared_tasks: tasks,
settings_tasks: other,
..
}
| Batch::SettingsAndDocumentUpdate {
document_update_tasks: tasks,
settings_tasks: other,
..
} => tasks.iter().chain(other).map(|task| task.uid).collect(),
}
}
}
impl IndexScheduler {
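/// Turn a `BatchKind`, as produced by the autobatcher, into an executable
/// `Batch` by fetching the corresponding tasks (and the metadata they carry,
/// such as primary keys, content files or settings) from the task store.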
pub(crate) fn create_next_batch_index(
&self,
rtxn: &RoTxn,
index_uid: String,
batch: BatchKind,
) -> Result<Option<Batch>> {
match batch {
BatchKind::DocumentClear { ids } => Ok(Some(Batch::DocumentClear {
tasks: self.get_existing_tasks(rtxn, ids)?,
index_uid,
})),
BatchKind::DocumentAddition { addition_ids } => {
let tasks = self.get_existing_tasks(rtxn, addition_ids)?;
let primary_key = match &tasks[0].kind {
KindWithContent::DocumentAddition { primary_key, .. } => primary_key.clone(),
_ => unreachable!(),
};
let content_files = tasks
.iter()
.map(|task| match task.kind {
KindWithContent::DocumentAddition { content_file, .. } => content_file,
_ => unreachable!(),
})
.collect();
Ok(Some(Batch::DocumentAddition {
index_uid,
primary_key,
content_files,
tasks,
}))
}
BatchKind::DocumentUpdate { update_ids } => {
let tasks = self.get_existing_tasks(rtxn, update_ids)?;
let primary_key = match &tasks[0].kind {
KindWithContent::DocumentUpdate { primary_key, .. } => primary_key.clone(),
_ => unreachable!(),
};
let content_files = tasks
.iter()
.map(|task| match task.kind {
KindWithContent::DocumentUpdate { content_file, .. } => content_file,
_ => unreachable!(),
})
.collect();
Ok(Some(Batch::DocumentUpdate {
index_uid,
primary_key,
content_files,
tasks,
}))
}
BatchKind::DocumentDeletion { deletion_ids } => {
let tasks = self.get_existing_tasks(rtxn, deletion_ids)?;
let mut documents = Vec::new();
for task in &tasks {
match task.kind {
KindWithContent::DocumentDeletion {
ref documents_ids, ..
} => documents.extend_from_slice(documents_ids),
_ => unreachable!(),
}
}
Ok(Some(Batch::DocumentDeletion {
index_uid,
documents,
tasks,
}))
}
BatchKind::Settings { settings_ids } => {
let tasks = self.get_existing_tasks(rtxn, settings_ids)?;
let mut settings = Vec::new();
for task in &tasks {
match task.kind {
KindWithContent::Settings {
ref new_settings,
is_deletion,
..
} => settings.push((is_deletion, new_settings.clone())),
_ => unreachable!(),
}
}
Ok(Some(Batch::Settings {
index_uid,
settings,
tasks,
}))
}
BatchKind::ClearAndSettings {
other,
settings_ids,
} => {
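// Build the settings and the document-clear sub-batches by recursing,
// then merge them into a single combined batch.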
let (index_uid, settings, settings_tasks) = match self
.create_next_batch_index(rtxn, index_uid, BatchKind::Settings { settings_ids })?
.unwrap()
{
Batch::Settings {
index_uid,
settings,
tasks,
} => (index_uid, settings, tasks),
_ => unreachable!(),
};
let (index_uid, cleared_tasks) = match self
.create_next_batch_index(
rtxn,
index_uid,
BatchKind::DocumentClear { ids: other },
)?
.unwrap()
{
Batch::DocumentClear { index_uid, tasks } => (index_uid, tasks),
_ => unreachable!(),
};
Ok(Some(Batch::DocumentClearAndSetting {
index_uid,
cleared_tasks,
settings,
settings_tasks,
}))
}
BatchKind::SettingsAndDocumentAddition {
addition_ids,
settings_ids,
} => {
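// Same strategy: build the settings and the document-addition sub-batches
// independently, then combine their contents into one batch.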
let (index_uid, settings, settings_tasks) = match self
.create_next_batch_index(rtxn, index_uid, BatchKind::Settings { settings_ids })?
.unwrap()
{
Batch::Settings {
index_uid,
settings,
tasks,
} => (index_uid, settings, tasks),
_ => unreachable!(),
};
let (index_uid, primary_key, content_files, document_addition_tasks) = match self
.create_next_batch_index(
rtxn,
index_uid,
BatchKind::DocumentAddition { addition_ids },
)?
.unwrap()
{
Batch::DocumentAddition {
index_uid,
primary_key,
content_files,
tasks,
} => (index_uid, primary_key, content_files, tasks),
_ => unreachable!(),
};
Ok(Some(Batch::SettingsAndDocumentAddition {
index_uid,
primary_key,
content_files,
document_addition_tasks,
settings,
settings_tasks,
}))
}
BatchKind::SettingsAndDocumentUpdate {
update_ids,
settings_ids,
} => {
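// Build the settings and the document-update sub-batches, then combine
// them; any other shape of sub-batch is impossible by construction.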
let settings = self.create_next_batch_index(
rtxn,
index_uid.clone(),
BatchKind::Settings { settings_ids },
)?;
let document_update = self.create_next_batch_index(
rtxn,
index_uid.clone(),
BatchKind::DocumentUpdate { update_ids },
)?;
match (document_update, settings) {
(
Some(Batch::DocumentUpdate {
primary_key,
content_files,
tasks: document_update_tasks,
..
}),
Some(Batch::Settings {
settings,
tasks: settings_tasks,
..
}),
) => Ok(Some(Batch::SettingsAndDocumentUpdate {
index_uid,
primary_key,
content_files,
document_update_tasks,
settings,
settings_tasks,
})),
_ => unreachable!(),
}
}
BatchKind::IndexCreation { id } => {
let task = self.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
let (index_uid, primary_key) = match &task.kind {
KindWithContent::IndexCreation {
index_uid,
primary_key,
} => (index_uid.clone(), primary_key.clone()),
_ => unreachable!(),
};
Ok(Some(Batch::IndexCreation {
index_uid,
primary_key,
task,
}))
}
BatchKind::IndexUpdate { id } => {
let task = self.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
let primary_key = match &task.kind {
KindWithContent::IndexUpdate { primary_key, .. } => primary_key.clone(),
_ => unreachable!(),
};
Ok(Some(Batch::IndexUpdate {
index_uid,
primary_key,
task,
}))
}
BatchKind::IndexDeletion { ids } => Ok(Some(Batch::IndexDeletion {
index_uid,
tasks: self.get_existing_tasks(rtxn, ids)?,
})),
BatchKind::IndexSwap { id: _ } => todo!(),
BatchKind::IndexRename { id: _ } => todo!(),
}
}
/// Create the next batch to be processed. Candidates are considered in this
/// priority order (see the sketch below):
/// 1. We get the *last* task to cancel.
/// 2. We get the *next* snapshot to process.
/// 3. We get the *next* dump to process.
/// 4. We get the *next* tasks to process for a specific index.
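///
/// A minimal sketch of how this is typically driven (illustrative only, not a
/// doctest; it assumes a crate-internal caller holding an `IndexScheduler`
/// named `scheduler` and a read transaction `rtxn`):
/// ```ignore
/// if let Some(batch) = scheduler.create_next_batch(&rtxn)? {
///     // `process_batch` consumes the batch and returns the tasks with their
///     // `details` and `error` fields filled in.
///     let finished_tasks = scheduler.process_batch(batch)?;
/// }
/// ```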
pub(crate) fn create_next_batch(&self, rtxn: &RoTxn) -> Result<Option<Batch>> {
let enqueued = &self.get_status(rtxn, Status::Enqueued)?;
let to_cancel = self.get_kind(rtxn, Kind::CancelTask)? & enqueued;
// 1. we get the last task to cancel.
if let Some(task_id) = to_cancel.max() {
return Ok(Some(Batch::Cancel(
self.get_task(rtxn, task_id)?
.ok_or(Error::CorruptedTaskQueue)?,
)));
}
// 2. we batch the snapshot.
let to_snapshot = self.get_kind(rtxn, Kind::Snapshot)? & enqueued;
if !to_snapshot.is_empty() {
return Ok(Some(Batch::Snapshot(
self.get_existing_tasks(rtxn, to_snapshot)?,
)));
}
// 3. we batch the dumps.
let to_dump = self.get_kind(rtxn, Kind::DumpExport)? & enqueued;
if !to_dump.is_empty() {
return Ok(Some(Batch::Dump(self.get_existing_tasks(rtxn, to_dump)?)));
}
// 4. We take the next task and try to batch all the tasks associated with this index.
if let Some(task_id) = enqueued.min() {
let task = self
.get_task(rtxn, task_id)?
.ok_or(Error::CorruptedTaskQueue)?;
// This is safe because all the remaining tasks are associated with
// AT LEAST one index. We can use either the right or the left one,
// it doesn't matter.
let index_name = task.indexes().unwrap()[0];
let _index = self.get_index(rtxn, &index_name)? & enqueued;
let enqueued = enqueued
.into_iter()
.map(|task_id| {
self.get_task(rtxn, task_id)
.and_then(|task| task.ok_or(Error::CorruptedTaskQueue))
.map(|task| (task.uid, task.kind.as_kind()))
})
.collect::<Result<Vec<_>>>()?;
if let Some(batchkind) = crate::autobatcher::autobatch(enqueued) {
return self.create_next_batch_index(rtxn, index_name.to_string(), batchkind);
}
}
// If we found no tasks then we were notified for something that got autobatched
// somehow and there is nothing to do.
Ok(None)
}
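/// Apply a batch to its target index and return its tasks, with their
/// `details` and `error` fields updated according to the outcome.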
pub(crate) fn process_batch(&self, batch: Batch) -> Result<Vec<Task>> {
match batch {
Batch::Cancel(_) => todo!(),
Batch::Snapshot(_) => todo!(),
Batch::Dump(_) => todo!(),
Batch::DocumentClear {
index_uid,
mut tasks,
} => {
let rtxn = self.env.read_txn()?;
let index = self.index_mapper.index(&rtxn, &index_uid)?;
rtxn.abort()?;
let ret = index.clear_documents();
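// Every task of the batch shares the outcome of this single clear operation.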
for task in &mut tasks {
task.details = Some(Details::ClearAll {
// TODO where can we find how many documents were deleted by this clear?
deleted_documents: None,
});
if let Err(ref error) = ret {
task.error = Some(error.into());
}
}
Ok(tasks)
}
// TODO we should merge both document imports with a method field
Batch::DocumentAddition {
index_uid,
primary_key,
content_files,
mut tasks,
} => {
// we NEED a write transaction for the index creation.
// To avoid blocking the whole process we're going to commit asap.
let mut wtxn = self.env.write_txn()?;
let index = self.index_mapper.create_index(&mut wtxn, &index_uid)?;
wtxn.commit()?;
let ret = index.update_documents(
IndexDocumentsMethod::ReplaceDocuments,
primary_key,
self.file_store.clone(),
content_files,
)?;
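// One result is expected per content file; zip them back onto the tasks
// that provided those files, in order.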
for (task, ret) in tasks.iter_mut().zip(ret) {
match ret {
Ok(DocumentAdditionResult {
indexed_documents,
number_of_documents,
}) => {
task.details = Some(Details::DocumentAddition {
received_documents: number_of_documents,
indexed_documents,
});
}
Err(error) => task.error = Some(error.into()),
}
}
Ok(tasks)
}
Batch::SettingsAndDocumentAddition {
index_uid,
primary_key,
content_files,
document_addition_tasks,
settings: _,
settings_tasks: _,
} => {
todo!();
}
// TODO we should merge both document imports with a method field
Batch::DocumentUpdate {
index_uid,
primary_key,
content_files,
mut tasks,
} => {
// we NEED a write transaction for the index creation.
// To avoid blocking the whole process we're going to commit asap.
let mut wtxn = self.env.write_txn()?;
let index = self.index_mapper.create_index(&mut wtxn, &index_uid)?;
wtxn.commit()?;
let ret = index.update_documents(
IndexDocumentsMethod::UpdateDocuments,
primary_key,
self.file_store.clone(),
content_files,
)?;
for (task, ret) in tasks.iter_mut().zip(ret) {
match ret {
Ok(DocumentAdditionResult {
indexed_documents,
number_of_documents,
}) => {
task.details = Some(Details::DocumentAddition {
received_documents: number_of_documents,
indexed_documents,
});
}
Err(error) => task.error = Some(error.into()),
}
}
Ok(tasks)
}
Batch::DocumentDeletion {
index_uid,
documents,
mut tasks,
} => {
let rtxn = self.env.read_txn()?;
let index = self.index_mapper.index(&rtxn, &index_uid)?;
let ret = index.delete_documents(&documents);
for task in &mut tasks {
match ret {
Ok(DocumentDeletionResult {
deleted_documents,
remaining_documents: _,
}) => {
// TODO we are assigning the same number of deleted documents to
// all the tasks that are in the same batch. That's wrong!
task.details = Some(Details::DocumentDeletion {
received_document_ids: documents.len(),
deleted_documents: Some(deleted_documents),
});
}
Err(ref error) => task.error = Some(error.into()),
}
}
Ok(tasks)
}
Batch::Settings {
index_uid,
settings,
mut tasks,
} => {
// we NEED a write transaction for the index creation.
// To avoid blocking the whole process we're going to commit asap.
let mut wtxn = self.env.write_txn()?;
let index = self.index_mapper.create_index(&mut wtxn, &index_uid)?;
wtxn.commit()?;
// TODO merge the settings to only do a reindexation once.
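// For now each settings task is applied on its own, so every task triggers
// its own `update_settings` call (and thus potentially its own reindexation).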
for (task, (_, settings)) in tasks.iter_mut().zip(settings) {
let checked_settings = settings.clone().check();
task.details = Some(Details::Settings { settings });
if let Err(error) = index.update_settings(&checked_settings) {
task.error = Some(error.into());
}
}
Ok(tasks)
}
Batch::DocumentClearAndSetting {
index_uid,
mut cleared_tasks,
settings,
mut settings_tasks,
} => {
// If the settings were given before the document clear
// we must create the index first.
// we NEED a write transaction for the index creation.
// To avoid blocking the whole process we're going to commit asap.
let mut wtxn = self.env.write_txn()?;
let index = self.index_mapper.create_index(&mut wtxn, &index_uid)?;
wtxn.commit()?;
// TODO We must use the same write transaction to commit
// the clear AND the settings in one transaction.
let ret = index.clear_documents();
for task in &mut cleared_tasks {
task.details = Some(Details::ClearAll {
// TODO where can we find how many documents were deleted by this clear?
deleted_documents: None,
});
if let Err(ref error) = ret {
task.error = Some(error.into());
}
}
// TODO merge the settings to only do a reindexation once.
for (task, (_, settings)) in settings_tasks.iter_mut().zip(settings) {
let checked_settings = settings.clone().check();
task.details = Some(Details::Settings { settings });
if let Err(error) = index.update_settings(&checked_settings) {
task.error = Some(error.into());
}
}
let mut tasks = cleared_tasks;
tasks.append(&mut settings_tasks);
Ok(tasks)
}
Batch::SettingsAndDocumentUpdate {
index_uid,
primary_key,
content_files,
document_update_tasks,
settings,
settings_tasks,
} => todo!(),
Batch::IndexCreation {
index_uid,
primary_key,
task,
} => todo!(),
Batch::IndexUpdate {
index_uid,
primary_key,
task,
} => todo!(),
Batch::IndexDeletion { index_uid, tasks } => todo!(),
}
}
}