fix the create_new_batch method

Tamo 2022-09-09 01:40:28 +02:00 committed by Clément Renault
parent 6b9689a1c0
commit 516860f342

@@ -11,12 +11,7 @@ pub(crate) enum Batch {
     Cancel(Task),
     Snapshot(Vec<Task>),
     Dump(Vec<Task>),
-    DocumentAddition {
-        index_uid: String,
-        tasks: Vec<Task>,
-        primary_key: Option<String>,
-        content_files: Vec<Uuid>,
-    },
+    IndexSpecific { index_uid: String, kind: BatchKind },
 }
 
 impl IndexScheduler {
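
The new `IndexSpecific` variant replaces the task-carrying `DocumentAddition` variant: the batch now only records which index is targeted plus a `BatchKind` describing what the autobatcher decided to group, instead of embedding the tasks themselves. A minimal, self-contained sketch of that shape, using a made-up `BatchKind` (the real enum comes from the autobatcher and has different variants):

#[derive(Debug)]
#[allow(dead_code)]
enum BatchKind {
    // Hypothetical variants, for illustration only.
    DocumentImport { import_ids: Vec<u32> },
    DocumentDeletion { deletion_ids: Vec<u32> },
}

#[derive(Debug)]
enum Batch {
    IndexSpecific { index_uid: String, kind: BatchKind },
}

fn main() {
    // Three consecutive document additions on the same index collapse into
    // one index-specific batch instead of one batch per task.
    let batch = Batch::IndexSpecific {
        index_uid: "movies".to_string(),
        kind: BatchKind::DocumentImport { import_ids: vec![0, 1, 2] },
    };
    println!("{batch:?}");
}
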
@@ -66,28 +61,30 @@ impl IndexScheduler {
     /// 2. We get the *next* snapshot to process.
     /// 3. We get the *next* dump to process.
     /// 4. We get the *next* tasks to process for a specific index.
-    pub(crate) fn create_next_batch(&self, rtxn: &RoTxn) -> Result<Batch> {
+    pub(crate) fn create_next_batch(&self, rtxn: &RoTxn) -> Result<Option<Batch>> {
         let enqueued = &self.get_status(rtxn, Status::Enqueued)?;
         let to_cancel = self.get_kind(rtxn, Kind::CancelTask)? & enqueued;
 
         // 1. we get the last task to cancel.
         if let Some(task_id) = to_cancel.max() {
-            return Ok(Batch::Cancel(
+            return Ok(Some(Batch::Cancel(
                 self.get_task(rtxn, task_id)?
                     .ok_or(Error::CorruptedTaskQueue)?,
-            ));
+            )));
         }
 
         // 2. we batch the snapshot.
         let to_snapshot = self.get_kind(rtxn, Kind::Snapshot)? & enqueued;
         if !to_snapshot.is_empty() {
-            return Ok(Batch::Snapshot(self.get_existing_tasks(rtxn, to_snapshot)?));
+            return Ok(Some(Batch::Snapshot(
+                self.get_existing_tasks(rtxn, to_snapshot)?,
+            )));
         }
 
         // 3. we batch the dumps.
         let to_dump = self.get_kind(rtxn, Kind::DumpExport)? & enqueued;
         if !to_dump.is_empty() {
-            return Ok(Some(Batch::Dump(self.get_existing_tasks(rtxn, to_dump)?)));
+            return Ok(Some(Batch::Dump(self.get_existing_tasks(rtxn, to_dump)?)));
         }
 
         // 4. We take the next task and try to batch all the tasks associated with this index.
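
Steps 1 through 3 now wrap their result in `Some(..)` because the signature changed from `Result<Batch>` to `Result<Option<Batch>>`: "nothing to batch" is reported as `Ok(None)` rather than through a dedicated `Batch::Empty` variant (see the end of this diff). A toy, self-contained sketch of that calling convention; every name except `Batch` and `create_next_batch` is invented for illustration:

#[derive(Debug)]
enum Batch {
    Cancel(u32),
}

// Stand-in for the scheduler: return the highest enqueued task id as a cancel
// batch, or None when there is nothing to do.
fn create_next_batch(enqueued: &[u32]) -> Result<Option<Batch>, String> {
    Ok(enqueued.iter().max().map(|&id| Batch::Cancel(id)))
}

fn main() -> Result<(), String> {
    for queue in [vec![1, 2, 3], vec![]] {
        match create_next_batch(&queue)? {
            Some(batch) => println!("process {batch:?}"),
            None => println!("nothing to batch, go back to sleep"),
        }
    }
    Ok(())
}
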
@@ -95,36 +92,34 @@ impl IndexScheduler {
             let task = self
                 .get_task(rtxn, task_id)?
                 .ok_or(Error::CorruptedTaskQueue)?;
 
-            match task.kind {
-                // We can batch all the consecutive tasks coming next which
-                // have the kind `DocumentAddition`.
-                KindWithContent::DocumentAddition { index_name, .. } => {
-                    return self.batch_contiguous_kind(rtxn, &index_name, Kind::DocumentAddition)
-                }
-                // We can batch all the consecutive tasks coming next which
-                // have the kind `DocumentDeletion`.
-                KindWithContent::DocumentDeletion { index_name, .. } => {
-                    return self.batch_contiguous_kind(rtxn, &index_name, Kind::DocumentAddition)
-                }
-                // The following tasks can't be batched
-                KindWithContent::ClearAllDocuments { .. }
-                | KindWithContent::RenameIndex { .. }
-                | KindWithContent::CreateIndex { .. }
-                | KindWithContent::DeleteIndex { .. }
-                | KindWithContent::SwapIndex { .. } => return Ok(Batch::One(task)),
-                // The following tasks have already been batched and thus can't appear here.
-                KindWithContent::CancelTask { .. }
-                | KindWithContent::DumpExport { .. }
-                | KindWithContent::Snapshot => {
-                    unreachable!()
-                }
-            }
+            // This is safe because all the remaining task are associated with
+            // AT LEAST one index. We can use the right or left one it doesn't
+            // matter.
+            let index_name = task.indexes().unwrap()[0];
+
+            let index = self.get_index(rtxn, &index_name)? & enqueued;
+
+            let enqueued = enqueued
+                .into_iter()
+                .map(|task_id| {
+                    self.get_task(rtxn, task_id)
+                        .and_then(|task| task.ok_or(Error::CorruptedTaskQueue))
+                        .map(|task| (task.uid, task.kind.as_kind()))
+                })
+                .collect::<Result<Vec<_>>>()?;
+
+            return Ok(
+                autobatcher(enqueued).map(|batch_kind| Batch::IndexSpecific {
+                    index_uid: index_name.to_string(),
+                    kind: batch_kind,
+                }),
+            );
         }
 
         // If we found no tasks then we were notified for something that got autobatched
         // somehow and there is nothing to do.
-        Ok(Batch::Empty)
+        Ok(None)
     }
 
     /// Batch all the consecutive tasks coming next that shares the same `Kind`
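
The `autobatcher(enqueued)` call above is only visible at its call site in this diff: it receives the enqueued `(task id, kind)` pairs and decides which `BatchKind`, if any, they collapse into. A rough sketch of that kind of folding, with simplified stand-in types (the real autobatcher in this crate follows its own, more involved rules):

type TaskId = u32;

#[derive(Clone, Copy, PartialEq, Debug)]
enum Kind {
    DocumentAddition,
    DocumentDeletion,
}

#[derive(Debug)]
enum BatchKind {
    DocumentImport { import_ids: Vec<TaskId> },
    DocumentDeletion { deletion_ids: Vec<TaskId> },
}

// Collapse the longest run of consecutive tasks sharing the first task's kind
// into a single BatchKind. Returns None on an empty queue.
fn autobatch(enqueued: &[(TaskId, Kind)]) -> Option<BatchKind> {
    let first_kind = enqueued.first()?.1;
    let ids: Vec<TaskId> = enqueued
        .iter()
        .take_while(|(_, kind)| *kind == first_kind)
        .map(|(id, _)| *id)
        .collect();
    match first_kind {
        Kind::DocumentAddition => Some(BatchKind::DocumentImport { import_ids: ids }),
        Kind::DocumentDeletion => Some(BatchKind::DocumentDeletion { deletion_ids: ids }),
    }
}

fn main() {
    let queue = [
        (0, Kind::DocumentAddition),
        (1, Kind::DocumentAddition),
        (2, Kind::DocumentDeletion), // different kind: ends the run, left for a later batch
    ];
    // Prints: Some(DocumentImport { import_ids: [0, 1] })
    println!("{:?}", autobatch(&queue));
}
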