Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-02-23 19:15:31 +08:00)

Compare commits: 62a19fd7bd ... da158f950b (13 commits)

Commits in range: da158f950b, 42257eec53, 1beda3b9af, 8676e94f5c, ef47a0d820, e0f0da57e2, 485e3127c7, 58f90b70c7, 508db9020d, 6ff37c6fc4, f21ae1f5d1, 483c52f07b, f88f415a00
The diff threads a new `BatchEnqueuedAt { earliest, oldest }` timestamp pair through the index scheduler so a batch's enqueued-at entries can be removed from the datetime indexes without scanning. First, the snapshot helper learns about the field and asserts its ordering invariants:

```diff
@@ -1,7 +1,7 @@
 use std::collections::BTreeSet;
 use std::fmt::Write;

-use meilisearch_types::batches::Batch;
+use meilisearch_types::batches::{Batch, BatchEnqueuedAt};
 use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
 use meilisearch_types::heed::{Database, RoTxn};
 use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
@@ -341,10 +341,14 @@ pub fn snapshot_canceled_by(rtxn: &RoTxn, db: Database<BEU32, RoaringBitmapCodec

 pub fn snapshot_batch(batch: &Batch) -> String {
     let mut snap = String::new();
-    let Batch { uid, details, stats, started_at, finished_at, progress: _ } = batch;
+    let Batch { uid, details, stats, started_at, finished_at, progress: _, enqueued_at } = batch;
     if let Some(finished_at) = finished_at {
         assert!(finished_at > started_at);
     }
+    let BatchEnqueuedAt { earliest, oldest } = enqueued_at.unwrap();
+    assert!(*started_at > earliest);
+    assert!(earliest >= oldest);

     snap.push('{');
     snap.push_str(&format!("uid: {uid}, "));
     snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
```
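A note on the two new assertions, since the field names read backwards at first glance: `earliest` holds the enqueued time of the most recently enqueued task (it is updated with `max` further down in this diff) and `oldest` that of the first one, so a batch starts strictly after `earliest`, which in turn is never before `oldest`. A minimal standalone sketch of the same check, with a hypothetical stand-in type rather than the real `meilisearch_types::batches::BatchEnqueuedAt` (needs only the `time` crate):

```rust
use time::{Duration, OffsetDateTime};

// Hypothetical stand-in for BatchEnqueuedAt.
struct EnqueuedAt {
    earliest: OffsetDateTime, // enqueued time of the most recently enqueued task
    oldest: OffsetDateTime,   // enqueued time of the first task in the batch
}

// The invariant the snapshot helper asserts: the batch started only after
// its newest task was enqueued, and earliest can never precede oldest.
fn check_ordering(started_at: OffsetDateTime, enqueued_at: &EnqueuedAt) {
    assert!(started_at > enqueued_at.earliest);
    assert!(enqueued_at.earliest >= enqueued_at.oldest);
}

fn main() {
    let oldest = OffsetDateTime::now_utc();
    let earliest = oldest + Duration::seconds(5);
    let started_at = earliest + Duration::seconds(1);
    check_ordering(started_at, &EnqueuedAt { earliest, oldest });
}
```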
Next, `BatchQueue` is reworked: the import list picks up the new helpers, `write_batch` persists the field, and the hand-rolled reverse scan is replaced by direct removals with a fallback for old databases:

```diff
@@ -12,8 +12,8 @@ use time::OffsetDateTime;
 use super::{Query, Queue};
 use crate::processing::ProcessingTasks;
 use crate::utils::{
-    insert_task_datetime, keep_ids_within_datetimes, map_bound, remove_task_datetime,
-    ProcessingBatch,
+    insert_task_datetime, keep_ids_within_datetimes, map_bound,
+    remove_n_tasks_datetime_earlier_than, remove_task_datetime, ProcessingBatch,
 };
 use crate::{Error, Result, BEI128};

@@ -181,6 +181,7 @@ impl BatchQueue {
                 stats: batch.stats,
                 started_at: batch.started_at,
                 finished_at: batch.finished_at,
+                enqueued_at: batch.enqueued_at,
             },
         )?;

@@ -234,34 +235,25 @@ impl BatchQueue {
         // What we know, though, is that the task date is from before the enqueued_at, and max two timestamps have been written
         // to the DB per batches.
         if let Some(ref old_batch) = old_batch {
-            let started_at = old_batch.started_at.unix_timestamp_nanos();
-
-            // We have either one or two enqueued at to remove
-            let mut exit = old_batch.stats.total_nb_tasks.clamp(0, 2);
-            let mut iterator = self.enqueued_at.rev_iter_mut(wtxn)?;
-            while let Some(entry) = iterator.next() {
-                let (key, mut value) = entry?;
-                if key > started_at {
-                    continue;
-                }
-                if value.remove(old_batch.uid) {
-                    exit = exit.saturating_sub(1);
-                    // Safe because the key and value are owned
-                    unsafe {
-                        iterator.put_current(&key, &value)?;
-                    }
-                    if exit == 0 {
-                        break;
-                    }
-                }
-            }
-        }
-        if let Some(enqueued_at) = batch.oldest_enqueued_at {
-            insert_task_datetime(wtxn, self.enqueued_at, enqueued_at, batch.uid)?;
-        }
-        if let Some(enqueued_at) = batch.earliest_enqueued_at {
-            insert_task_datetime(wtxn, self.enqueued_at, enqueued_at, batch.uid)?;
-        }
+            if let Some(enqueued_at) = old_batch.enqueued_at {
+                remove_task_datetime(wtxn, self.enqueued_at, enqueued_at.earliest, old_batch.uid)?;
+                remove_task_datetime(wtxn, self.enqueued_at, enqueued_at.oldest, old_batch.uid)?;
+            } else {
+                // If we don't have the enqueued at in the batch it means the database comes from the v1.12
+                // and we still need to find the date by scrolling the database
+                remove_n_tasks_datetime_earlier_than(
+                    wtxn,
+                    self.enqueued_at,
+                    old_batch.started_at,
+                    old_batch.stats.total_nb_tasks.clamp(1, 2) as usize,
+                    old_batch.uid,
+                )?;
+            }
+        }
+        // A finished batch MUST contains at least one task and have an enqueued_at
+        let enqueued_at = batch.enqueued_at.as_ref().unwrap();
+        insert_task_datetime(wtxn, self.enqueued_at, enqueued_at.earliest, batch.uid)?;
+        insert_task_datetime(wtxn, self.enqueued_at, enqueued_at.oldest, batch.uid)?;

         // Update the started at and finished at
         if let Some(ref old_batch) = old_batch {
```
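The removal path no longer has to rediscover timestamps: the old code walked the enqueued-at index backwards from `started_at`, removing up to two entries by hand with `put_current`, while the new code removes `earliest` and `oldest` directly and keeps a scan only as the fallback for databases written before the field existed. The `clamp(1, 2)` bound encodes how many index entries a batch can own, as this small illustration shows (a plain function, no Meilisearch types):

```rust
// Entries a batch contributed to the enqueued-at index: one timestamp when
// it holds a single task (earliest and oldest collapse into the same key),
// two otherwise. Mirrors `total_nb_tasks.clamp(1, 2)` in the fallback path.
fn enqueued_at_entries(total_nb_tasks: u32) -> usize {
    total_nb_tasks.clamp(1, 2) as usize
}

fn main() {
    assert_eq!(enqueued_at_entries(1), 1);
    assert_eq!(enqueued_at_entries(2), 2);
    assert_eq!(enqueued_at_entries(100), 2);
}
```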
The batch-query test checks the new field and adds `enqueuedAt` to the expected JSON:

```diff
@@ -102,6 +102,8 @@ fn query_batches_simple() {
         .unwrap();
     assert_eq!(batches.len(), 1);
     batches[0].started_at = OffsetDateTime::UNIX_EPOCH;
+    assert!(batches[0].enqueued_at.is_some());
+    batches[0].enqueued_at = None;
     // Insta cannot snapshot our batches because the batch stats contains an enum as key: https://github.com/mitsuhiko/insta/issues/689
     let batch = serde_json::to_string_pretty(&batches[0]).unwrap();
     snapshot!(batch, @r#"
@@ -123,7 +125,8 @@ fn query_batches_simple() {
         }
       },
       "startedAt": "1970-01-01T00:00:00Z",
-      "finishedAt": null
+      "finishedAt": null,
+      "enqueuedAt": null
     }
     "#);
```
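The test asserts the new field is populated and then blanks it, because real enqueued timestamps change on every run and would break the inline snapshot. A small sketch of that normalize-before-snapshot pattern, with a hypothetical struct in place of the real batch view:

```rust
#[derive(Debug)]
struct BatchView {
    enqueued_at: Option<String>, // hypothetical stand-in for the real field
}

fn normalize(batch: &mut BatchView) {
    // Prove the field was populated, then blank it so the snapshot
    // stays deterministic across runs.
    assert!(batch.enqueued_at.is_some());
    batch.enqueued_at = None;
}

fn main() {
    let mut b = BatchView { enqueued_at: Some("2025-02-23T11:15:31Z".into()) };
    normalize(&mut b);
    assert!(b.enqueued_at.is_none());
}
```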
In the scheduler's task-deletion path, the imports gain the two datetime helpers, and deleting the last task of a batch now scrubs the batch from every datetime reverse index (with the same pre-v1.12 fallback) before dropping the batch itself:

```diff
@@ -2,7 +2,7 @@ use std::collections::{BTreeSet, HashMap, HashSet};
 use std::panic::{catch_unwind, AssertUnwindSafe};
 use std::sync::atomic::Ordering;

-use meilisearch_types::batches::BatchId;
+use meilisearch_types::batches::{BatchEnqueuedAt, BatchId};
 use meilisearch_types::heed::{RoTxn, RwTxn};
 use meilisearch_types::milli::progress::{Progress, VariableNameStep};
 use meilisearch_types::milli::{self};
@@ -16,7 +16,10 @@ use crate::processing::{
     InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, TaskDeletionProgress,
     UpdateIndexProgress,
 };
-use crate::utils::{self, swap_index_uid_in_task, ProcessingBatch};
+use crate::utils::{
+    self, remove_n_tasks_datetime_earlier_than, remove_task_datetime, swap_index_uid_in_task,
+    ProcessingBatch,
+};
 use crate::{Error, IndexScheduler, Result, TaskId};

 impl IndexScheduler {
@@ -418,7 +421,6 @@ impl IndexScheduler {
         to_delete_tasks -= &enqueued_tasks;

         // 2. We now have a list of tasks to delete, delete them
-
         let mut affected_indexes = HashSet::new();
         let mut affected_statuses = HashSet::new();
         let mut affected_kinds = HashSet::new();
@@ -515,9 +517,51 @@ impl IndexScheduler {
             tasks -= &to_delete_tasks;
             // We must remove the batch entirely
             if tasks.is_empty() {
+                if let Some(batch) = self.queue.batches.get_batch(wtxn, batch_id)? {
+                    if let Some(BatchEnqueuedAt { earliest, oldest }) = batch.enqueued_at {
+                        remove_task_datetime(
+                            wtxn,
+                            self.queue.batches.enqueued_at,
+                            earliest,
+                            batch_id,
+                        )?;
+                        remove_task_datetime(
+                            wtxn,
+                            self.queue.batches.enqueued_at,
+                            oldest,
+                            batch_id,
+                        )?;
+                    } else {
+                        // If we don't have the enqueued at in the batch it means the database comes from the v1.12
+                        // and we still need to find the date by scrolling the database
+                        remove_n_tasks_datetime_earlier_than(
+                            wtxn,
+                            self.queue.batches.enqueued_at,
+                            batch.started_at,
+                            batch.stats.total_nb_tasks.clamp(1, 2) as usize,
+                            batch_id,
+                        )?;
+                    }
+                    remove_task_datetime(
+                        wtxn,
+                        self.queue.batches.started_at,
+                        batch.started_at,
+                        batch_id,
+                    )?;
+                    if let Some(finished_at) = batch.finished_at {
+                        remove_task_datetime(
+                            wtxn,
+                            self.queue.batches.finished_at,
+                            finished_at,
+                            batch_id,
+                        )?;
+                    }
+
                 self.queue.batches.all_batches.delete(wtxn, &batch_id)?;
                 self.queue.batch_to_tasks_mapping.delete(wtxn, &batch_id)?;
             }
+            }

             // Anyway, we must remove the batch from all its reverse indexes.
             // The only way to do that is to check
```
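Note the cleanup order: the three datetime reverse indexes are scrubbed while the batch record is still readable, and only then are the record and its task mapping deleted. A simplified, hypothetical mirror of that sequence (not the scheduler's real API):

```rust
// Hypothetical simplified batch record; timestamps as raw nanoseconds.
struct BatchRecord {
    started_at: i128,
    finished_at: Option<i128>,
    enqueued_at: Option<(i128, i128)>, // (earliest, oldest); None on pre-v1.12 data
}

// The order of operations when a batch loses its last task.
fn cleanup_plan(batch: &BatchRecord) -> Vec<&'static str> {
    let mut plan = Vec::new();
    match batch.enqueued_at {
        Some(_) => plan.push("remove earliest and oldest from the enqueued_at index"),
        None => plan.push("scan enqueued_at backwards from started_at (pre-v1.12 fallback)"),
    }
    plan.push("remove the started_at entry");
    if batch.finished_at.is_some() {
        plan.push("remove the finished_at entry");
    }
    plan.push("delete the batch record and its batch-to-tasks mapping");
    plan
}

fn main() {
    let b = BatchRecord { started_at: 0, finished_at: None, enqueued_at: None };
    for step in cleanup_plan(&b) {
        println!("{step}");
    }
}
```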
With the new cleanup in place, a batch whose tasks were all deleted no longer leaves stale `[timestamp] [0,]` entries behind in the datetime indexes, so the scheduler test snapshots shrink accordingly:

```diff
@@ -56,16 +56,13 @@ succeeded [1,]
 ### Batches Index Tasks:
 ----------------------------------------------------------------------
 ### Batches Enqueued At:
-[timestamp] [0,]
 [timestamp] [1,]
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### Batches Started At:
-[timestamp] [0,]
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### Batches Finished At:
-[timestamp] [0,]
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### File Store:
```
A second snapshot file gets the same treatment (here batch 1 owns a single enqueued-at entry):

```diff
@@ -54,15 +54,12 @@ succeeded [1,]
 ### Batches Index Tasks:
 ----------------------------------------------------------------------
 ### Batches Enqueued At:
-[timestamp] [0,]
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### Batches Started At:
-[timestamp] [0,]
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### Batches Finished At:
-[timestamp] [0,]
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### File Store:
```
And a third, larger snapshot drops batch 0 from all three indexes:

```diff
@@ -87,7 +87,6 @@ doggo [2,3,]
 girafo [4,]
 ----------------------------------------------------------------------
 ### Batches Enqueued At:
-[timestamp] [0,]
 [timestamp] [1,]
 [timestamp] [2,]
 [timestamp] [3,]
@@ -95,7 +94,6 @@ girafo [4,]
 [timestamp] [5,]
 ----------------------------------------------------------------------
 ### Batches Started At:
-[timestamp] [0,]
 [timestamp] [1,]
 [timestamp] [2,]
 [timestamp] [3,]
@@ -103,7 +101,6 @@ girafo [4,]
 [timestamp] [5,]
 ----------------------------------------------------------------------
 ### Batches Finished At:
-[timestamp] [0,]
 [timestamp] [1,]
 [timestamp] [2,]
 [timestamp] [3,]
```
In the scheduler utils, the separate `oldest_enqueued_at`/`earliest_enqueued_at` fields on `ProcessingBatch` collapse into one optional `BatchEnqueuedAt`, and the two `map_or` folds become a single `match`:

```diff
@@ -3,7 +3,7 @@
 use std::collections::{BTreeSet, HashSet};
 use std::ops::Bound;

-use meilisearch_types::batches::{Batch, BatchId, BatchStats};
+use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchId, BatchStats};
 use meilisearch_types::heed::{Database, RoTxn, RwTxn};
 use meilisearch_types::milli::CboRoaringBitmapCodec;
 use meilisearch_types::task_view::DetailsView;
@@ -30,8 +30,7 @@ pub struct ProcessingBatch {
     pub kinds: HashSet<Kind>,
     pub indexes: HashSet<String>,
     pub canceled_by: HashSet<TaskId>,
-    pub oldest_enqueued_at: Option<OffsetDateTime>,
-    pub earliest_enqueued_at: Option<OffsetDateTime>,
+    pub enqueued_at: Option<BatchEnqueuedAt>,
     pub started_at: OffsetDateTime,
     pub finished_at: Option<OffsetDateTime>,
 }
@@ -51,8 +50,7 @@ impl ProcessingBatch {
             kinds: HashSet::default(),
             indexes: HashSet::default(),
             canceled_by: HashSet::default(),
-            oldest_enqueued_at: None,
-            earliest_enqueued_at: None,
+            enqueued_at: None,
             started_at: OffsetDateTime::now_utc(),
             finished_at: None,
         }
@@ -80,14 +78,18 @@ impl ProcessingBatch {
             if let Some(canceled_by) = task.canceled_by {
                 self.canceled_by.insert(canceled_by);
             }
-            self.oldest_enqueued_at =
-                Some(self.oldest_enqueued_at.map_or(task.enqueued_at, |oldest_enqueued_at| {
-                    task.enqueued_at.min(oldest_enqueued_at)
-                }));
-            self.earliest_enqueued_at =
-                Some(self.earliest_enqueued_at.map_or(task.enqueued_at, |earliest_enqueued_at| {
-                    task.enqueued_at.max(earliest_enqueued_at)
-                }));
+            match self.enqueued_at.as_mut() {
+                Some(BatchEnqueuedAt { earliest, oldest }) => {
+                    *oldest = task.enqueued_at.min(*oldest);
+                    *earliest = task.enqueued_at.max(*earliest);
+                }
+                None => {
+                    self.enqueued_at = Some(BatchEnqueuedAt {
+                        earliest: task.enqueued_at,
+                        oldest: task.enqueued_at,
+                    });
+                }
+            }
         }
     }
```
|
|||||||
stats: self.stats.clone(),
|
stats: self.stats.clone(),
|
||||||
started_at: self.started_at,
|
started_at: self.started_at,
|
||||||
finished_at: self.finished_at,
|
finished_at: self.finished_at,
|
||||||
|
enqueued_at: self.enqueued_at,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -174,6 +177,33 @@ pub(crate) fn remove_task_datetime(
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn remove_n_tasks_datetime_earlier_than(
|
||||||
|
wtxn: &mut RwTxn,
|
||||||
|
database: Database<BEI128, CboRoaringBitmapCodec>,
|
||||||
|
earlier_than: OffsetDateTime,
|
||||||
|
mut count: usize,
|
||||||
|
task_id: TaskId,
|
||||||
|
) -> Result<()> {
|
||||||
|
let earlier_than = earlier_than.unix_timestamp_nanos();
|
||||||
|
let mut iter = database.rev_range_mut(wtxn, &(..earlier_than))?;
|
||||||
|
while let Some((current, mut existing)) = iter.next().transpose()? {
|
||||||
|
count -= existing.remove(task_id) as usize;
|
||||||
|
|
||||||
|
if existing.is_empty() {
|
||||||
|
// safety: We don't keep references to the database
|
||||||
|
unsafe { iter.del_current()? };
|
||||||
|
} else {
|
||||||
|
// safety: We don't keep references to the database
|
||||||
|
unsafe { iter.put_current(¤t, &existing)? };
|
||||||
|
}
|
||||||
|
if count == 0 {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn keep_ids_within_datetimes(
|
pub(crate) fn keep_ids_within_datetimes(
|
||||||
rtxn: &RoTxn,
|
rtxn: &RoTxn,
|
||||||
ids: &mut RoaringBitmap,
|
ids: &mut RoaringBitmap,
|
||||||
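`rev_range_mut` visits keys strictly below `earlier_than`, newest first, so the helper touches at most `count` buckets before bailing out, and it deletes any bucket it empties. A dependency-free sketch of the same walk over a `BTreeMap` standing in for the heed database:

```rust
use std::collections::{BTreeMap, BTreeSet};

// Simplified mirror of remove_n_tasks_datetime_earlier_than: walk buckets
// strictly earlier than the given key, newest first, strip the id from up
// to `count` of them, and drop buckets that become empty.
fn remove_n_earlier_than(
    index: &mut BTreeMap<i128, BTreeSet<u32>>,
    earlier_than: i128,
    mut count: usize,
    id: u32,
) {
    // Collect candidate keys newest-first so we can mutate while iterating.
    let keys: Vec<i128> = index.range(..earlier_than).map(|(k, _)| *k).rev().collect();
    for key in keys {
        let bucket = index.get_mut(&key).unwrap();
        count -= bucket.remove(&id) as usize;
        if bucket.is_empty() {
            index.remove(&key); // mirror of del_current: no empty buckets
        }
        if count == 0 {
            break;
        }
    }
}

fn main() {
    let mut index = BTreeMap::new();
    index.insert(10, BTreeSet::from([1, 2]));
    index.insert(20, BTreeSet::from([1]));
    index.insert(30, BTreeSet::from([3]));
    remove_n_earlier_than(&mut index, 25, 2, 1);
    assert!(!index.contains_key(&20)); // bucket emptied and dropped
    assert_eq!(index[&10], BTreeSet::from([2]));
}
```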
The internal consistency check is strengthened: any task that is no longer enqueued must belong to a batch, and that batch's metadata must agree with the task's:

```diff
@@ -329,14 +359,27 @@ impl crate::IndexScheduler {
             kind,
         } = task;
         assert_eq!(uid, task.uid);
-        if let Some(ref batch) = batch_uid {
+        if task.status != Status::Enqueued {
+            let batch_uid = batch_uid.expect("All non enqueued tasks must be part of a batch");
             assert!(self
                 .queue
                 .batch_to_tasks_mapping
-                .get(&rtxn, batch)
+                .get(&rtxn, &batch_uid)
                 .unwrap()
                 .unwrap()
                 .contains(uid));
+            let batch = self.queue.batches.get_batch(&rtxn, batch_uid).unwrap().unwrap();
+            assert_eq!(batch.uid, batch_uid);
+            if task.status == Status::Processing {
+                assert!(batch.progress.is_some());
+            } else {
+                assert!(batch.progress.is_none());
+            }
+            assert_eq!(batch.started_at, task.started_at.unwrap());
+            assert_eq!(batch.finished_at, task.finished_at);
+            let enqueued_at = batch.enqueued_at.unwrap();
+            assert!(task.enqueued_at >= enqueued_at.oldest);
+            assert!(task.enqueued_at <= enqueued_at.earliest);
         }
         if let Some(task_index_uid) = &task_index_uid {
             assert!(self
```
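The strengthened check boils down to one bracket invariant for every task that has left the `Enqueued` state: the batch's `oldest` and `earliest` bounds must enclose the task's own enqueued time. In plain form:

```rust
// For any non-enqueued task: oldest <= task.enqueued_at <= earliest.
// Plain integers stand in for OffsetDateTime.
fn within_batch_bounds(task_enqueued_at: i64, oldest: i64, earliest: i64) -> bool {
    oldest <= task_enqueued_at && task_enqueued_at <= earliest
}

fn main() {
    assert!(within_batch_bounds(5, 3, 9));
    assert!(!within_batch_bounds(2, 3, 9));
}
```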
The `Batch` type itself gains the field, along with the new `BatchEnqueuedAt` struct:

```diff
@@ -24,6 +24,18 @@ pub struct Batch {
     pub started_at: OffsetDateTime,
     #[serde(with = "time::serde::rfc3339::option")]
     pub finished_at: Option<OffsetDateTime>,
+
+    // Enqueued at is never displayed and is only required when removing a batch.
+    // It's always some except when upgrading from a database pre v1.12
+    pub enqueued_at: Option<BatchEnqueuedAt>,
+}
+
+#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
+pub struct BatchEnqueuedAt {
+    #[serde(with = "time::serde::rfc3339")]
+    pub earliest: OffsetDateTime,
+    #[serde(with = "time::serde::rfc3339")]
+    pub oldest: OffsetDateTime,
 }

 #[derive(Default, Debug, Clone, Serialize, Deserialize, ToSchema)]
```
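Both new fields serialize through `time::serde::rfc3339`, so they appear as RFC 3339 strings on the wire. A minimal sketch of the resulting shape, assuming `serde`, `serde_json`, and the `time` crate with its `serde-well-known` feature (the struct below is a stand-in, not the real type):

```rust
use serde::Serialize;
use time::OffsetDateTime;

#[derive(Serialize)]
struct EnqueuedAt {
    #[serde(with = "time::serde::rfc3339")]
    earliest: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    oldest: OffsetDateTime,
}

fn main() {
    let now = OffsetDateTime::now_utc();
    let v = EnqueuedAt { earliest: now, oldest: now };
    // Prints something like:
    // {"earliest":"2025-02-23T11:15:31.1234567Z","oldest":"2025-02-23T11:15:31.1234567Z"}
    println!("{}", serde_json::to_string(&v).unwrap());
}
```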
The HTTP batch-listing tests swap their document additions for cheaper index operations that still produce a second batch (a duplicate `create` fails, an `update` succeeds), and one filter test now exercises the `IndexUpdate` kind:

```diff
@@ -41,9 +41,8 @@ async fn list_batches() {
     let index = server.index("test");
     let (task, _status_code) = index.create(None).await;
     index.wait_task(task.uid()).await.succeeded();
-    index
-        .add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
-        .await;
+    let (task, _status_code) = index.create(None).await;
+    index.wait_task(task.uid()).await.failed();
     let (response, code) = index.list_batches().await;
     assert_eq!(code, 200);
     assert_eq!(
@@ -96,11 +95,12 @@ async fn list_batches_pagination_and_reverse() {
 async fn list_batches_with_star_filters() {
     let server = Server::new().await;
     let index = server.index("test");
-    let (batch, _code) = index.create(None).await;
-    index.wait_task(batch.uid()).await.succeeded();
-    index
-        .add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
-        .await;
+    let (task, _code) = index.create(None).await;
+    index.wait_task(task.uid()).await.succeeded();
+    let index = server.index("test");
+    let (task, _code) = index.create(None).await;
+    index.wait_task(task.uid()).await.failed();

     let (response, code) = index.service.get("/batches?indexUids=test").await;
     assert_eq!(code, 200);
     assert_eq!(response["results"].as_array().unwrap().len(), 2);
@@ -187,9 +187,6 @@ async fn list_batches_invalid_canceled_by_filter() {
     let index = server.index("test");
     let (task, _status_code) = index.create(None).await;
     index.wait_task(task.uid()).await.succeeded();
-    index
-        .add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
-        .await;

     let (response, code) = index.filtered_batches(&[], &[], &["0"]).await;
     assert_eq!(code, 200, "{}", response);
@@ -202,9 +199,8 @@ async fn list_batches_status_and_type_filtered() {
     let index = server.index("test");
     let (task, _status_code) = index.create(None).await;
     index.wait_task(task.uid()).await.succeeded();
-    index
-        .add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
-        .await;
+    let (task, _status_code) = index.update(Some("id")).await;
+    index.wait_task(task.uid()).await.succeeded();

     let (response, code) = index.filtered_batches(&["indexCreation"], &["failed"], &[]).await;
     assert_eq!(code, 200, "{}", response);
@@ -212,7 +208,7 @@ async fn list_batches_status_and_type_filtered() {

     let (response, code) = index
         .filtered_batches(
-            &["indexCreation", "documentAdditionOrUpdate"],
+            &["indexCreation", "IndexUpdate"],
             &["succeeded", "processing", "enqueued"],
             &[],
         )
```
Finally, three bench workload files drop the precommand that toggled the `vectorStore` experimental feature; the hunk is identical in each (at line 12 in two files, line 13 in the third):

```diff
@@ -12,16 +12,6 @@
     }
   },
   "precommands": [
-    {
-      "route": "experimental-features",
-      "method": "PATCH",
-      "body": {
-        "inline": {
-          "vectorStore": true
-        }
-      },
-      "synchronous": "DontWait"
-    },
     {
       "route": "indexes/movies/settings",
       "method": "PATCH",
```