Mirror of https://github.com/meilisearch/meilisearch.git, synced 2024-12-02 01:55:03 +08:00
Merge #3673
3673: Handle the task queue being full r=irevoire a=dureuill

# Pull Request

## Related issue

Fixes a remaining issue with #3659 where it was not always possible to send new tasks, even after deleting some tasks when prompted to.

## Tests

- See the integration test.
- Also manually tested with a 1MiB task queue: becoming unblocked was not possible before this PR and is possible after it.

## What does this PR do?

- Use the `non_free_pages_size` method to compute the space occupied by the task db, instead of `real_disk_size`, which is not always affected by task deletion.
- Expand the test so that it adds a task after the deletion. The test fails before this PR and succeeds after it.

Co-authored-by: Louis Dureuil <louis@meilisearch.com>
Commit 4b953d62fb
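The heart of the change is the metric that decides when the queue counts as full. Below is a minimal sketch of the corrected check, assuming a heed (LMDB) `Env` like the one backing the task queue; the helper name is hypothetical, while `non_free_pages_size`, `real_disk_size`, and `map_size` are the methods that appear in the diff that follows.

```rust
use heed::Env;

// Hypothetical helper mirroring the guard in the first hunk below.
fn task_queue_is_half_full(env: &Env) -> heed::Result<bool> {
    // Bytes occupied by pages currently in use. Unlike `real_disk_size()`,
    // which reports the size of the database file, this value drops again
    // when tasks are deleted.
    let used = env.non_free_pages_size()?;
    // The fixed capacity the environment was opened with.
    let capacity = env.map_size()? as u64;
    // Multiply before dividing so the integer ratio keeps its precision.
    Ok(used * 100 / capacity > 50)
}
```

Before this fix, the same ratio was computed from `real_disk_size()`. LMDB reuses freed pages but never truncates its data file, so that size could stay above the 50% threshold even after every task had been deleted, leaving the scheduler permanently refusing new tasks.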
```diff
@@ -822,7 +822,7 @@ impl IndexScheduler {
         // if the task doesn't delete anything and 50% of the task queue is full, we must refuse to enqueue the incoming task
         if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } if !tasks.is_empty())
-            && (self.env.real_disk_size()? * 100) / self.env.map_size()? as u64 > 50
+            && (self.env.non_free_pages_size()? * 100) / self.env.map_size()? as u64 > 50
         {
             return Err(Error::NoSpaceLeftInTaskQueue);
         }
 
```
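The other half of the guard is the escape hatch: a `TaskDeletion` whose resolved `tasks` set is non-empty is always let through, because it is the only kind of task that can free space. Spelled out with an intermediate variable (purely illustrative; the names come from the hunk above, and `task_queue_is_half_full` is the hypothetical helper sketched earlier):

```rust
// `matches!` with a pattern guard: true only for a deletion that matched
// at least one existing task and can therefore free queue pages.
let is_freeing_deletion =
    matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } if !tasks.is_empty());

// Every other task is refused once more than half of the map is in use.
if !is_freeing_deletion && task_queue_is_half_full(&self.env)? {
    return Err(Error::NoSpaceLeftInTaskQueue);
}
```

This also sheds light on the test change from `uids=0` to `uids=*` below, presumably so that enough pages are actually freed: deleting a single task would leave the queue above the 50% threshold, while `uids=*` clears it entirely, letting the expanded test enqueue a new task afterwards.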
```diff
@@ -1050,7 +1050,7 @@ async fn test_task_queue_is_full() {
     "###);
 
     // But we should still be able to register tasks deletion IF they delete something
-    let (result, code) = server.delete_tasks("uids=0").await;
+    let (result, code) = server.delete_tasks("uids=*").await;
     snapshot!(code, @"200 OK");
     snapshot!(json_string!(result, { ".enqueuedAt" => "[date]", ".taskUid" => "uid" }), @r###"
     {
```
```diff
@@ -1062,6 +1062,22 @@ async fn test_task_queue_is_full() {
     }
     "###);
 
+    let result = server.wait_task(result["taskUid"].as_u64().unwrap()).await;
+    snapshot!(json_string!(result["status"]), @r###""succeeded""###);
+
+    // Now we should be able to register tasks again
+    let (result, code) = server.create_index(json!({ "uid": "doggo" })).await;
+    snapshot!(code, @"202 Accepted");
+    snapshot!(json_string!(result, { ".enqueuedAt" => "[date]", ".taskUid" => "uid" }), @r###"
+    {
+      "taskUid": "uid",
+      "indexUid": "doggo",
+      "status": "enqueued",
+      "type": "indexCreation",
+      "enqueuedAt": "[date]"
+    }
+    "###);
+
     // we're going to fill up the queue once again
     loop {
         let (res, code) = server.delete_tasks("uids=0").await;
```