Display the total number of tasks in the tasks route

Clément Renault 2023-07-05 10:58:10 +02:00
parent 377fe33aac
commit 86b834c9e4
2 changed files with 41 additions and 30 deletions

@@ -138,6 +138,12 @@ impl Query {
         index_vec.push(index_uid);
         Self { index_uids: Some(index_vec), ..self }
     }
+
+    // Removes the `from` and `limit` restrictions from the query.
+    // Useful to get the total number of tasks matching a filter.
+    pub fn without_limits(self) -> Self {
+        Query { limit: None, from: None, ..self }
+    }
 }
 
 #[derive(Debug, Clone)]
@@ -822,7 +828,8 @@ impl IndexScheduler {
         Ok(nbr_index_processing_tasks > 0)
     }
 
-    /// Return the task ids matching the query from the user's point of view.
+    /// Return the task ids matching the query along with the total number of tasks
+    /// by ignoring the from and limit parameters from the user's point of view.
     ///
     /// There are two differences between an internal query and a query executed by
     /// the user.
@@ -835,7 +842,13 @@ impl IndexScheduler {
         rtxn: &RoTxn,
         query: &Query,
         filters: &meilisearch_auth::AuthFilter,
-    ) -> Result<RoaringBitmap> {
+    ) -> Result<(RoaringBitmap, u64)> {
+        // compute all tasks matching the filter by ignoring the limits, to find the number of tasks matching
+        // the filter.
+        // As this causes us to compute the filter twice it is slightly inefficient, but doing it this way spares
+        // us from modifying the underlying implementation, and the performance remains sufficient.
+        // Should this change, we would modify `get_task_ids` to directly return the number of matching tasks.
+        let total_tasks = self.get_task_ids(rtxn, &query.clone().without_limits())?;
         let mut tasks = self.get_task_ids(rtxn, query)?;
 
         // If the query contains a list of index uid or there is a finite list of authorized indexes,
@@ -858,10 +871,11 @@
             }
         }
 
-        Ok(tasks)
+        Ok((tasks, total_tasks.len()))
     }
 
-    /// Return the tasks matching the query from the user's point of view.
+    /// Return the tasks matching the query from the user's point of view along
+    /// with the total number of tasks matching the query, ignoring from and limit.
     ///
     /// There are two differences between an internal query and a query executed by
     /// the user.
@@ -873,11 +887,10 @@
         &self,
         query: Query,
         filters: &meilisearch_auth::AuthFilter,
-    ) -> Result<Vec<Task>> {
+    ) -> Result<(Vec<Task>, u64)> {
         let rtxn = self.env.read_txn()?;
 
-        let tasks = self.get_task_ids_from_authorized_indexes(&rtxn, &query, filters)?;
-
+        let (tasks, total) = self.get_task_ids_from_authorized_indexes(&rtxn, &query, filters)?;
         let tasks = self.get_existing_tasks(
             &rtxn,
             tasks.into_iter().rev().take(query.limit.unwrap_or(u32::MAX) as usize),
@@ -888,16 +901,19 @@
         let ret = tasks.into_iter();
         if processing.is_empty() {
-            Ok(ret.collect())
+            Ok((ret.collect(), total))
         } else {
-            Ok(ret
-                .map(|task| match processing.contains(task.uid) {
-                    true => {
-                        Task { status: Status::Processing, started_at: Some(started_at), ..task }
-                    }
-                    false => task,
-                })
-                .collect())
+            Ok((
+                ret.map(|task| {
+                    if processing.contains(task.uid) {
+                        Task { status: Status::Processing, started_at: Some(started_at), ..task }
+                    } else {
+                        task
+                    }
+                })
+                .collect(),
+                total,
+            ))
         }
     }
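
The comment in the hunk above sums up the approach: evaluate the same filter twice, once with `from` and `limit` stripped (via the new `without_limits`) to count every match, and once as-is to fetch the page. A minimal sketch of that pattern in isolation, using the `roaring` crate the scheduler already builds on; `TaskStore`, `matching_ids`, and `page_with_total` are hypothetical names, not scheduler APIs:

```rust
use roaring::RoaringBitmap;

/// Hypothetical stand-in for the scheduler's task-id index.
struct TaskStore {
    ids: RoaringBitmap,
}

impl TaskStore {
    /// Ids matching `pred`, truncated to `limit` tasks when one is given.
    fn matching_ids(&self, pred: impl Fn(u32) -> bool, limit: Option<usize>) -> RoaringBitmap {
        let matching = self.ids.iter().filter(|&id| pred(id));
        match limit {
            Some(n) => matching.take(n).collect(),
            None => matching.collect(),
        }
    }

    /// The pattern from the patch: run the same filter once without limits
    /// to get the total, then once more with the limit for the actual page.
    fn page_with_total(
        &self,
        pred: impl Fn(u32) -> bool + Copy,
        limit: usize,
    ) -> (RoaringBitmap, u64) {
        let total = self.matching_ids(pred, None).len();
        let page = self.matching_ids(pred, Some(limit));
        (page, total)
    }
}

fn main() {
    let store = TaskStore { ids: (0..100).collect() };
    // Page of at most 10 even task ids, plus the total count of all 50 matches.
    let (page, total) = store.page_with_total(|id| id % 2 == 0, 10);
    assert_eq!((page.len(), total), (10, 50));
}
```

Filtering twice doubles that part of the work, but as the hunk's comment notes it keeps `get_task_ids` untouched; if the cost ever mattered, `get_task_ids` could return the count directly.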

@@ -325,7 +325,7 @@ async fn cancel_tasks(
     let query = params.into_query();
 
-    let tasks = index_scheduler.get_task_ids_from_authorized_indexes(
+    let (tasks, _) = index_scheduler.get_task_ids_from_authorized_indexes(
         &index_scheduler.read_txn()?,
         &query,
         index_scheduler.filters(),
@@ -370,7 +370,7 @@ async fn delete_tasks(
     );
     let query = params.into_query();
 
-    let tasks = index_scheduler.get_task_ids_from_authorized_indexes(
+    let (tasks, _) = index_scheduler.get_task_ids_from_authorized_indexes(
         &index_scheduler.read_txn()?,
         &query,
         index_scheduler.filters(),
@@ -387,6 +387,7 @@ async fn delete_tasks(
 #[derive(Debug, Serialize)]
 pub struct AllTasks {
     results: Vec<TaskView>,
+    total: u64,
     limit: u32,
     from: Option<u32>,
     next: Option<u32>,
@@ -406,23 +407,17 @@ async fn get_tasks(
     let limit = params.limit.0;
     let query = params.into_query();
 
-    let mut tasks_results: Vec<TaskView> = index_scheduler
-        .get_tasks_from_authorized_indexes(query, index_scheduler.filters())?
-        .into_iter()
-        .map(|t| TaskView::from_task(&t))
-        .collect();
+    let filters = index_scheduler.filters();
+    let (tasks, total) = index_scheduler.get_tasks_from_authorized_indexes(query, filters)?;
+    let mut results: Vec<_> = tasks.iter().map(TaskView::from_task).collect();
 
     // If we were able to fetch the number +1 tasks we asked
     // it means that there is more to come.
-    let next = if tasks_results.len() == limit as usize {
-        tasks_results.pop().map(|t| t.uid)
-    } else {
-        None
-    };
+    let next = if results.len() == limit as usize { results.pop().map(|t| t.uid) } else { None };
 
-    let from = tasks_results.first().map(|t| t.uid);
-
-    let tasks = AllTasks { results: tasks_results, limit: limit.saturating_sub(1), from, next };
+    let from = results.first().map(|t| t.uid);
+    let tasks = AllTasks { results, limit: limit.saturating_sub(1), total, from, next };
 
     Ok(HttpResponse::Ok().json(tasks))
 }
@@ -444,10 +439,10 @@ async fn get_task(
     analytics.publish("Tasks Seen".to_string(), json!({ "per_task_uid": true }), Some(&req));
 
     let query = index_scheduler::Query { uids: Some(vec![task_uid]), ..Query::default() };
+    let filters = index_scheduler.filters();
+    let (tasks, _) = index_scheduler.get_tasks_from_authorized_indexes(query, filters)?;
 
-    if let Some(task) =
-        index_scheduler.get_tasks_from_authorized_indexes(query, index_scheduler.filters())?.first()
-    {
+    if let Some(task) = tasks.first() {
         let task_view = TaskView::from_task(task);
         Ok(HttpResponse::Ok().json(task_view))
     } else {
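
For reference, the reshaped payload can be exercised on its own. A sketch assuming `serde` and `serde_json`, with `TaskView` reduced to a bare uid for brevity (the values are illustrative, not real scheduler output):

```rust
use serde::Serialize;

// Reduced stand-in for the route's TaskView; the real type has many more fields.
#[derive(Serialize)]
struct TaskView {
    uid: u32,
}

// Mirrors the fields of the route's AllTasks after this commit.
#[derive(Serialize)]
struct AllTasks {
    results: Vec<TaskView>,
    total: u64,
    limit: u32,
    from: Option<u32>,
    next: Option<u32>,
}

fn main() {
    let page = AllTasks {
        results: vec![TaskView { uid: 12 }, TaskView { uid: 11 }],
        total: 50, // every task matching the filter, ignoring `from` and `limit`
        limit: 2,
        from: Some(12),
        next: Some(10),
    };
    // Prints: {"results":[{"uid":12},{"uid":11}],"total":50,"limit":2,"from":12,"next":10}
    println!("{}", serde_json::to_string(&page).unwrap());
}
```

Field order in the JSON follows the struct declaration, so `total` appears right after `results`, mirroring the `AllTasks` definition in this diff.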