Mirror of https://github.com/meilisearch/meilisearch.git, synced 2024-11-25 03:25:06 +08:00

Compare commits: d09a99c0b7 ... 7679de5138 (14 commits)
Commits:
7679de5138
c1d8ee2a8d
94fb55bb6f
009709eace
2eb1801e85
a5d7ae23bd
03886d0012
b427b9e88f
8b95f5ccc6
da59a043ba
da4d47b5d0
d0b1ba20cb
c79ca9679b
a934b0ac6a
.github/workflows/flaky-tests.yml (vendored, 8 changed lines)
@@ -21,10 +21,10 @@ jobs:
       - name: Install cargo-flaky
         run: cargo install cargo-flaky
       - name: Run cargo flaky in the dumps
-        run: cd dump; cargo flaky -i 100 --release
+        run: cd crates/dump; cargo flaky -i 100 --release
       - name: Run cargo flaky in the index-scheduler
-        run: cd index-scheduler; cargo flaky -i 100 --release
+        run: cd crates/index-scheduler; cargo flaky -i 100 --release
      - name: Run cargo flaky in the auth
-        run: cd meilisearch-auth; cargo flaky -i 100 --release
+        run: cd crates/meilisearch-auth; cargo flaky -i 100 --release
      - name: Run cargo flaky in meilisearch
-        run: cd meilisearch; cargo flaky -i 100 --release
+        run: cd crates/meilisearch; cargo flaky -i 100 --release
@@ -85,6 +85,8 @@ pub struct Query {
     pub limit: Option<u32>,
     /// The minimum [task id](`meilisearch_types::tasks::Task::uid`) to be matched
     pub from: Option<u32>,
+    /// The order used to return the tasks. By default the newest tasks are returned first and the boolean is `false`.
+    pub reverse: Option<bool>,
     /// The allowed [statuses](`meilisearch_types::tasks::Task::status`) of the matched tasls
     pub statuses: Option<Vec<Status>>,
     /// The allowed [kinds](meilisearch_types::tasks::Kind) of the matched tasks.
@@ -127,6 +129,7 @@ impl Query {
         Query {
             limit: None,
             from: None,
+            reverse: None,
             statuses: None,
             types: None,
             index_uids: None,
@@ -718,7 +721,12 @@ impl IndexScheduler {
         let mut tasks = self.all_task_ids(rtxn)?;

         if let Some(from) = &query.from {
-            tasks.remove_range(from.saturating_add(1)..);
+            let range = if query.reverse.unwrap_or_default() {
+                u32::MIN..*from
+            } else {
+                from.saturating_add(1)..u32::MAX
+            };
+            tasks.remove_range(range);
         }

         if let Some(status) = &query.statuses {
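
The two `remove_range` calls mirror each other: with `reverse` unset the scheduler keeps ids up to `from` (newest first), while `reverse=true` keeps ids starting at `from`. A standalone sketch of the same trimming, using a plain BTreeSet in place of the scheduler's id bitmap (names here are illustrative, not the scheduler's):

use std::collections::BTreeSet;

/// Keep only the task ids that can still be returned, given `from` and
/// `reverse`. Mirrors the `remove_range` logic above on a BTreeSet.
fn trim_from(tasks: &mut BTreeSet<u32>, from: Option<u32>, reverse: bool) {
    if let Some(from) = from {
        if reverse {
            // Ascending order: drop everything strictly below `from`.
            tasks.retain(|&id| id >= from);
        } else {
            // Descending order (default): drop everything strictly above `from`.
            tasks.retain(|&id| id <= from);
        }
    }
}

fn main() {
    let mut tasks: BTreeSet<u32> = (0..10).collect();
    trim_from(&mut tasks, Some(7), false);
    assert_eq!(tasks.into_iter().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4, 5, 6, 7]);

    let mut tasks: BTreeSet<u32> = (0..10).collect();
    trim_from(&mut tasks, Some(7), true);
    assert_eq!(tasks.into_iter().collect::<Vec<_>>(), vec![7, 8, 9]);
}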
@@ -838,7 +846,11 @@ impl IndexScheduler {
         )?;

         if let Some(limit) = query.limit {
-            tasks = tasks.into_iter().rev().take(limit as usize).collect();
+            tasks = if query.reverse.unwrap_or_default() {
+                tasks.into_iter().take(limit as usize).collect()
+            } else {
+                tasks.into_iter().rev().take(limit as usize).collect()
+            };
         }

         Ok(tasks)
@@ -963,10 +975,13 @@ impl IndexScheduler {
         let rtxn = self.env.read_txn()?;

         let (tasks, total) = self.get_task_ids_from_authorized_indexes(&rtxn, &query, filters)?;
-        let tasks = self.get_existing_tasks(
-            &rtxn,
-            tasks.into_iter().rev().take(query.limit.unwrap_or(u32::MAX) as usize),
-        )?;
+        let tasks = if query.reverse.unwrap_or_default() {
+            Box::new(tasks.into_iter()) as Box<dyn Iterator<Item = u32>>
+        } else {
+            Box::new(tasks.into_iter().rev()) as Box<dyn Iterator<Item = u32>>
+        };
+        let tasks =
+            self.get_existing_tasks(&rtxn, tasks.take(query.limit.unwrap_or(u32::MAX) as usize))?;

         let ProcessingTasks { started_at, processing, progress, .. } =
             self.processing_tasks.read().map_err(|_| Error::CorruptedTaskQueue)?.clone();
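
`into_iter()` and `into_iter().rev()` have different concrete types, so the branch results are erased behind `Box<dyn Iterator>` before the shared `take(limit)` is applied. A minimal self-contained sketch of that pattern, with illustrative names:

// The if/else yields one of two iterator types, so each branch is boxed
// as a trait object; the common `take` then works on either.
fn first_ids(reverse: bool, limit: usize, ids: Vec<u32>) -> Vec<u32> {
    let iter = if reverse {
        Box::new(ids.into_iter()) as Box<dyn Iterator<Item = u32>>
    } else {
        Box::new(ids.into_iter().rev()) as Box<dyn Iterator<Item = u32>>
    };
    iter.take(limit).collect()
}

fn main() {
    assert_eq!(first_ids(false, 3, vec![1, 2, 3, 4, 5]), vec![5, 4, 3]);
    assert_eq!(first_ids(true, 3, vec![1, 2, 3, 4, 5]), vec![1, 2, 3]);
}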
@@ -318,6 +318,7 @@ InvalidTaskBeforeStartedAt , InvalidRequest , BAD_REQUEST ;
 InvalidTaskCanceledBy , InvalidRequest , BAD_REQUEST ;
 InvalidTaskFrom , InvalidRequest , BAD_REQUEST ;
 InvalidTaskLimit , InvalidRequest , BAD_REQUEST ;
+InvalidTaskReverse , InvalidRequest , BAD_REQUEST ;
 InvalidTaskStatuses , InvalidRequest , BAD_REQUEST ;
 InvalidTaskTypes , InvalidRequest , BAD_REQUEST ;
 InvalidTaskUids , InvalidRequest , BAD_REQUEST ;
@@ -49,4 +49,18 @@ lazy_static! {
     pub static ref MEILISEARCH_IS_INDEXING: IntGauge =
         register_int_gauge!(opts!("meilisearch_is_indexing", "Meilisearch Is Indexing"))
             .expect("Can't create a metric");
+    pub static ref MEILISEARCH_SEARCH_QUEUE_SIZE: IntGauge = register_int_gauge!(opts!(
+        "meilisearch_search_queue_size",
+        "Meilisearch Search Queue Size"
+    ))
+    .expect("Can't create a metric");
+    pub static ref MEILISEARCH_SEARCHES_RUNNING: IntGauge =
+        register_int_gauge!(opts!("meilisearch_searches_running", "Meilisearch Searches Running"))
+            .expect("Can't create a metric");
+    pub static ref MEILISEARCH_SEARCHES_WAITING_TO_BE_PROCESSED: IntGauge =
+        register_int_gauge!(opts!(
+            "meilisearch_searches_waiting_to_be_processed",
+            "Meilisearch Searches Being Processed"
+        ))
+        .expect("Can't create a metric");
 }
@@ -10,6 +10,7 @@ use prometheus::{Encoder, TextEncoder};
 use crate::extractors::authentication::policies::ActionPolicy;
 use crate::extractors::authentication::{AuthenticationError, GuardedData};
 use crate::routes::create_all_stats;
+use crate::search_queue::SearchQueue;

 pub fn configure(config: &mut web::ServiceConfig) {
     config.service(web::resource("").route(web::get().to(get_metrics)));
@@ -18,6 +19,7 @@ pub fn configure(config: &mut web::ServiceConfig) {
 pub async fn get_metrics(
     index_scheduler: GuardedData<ActionPolicy<{ actions::METRICS_GET }>, Data<IndexScheduler>>,
     auth_controller: Data<AuthController>,
+    search_queue: web::Data<SearchQueue>,
 ) -> Result<HttpResponse, ResponseError> {
     index_scheduler.features().check_metrics()?;
     let auth_filters = index_scheduler.filters();
@@ -35,6 +37,11 @@ pub async fn get_metrics(
     crate::metrics::MEILISEARCH_USED_DB_SIZE_BYTES.set(response.used_database_size as i64);
     crate::metrics::MEILISEARCH_INDEX_COUNT.set(response.indexes.len() as i64);

+    crate::metrics::MEILISEARCH_SEARCH_QUEUE_SIZE.set(search_queue.capacity() as i64);
+    crate::metrics::MEILISEARCH_SEARCHES_RUNNING.set(search_queue.searches_running() as i64);
+    crate::metrics::MEILISEARCH_SEARCHES_WAITING_TO_BE_PROCESSED
+        .set(search_queue.searches_waiting() as i64);
+
     for (index, value) in response.indexes.iter() {
         crate::metrics::MEILISEARCH_INDEX_DOCS_COUNT
             .with_label_values(&[index])
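
The wiring above follows a scrape-time pattern: counts are maintained elsewhere and copied into prometheus gauges when `/metrics` is rendered. A rough standalone sketch, assuming the prometheus crate's default registry and using illustrative metric names:

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

use prometheus::{opts, register_int_gauge, Encoder, IntGauge, TextEncoder};

fn main() {
    // The live count lives in an atomic that the search code updates...
    let searches_running = Arc::new(AtomicUsize::new(0));
    searches_running.fetch_add(1, Ordering::Relaxed);

    // ...and the gauge is only refreshed at scrape time.
    let gauge: IntGauge =
        register_int_gauge!(opts!("searches_running", "Searches Running")).unwrap();
    gauge.set(searches_running.load(Ordering::Relaxed) as i64);

    // Encode the default registry the way a /metrics handler would.
    let mut buffer = Vec::new();
    TextEncoder::new().encode(&prometheus::gather(), &mut buffer).unwrap();
    println!("{}", String::from_utf8(buffer).unwrap());
}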
@@ -42,6 +42,8 @@ pub struct TasksFilterQuery {
     pub limit: Param<u32>,
     #[deserr(default, error = DeserrQueryParamError<InvalidTaskFrom>)]
     pub from: Option<Param<TaskId>>,
+    #[deserr(default, error = DeserrQueryParamError<InvalidTaskReverse>)]
+    pub reverse: Option<Param<bool>>,

     #[deserr(default, error = DeserrQueryParamError<InvalidTaskUids>)]
     pub uids: OptionStarOrList<u32>,
@@ -73,6 +75,7 @@ impl TasksFilterQuery {
         Query {
             limit: Some(self.limit.0),
             from: self.from.as_deref().copied(),
+            reverse: self.reverse.as_deref().copied(),
             statuses: self.statuses.merge_star_and_none(),
             types: self.types.merge_star_and_none(),
             index_uids: self.index_uids.map(|x| x.to_string()).merge_star_and_none(),
@@ -142,6 +145,7 @@ impl TaskDeletionOrCancelationQuery {
         Query {
             limit: None,
             from: None,
+            reverse: None,
             statuses: self.statuses.merge_star_and_none(),
             types: self.types.merge_star_and_none(),
             index_uids: self.index_uids.map(|x| x.to_string()).merge_star_and_none(),
@@ -701,14 +705,14 @@ mod tests {
     {
         let params = "from=12&limit=15&indexUids=toto,tata-78&statuses=succeeded,enqueued&afterEnqueuedAt=2012-04-23&uids=1,2,3";
         let query = deserr_query_params::<TasksFilterQuery>(params).unwrap();
-        snapshot!(format!("{:?}", query), @r###"TasksFilterQuery { limit: Param(15), from: Some(Param(12)), uids: List([1, 2, 3]), canceled_by: None, types: None, statuses: List([Succeeded, Enqueued]), index_uids: List([IndexUid("toto"), IndexUid("tata-78")]), after_enqueued_at: Other(2012-04-24 0:00:00.0 +00:00:00), before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }"###);
+        snapshot!(format!("{:?}", query), @r###"TasksFilterQuery { limit: Param(15), from: Some(Param(12)), reverse: None, uids: List([1, 2, 3]), canceled_by: None, types: None, statuses: List([Succeeded, Enqueued]), index_uids: List([IndexUid("toto"), IndexUid("tata-78")]), after_enqueued_at: Other(2012-04-24 0:00:00.0 +00:00:00), before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }"###);
     }
     {
         // Stars should translate to `None` in the query
         // Verify value of the default limit
         let params = "indexUids=*&statuses=succeeded,*&afterEnqueuedAt=2012-04-23&uids=1,2,3";
         let query = deserr_query_params::<TasksFilterQuery>(params).unwrap();
-        snapshot!(format!("{:?}", query), @"TasksFilterQuery { limit: Param(20), from: None, uids: List([1, 2, 3]), canceled_by: None, types: None, statuses: Star, index_uids: Star, after_enqueued_at: Other(2012-04-24 0:00:00.0 +00:00:00), before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }");
+        snapshot!(format!("{:?}", query), @"TasksFilterQuery { limit: Param(20), from: None, reverse: None, uids: List([1, 2, 3]), canceled_by: None, types: None, statuses: Star, index_uids: Star, after_enqueued_at: Other(2012-04-24 0:00:00.0 +00:00:00), before_enqueued_at: None, after_started_at: None, before_started_at: None, after_finished_at: None, before_finished_at: None }");
     }
     {
         // Stars should also translate to `None` in task deletion/cancelation queries
@@ -18,6 +18,8 @@
 //! And should drop the Permit only once you have freed all the RAM consumed by the method.

 use std::num::NonZeroUsize;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
 use std::time::Duration;

 use rand::rngs::StdRng;
@@ -33,6 +35,8 @@ pub struct SearchQueue {
     /// If we have waited longer than this to get a permit, we should abort the search request entirely.
     /// The client probably already closed the connection, but we have no way to find out.
     time_to_abort: Duration,
+    searches_running: Arc<AtomicUsize>,
+    searches_waiting_to_be_processed: Arc<AtomicUsize>,
 }

 /// You should only run search requests while holding this permit.
@@ -68,14 +72,41 @@ impl SearchQueue {
         // so let's not allocate any RAM and keep a capacity of 1.
         let (sender, receiver) = mpsc::channel(1);

-        tokio::task::spawn(Self::run(capacity, paralellism, receiver));
-        Self { sender, capacity, time_to_abort: Duration::from_secs(60) }
+        let instance = Self {
+            sender,
+            capacity,
+            time_to_abort: Duration::from_secs(60),
+            searches_running: Default::default(),
+            searches_waiting_to_be_processed: Default::default(),
+        };
+
+        tokio::task::spawn(Self::run(
+            capacity,
+            paralellism,
+            receiver,
+            Arc::clone(&instance.searches_running),
+            Arc::clone(&instance.searches_waiting_to_be_processed),
+        ));
+
+        instance
     }

     pub fn with_time_to_abort(self, time_to_abort: Duration) -> Self {
         Self { time_to_abort, ..self }
     }

+    pub fn capacity(&self) -> usize {
+        self.capacity
+    }
+
+    pub fn searches_running(&self) -> usize {
+        self.searches_running.load(Ordering::Relaxed)
+    }
+
+    pub fn searches_waiting(&self) -> usize {
+        self.searches_waiting_to_be_processed.load(Ordering::Relaxed)
+    }
+
     /// This function is the main loop, it's in charge on scheduling which search request should execute first and
     /// how many should executes at the same time.
     ///
@@ -84,6 +115,8 @@ impl SearchQueue {
         capacity: usize,
         parallelism: NonZeroUsize,
         mut receive_new_searches: mpsc::Receiver<oneshot::Sender<Permit>>,
+        metric_searches_running: Arc<AtomicUsize>,
+        metric_searches_waiting: Arc<AtomicUsize>,
     ) {
         let mut queue: Vec<oneshot::Sender<Permit>> = Default::default();
         let mut rng: StdRng = StdRng::from_entropy();
@@ -133,6 +166,9 @@ impl SearchQueue {
                     queue.push(search_request);
                 },
             }
+
+            metric_searches_running.store(searches_running, Ordering::Relaxed);
+            metric_searches_waiting.store(queue.len(), Ordering::Relaxed);
         }
     }

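These stores work because the spawned loop and the public getters hold clones of the same `Arc<AtomicUsize>`. A minimal sketch of that sharing, assuming a tokio runtime and illustrative names:

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

#[tokio::main]
async fn main() {
    let running = Arc::new(AtomicUsize::new(0));

    // The background task owns one clone and publishes counts into it...
    let for_task = Arc::clone(&running);
    let handle = tokio::spawn(async move {
        for_task.store(3, Ordering::Relaxed);
    });
    handle.await.unwrap();

    // ...while any other holder can read the latest value without locks.
    assert_eq!(running.load(Ordering::Relaxed), 3);
}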
@@ -389,3 +389,25 @@ pub static VECTOR_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
         },
     ])
 });
+
+pub async fn shared_index_with_test_set() -> &'static Index<'static, Shared> {
+    static INDEX: OnceCell<Index<'static, Shared>> = OnceCell::const_new();
+    INDEX
+        .get_or_init(|| async {
+            let server = Server::new_shared();
+            let index = server._index("SHARED_TEST_SET").to_shared();
+            let url = format!("/indexes/{}/documents", urlencoding::encode(index.uid.as_ref()));
+            let (response, code) = index
+                .service
+                .post_str(
+                    url,
+                    include_str!("../assets/test_set.json"),
+                    vec![("content-type", "application/json")],
+                )
+                .await;
+            assert_eq!(code, 202);
+            index.wait_task(response.uid()).await;
+            index
+        })
+        .await
+}
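
`shared_index_with_test_set` leans on `tokio::sync::OnceCell`: the first caller runs the async initializer and every later caller awaits the cached `'static` reference. A stripped-down sketch of the pattern; `expensive_setup` is an illustrative stand-in:

use tokio::sync::OnceCell;

static FIXTURE: OnceCell<String> = OnceCell::const_new();

async fn expensive_setup() -> String {
    // Imagine loading documents into a server here.
    "loaded".to_string()
}

async fn shared_fixture() -> &'static String {
    FIXTURE.get_or_init(expensive_setup).await
}

#[tokio::main]
async fn main() {
    // Both calls resolve to the same &'static value; setup runs once.
    let a = shared_fixture().await;
    let b = shared_fixture().await;
    assert!(std::ptr::eq(a, b));
}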
@@ -4,24 +4,27 @@ use meili_snap::*;
 use urlencoding::encode as urlencode;

 use crate::common::encoder::Encoder;
-use crate::common::{GetAllDocumentsOptions, Server, Value};
+use crate::common::{
+    shared_does_not_exists_index, shared_empty_index, shared_index_with_test_set,
+    GetAllDocumentsOptions, Server, Value,
+};
 use crate::json;

 // TODO: partial test since we are testing error, amd error is not yet fully implemented in
 // transplant
 #[actix_rt::test]
 async fn get_unexisting_index_single_document() {
-    let server = Server::new().await;
-    let (_response, code) = server.index("test").get_document(1, None).await;
+    let (_response, code) = shared_does_not_exists_index().await.get_document(1, None).await;
     assert_eq!(code, 404);
 }

 #[actix_rt::test]
 async fn error_get_unexisting_document() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    index.create(None).await;
-    index.wait_task(0).await;
+    let server = Server::new_shared();
+    let index = server.unique_index();
+    let (task, _code) = index.create(None).await;
+    index.wait_task(task.uid()).await.succeeded();

     let (response, code) = index.get_document(1, None).await;

     let expected_response = json!({
@@ -37,18 +40,19 @@ async fn error_get_unexisting_document() {

 #[actix_rt::test]
 async fn get_document() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    index.create(None).await;
+    let server = Server::new_shared();
+    let index = server.unique_index();
+    let (task, _code) = index.create(None).await;
+    index.wait_task(task.uid()).await.succeeded();
     let documents = json!([
         {
             "id": 0,
             "nested": { "content": "foobar" },
         }
     ]);
-    let (_, code) = index.add_documents(documents, None).await;
+    let (task, code) = index.add_documents(documents, None).await;
     assert_eq!(code, 202);
-    index.wait_task(1).await;
+    index.wait_task(task.uid()).await.succeeded();
     let (response, code) = index.get_document(0, None).await;
     assert_eq!(code, 200);
     assert_eq!(
@@ -81,12 +85,11 @@ async fn get_document() {

 #[actix_rt::test]
 async fn error_get_unexisting_index_all_documents() {
-    let server = Server::new().await;
-    let (response, code) =
-        server.index("test").get_all_documents(GetAllDocumentsOptions::default()).await;
+    let index = shared_does_not_exists_index().await;
+    let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;

     let expected_response = json!({
-        "message": "Index `test` not found.",
+        "message": "Index `DOES_NOT_EXISTS` not found.",
         "code": "index_not_found",
         "type": "invalid_request",
         "link": "https://docs.meilisearch.com/errors#index_not_found"
@@ -98,12 +101,7 @@ async fn error_get_unexisting_index_all_documents() {

 #[actix_rt::test]
 async fn get_no_document() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    let (_, code) = index.create(None).await;
-    assert_eq!(code, 202);
-
-    index.wait_task(0).await;
+    let index = shared_empty_index().await;

     let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
     assert_eq!(code, 200);
@@ -112,14 +110,12 @@

 #[actix_rt::test]
 async fn get_all_documents_no_options() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    index.load_test_set().await;
+    let index = shared_index_with_test_set().await;

     let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
     assert_eq!(code, 200);
-    let arr = response["results"].as_array().unwrap();
-    assert_eq!(arr.len(), 20);
+    let results = response["results"].as_array().unwrap();
+    assert_eq!(results.len(), 20);
     let first = json!({
         "id":0,
         "isActive":false,
@@ -138,19 +134,16 @@ async fn get_all_documents_no_options() {
         "longitude":-145.725388,
         "tags":["bug"
         ,"bug"]});
-    assert_eq!(first, arr[0]);
+    assert_eq!(first, results[0]);
 }

 #[actix_rt::test]
 async fn get_all_documents_no_options_with_response_compression() {
-    let server = Server::new().await;
-    let index_uid = "test";
-    let index = server.index(index_uid);
-    index.load_test_set().await;
+    let index = shared_index_with_test_set().await;

-    let app = server.init_web_app().await;
+    let app = Server::new_shared().init_web_app().await;
     let req = test::TestRequest::get()
-        .uri(&format!("/indexes/{}/documents?", urlencode(index_uid)))
+        .uri(&format!("/indexes/{}/documents?", urlencode(&index.uid)))
         .insert_header((ACCEPT_ENCODING, "gzip"))
         .to_request();

@@ -169,9 +162,7 @@ async fn get_all_documents_no_options_with_response_compression() {

 #[actix_rt::test]
 async fn test_get_all_documents_limit() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    index.load_test_set().await;
+    let index = shared_index_with_test_set().await;

     let (response, code) = index
         .get_all_documents(GetAllDocumentsOptions { limit: Some(5), ..Default::default() })
@@ -186,9 +177,7 @@ async fn test_get_all_documents_limit() {

 #[actix_rt::test]
 async fn test_get_all_documents_offset() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    index.load_test_set().await;
+    let index = shared_index_with_test_set().await;

     let (response, code) = index
         .get_all_documents(GetAllDocumentsOptions { offset: Some(5), ..Default::default() })
@@ -203,9 +192,7 @@ async fn test_get_all_documents_offset() {

 #[actix_rt::test]
 async fn test_get_all_documents_attributes_to_retrieve() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    index.load_test_set().await;
+    let index = shared_index_with_test_set().await;

     let (response, code) = index
         .get_all_documents(GetAllDocumentsOptions {
@@ -286,9 +273,11 @@ async fn test_get_all_documents_attributes_to_retrieve() {

 #[actix_rt::test]
 async fn get_document_s_nested_attributes_to_retrieve() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    index.create(None).await;
+    let server = Server::new_shared();
+    let index = server.unique_index();
+    let (task, _code) = index.create(None).await;
+    index.wait_task(task.uid()).await.succeeded();
+
     let documents = json!([
         {
             "id": 0,
@@ -302,9 +291,9 @@ async fn get_document_s_nested_attributes_to_retrieve() {
             },
         },
     ]);
-    let (_, code) = index.add_documents(documents, None).await;
+    let (task, code) = index.add_documents(documents, None).await;
     assert_eq!(code, 202);
-    index.wait_task(1).await;
+    index.wait_task(task.uid()).await.succeeded();

     let (response, code) = index.get_document(0, Some(json!({ "fields": ["content"] }))).await;
     assert_eq!(code, 200);
@@ -343,10 +332,10 @@ async fn get_document_s_nested_attributes_to_retrieve() {

 #[actix_rt::test]
 async fn get_documents_displayed_attributes_is_ignored() {
-    let server = Server::new().await;
-    let index = server.index("test");
-    index.update_settings(json!({"displayedAttributes": ["gender"]})).await;
+    let server = Server::new_shared();
+    let index = server.unique_index();
     index.load_test_set().await;
+    index.update_settings(json!({"displayedAttributes": ["gender"]})).await;

     let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
     assert_eq!(code, 200);
@@ -366,10 +355,10 @@ async fn get_documents_displayed_attributes_is_ignored() {

 #[actix_rt::test]
 async fn get_document_by_filter() {
-    let server = Server::new().await;
-    let index = server.index("doggo");
+    let server = Server::new_shared();
+    let index = server.unique_index();
     index.update_settings_filterable_attributes(json!(["color"])).await;
-    index
+    let (task, _code) = index
         .add_documents(
             json!([
                 { "id": 0, "color": "red" },
@@ -380,7 +369,7 @@ async fn get_document_by_filter() {
             Some("id"),
         )
         .await;
-    index.wait_task(1).await;
+    index.wait_task(task.uid()).await.succeeded();

     let (response, code) = index.get_document_by_filter(json!({})).await;
     let (response2, code2) = index.get_all_documents_raw("").await;
@@ -552,7 +541,7 @@ async fn get_document_with_vectors() {
         }))
         .await;
     snapshot!(code, @"202 Accepted");
-    server.wait_task(response.uid()).await;
+    server.wait_task(response.uid()).await.succeeded();

     let documents = json!([
         {"id": 0, "name": "kefir", "_vectors": { "manual": [0, 0, 0] }},
@@ -560,7 +549,7 @@ async fn get_document_with_vectors() {
     ]);
     let (value, code) = index.add_documents(documents, None).await;
     snapshot!(code, @"202 Accepted");
-    index.wait_task(value.uid()).await;
+    index.wait_task(value.uid()).await.succeeded();

     // by default you shouldn't see the `_vectors` object
     let (documents, _code) = index.get_all_documents(Default::default()).await;
@@ -6,14 +6,14 @@ use crate::json;

 #[actix_rt::test]
 async fn formatted_contain_wildcard() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();

     index.update_settings(json!({ "displayedAttributes": ["id", "cattos"] })).await;

     let documents = NESTED_DOCUMENTS.clone();
-    index.add_documents(documents, None).await;
-    index.wait_task(1).await;
+    let (response, _) = index.add_documents(documents, None).await;
+    index.wait_task(response.uid()).await;

     index.search(json!({ "q": "pésti", "attributesToRetrieve": ["father", "mother"], "attributesToHighlight": ["father", "mother", "*"], "attributesToCrop": ["doggos"], "showMatchesPosition": true }),
         |response, code|
@@ -135,12 +135,7 @@ async fn formatted_contain_wildcard() {

 #[actix_rt::test]
 async fn format_nested() {
-    let server = Server::new().await;
-    let index = server.index("test");
-
-    let documents = NESTED_DOCUMENTS.clone();
-    index.add_documents(documents, None).await;
-    index.wait_task(0).await;
+    let index = shared_index_with_nested_documents().await;

     index
         .search(json!({ "q": "pésti", "attributesToRetrieve": ["doggos"] }), |response, code| {
@@ -340,15 +335,15 @@ async fn format_nested() {

 #[actix_rt::test]
 async fn displayedattr_2_smol() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();

     // not enough displayed for the other settings
     index.update_settings(json!({ "displayedAttributes": ["id"] })).await;

     let documents = NESTED_DOCUMENTS.clone();
-    index.add_documents(documents, None).await;
-    index.wait_task(1).await;
+    let (response, _) = index.add_documents(documents, None).await;
+    index.wait_task(response.uid()).await;

     index
         .search(json!({ "attributesToRetrieve": ["father", "id"], "attributesToHighlight": ["mother"], "attributesToCrop": ["cattos"] }),
@@ -538,15 +533,15 @@ async fn displayedattr_2_smol() {
 #[cfg(feature = "default")]
 #[actix_rt::test]
 async fn test_cjk_highlight() {
-    let server = Server::new().await;
-    let index = server.index("test");
+    let server = Server::new_shared();
+    let index = server.unique_index();

     let documents = json!([
         { "id": 0, "title": "この度、クーポンで無料で頂きました。" },
         { "id": 1, "title": "大卫到了扫罗那里" },
     ]);
-    index.add_documents(documents, None).await;
-    index.wait_task(0).await;
+    let (response, _) = index.add_documents(documents, None).await;
+    index.wait_task(response.uid()).await;

     index
         .search(json!({"q": "で", "attributesToHighlight": ["title"]}), |response, code| {
@@ -279,6 +279,55 @@ async fn task_bad_from() {
     "###);
 }

+#[actix_rt::test]
+async fn task_bad_reverse() {
+    let server = Server::new_shared();
+
+    let (response, code) = server.tasks_filter("reverse=doggo").await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(response, @r###"
+    {
+      "message": "Invalid value in parameter `reverse`: could not parse `doggo` as a boolean, expected either `true` or `false`",
+      "code": "invalid_task_reverse",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#invalid_task_reverse"
+    }
+    "###);
+
+    let (response, code) = server.tasks_filter("reverse=*").await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(response, @r###"
+    {
+      "message": "Invalid value in parameter `reverse`: could not parse `*` as a boolean, expected either `true` or `false`",
+      "code": "invalid_task_reverse",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#invalid_task_reverse"
+    }
+    "###);
+
+    let (response, code) = server.cancel_tasks("reverse=doggo").await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(response, @r###"
+    {
+      "message": "Unknown parameter `reverse`: expected one of `uids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`",
+      "code": "bad_request",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#bad_request"
+    }
+    "###);
+
+    let (response, code) = server.delete_tasks("reverse=doggo").await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(response, @r###"
+    {
+      "message": "Unknown parameter `reverse`: expected one of `uids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`",
+      "code": "bad_request",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#bad_request"
+    }
+    "###);
+}
+
 #[actix_rt::test]
 async fn task_bad_after_enqueued_at() {
     let server = Server::new_shared();
@@ -62,6 +62,44 @@ async fn list_tasks() {
     assert_eq!(response["results"].as_array().unwrap().len(), 2);
 }

+#[actix_rt::test]
+async fn list_tasks_pagination_and_reverse() {
+    let server = Server::new().await;
+    // First of all we want to create a lot of tasks very quickly. The fastest way is to delete a lot of unexisting indexes
+    let mut last_task = None;
+    for i in 0..10 {
+        let index = server.index(format!("test-{i}"));
+        last_task = Some(index.create(None).await.0.uid());
+    }
+    server.wait_task(last_task.unwrap()).await;
+
+    let (response, code) = server.tasks_filter("limit=3").await;
+    assert_eq!(code, 200);
+    let results = response["results"].as_array().unwrap();
+    let task_ids: Vec<_> = results.iter().map(|ret| ret["uid"].as_u64().unwrap()).collect();
+    snapshot!(format!("{task_ids:?}"), @"[9, 8, 7]");
+
+    let (response, code) = server.tasks_filter("limit=3&from=1").await;
+    assert_eq!(code, 200);
+    let results = response["results"].as_array().unwrap();
+    let task_ids: Vec<_> = results.iter().map(|ret| ret["uid"].as_u64().unwrap()).collect();
+    snapshot!(format!("{task_ids:?}"), @"[1, 0]");
+
+    // In reversed order
+
+    let (response, code) = server.tasks_filter("limit=3&reverse=true").await;
+    assert_eq!(code, 200);
+    let results = response["results"].as_array().unwrap();
+    let task_ids: Vec<_> = results.iter().map(|ret| ret["uid"].as_u64().unwrap()).collect();
+    snapshot!(format!("{task_ids:?}"), @"[0, 1, 2]");
+
+    let (response, code) = server.tasks_filter("limit=3&from=8&reverse=true").await;
+    assert_eq!(code, 200);
+    let results = response["results"].as_array().unwrap();
+    let task_ids: Vec<_> = results.iter().map(|ret| ret["uid"].as_u64().unwrap()).collect();
+    snapshot!(format!("{task_ids:?}"), @"[8, 9]");
+}
+
 #[actix_rt::test]
 async fn list_tasks_with_star_filters() {
     let server = Server::new().await;
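
To make the expected pages concrete: with ten tasks (uids 0 through 9), the default order is newest first, `from` is an inclusive starting uid, and `reverse=true` walks upward instead. An illustrative model (not the server code) that reproduces the four snapshots above:

// Illustrative-only model of /tasks pagination over a set of uids.
fn page(uids: &[u64], from: Option<u64>, limit: usize, reverse: bool) -> Vec<u64> {
    let mut ordered: Vec<u64> = uids.to_vec();
    ordered.sort_unstable();
    if reverse {
        // Ascending, starting at `from` (inclusive) when given.
        ordered.into_iter().filter(|&u| from.map_or(true, |f| u >= f)).take(limit).collect()
    } else {
        // Descending (default), starting at `from` (inclusive) when given.
        ordered.into_iter().rev().filter(|&u| from.map_or(true, |f| u <= f)).take(limit).collect()
    }
}

fn main() {
    let uids: Vec<u64> = (0..10).collect();
    assert_eq!(page(&uids, None, 3, false), vec![9, 8, 7]); // limit=3
    assert_eq!(page(&uids, Some(1), 3, false), vec![1, 0]); // limit=3&from=1
    assert_eq!(page(&uids, None, 3, true), vec![0, 1, 2]); // limit=3&reverse=true
    assert_eq!(page(&uids, Some(8), 3, true), vec![8, 9]); // limit=3&from=8&reverse=true
}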
@@ -193,131 +231,6 @@ async fn list_tasks_status_and_type_filtered() {
     assert_eq!(response["results"].as_array().unwrap().len(), 2);
 }

-#[actix_rt::test]
-async fn get_task_filter_error() {
-    let server = Server::new().await;
-
-    let (response, code) = server.tasks_filter("lol=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "message": "Unknown parameter `lol`: expected one of `limit`, `from`, `uids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`",
-      "code": "bad_request",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#bad_request"
-    }
-    "###);
-
-    let (response, code) = server.tasks_filter("uids=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "message": "Invalid value in parameter `uids`: could not parse `pied` as a positive integer",
-      "code": "invalid_task_uids",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_task_uids"
-    }
-    "###);
-
-    let (response, code) = server.tasks_filter("from=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "message": "Invalid value in parameter `from`: could not parse `pied` as a positive integer",
-      "code": "invalid_task_from",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_task_from"
-    }
-    "###);
-
-    let (response, code) = server.tasks_filter("beforeStartedAt=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "message": "Invalid value in parameter `beforeStartedAt`: `pied` is an invalid date-time. It should follow the YYYY-MM-DD or RFC 3339 date-time format.",
-      "code": "invalid_task_before_started_at",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_task_before_started_at"
-    }
-    "###);
-}
-
-#[actix_rt::test]
-async fn delete_task_filter_error() {
-    let server = Server::new().await;
-
-    let (response, code) = server.delete_tasks("").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "message": "Query parameters to filter the tasks to delete are missing. Available query parameters are: `uids`, `indexUids`, `statuses`, `types`, `canceledBy`, `beforeEnqueuedAt`, `afterEnqueuedAt`, `beforeStartedAt`, `afterStartedAt`, `beforeFinishedAt`, `afterFinishedAt`.",
-      "code": "missing_task_filters",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#missing_task_filters"
-    }
-    "###);
-
-    let (response, code) = server.delete_tasks("lol=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "message": "Unknown parameter `lol`: expected one of `uids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`",
-      "code": "bad_request",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#bad_request"
-    }
-    "###);
-
-    let (response, code) = server.delete_tasks("uids=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "message": "Invalid value in parameter `uids`: could not parse `pied` as a positive integer",
-      "code": "invalid_task_uids",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_task_uids"
-    }
-    "###);
-}
-
-#[actix_rt::test]
-async fn cancel_task_filter_error() {
-    let server = Server::new().await;
-
-    let (response, code) = server.cancel_tasks("").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "message": "Query parameters to filter the tasks to cancel are missing. Available query parameters are: `uids`, `indexUids`, `statuses`, `types`, `canceledBy`, `beforeEnqueuedAt`, `afterEnqueuedAt`, `beforeStartedAt`, `afterStartedAt`, `beforeFinishedAt`, `afterFinishedAt`.",
-      "code": "missing_task_filters",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#missing_task_filters"
-    }
-    "###);
-
-    let (response, code) = server.cancel_tasks("lol=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "message": "Unknown parameter `lol`: expected one of `uids`, `canceledBy`, `types`, `statuses`, `indexUids`, `afterEnqueuedAt`, `beforeEnqueuedAt`, `afterStartedAt`, `beforeStartedAt`, `afterFinishedAt`, `beforeFinishedAt`",
-      "code": "bad_request",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#bad_request"
-    }
-    "###);
-
-    let (response, code) = server.cancel_tasks("uids=pied").await;
-    assert_eq!(code, 400, "{}", response);
-    meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
-    {
-      "message": "Invalid value in parameter `uids`: could not parse `pied` as a positive integer",
-      "code": "invalid_task_uids",
-      "type": "invalid_request",
-      "link": "https://docs.meilisearch.com/errors#invalid_task_uids"
-    }
-    "###);
-}
-
 macro_rules! assert_valid_summarized_task {
     ($response:expr, $task_type:literal, $index:literal) => {{
         assert_eq!($response.as_object().unwrap().len(), 5);