mirror of https://github.com/meilisearch/meilisearch.git
synced 2024-11-22 18:17:39 +08:00
WIP: refactor IndexController
change the architecture of the index controller to allow it to own an index store.
This commit is contained in:
parent 686f987180
commit 6a3f625e11
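The shape of the refactor, gathered from the diffs below: the concrete `IndexController<U>` struct is replaced by an `IndexController` trait that handles the write path, while read access comes from a new `IndexStore` the controller must `Deref` to; the (for now mostly stubbed) `UpdateStore` owns the store and implements the trait. A minimal sketch of that layering, simplified and not the actual code:

use std::ops::Deref;

// Read side: owns the LMDB env, the metadata database and the map of open indexes.
pub struct IndexStore { /* env, indexes_db, DashMap<String, (String, Arc<Index>)> */ }

// Write side: implementers decide how updates are scheduled.
pub trait IndexController: Deref<Target = IndexStore> {
    fn update_settings(&self, index_uid: String) -> anyhow::Result<()>; // simplified signature
}

// The controller owns its index store and gets the read API through Deref.
pub struct UpdateStore { index_store: IndexStore }

impl Deref for UpdateStore {
    type Target = IndexStore;
    fn deref(&self) -> &IndexStore { &self.index_store }
}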
Cargo.lock (generated): 40 additions

@@ -1,5 +1,15 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
+
+[[package]]
+name = "Inflector"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3"
+dependencies = [
+ "lazy_static",
+ "regex",
+]
 
 [[package]]
 name = "actix-codec"
 version = "0.3.0"
@@ -1639,6 +1649,7 @@ dependencies = [
  "mime",
  "obkv",
  "once_cell",
+ "ouroboros",
  "page_size",
  "rand 0.7.3",
  "rayon",
@@ -1941,6 +1952,29 @@ dependencies = [
  "num-traits",
 ]
+
+[[package]]
+name = "ouroboros"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "069fb33e127cabdc8ad6a287eed9719b85c612d36199777f6dc41ad91f7be41a"
+dependencies = [
+ "ouroboros_macro",
+ "stable_deref_trait",
+]
+
+[[package]]
+name = "ouroboros_macro"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad938cc920f299d6dce91e43d3ce316e785f4aa4bc4243555634dc2967098fc6"
+dependencies = [
+ "Inflector",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
 
 [[package]]
 name = "page_size"
 version = "0.4.2"
@@ -2771,6 +2805,12 @@ version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
+
+[[package]]
+name = "stable_deref_trait"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
 
 [[package]]
 name = "standback"
 version = "0.2.13"
@@ -61,6 +61,7 @@ whoami = "1.0.0"
 dashmap = "4.0.2"
 page_size = "0.4.2"
 obkv = "0.1.1"
+ouroboros = "0.8.0"
 
 [dependencies.sentry]
 default-features = false
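`ouroboros` (with its transitive `Inflector` and `stable_deref_trait` entries in the lock file above) is pulled in so the new `IndexView` can bundle an `Arc<Index>` with a heed read transaction that borrows from it, a self-referential pair that ordinary Rust structs cannot express. A toy example of the crate's pattern, assuming ouroboros 0.8's generated `new`/`borrow_*` API (the same machinery behind `IndexView::try_new` and `.with(...)` in the diffs below):

use ouroboros::self_referencing;

#[self_referencing]
struct OwnedAndBorrowed {
    data: String,
    #[borrows(data)]
    prefix: &'this str, // borrows from `data` stored in the same struct
}

fn main() {
    // The generated constructor takes the owned field plus a closure
    // that builds the borrowing field from it.
    let v = OwnedAndBorrowed::new("hello world".to_string(), |data| &data[..5]);
    assert_eq!(*v.borrow_prefix(), "hello");
}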
@@ -8,17 +8,16 @@ use std::sync::Arc;
 
 use sha2::Digest;
 
-use crate::{option::Opt, updates::Settings};
-use crate::updates::UpdateQueue;
-use crate::index_controller::IndexController;
+use crate::{option::Opt, index_controller::Settings};
+use crate::index_controller::{IndexStore, UpdateStore};
 
 #[derive(Clone)]
 pub struct Data {
-    inner: Arc<DataInner>,
+    inner: Arc<DataInner<UpdateStore>>,
 }
 
 impl Deref for Data {
-    type Target = DataInner;
+    type Target = DataInner<UpdateStore>;
 
     fn deref(&self) -> &Self::Target {
         &self.inner
@@ -26,8 +25,8 @@ impl Deref for Data {
 }
 
 #[derive(Clone)]
-pub struct DataInner {
-    pub indexes: Arc<IndexController<UpdateQueue>>,
+pub struct DataInner<I> {
+    pub indexes: Arc<I>,
     api_keys: ApiKeys,
     options: Opt,
 }
@@ -58,11 +57,10 @@ impl ApiKeys {
 
 impl Data {
     pub fn new(options: Opt) -> anyhow::Result<Data> {
-        let db_size = options.max_mdb_size.get_bytes() as usize;
-        let indexes = IndexController::new(&options.db_path)?;
-        let indexes = Arc::new(indexes);
-        let update_queue = Arc::new(UpdateQueue::new(&options, indexes.clone())?);
+        let path = options.db_path.clone();
+        let index_store = IndexStore::new(&path)?;
+        let index_controller = UpdateStore::new(index_store);
+        let indexes = Arc::new(index_controller);
 
         let mut api_keys = ApiKeys {
             master: options.clone().master_key,
@@ -72,31 +70,28 @@ impl Data {
 
         api_keys.generate_missing_api_keys();
 
-        let inner = DataInner { indexes, options, update_queue, api_keys };
+        let inner = DataInner { indexes, options, api_keys };
         let inner = Arc::new(inner);
 
         Ok(Data { inner })
     }
 
-    pub fn settings<S: AsRef<str>>(&self, _index: S) -> anyhow::Result<Settings> {
-        let txn = self.indexes.env.read_txn()?;
-        let fields_map = self.indexes.fields_ids_map(&txn)?;
-        println!("fields_map: {:?}", fields_map);
+    pub fn settings<S: AsRef<str>>(&self, index_uid: S) -> anyhow::Result<Settings> {
+        let index = self.indexes
+            .get(&index_uid)?
+            .ok_or_else(|| anyhow::anyhow!("Index {} does not exist.", index_uid.as_ref()))?;
 
-        let displayed_attributes = self.indexes
-            .displayed_fields(&txn)?
+        let displayed_attributes = index
+            .displayed_fields()?
             .map(|fields| fields.into_iter().map(String::from).collect())
             .unwrap_or_else(|| vec!["*".to_string()]);
 
-        let searchable_attributes = self.indexes
-            .searchable_fields(&txn)?
-            .map(|fields| fields
-                .into_iter()
-                .map(String::from)
-                .collect())
+        let searchable_attributes = index
+            .searchable_fields()?
+            .map(|fields| fields.into_iter().map(String::from).collect())
             .unwrap_or_else(|| vec!["*".to_string()]);
 
-        let faceted_attributes = self.indexes.faceted_fields(&txn)?
+        let faceted_attributes = index.faceted_fields()?
             .into_iter()
             .map(|(k, v)| (k, v.to_string()))
             .collect();
@@ -107,10 +107,10 @@ impl Data {
     pub fn search<S: AsRef<str>>(&self, index: S, search_query: SearchQuery) -> anyhow::Result<SearchResult> {
         let start = Instant::now();
         let index = self.indexes
-            .get(index)?
+            .get(&index)?
             .ok_or_else(|| Error::OpenIndex(format!("Index {} doesn't exists.", index.as_ref())))?;
 
-        let Results { found_words, documents_ids, nb_hits, limit, .. } = index.search(search_query)?;
+        let Results { found_words, documents_ids, nb_hits, limit, .. } = index.search(&search_query)?;
 
         let fields_ids_map = index.fields_ids_map()?;
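With this change the read path in `Data::settings` runs Data -> controller -> store: `self.indexes.get(...)` (reaching `IndexStore::get` through `Deref`) returns an `IndexView` that carries its own snapshot transaction, which is why the `&txn` arguments disappear. Hypothetical usage, with an invented index uid:

// Hypothetical caller; "movies" is an invented index uid.
let index = data.indexes
    .get("movies")?                 // Option<IndexView> via IndexStore::get
    .ok_or_else(|| anyhow::anyhow!("Index movies does not exist."))?;

// No transaction parameter any more: the view owns its RoTxn.
let displayed = index.displayed_fields()?;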
@@ -1,18 +1,20 @@
 use std::ops::Deref;
 
+use milli::update::{IndexDocumentsMethod, UpdateFormat};
+//use milli::update_store::UpdateStatus;
 use async_compression::tokio_02::write::GzipEncoder;
 use futures_util::stream::StreamExt;
 use tokio::io::AsyncWriteExt;
-use milli::update::{IndexDocumentsMethod, UpdateFormat};
-use milli::update_store::UpdateStatus;
 
 use super::Data;
-use crate::updates::{UpdateMeta, UpdateResult, UpdateStatusResponse, Settings};
+use crate::index_controller::IndexController;
+use crate::index_controller::{UpdateStatusResponse, Settings};
 
 impl Data {
     pub async fn add_documents<B, E, S>(
         &self,
-        _index: S,
+        index: S,
         method: IndexDocumentsMethod,
         format: UpdateFormat,
         mut stream: impl futures::Stream<Item=Result<B, E>> + Unpin,
@@ -20,7 +22,7 @@ impl Data {
     where
         B: Deref<Target = [u8]>,
         E: std::error::Error + Send + Sync + 'static,
-        S: AsRef<str>,
+        S: AsRef<str> + Send + Sync + 'static,
     {
         let file = tokio::task::spawn_blocking(tempfile::tempfile).await?;
         let file = tokio::fs::File::from_std(file?);
@@ -37,43 +39,39 @@ impl Data {
         let file = file.into_std().await;
         let mmap = unsafe { memmap::Mmap::map(&file)? };
 
-        let meta = UpdateMeta::DocumentsAddition { method, format };
-        let queue = self.update_queue.clone();
-        let update = tokio::task::spawn_blocking(move || queue.register_update(meta, &mmap[..])).await??;
+        let indexes = self.indexes.clone();
+        let update = tokio::task::spawn_blocking(move || indexes.add_documents(index, method, format, &mmap[..])).await??;
 
         Ok(update.into())
     }
 
-    pub async fn update_settings<S: AsRef<str>>(
+    pub async fn update_settings<S: AsRef<str> + Send + Sync + 'static>(
         &self,
-        _index: S,
+        index: S,
         settings: Settings
     ) -> anyhow::Result<UpdateStatusResponse> {
-        let meta = UpdateMeta::Settings(settings);
-        let queue = self.update_queue.clone();
-        let update = tokio::task::spawn_blocking(move || queue.register_update(meta, &[])).await??;
+        let indexes = self.indexes.clone();
+        let update = tokio::task::spawn_blocking(move || indexes.update_settings(index, settings)).await??;
         Ok(update.into())
     }
 
-    #[inline]
-    pub fn get_update_status(&self, _index: &str, uid: u64) -> anyhow::Result<Option<UpdateStatus<UpdateMeta, UpdateResult, String>>> {
-        self.update_queue.get_update_status(uid)
-    }
-
-    pub fn get_updates_status(&self, _index: &str) -> anyhow::Result<Vec<UpdateStatus<UpdateMeta, UpdateResult, String>>> {
-        let result = self.update_queue.iter_metas(|processing, processed, pending, aborted, failed| {
-            let mut metas = processing
-                .map(UpdateStatus::from)
-                .into_iter()
-                .chain(processed.filter_map(|i| Some(i.ok()?.1)).map(UpdateStatus::from))
-                .chain(pending.filter_map(|i| Some(i.ok()?.1)).map(UpdateStatus::from))
-                .chain(aborted.filter_map(|i| Some(i.ok()?.1)).map(UpdateStatus::from))
-                .chain(failed.filter_map(|i| Some(i.ok()?.1)).map(UpdateStatus::from))
-                .collect::<Vec<_>>();
-            metas.sort_by(|a, b| a.id().cmp(&b.id()));
-            Ok(metas)
-        })?;
-        Ok(result)
-    }
+    //#[inline]
+    //pub fn get_update_status<S: AsRef<str>>(&self, _index: S, uid: u64) -> anyhow::Result<Option<UpdateStatus<UpdateMeta, UpdateResult, String>>> {
+        //self.indexes.get_update_status(uid)
+    //}
+
+    //pub fn get_updates_status(&self, _index: &str) -> anyhow::Result<Vec<UpdateStatus<UpdateMeta, UpdateResult, String>>> {
+        //let result = self.update_queue.iter_metas(|processing, processed, pending, aborted, failed| {
+            //let mut metas = processing
+                //.map(UpdateStatus::from)
+                //.into_iter()
+                //.chain(processed.filter_map(|i| Some(i.ok()?.1)).map(UpdateStatus::from))
+                //.chain(pending.filter_map(|i| Some(i.ok()?.1)).map(UpdateStatus::from))
+                //.chain(aborted.filter_map(|i| Some(i.ok()?.1)).map(UpdateStatus::from))
+                //.chain(failed.filter_map(|i| Some(i.ok()?.1)).map(UpdateStatus::from))
+                //.collect::<Vec<_>>();
+            //metas.sort_by(|a, b| a.id().cmp(&b.id()));
+            //Ok(metas)
+        //})?;
+        //Ok(result)
+    //}
 }
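Both rewritten methods keep the established concurrency pattern: milli and heed calls are synchronous, so they are shipped to tokio's blocking pool and awaited, and the double `?` unwraps first the task's `JoinError`, then the inner `anyhow::Result`. A stripped-down sketch of the pattern; `do_blocking_work` is an invented stand-in:

use std::sync::Arc;

// Invented stand-in for the synchronous indexing work.
fn do_blocking_work(store: Arc<String>) -> anyhow::Result<usize> {
    Ok(store.len())
}

async fn handler(store: Arc<String>) -> anyhow::Result<usize> {
    let store = store.clone(); // clone the Arc so the closure can be 'static
    // spawn_blocking yields Result<anyhow::Result<usize>, JoinError>,
    // hence the double `?` at the await point.
    let n = tokio::task::spawn_blocking(move || do_blocking_work(store)).await??;
    Ok(n)
}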
src/index_controller/index_store.rs (new file, 255 lines)

@@ -0,0 +1,255 @@
+use std::fs::File;
+use std::io::{Read, Write};
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+use std::collections::HashMap;
+
+use anyhow::Result;
+use chrono::{DateTime, Utc};
+use dashmap::DashMap;
+use heed::types::{Str, SerdeBincode};
+use heed::{EnvOpenOptions, Env, Database};
+use milli::{Index, FieldsIdsMap, SearchResult, FieldId, facet::FacetType};
+use serde::{Serialize, Deserialize};
+use ouroboros::self_referencing;
+
+use crate::data::SearchQuery;
+
+const CONTROLLER_META_FILENAME: &str = "index_controller_meta";
+const INDEXES_CONTROLLER_FILENAME: &str = "indexes_db";
+const INDEXES_DB_NAME: &str = "indexes_db";
+
+#[derive(Debug, Serialize, Deserialize)]
+struct IndexStoreMeta {
+    open_options: EnvOpenOptions,
+    created_at: DateTime<Utc>,
+}
+
+impl IndexStoreMeta {
+    fn from_path(path: impl AsRef<Path>) -> Result<Option<IndexStoreMeta>> {
+        let mut path = path.as_ref().to_path_buf();
+        path.push(CONTROLLER_META_FILENAME);
+        if path.exists() {
+            let mut file = File::open(path)?;
+            let mut buffer = Vec::new();
+            let n = file.read_to_end(&mut buffer)?;
+            let meta: IndexStoreMeta = serde_json::from_slice(&buffer[..n])?;
+            Ok(Some(meta))
+        } else {
+            Ok(None)
+        }
+    }
+
+    fn to_path(self, path: impl AsRef<Path>) -> Result<()> {
+        let mut path = path.as_ref().to_path_buf();
+        path.push(CONTROLLER_META_FILENAME);
+        if path.exists() {
+            Err(anyhow::anyhow!("Index controller metadata already exists"))
+        } else {
+            let mut file = File::create(path)?;
+            let json = serde_json::to_vec(&self)?;
+            file.write_all(&json)?;
+            Ok(())
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct IndexMetadata {
+    created_at: DateTime<Utc>,
+    open_options: EnvOpenOptions,
+    uuid: String,
+}
+
+impl IndexMetadata {
+    fn open_index(self, path: impl AsRef<Path>) -> Result<Index> {
+        // create a path in the form "db_path/indexes/index_id"
+        let mut path = path.as_ref().to_path_buf();
+        path.push("indexes");
+        path.push(&self.uuid);
+        Ok(Index::new(self.open_options, path)?)
+    }
+}
+
+#[self_referencing]
+pub struct IndexView {
+    pub index: Arc<Index>,
+    #[borrows(index)]
+    #[covariant]
+    pub txn: heed::RoTxn<'this>,
+    uuid: String,
+}
+
+impl IndexView {
+    pub fn search(&self, search_query: &SearchQuery) -> Result<SearchResult> {
+        self.with(|this| {
+            let mut search = this.index.search(&this.txn);
+            if let Some(query) = &search_query.q {
+                search.query(query);
+            }
+
+            if let Some(offset) = search_query.offset {
+                search.offset(offset);
+            }
+
+            let limit = search_query.limit;
+            search.limit(limit);
+
+            Ok(search.execute()?)
+        })
+    }
+
+    #[inline]
+    pub fn fields_ids_map(&self) -> Result<FieldsIdsMap> {
+        self.with(|this| Ok(this.index.fields_ids_map(&this.txn)?))
+    }
+
+    #[inline]
+    pub fn displayed_fields_ids(&self) -> Result<Option<Vec<FieldId>>> {
+        self.with(|this| Ok(this.index.displayed_fields_ids(&this.txn)?))
+    }
+
+    #[inline]
+    pub fn displayed_fields(&self) -> Result<Option<Vec<String>>> {
+        self.with(|this| Ok(this.index
+            .displayed_fields(&this.txn)?
+            .map(|fields| fields.into_iter().map(String::from).collect())))
+    }
+
+    #[inline]
+    pub fn searchable_fields(&self) -> Result<Option<Vec<String>>> {
+        self.with(|this| Ok(this.index
+            .searchable_fields(&this.txn)?
+            .map(|fields| fields.into_iter().map(String::from).collect())))
+    }
+
+    #[inline]
+    pub fn faceted_fields(&self) -> Result<HashMap<std::string::String, FacetType>> {
+        self.with(|this| Ok(this.index.faceted_fields(&this.txn)?))
+    }
+
+    pub fn documents(&self, ids: &[u32]) -> Result<Vec<(u32, obkv::KvReader<'_>)>> {
+        let txn = self.borrow_txn();
+        let index = self.borrow_index();
+        Ok(index.documents(txn, ids.into_iter().copied())?)
+    }
+
+    //pub async fn add_documents<B, E>(
+        //&self,
+        //method: IndexDocumentsMethod,
+        //format: UpdateFormat,
+        //mut stream: impl futures::Stream<Item=Result<B, E>> + Unpin,
+    //) -> anyhow::Result<UpdateStatusResponse>
+    //where
+        //B: Deref<Target = [u8]>,
+        //E: std::error::Error + Send + Sync + 'static,
+    //{
+        //let file = tokio::task::spawn_blocking(tempfile::tempfile).await?;
+        //let file = tokio::fs::File::from_std(file?);
+        //let mut encoder = GzipEncoder::new(file);
+
+        //while let Some(result) = stream.next().await {
+            //let bytes = &*result?;
+            //encoder.write_all(&bytes[..]).await?;
+        //}
+
+        //encoder.shutdown().await?;
+        //let mut file = encoder.into_inner();
+        //file.sync_all().await?;
+        //let file = file.into_std().await;
+        //let mmap = unsafe { memmap::Mmap::map(&file)? };
+
+        //let meta = UpdateMeta::DocumentsAddition { method, format };
+
+        //let index = self.index.clone();
+        //let queue = self.update_store.clone();
+        //let update = tokio::task::spawn_blocking(move || queue.register_update(index, meta, &mmap[..])).await??;
+        //Ok(update.into())
+    //}
+}
+
+pub struct IndexStore {
+    path: PathBuf,
+    env: Env,
+    indexes_db: Database<Str, SerdeBincode<IndexMetadata>>,
+    indexes: DashMap<String, (String, Arc<Index>)>,
+}
+
+impl IndexStore {
+    /// Open the index controller from meta found at path, and create a new one if no meta is
+    /// found.
+    pub fn new(path: impl AsRef<Path>) -> Result<Self> {
+        // If index controller metadata is present, we return the env, otherwise, we create a new
+        // metadata from scratch before returning a new env.
+        let path = path.as_ref().to_path_buf();
+        let env = match IndexStoreMeta::from_path(&path)? {
+            Some(meta) => meta.open_options.open(INDEXES_CONTROLLER_FILENAME)?,
+            None => {
+                let mut open_options = EnvOpenOptions::new();
+                open_options.map_size(page_size::get() * 1000);
+                let env = open_options.open(INDEXES_CONTROLLER_FILENAME)?;
+                let created_at = Utc::now();
+                let meta = IndexStoreMeta { open_options: open_options.clone(), created_at };
+                meta.to_path(&path)?;
+                env
+            }
+        };
+        let indexes = DashMap::new();
+        let indexes_db = match env.open_database(Some(INDEXES_DB_NAME))? {
+            Some(indexes_db) => indexes_db,
+            None => env.create_database(Some(INDEXES_DB_NAME))?,
+        };
+
+        Ok(Self { env, indexes, indexes_db, path })
+    }
+
+    pub fn get_or_create<S: AsRef<str>>(&self, _name: S) -> Result<IndexView> {
+        todo!()
+    }
+
+    /// Get an index with read access to the db. The indexes are lazily loaded, meaning that we
+    /// first check for their existence in the indexes map, and if an index isn't there, the index
+    /// db is checked for metadata to launch the index.
+    pub fn get<S: AsRef<str>>(&self, name: S) -> Result<Option<IndexView>> {
+        match self.indexes.get(name.as_ref()) {
+            Some(entry) => {
+                let index = entry.1.clone();
+                let uuid = entry.0.clone();
+                let view = IndexView::try_new(index, |index| index.read_txn(), uuid)?;
+                Ok(Some(view))
+            }
+            None => {
+                let txn = self.env.read_txn()?;
+                match self.indexes_db.get(&txn, name.as_ref())? {
+                    Some(meta) => {
+                        let uuid = meta.uuid.clone();
+                        let index = Arc::new(meta.open_index(&self.path)?);
+                        self.indexes.insert(name.as_ref().to_owned(), (uuid.clone(), index.clone()));
+                        let view = IndexView::try_new(index, |index| index.read_txn(), uuid)?;
+                        Ok(Some(view))
+                    }
+                    None => Ok(None)
+                }
+            }
+        }
+    }
+
+    pub fn get_mut<S: AsRef<str>>(&self, _name: S) -> Result<Option<IndexView>> {
+        todo!()
+    }
+
+    pub async fn delete_index<S: AsRef<str>>(&self, _name: S) -> Result<()> {
+        todo!()
+    }
+
+    pub async fn list_indices(&self) -> Result<Vec<(String, IndexMetadata)>> {
+        todo!()
+    }
+
+    pub async fn rename_index(&self, _old: &str, _new: &str) -> Result<()> {
+        todo!()
+    }
+}
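`IndexStore::get` above is a lazy-load cache: check the in-memory `DashMap` first, and on a miss read the index metadata from LMDB, open the index, and insert it into the map. The two steps are not atomic, so two concurrent misses may both open the index and the last insert wins, which is benign here because both handles point to the same files. A reduced sketch of the pattern, with generic names rather than the actual code:

use std::sync::Arc;
use dashmap::DashMap;

struct LazyCache<V> {
    map: DashMap<String, Arc<V>>,
}

impl<V> LazyCache<V> {
    // `load` stands in for opening an index from its stored metadata.
    fn get_or_load(&self, name: &str, load: impl FnOnce() -> Option<V>) -> Option<Arc<V>> {
        if let Some(entry) = self.map.get(name) {
            return Some(entry.value().clone()); // fast path: already open
        }
        let value = Arc::new(load()?);          // slow path: open from metadata
        self.map.insert(name.to_owned(), value.clone());
        Some(value)
    }
}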
@@ -1,196 +1,145 @@
-use std::fs::File;
-use std::io::{Read, Write};
-use std::path::{Path, PathBuf};
-
+mod index_store;
+mod update_store;
+
+pub use index_store::IndexStore;
+pub use update_store::UpdateStore;
+
+use std::num::NonZeroUsize;
+use std::ops::Deref;
+use std::collections::HashMap;
+
 use anyhow::Result;
-use chrono::{DateTime, Utc};
-use dashmap::DashMap;
-use dashmap::mapref::one::Ref;
-use heed::types::{Str, SerdeBincode};
-use heed::{EnvOpenOptions, Env, Database};
-use milli::{Index, FieldsIdsMap, SearchResult, FieldId};
-use serde::{Serialize, Deserialize};
-
-use crate::data::SearchQuery;
-
-const CONTROLLER_META_FILENAME: &str = "index_controller_meta";
-const INDEXES_CONTROLLER_FILENAME: &str = "indexes_db";
-const INDEXES_DB_NAME: &str = "indexes_db";
-
-pub trait UpdateStore {}
-
-pub struct IndexController<U> {
-    path: PathBuf,
-    update_store: U,
-    env: Env,
-    indexes_db: Database<Str, SerdeBincode<IndexMetadata>>,
-    indexes: DashMap<String, Index>,
+use milli::update::{IndexDocumentsMethod, UpdateFormat};
+use milli::update_store::{Processed, Processing, Failed, Pending, Aborted};
+use serde::{Serialize, Deserialize, de::Deserializer};
+
+pub type UpdateStatusResponse = UpdateStatus<UpdateMeta, UpdateResult, String>;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(tag = "type")]
+pub enum UpdateMeta {
+    DocumentsAddition { method: IndexDocumentsMethod, format: UpdateFormat },
+    ClearDocuments,
+    Settings(Settings),
+    Facets(Facets),
 }
 
-#[derive(Debug, Serialize, Deserialize)]
-struct IndexControllerMeta {
-    open_options: EnvOpenOptions,
-    created_at: DateTime<Utc>,
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(deny_unknown_fields)]
+#[serde(rename_all = "camelCase")]
+pub struct Facets {
+    pub level_group_size: Option<NonZeroUsize>,
+    pub min_level_size: Option<NonZeroUsize>,
 }
 
-impl IndexControllerMeta {
-    fn from_path(path: impl AsRef<Path>) -> Result<Option<IndexControllerMeta>> {
-        let mut path = path.as_ref().to_path_buf();
-        path.push(CONTROLLER_META_FILENAME);
-        if path.exists() {
-            let mut file = File::open(path)?;
-            let mut buffer = Vec::new();
-            let n = file.read_to_end(&mut buffer)?;
-            let meta: IndexControllerMeta = serde_json::from_slice(&buffer[..n])?;
-            Ok(Some(meta))
-        } else {
-            Ok(None)
-        }
-    }
-
-    fn to_path(self, path: impl AsRef<Path>) -> Result<()> {
-        let mut path = path.as_ref().to_path_buf();
-        path.push(CONTROLLER_META_FILENAME);
-        if path.exists() {
-            Err(anyhow::anyhow!("Index controller metadata already exists"))
-        } else {
-            let mut file = File::create(path)?;
-            let json = serde_json::to_vec(&self)?;
-            file.write_all(&json)?;
-            Ok(())
+#[derive(Debug, Clone, Serialize)]
+#[serde(tag = "type")]
+pub enum UpdateStatus<M, P, N> {
+    Pending { update_id: u64, meta: Pending<M> },
+    Progressing { update_id: u64, meta: P },
+    Processed { update_id: u64, meta: Processed<M, N> },
+    Aborted { update_id: u64, meta: Aborted<M> },
+}
+
+fn deserialize_some<'de, T, D>(deserializer: D) -> Result<Option<T>, D::Error>
+where T: Deserialize<'de>,
+      D: Deserializer<'de>
+{
+    Deserialize::deserialize(deserializer).map(Some)
+}
+
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+#[serde(deny_unknown_fields)]
+#[serde(rename_all = "camelCase")]
+pub struct Settings {
+    #[serde(
+        default,
+        deserialize_with = "deserialize_some",
+        skip_serializing_if = "Option::is_none",
+    )]
+    pub displayed_attributes: Option<Option<Vec<String>>>,
+
+    #[serde(
+        default,
+        deserialize_with = "deserialize_some",
+        skip_serializing_if = "Option::is_none",
+    )]
+    pub searchable_attributes: Option<Option<Vec<String>>>,
+
+    #[serde(default)]
+    pub faceted_attributes: Option<Option<HashMap<String, String>>>,
+
+    #[serde(
+        default,
+        deserialize_with = "deserialize_some",
+        skip_serializing_if = "Option::is_none",
+    )]
+    pub criteria: Option<Option<Vec<String>>>,
+}
+
+impl Settings {
+    pub fn cleared() -> Self {
+        Self {
+            displayed_attributes: Some(None),
+            searchable_attributes: Some(None),
+            faceted_attributes: Some(None),
+            criteria: Some(None),
         }
     }
 }
 
-#[derive(Debug, Serialize, Deserialize)]
-struct IndexMetadata {
-    created_at: DateTime<Utc>,
-    open_options: EnvOpenOptions,
-    id: String,
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum UpdateResult {
+    //DocumentsAddition(DocumentAdditionResult),
+    Other,
 }
 
-impl IndexMetadata {
-    fn open_index(&self, path: impl AsRef<Path>) -> Result<Index> {
-        // create a path in the form "db_path/indexes/index_id"
-        let mut path = path.as_ref().to_path_buf();
-        path.push("indexes");
-        path.push(&self.id);
-        Ok(Index::new(self.open_options, path)?)
-    }
-}
-
-struct IndexView<'a, U> {
-    txn: heed::RoTxn<'a>,
-    index: Ref<'a, String, Index>,
-    update_store: &'a U,
-}
-
-impl<'a, U: UpdateStore> IndexView<'a, U> {
-    pub fn search(&self, search_query: SearchQuery) -> Result<SearchResult> {
-        let mut search = self.index.search(&self.txn);
-        if let Some(query) = &search_query.q {
-            search.query(query);
-        }
-
-        if let Some(offset) = search_query.offset {
-            search.offset(offset);
-        }
-
-        let limit = search_query.limit;
-        search.limit(limit);
-
-        Ok(search.execute()?)
-    }
-
-    pub fn fields_ids_map(&self) -> Result<FieldsIdsMap> {
-        Ok(self.index.fields_ids_map(&self.txn)?)
-    }
-
-    pub fn fields_displayed_fields_ids(&self) -> Result<Option<Vec<FieldId>>> {
-        Ok(self.index.displayed_fields_ids(&self.txn)?)
-    }
-
-    pub fn documents(&self, ids: Vec<u32>) -> Result<Vec<(u32, obkv::KvReader<'_>)>> {
-        Ok(self.index.documents(&self.txn, ids)?)
-    }
-}
-
-impl<U: UpdateStore> IndexController<U> {
-    /// Open the index controller from meta found at path, and create a new one if no meta is
-    /// found.
-    pub fn new(path: impl AsRef<Path>, update_store: U) -> Result<Self> {
-        // If index controller metadata is present, we return the env, otherwise, we create a new
-        // metadata from scratch before returning a new env.
-        let path = path.as_ref().to_path_buf();
-        let env = match IndexControllerMeta::from_path(&path)? {
-            Some(meta) => meta.open_options.open(INDEXES_CONTROLLER_FILENAME)?,
-            None => {
-                let open_options = EnvOpenOptions::new()
-                    .map_size(page_size::get() * 1000);
-                let env = open_options.open(INDEXES_CONTROLLER_FILENAME)?;
-                let created_at = Utc::now();
-                let meta = IndexControllerMeta { open_options: open_options.clone(), created_at };
-                meta.to_path(path)?;
-                env
-            }
-        };
-        let indexes = DashMap::new();
-        let indexes_db = match env.open_database(Some(INDEXES_DB_NAME))? {
-            Some(indexes_db) => indexes_db,
-            None => env.create_database(Some(INDEXES_DB_NAME))?,
-        };
-
-        Ok(Self { env, indexes, indexes_db, update_store, path })
-    }
-
-    pub fn get_or_create<S: AsRef<str>>(&mut self, name: S) -> Result<IndexView<'_, U>> {
-        todo!()
-    }
-
-    /// Get an index with read access to the db. The indexes are lazily loaded, meaning that we
-    /// first check for their existence in the indexes map, and if an index isn't there, the index
-    /// db is checked for metadata to launch the index.
-    pub fn get<S: AsRef<str>>(&self, name: S) -> Result<Option<IndexView<'_, U>>> {
-        let update_store = &self.update_store;
-        match self.indexes.get(name.as_ref()) {
-            Some(index) => {
-                let txn = index.read_txn()?;
-                Ok(Some(IndexView { index, update_store, txn }))
-            }
-            None => {
-                let txn = self.env.read_txn()?;
-                match self.indexes_db.get(&txn, name.as_ref())? {
-                    Some(meta) => {
-                        let index = meta.open_index(self.path)?;
-                        self.indexes.insert(name.as_ref().to_owned(), index);
-                        // TODO: create index view
-                        match self.indexes.get(name.as_ref()) {
-                            Some(index) => {
-                                let txn = index.read_txn()?;
-                                Ok(Some(IndexView { index, txn, update_store }))
-                            }
-                            None => Ok(None)
-                        }
-                    }
-                    None => Ok(None)
-                }
-            }
-        }
-    }
-
-    pub fn get_mut<S: AsRef<str>>(&self, name: S) -> Result<Option<IndexView<'_, U>>> {
-        todo!()
-    }
-
-    pub async fn delete_index<S: AsRef<str>>(&self, name: S) -> Result<()> {
-        todo!()
-    }
-
-    pub async fn list_indices(&self) -> Result<Vec<(String, IndexMetadata)>> {
-        todo!()
-    }
-
-    pub async fn rename_index(&self, old: &str, new: &str) -> Result<()> {
+/// The `IndexController` is in charge of the access to the underlying indices. It splits the
+/// logic for read access, which is provided, and write access, which must be provided. This
+/// allows the implementer to define the behaviour of write accesses to the indices, and to
+/// abstract the scheduling of the updates. The implementer must be able to provide an instance
+/// of `IndexStore`.
+pub trait IndexController: Deref<Target = IndexStore> {
+
+    /*
+     * Write operations
+     *
+     * Logic for the write operations needs to be provided by the implementer, since they can be
+     * made asynchronous thanks to an update_store for example.
+     *
+     * */
+
+    /// Perform document addition on the database. If the provided index does not exist, it will
+    /// be created when the addition is applied to the index.
+    fn add_documents<S: AsRef<str>>(
+        &self,
+        index: S,
+        method: IndexDocumentsMethod,
+        format: UpdateFormat,
+        data: &[u8],
+    ) -> anyhow::Result<UpdateStatusResponse>;
+
+    /// Updates an index's settings. If the index does not exist, it will be created when the
+    /// update is applied to the index.
+    fn update_settings<S: AsRef<str>>(&self, index_uid: S, settings: Settings) -> anyhow::Result<UpdateStatusResponse>;
+
+    /// Create an index with the given `index_uid`.
+    fn create_index<S: AsRef<str>>(&self, index_uid: S) -> Result<()>;
+
+    /// Delete the index with the given `index_uid`, attempting to close it beforehand.
+    fn delete_index<S: AsRef<str>>(&self, index_uid: S) -> Result<()>;
+
+    /// Swap two indexes; concretely, it simply swaps the indexes the names point to.
+    fn swap_indices<S1: AsRef<str>, S2: AsRef<str>>(&self, index1_uid: S1, index2_uid: S2) -> Result<()>;
+
+    /// Apply an update to the given index. This method can be called when an update is ready to
+    /// be processed.
+    fn handle_update<S: AsRef<str>>(
+        &self,
+        _index: S,
+        _update_id: u64,
+        _meta: Processing<UpdateMeta>,
+        _content: &[u8]
+    ) -> Result<Processed<UpdateMeta, UpdateResult>, Failed<UpdateMeta, String>> {
         todo!()
     }
 }
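The `Settings` struct in the rewritten module uses the `Option<Option<T>>` idiom: the outer `Option` distinguishes "field absent from the JSON" from "field present", and `deserialize_some` makes an explicit `null` deserialize to `Some(None)`, which is how `Settings::cleared()` encodes "reset this attribute". A self-contained example of the same idiom:

use serde::{Deserialize, Deserializer};

fn deserialize_some<'de, T, D>(deserializer: D) -> Result<Option<T>, D::Error>
where T: Deserialize<'de>, D: Deserializer<'de>,
{
    // Only invoked when the field is present, so `null` becomes Some(None).
    Deserialize::deserialize(deserializer).map(Some)
}

#[derive(Debug, Default, Deserialize)]
struct Patch {
    #[serde(default, deserialize_with = "deserialize_some")]
    name: Option<Option<String>>,
}

fn main() {
    let absent: Patch = serde_json::from_str(r#"{}"#).unwrap();
    let cleared: Patch = serde_json::from_str(r#"{"name": null}"#).unwrap();
    let set: Patch = serde_json::from_str(r#"{"name": "x"}"#).unwrap();
    assert_eq!(absent.name, None);                // don't touch the setting
    assert_eq!(cleared.name, Some(None));         // reset the setting
    assert_eq!(set.name, Some(Some("x".into()))); // update the setting
}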
src/index_controller/update_store/mod.rs (new file, 49 lines)

@@ -0,0 +1,49 @@
+use std::ops::Deref;
+
+use super::{IndexStore, IndexController};
+
+pub struct UpdateStore {
+    index_store: IndexStore,
+}
+
+impl Deref for UpdateStore {
+    type Target = IndexStore;
+
+    fn deref(&self) -> &Self::Target {
+        &self.index_store
+    }
+}
+
+impl UpdateStore {
+    pub fn new(index_store: IndexStore) -> Self {
+        Self { index_store }
+    }
+}
+
+impl IndexController for UpdateStore {
+    fn add_documents<S: AsRef<str>>(
+        &self,
+        _index: S,
+        _method: milli::update::IndexDocumentsMethod,
+        _format: milli::update::UpdateFormat,
+        _data: &[u8],
+    ) -> anyhow::Result<crate::index_controller::UpdateStatusResponse> {
+        todo!()
+    }
+
+    fn update_settings<S: AsRef<str>>(&self, _index_uid: S, _settings: crate::index_controller::Settings) -> anyhow::Result<crate::index_controller::UpdateStatusResponse> {
+        todo!()
+    }
+
+    fn create_index<S: AsRef<str>>(&self, _index_uid: S) -> anyhow::Result<()> {
+        todo!()
+    }
+
+    fn delete_index<S: AsRef<str>>(&self, _index_uid: S) -> anyhow::Result<()> {
+        todo!()
+    }
+
+    fn swap_indices<S1: AsRef<str>, S2: AsRef<str>>(&self, _index1_uid: S1, _index2_uid: S2) -> anyhow::Result<()> {
+        todo!()
+    }
+}
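The `Deref` impl is what satisfies the bound `IndexController: Deref<Target = IndexStore>` from the module above: `UpdateStore` owns the store, and every read method of `IndexStore` becomes callable on an `UpdateStore` through auto-deref, so implementers only supply the write half (still `todo!()` stubs here, consistent with the WIP title). A toy illustration of the mechanism, with invented types:

struct Reader;
impl Reader {
    fn read(&self) -> &'static str { "data" }
}

struct Writer { reader: Reader }

impl std::ops::Deref for Writer {
    type Target = Reader;
    fn deref(&self) -> &Reader { &self.reader }
}

fn main() {
    let w = Writer { reader: Reader };
    // Method calls auto-deref: the read API comes along for free,
    // the same way UpdateStore exposes IndexStore::get.
    assert_eq!(w.read(), "data");
}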
@@ -5,7 +5,7 @@ pub mod error;
 pub mod helpers;
 pub mod option;
 pub mod routes;
-mod updates;
+//mod updates;
 mod index_controller;
 
 use actix_http::Error;
@@ -9,10 +9,60 @@ use rustls::{
     AllowAnyAnonymousOrAuthenticatedClient, AllowAnyAuthenticatedClient, NoClientAuth,
     RootCertStore,
 };
+use grenad::CompressionType;
 use structopt::StructOpt;
 
-use crate::updates::IndexerOpts;
+#[derive(Debug, Clone, StructOpt)]
+pub struct IndexerOpts {
+    /// The amount of documents to skip before printing
+    /// a log regarding the indexing advancement.
+    #[structopt(long, default_value = "100000")] // 100k
+    pub log_every_n: usize,
+
+    /// MTBL max number of chunks in bytes.
+    #[structopt(long)]
+    pub max_nb_chunks: Option<usize>,
+
+    /// The maximum amount of memory to use for the MTBL buffer. It is recommended
+    /// to use something like 80%-90% of the available memory.
+    ///
+    /// It is automatically split by the number of jobs e.g. if you use 7 jobs
+    /// and 7 GB of max memory, each thread will use a maximum of 1 GB.
+    #[structopt(long, default_value = "7 GiB")]
+    pub max_memory: Byte,
+
+    /// Size of the linked hash map cache when indexing.
+    /// The bigger it is, the faster the indexing is but the more memory it takes.
+    #[structopt(long, default_value = "500")]
+    pub linked_hash_map_size: usize,
+
+    /// The name of the compression algorithm to use when compressing intermediate
+    /// chunks during indexing documents.
+    ///
+    /// Choosing a fast algorithm will make the indexing faster but may consume more memory.
+    #[structopt(long, default_value = "snappy", possible_values = &["snappy", "zlib", "lz4", "lz4hc", "zstd"])]
+    pub chunk_compression_type: CompressionType,
+
+    /// The level of compression of the chosen algorithm.
+    #[structopt(long, requires = "chunk-compression-type")]
+    pub chunk_compression_level: Option<u32>,
+
+    /// The number of bytes to remove from the beginning of the chunks while reading/sorting
+    /// or merging them.
+    ///
+    /// File fusing must only be enabled on file systems that support the `FALLOC_FL_COLLAPSE_RANGE`
+    /// (i.e. ext4 and XFS). File fusing will only work if the `enable-chunk-fusing` is set.
+    #[structopt(long, default_value = "4 GiB")]
+    pub chunk_fusing_shrink_size: Byte,
+
+    /// Enable the chunk fusing or not, this reduces the amount of disk used by a factor of 2.
+    #[structopt(long)]
+    pub enable_chunk_fusing: bool,
+
+    /// Number of parallel jobs for indexing, defaults to # of CPUs.
+    #[structopt(long)]
+    pub indexing_jobs: Option<usize>,
+}
+
 const POSSIBLE_ENV: [&str; 2] = ["development", "production"];
 
 #[derive(Debug, Clone, StructOpt)]
@@ -122,7 +122,7 @@ async fn add_documents_json(
 ) -> Result<HttpResponse, ResponseError> {
     let addition_result = data
         .add_documents(
-            &path.index_uid,
+            path.into_inner().index_uid,
             IndexDocumentsMethod::UpdateDocuments,
             UpdateFormat::Json,
             body
@@ -1,7 +1,7 @@
 use actix_web::{delete, get, post, put};
 use actix_web::{web, HttpResponse};
 use chrono::{DateTime, Utc};
-use log::error;
+//use log::error;
 use serde::{Deserialize, Serialize};
 
 use crate::Data;
@@ -94,8 +94,8 @@ async fn delete_index(
 
 #[derive(Deserialize)]
 struct UpdateParam {
-    index_uid: String,
-    update_id: u64,
+    _index_uid: String,
+    _update_id: u64,
 }
 
 #[get(
@@ -103,39 +103,41 @@ struct UpdateParam {
     wrap = "Authentication::Private"
 )]
 async fn get_update_status(
-    data: web::Data<Data>,
-    path: web::Path<UpdateParam>,
+    _data: web::Data<Data>,
+    _path: web::Path<UpdateParam>,
 ) -> Result<HttpResponse, ResponseError> {
-    let result = data.get_update_status(&path.index_uid, path.update_id);
-    match result {
-        Ok(Some(meta)) => {
-            let json = serde_json::to_string(&meta).unwrap();
-            Ok(HttpResponse::Ok().body(json))
-        }
-        Ok(None) => {
-            todo!()
-        }
-        Err(e) => {
-            error!("{}", e);
-            todo!()
-        }
-    }
+    todo!()
+    //let result = data.get_update_status(&path.index_uid, path.update_id);
+    //match result {
+        //Ok(Some(meta)) => {
+            //let json = serde_json::to_string(&meta).unwrap();
+            //Ok(HttpResponse::Ok().body(json))
+        //}
+        //Ok(None) => {
+            //todo!()
+        //}
+        //Err(e) => {
+            //error!("{}", e);
+            //todo!()
+        //}
+    //}
 }
 
 #[get("/indexes/{index_uid}/updates", wrap = "Authentication::Private")]
 async fn get_all_updates_status(
-    data: web::Data<Data>,
-    path: web::Path<IndexParam>,
+    _data: web::Data<Data>,
+    _path: web::Path<IndexParam>,
 ) -> Result<HttpResponse, ResponseError> {
-    let result = data.get_updates_status(&path.index_uid);
-    match result {
-        Ok(metas) => {
-            let json = serde_json::to_string(&metas).unwrap();
-            Ok(HttpResponse::Ok().body(json))
-        }
-        Err(e) => {
-            error!("{}", e);
-            todo!()
-        }
-    }
+    todo!()
+    //let result = data.get_updates_status(&path.index_uid);
+    //match result {
+        //Ok(metas) => {
+            //let json = serde_json::to_string(&metas).unwrap();
+            //Ok(HttpResponse::Ok().body(json))
+        //}
+        //Err(e) => {
+            //error!("{}", e);
+            //todo!()
+        //}
+    //}
 }
@@ -3,7 +3,7 @@ use log::error;
 
 use crate::Data;
 use crate::error::ResponseError;
-use crate::updates::Settings;
+use crate::index_controller::Settings;
 use crate::helpers::Authentication;
 
 #[macro_export]
@@ -15,19 +15,19 @@ macro_rules! make_setting_route {
         use crate::data;
         use crate::error::ResponseError;
         use crate::helpers::Authentication;
-        use crate::updates::Settings;
+        use crate::index_controller::Settings;
 
         #[actix_web::delete($route, wrap = "Authentication::Private")]
         pub async fn delete(
             data: web::Data<data::Data>,
            index_uid: web::Path<String>,
         ) -> Result<HttpResponse, ResponseError> {
-            use crate::updates::Settings;
+            use crate::index_controller::Settings;
             let settings = Settings {
                 $attr: Some(None),
                 ..Default::default()
             };
-            match data.update_settings(index_uid.as_ref(), settings).await {
+            match data.update_settings(index_uid.into_inner(), settings).await {
                 Ok(update_status) => {
                     let json = serde_json::to_string(&update_status).unwrap();
                     Ok(HttpResponse::Ok().body(json))
@@ -50,7 +50,7 @@ macro_rules! make_setting_route {
                 ..Default::default()
             };
 
-            match data.update_settings(index_uid.as_ref(), settings).await {
+            match data.update_settings(index_uid.into_inner(), settings).await {
                 Ok(update_status) => {
                     let json = serde_json::to_string(&update_status).unwrap();
                     Ok(HttpResponse::Ok().body(json))
@@ -141,7 +141,7 @@ async fn update_all(
     index_uid: web::Path<String>,
     body: web::Json<Settings>,
 ) -> Result<HttpResponse, ResponseError> {
-    match data.update_settings(index_uid.as_ref(), body.into_inner()).await {
+    match data.update_settings(index_uid.into_inner(), body.into_inner()).await {
         Ok(update_result) => {
             let json = serde_json::to_string(&update_result).unwrap();
             Ok(HttpResponse::Ok().body(json))
@@ -176,7 +176,7 @@ async fn delete_all(
     index_uid: web::Path<String>,
 ) -> Result<HttpResponse, ResponseError> {
     let settings = Settings::cleared();
-    match data.update_settings(index_uid.as_ref(), settings).await {
+    match data.update_settings(index_uid.into_inner(), settings).await {
         Ok(update_result) => {
             let json = serde_json::to_string(&update_result).unwrap();
             Ok(HttpResponse::Ok().body(json))
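The repeated `as_ref()` to `into_inner()` change here (and `path.into_inner().index_uid` in the documents route earlier) is forced by the new bound `S: AsRef<str> + Send + Sync + 'static` on `update_settings`: the uid is moved into a `spawn_blocking` closure, so a `&str` borrowed from the `web::Path` extractor can no longer satisfy `'static`; the handlers now hand over the owned `String`. In miniature:

fn takes_static<S: AsRef<str> + Send + Sync + 'static>(_uid: S) {}

fn main() {
    let path_param = String::from("movies"); // stands in for web::Path<String>
    // takes_static(path_param.as_str());    // would not compile: the &str borrows a local
    takes_static(path_param);                // moving the owned String satisfies 'static
}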
@@ -1,10 +1,10 @@
 mod settings;
+mod update_store;
 
 pub use settings::{Settings, Facets};
 
 use std::io;
 use std::sync::Arc;
-use std::ops::Deref;
 use std::fs::create_dir_all;
 use std::collections::HashMap;
@@ -55,16 +55,6 @@ pub struct UpdateQueue {
     inner: Arc<UpdateStore<UpdateMeta, UpdateResult, String>>,
 }
 
-impl crate::index_controller::UpdateStore for UpdateQueue {}
-
-impl Deref for UpdateQueue {
-    type Target = Arc<UpdateStore<UpdateMeta, UpdateResult, String>>;
-
-    fn deref(&self) -> &Self::Target {
-        &self.inner
-    }
-}
-
 #[derive(Debug, Clone, StructOpt)]
 pub struct IndexerOpts {
     /// The amount of documents to skip before printing
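The new file below is essentially the previous `UpdateQueue` internals promoted to their own module: a persistent, LMDB-backed queue with pending/processed/aborted/failed databases, a single consumer thread, and a bounded(1) crossbeam channel used as a wake-up signal, so any number of `register_update` calls collapse into at most one pending notification. A distilled sketch of that wake-up pattern:

use crossbeam_channel::{bounded, TrySendError};

fn main() {
    // Capacity 1: the channel coalesces bursts of notifications.
    let (tx, rx) = bounded::<()>(1);

    // Producer side (register_update): a full channel is fine, it just
    // means the worker is already scheduled to wake up.
    for _ in 0..100 {
        if let Err(e) = tx.try_send(()) {
            assert!(matches!(e, TrySendError::Full(())), "worker hung up");
        }
    }

    // Consumer side: one wake-up drains all pending work.
    drop(tx); // close the channel so the demo loop terminates
    for () in rx {
        // drain the queue until empty (the process_pending_update loop)
    }
}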
|
581
src/updates/update_store.rs
Normal file
581
src/updates/update_store.rs
Normal file
@ -0,0 +1,581 @@
|
|||||||
|
use std::path::Path;
|
||||||
|
use std::sync::{Arc, RwLock};
|
||||||
|
|
||||||
|
use crossbeam_channel::Sender;
|
||||||
|
use heed::types::{OwnedType, DecodeIgnore, SerdeJson, ByteSlice};
|
||||||
|
use heed::{EnvOpenOptions, Env, Database};
|
||||||
|
use serde::{Serialize, Deserialize};
|
||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
|
||||||
|
type BEU64 = heed::zerocopy::U64<heed::byteorder::BE>;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct UpdateStore<M, N, E> {
|
||||||
|
env: Env,
|
||||||
|
pending_meta: Database<OwnedType<BEU64>, SerdeJson<Pending<M>>>,
|
||||||
|
pending: Database<OwnedType<BEU64>, ByteSlice>,
|
||||||
|
processed_meta: Database<OwnedType<BEU64>, SerdeJson<Processed<M, N>>>,
|
||||||
|
failed_meta: Database<OwnedType<BEU64>, SerdeJson<Failed<M, E>>>,
|
||||||
|
aborted_meta: Database<OwnedType<BEU64>, SerdeJson<Aborted<M>>>,
|
||||||
|
processing: Arc<RwLock<Option<Processing<M>>>>,
|
||||||
|
notification_sender: Sender<()>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub trait UpdateHandler<M, N, E> {
|
||||||
|
fn handle_update(&mut self, update_id: u64, meta: Processing<M>, content: &[u8]) -> Result<Processed<M, N>, Failed<M, E>>;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<M, N, F, E> UpdateHandler<M, N, E> for F
|
||||||
|
where F: FnMut(u64, Processing<M>, &[u8]) -> Result<Processed<M, N>, Failed<M, E>> + Send + 'static {
|
||||||
|
fn handle_update(&mut self, update_id: u64, meta: Processing<M>, content: &[u8]) -> Result<Processed<M, N>, Failed<M, E>> {
|
||||||
|
self(update_id, meta, content)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<M: 'static, N: 'static, E: 'static> UpdateStore<M, N, E> {
|
||||||
|
pub fn open<P, U>(
|
||||||
|
size: Option<usize>,
|
||||||
|
path: P,
|
||||||
|
mut update_handler: U,
|
||||||
|
) -> heed::Result<Arc<UpdateStore<M, N, E>>>
|
||||||
|
where
|
||||||
|
P: AsRef<Path>,
|
||||||
|
U: UpdateHandler<M, N, E> + Send + 'static,
|
||||||
|
M: for<'a> Deserialize<'a> + Serialize + Send + Sync + Clone,
|
||||||
|
N: Serialize,
|
||||||
|
E: Serialize,
|
||||||
|
{
|
||||||
|
let mut options = EnvOpenOptions::new();
|
||||||
|
if let Some(size) = size {
|
||||||
|
options.map_size(size);
|
||||||
|
}
|
||||||
|
options.max_dbs(5);
|
||||||
|
|
||||||
|
let env = options.open(path)?;
|
||||||
|
let pending_meta = env.create_database(Some("pending-meta"))?;
|
||||||
|
let pending = env.create_database(Some("pending"))?;
|
||||||
|
let processed_meta = env.create_database(Some("processed-meta"))?;
|
||||||
|
let aborted_meta = env.create_database(Some("aborted-meta"))?;
|
||||||
|
let failed_meta = env.create_database(Some("failed-meta"))?;
|
||||||
|
let processing = Arc::new(RwLock::new(None));
|
||||||
|
|
||||||
|
let (notification_sender, notification_receiver) = crossbeam_channel::bounded(1);
|
||||||
|
// Send a first notification to trigger the process.
|
||||||
|
let _ = notification_sender.send(());
|
||||||
|
|
||||||
|
let update_store = Arc::new(UpdateStore {
|
||||||
|
env,
|
||||||
|
pending,
|
||||||
|
pending_meta,
|
||||||
|
processed_meta,
|
||||||
|
aborted_meta,
|
||||||
|
notification_sender,
|
||||||
|
failed_meta,
|
||||||
|
processing,
|
||||||
|
});
|
||||||
|
|
||||||
|
let update_store_cloned = update_store.clone();
|
||||||
|
std::thread::spawn(move || {
|
||||||
|
// Block and wait for something to process.
|
||||||
|
for () in notification_receiver {
|
||||||
|
loop {
|
||||||
|
match update_store_cloned.process_pending_update(&mut update_handler) {
|
||||||
|
Ok(Some(_)) => (),
|
||||||
|
Ok(None) => break,
|
||||||
|
Err(e) => eprintln!("error while processing update: {}", e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(update_store)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the new biggest id to use to store the new update.
|
||||||
|
fn new_update_id(&self, txn: &heed::RoTxn) -> heed::Result<u64> {
|
||||||
|
let last_pending = self.pending_meta
|
||||||
|
.remap_data_type::<DecodeIgnore>()
|
||||||
|
.last(txn)?
|
||||||
|
.map(|(k, _)| k.get());
|
||||||
|
|
||||||
|
let last_processed = self.processed_meta
|
||||||
|
.remap_data_type::<DecodeIgnore>()
|
||||||
|
.last(txn)?
|
||||||
|
.map(|(k, _)| k.get());
|
||||||
|
|
||||||
|
let last_aborted = self.aborted_meta
|
||||||
|
.remap_data_type::<DecodeIgnore>()
|
||||||
|
.last(txn)?
|
||||||
|
.map(|(k, _)| k.get());
|
||||||
|
|
||||||
|
let last_update_id = [last_pending, last_processed, last_aborted]
|
||||||
|
.iter()
|
||||||
|
.copied()
|
||||||
|
.flatten()
|
||||||
|
.max();
|
||||||
|
|
||||||
|
match last_update_id {
|
||||||
|
Some(last_id) => Ok(last_id + 1),
|
||||||
|
None => Ok(0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Registers the update content in the pending store and the meta
|
||||||
|
/// into the pending-meta store. Returns the new unique update id.
|
||||||
|
pub fn register_update(&self, meta: M, content: &[u8]) -> heed::Result<Pending<M>>
|
||||||
|
where M: Serialize,
|
||||||
|
{
|
||||||
|
let mut wtxn = self.env.write_txn()?;
|
||||||
|
|
||||||
|
// We ask the update store to give us a new update id, this is safe,
|
||||||
|
// no other update can have the same id because we use a write txn before
|
||||||
|
// asking for the id and registering it so other update registering
|
||||||
|
// will be forced to wait for a new write txn.
|
||||||
|
let update_id = self.new_update_id(&wtxn)?;
|
||||||
|
let update_key = BEU64::new(update_id);
|
||||||
|
|
||||||
|
let meta = Pending::new(meta, update_id);
|
||||||
|
self.pending_meta.put(&mut wtxn, &update_key, &meta)?;
|
||||||
|
self.pending.put(&mut wtxn, &update_key, content)?;
|
||||||
|
|
||||||
|
wtxn.commit()?;
|
||||||
|
|
||||||
|
if let Err(e) = self.notification_sender.try_send(()) {
|
||||||
|
assert!(!e.is_disconnected(), "update notification channel is disconnected");
|
||||||
|
}
|
||||||
|
Ok(meta)
|
||||||
|
}
|
||||||
|
    /// Executes the user provided function on the next pending update (the one with the lowest id).
    /// This is asynchronous as it lets the user process the update with a read-only txn and
    /// only writes the resulting meta to the processed-meta store *after* it has been processed.
    fn process_pending_update<U>(&self, handler: &mut U) -> heed::Result<Option<()>>
    where
        U: UpdateHandler<M, N, E>,
        M: for<'a> Deserialize<'a> + Serialize + Clone,
        N: Serialize,
        E: Serialize,
    {
        // Create a read transaction to be able to retrieve the pending updates in order.
        let rtxn = self.env.read_txn()?;
        let first_meta = self.pending_meta.first(&rtxn)?;

        // If there is a pending update we process it, holding only a read
        // transaction while doing so, not a write one.
        match first_meta {
            Some((first_id, pending)) => {
                let first_content = self.pending
                    .get(&rtxn, &first_id)?
                    .expect("associated update content");

                // We change the state of the update from pending to processing
                // before passing it to the update handler. The processing store
                // is not persistent, which makes it possible to recover from a
                // failure.
                let processing = pending.processing();
                self.processing
                    .write()
                    .unwrap()
                    .replace(processing.clone());
                // Process the pending update using the provided user function.
                let result = handler.handle_update(first_id.get(), processing, first_content);
                drop(rtxn);

                // Once the pending update has been processed we remove its
                // content from the pending and processing stores, write the
                // resulting meta to the processed-meta or failed-meta store,
                // and commit.
                let mut wtxn = self.env.write_txn()?;
                self.processing
                    .write()
                    .unwrap()
                    .take();
                self.pending_meta.delete(&mut wtxn, &first_id)?;
                self.pending.delete(&mut wtxn, &first_id)?;
                match result {
                    Ok(processed) => self.processed_meta.put(&mut wtxn, &first_id, &processed)?,
                    Err(failed) => self.failed_meta.put(&mut wtxn, &first_id, &failed)?,
                }
                wtxn.commit()?;

                Ok(Some(()))
            },
            None => Ok(None),
        }
    }

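    // A hedged sketch (added, not part of the original code): judging from
    // the call above, an `UpdateHandler` receives the update id, the
    // `Processing` meta and the raw content, and returns either a `Processed`
    // or a `Failed` meta. The closure-based tests at the bottom of this file
    // follow this shape; `apply` here is hypothetical:
    //
    //     |id, meta: Processing<String>, content: &[u8]| {
    //         match apply(content) {
    //             Ok(n) => Ok(meta.process(n)),
    //             Err(e) => Err(meta.fail(e)),
    //         }
    //     }
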
    /// The id and metadata of the update that is currently being processed,
    /// `None` if no update is being processed. This is the first pending
    /// update, as its entry is only removed once processing completes.
    pub fn processing_update(&self) -> heed::Result<Option<(u64, Pending<M>)>>
    where M: for<'a> Deserialize<'a>,
    {
        let rtxn = self.env.read_txn()?;
        match self.pending_meta.first(&rtxn)? {
            Some((key, meta)) => Ok(Some((key.get(), meta))),
            None => Ok(None),
        }
    }

    /// Executes the user defined function with the currently processing
    /// update (if any) and the meta-store iterators: the *processed* metas
    /// first, then the *aborted*, *pending* and *failed* ones.
    pub fn iter_metas<F, T>(&self, mut f: F) -> heed::Result<T>
    where
        M: for<'a> Deserialize<'a> + Clone,
        N: for<'a> Deserialize<'a>,
        F: for<'a> FnMut(
            Option<Processing<M>>,
            heed::RoIter<'a, OwnedType<BEU64>, SerdeJson<Processed<M, N>>>,
            heed::RoIter<'a, OwnedType<BEU64>, SerdeJson<Aborted<M>>>,
            heed::RoIter<'a, OwnedType<BEU64>, SerdeJson<Pending<M>>>,
            heed::RoIter<'a, OwnedType<BEU64>, SerdeJson<Failed<M, E>>>,
        ) -> heed::Result<T>,
    {
        let rtxn = self.env.read_txn()?;

        // We get the processed, aborted, pending and failed meta iterators,
        // plus the currently processing update, if any.
        let processed_iter = self.processed_meta.iter(&rtxn)?;
        let aborted_iter = self.aborted_meta.iter(&rtxn)?;
        let pending_iter = self.pending_meta.iter(&rtxn)?;
        let processing = self.processing.read().unwrap().clone();
        let failed_iter = self.failed_meta.iter(&rtxn)?;

        // We execute the user defined function with these iterators.
        (f)(processing, processed_iter, aborted_iter, pending_iter, failed_iter)
    }

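    // A usage sketch (added for illustration): counting the enqueued updates
    // through `iter_metas`, ignoring the other iterators; `store` is
    // hypothetical:
    //
    //     let enqueued = store.iter_metas(|_processing, _processed, _aborted, pending, _failed| {
    //         Ok(pending.count())
    //     })?;
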
    /// Returns the update associated meta or `None` if the update doesn't exist.
    pub fn meta(&self, update_id: u64) -> heed::Result<Option<UpdateStatus<M, N, E>>>
    where
        M: for<'a> Deserialize<'a> + Clone,
        N: for<'a> Deserialize<'a>,
        E: for<'a> Deserialize<'a>,
    {
        let rtxn = self.env.read_txn()?;
        let key = BEU64::new(update_id);

        if let Some(ref meta) = *self.processing.read().unwrap() {
            if meta.id() == update_id {
                return Ok(Some(UpdateStatus::Processing(meta.clone())));
            }
        }

        if let Some(meta) = self.pending_meta.get(&rtxn, &key)? {
            return Ok(Some(UpdateStatus::Pending(meta)));
        }

        if let Some(meta) = self.processed_meta.get(&rtxn, &key)? {
            return Ok(Some(UpdateStatus::Processed(meta)));
        }

        if let Some(meta) = self.aborted_meta.get(&rtxn, &key)? {
            return Ok(Some(UpdateStatus::Aborted(meta)));
        }

        if let Some(meta) = self.failed_meta.get(&rtxn, &key)? {
            return Ok(Some(UpdateStatus::Failed(meta)));
        }

        Ok(None)
    }

    /// Aborts an update: the content of an aborted update is deleted and
    /// its meta is moved into the aborted-updates store.
    ///
    /// Trying to abort an update that is currently being processed, that has
    /// already been processed, or that doesn't actually exist, returns `None`.
    pub fn abort_update(&self, update_id: u64) -> heed::Result<Option<Aborted<M>>>
    where M: Serialize + for<'a> Deserialize<'a>,
    {
        let mut wtxn = self.env.write_txn()?;
        let key = BEU64::new(update_id);

        // We cannot abort an update that is currently being processed,
        // i.e. the first entry of the pending store.
        if self.pending_meta.first(&wtxn)?.map(|(key, _)| key.get()) == Some(update_id) {
            return Ok(None);
        }

        let pending = match self.pending_meta.get(&wtxn, &key)? {
            Some(meta) => meta,
            None => return Ok(None),
        };

        let aborted = pending.abort();

        self.aborted_meta.put(&mut wtxn, &key, &aborted)?;
        self.pending_meta.delete(&mut wtxn, &key)?;
        self.pending.delete(&mut wtxn, &key)?;

        wtxn.commit()?;

        Ok(Some(aborted))
    }

    /// Aborts all the pending updates except the one currently being processed.
    /// Returns the ids and metas of the updates that were successfully aborted.
    pub fn abort_pendings(&self) -> heed::Result<Vec<(u64, Aborted<M>)>>
    where M: Serialize + for<'a> Deserialize<'a>,
    {
        let mut wtxn = self.env.write_txn()?;
        let mut aborted_updates = Vec::new();

        // We skip the first pending update as it is currently being processed.
        for result in self.pending_meta.iter(&wtxn)?.skip(1) {
            let (key, pending) = result?;
            let id = key.get();
            aborted_updates.push((id, pending.abort()));
        }

        for (id, aborted) in &aborted_updates {
            let key = BEU64::new(*id);
            self.aborted_meta.put(&mut wtxn, &key, aborted)?;
            self.pending_meta.delete(&mut wtxn, &key)?;
            self.pending.delete(&mut wtxn, &key)?;
        }

        wtxn.commit()?;

        Ok(aborted_updates)
    }
}

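// Lifecycle summary (added for clarity, derived from the methods on the
// types below): an update moves through these states, one struct per state:
//
//     Pending --processing()--> Processing --process()--> Processed
//     Pending --abort()-------> Aborted     Processing --fail()--> Failed
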
#[derive(Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Clone)]
pub struct Pending<M> {
    update_id: u64,
    meta: M,
    enqueued_at: DateTime<Utc>,
}

impl<M> Pending<M> {
    fn new(meta: M, update_id: u64) -> Self {
        Self {
            enqueued_at: Utc::now(),
            meta,
            update_id,
        }
    }

    pub fn processing(self) -> Processing<M> {
        Processing {
            from: self,
            started_processing_at: Utc::now(),
        }
    }

    pub fn abort(self) -> Aborted<M> {
        Aborted {
            from: self,
            aborted_at: Utc::now(),
        }
    }

    pub fn meta(&self) -> &M {
        &self.meta
    }

    pub fn id(&self) -> u64 {
        self.update_id
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Clone)]
pub struct Processed<M, N> {
    success: N,
    processed_at: DateTime<Utc>,
    #[serde(flatten)]
    from: Processing<M>,
}

impl<M, N> Processed<M, N> {
    fn id(&self) -> u64 {
        self.from.id()
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Clone)]
pub struct Processing<M> {
    #[serde(flatten)]
    from: Pending<M>,
    started_processing_at: DateTime<Utc>,
}

impl<M> Processing<M> {
    pub fn id(&self) -> u64 {
        self.from.id()
    }

    pub fn meta(&self) -> &M {
        self.from.meta()
    }

    pub fn process<N>(self, meta: N) -> Processed<M, N> {
        Processed {
            success: meta,
            from: self,
            processed_at: Utc::now(),
        }
    }

    pub fn fail<E>(self, error: E) -> Failed<M, E> {
        Failed {
            from: self,
            error,
            failed_at: Utc::now(),
        }
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Clone)]
pub struct Aborted<M> {
    #[serde(flatten)]
    from: Pending<M>,
    aborted_at: DateTime<Utc>,
}

impl<M> Aborted<M> {
    fn id(&self) -> u64 {
        self.from.id()
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Clone)]
pub struct Failed<M, E> {
    #[serde(flatten)]
    from: Processing<M>,
    error: E,
    failed_at: DateTime<Utc>,
}

impl<M, E> Failed<M, E> {
    fn id(&self) -> u64 {
        self.from.id()
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Serialize)]
#[serde(tag = "status")]
pub enum UpdateStatus<M, N, E> {
    Processing(Processing<M>),
    Pending(Pending<M>),
    Processed(Processed<M, N>),
    Aborted(Aborted<M>),
    Failed(Failed<M, E>),
}

impl<M, N, E> UpdateStatus<M, N, E> {
    pub fn id(&self) -> u64 {
        match self {
            UpdateStatus::Processing(u) => u.id(),
            UpdateStatus::Pending(u) => u.id(),
            UpdateStatus::Processed(u) => u.id(),
            UpdateStatus::Aborted(u) => u.id(),
            UpdateStatus::Failed(u) => u.id(),
        }
    }
}

impl<M, N, E> From<Pending<M>> for UpdateStatus<M, N, E> {
    fn from(other: Pending<M>) -> Self {
        Self::Pending(other)
    }
}

impl<M, N, E> From<Aborted<M>> for UpdateStatus<M, N, E> {
    fn from(other: Aborted<M>) -> Self {
        Self::Aborted(other)
    }
}

impl<M, N, E> From<Processed<M, N>> for UpdateStatus<M, N, E> {
    fn from(other: Processed<M, N>) -> Self {
        Self::Processed(other)
    }
}

impl<M, N, E> From<Processing<M>> for UpdateStatus<M, N, E> {
    fn from(other: Processing<M>) -> Self {
        Self::Processing(other)
    }
}

impl<M, N, E> From<Failed<M, E>> for UpdateStatus<M, N, E> {
    fn from(other: Failed<M, E>) -> Self {
        Self::Failed(other)
    }
}

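// These `From` impls let callers lift any state into an `UpdateStatus` with
// `.into()` (illustrative, with hypothetical type parameters):
//
//     let status: UpdateStatus<String, (), ()> = pending.into();
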
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    use std::time::{Duration, Instant};

    #[test]
    fn simple() {
        let dir = tempfile::tempdir().unwrap();
        let update_store = UpdateStore::open(None, dir, |_id, meta: Processing<String>, _content: &_| -> Result<_, Failed<_, ()>> {
            let new_meta = meta.meta().to_string() + " processed";
            let processed = meta.process(new_meta);
            Ok(processed)
        }).unwrap();

        let meta = String::from("kiki");
        let update = update_store.register_update(meta, &[]).unwrap();
        thread::sleep(Duration::from_millis(100));
        let meta = update_store.meta(update.id()).unwrap().unwrap();
        if let UpdateStatus::Processed(Processed { success, .. }) = meta {
            assert_eq!(success, "kiki processed");
        } else {
            panic!()
        }
    }

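    // An additional sketch (added; not part of the original commit): it
    // exercises the failure path through `Processing::fail`, assuming `open`
    // accepts the same kind of closure as in `simple` above.
    #[test]
    fn failing_update() {
        let dir = tempfile::tempdir().unwrap();
        let update_store = UpdateStore::open(None, dir, |_id, meta: Processing<String>, _content: &_| -> Result<Processed<String, String>, Failed<String, ()>> {
            // Reject every update so its meta lands in the failed-meta store.
            Err(meta.fail(()))
        }).unwrap();

        let update = update_store.register_update(String::from("kiki"), &[]).unwrap();
        thread::sleep(Duration::from_millis(100));
        let meta = update_store.meta(update.id()).unwrap().unwrap();
        assert!(matches!(meta, UpdateStatus::Failed(_)));
    }
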
    #[test]
    #[ignore]
    fn long_running_update() {
        let dir = tempfile::tempdir().unwrap();
        let update_store = UpdateStore::open(None, dir, |_id, meta: Processing<String>, _content: &_| -> Result<_, Failed<_, ()>> {
            thread::sleep(Duration::from_millis(400));
            let new_meta = meta.meta().to_string() + " processed";
            let processed = meta.process(new_meta);
            Ok(processed)
        }).unwrap();

        let before_register = Instant::now();

        let meta = String::from("kiki");
        let update_kiki = update_store.register_update(meta, &[]).unwrap();
        assert!(before_register.elapsed() < Duration::from_millis(200));

        let meta = String::from("coco");
        let update_coco = update_store.register_update(meta, &[]).unwrap();
        assert!(before_register.elapsed() < Duration::from_millis(200));

        let meta = String::from("cucu");
        let update_cucu = update_store.register_update(meta, &[]).unwrap();
        assert!(before_register.elapsed() < Duration::from_millis(200));

        thread::sleep(Duration::from_millis(400 * 3 + 100));

        let meta = update_store.meta(update_kiki.id()).unwrap().unwrap();
        if let UpdateStatus::Processed(Processed { success, .. }) = meta {
            assert_eq!(success, "kiki processed");
        } else {
            panic!()
        }

        let meta = update_store.meta(update_coco.id()).unwrap().unwrap();
        if let UpdateStatus::Processed(Processed { success, .. }) = meta {
            assert_eq!(success, "coco processed");
        } else {
            panic!()
        }

        let meta = update_store.meta(update_cucu.id()).unwrap().unwrap();
        if let UpdateStatus::Processed(Processed { success, .. }) = meta {
            assert_eq!(success, "cucu processed");
        } else {
            panic!()
        }
    }
}