2019-10-04 22:49:17 +08:00
|
|
|
use std::collections::HashMap;
|
|
|
|
use std::path::Path;
|
|
|
|
use std::sync::{Arc, RwLock};
|
2019-10-07 22:16:04 +08:00
|
|
|
use std::{fs, thread};
|
|
|
|
|
|
|
|
use crossbeam_channel::Receiver;
|
2019-10-08 20:53:35 +08:00
|
|
|
use log::{debug, error};
|
2019-10-07 22:16:04 +08:00
|
|
|
|
|
|
|
use crate::{store, update, Index, MResult};
|
2019-10-04 22:49:17 +08:00
|
|
|
|
|
|
|
/// Handle on the on-disk key-value database: the shared rkv/LMDB
/// environment, the two top-level stores, and every currently opened index.
pub struct Database {
    // Shared rkv (LMDB) environment; both the `Database` methods and the
    // per-index update-awaiter threads synchronize through this lock.
    pub rkv: Arc<RwLock<rkv::Rkv>>,
    // Single store opened under the name "main".
    main_store: rkv::SingleStore,
    // Single store opened under the name "indexes"; its keys are the names
    // of every index ever created (values are unused empty blobs).
    indexes_store: rkv::SingleStore,
    // In-memory map of opened indexes, each paired with the join handle of
    // the dedicated `update_awaiter` thread spawned for it.
    indexes: RwLock<HashMap<String, (Index, thread::JoinHandle<()>)>>,
}
|
|
|
|
|
|
|
|
fn update_awaiter(receiver: Receiver<()>, rkv: Arc<RwLock<rkv::Rkv>>, index: Index) {
|
|
|
|
for () in receiver {
|
|
|
|
// consume all updates in order (oldest first)
|
|
|
|
loop {
|
|
|
|
let rkv = match rkv.read() {
|
|
|
|
Ok(rkv) => rkv,
|
|
|
|
Err(e) => { error!("rkv RwLock read failed: {}", e); break }
|
|
|
|
};
|
2019-10-08 20:53:35 +08:00
|
|
|
|
2019-10-07 22:16:04 +08:00
|
|
|
let mut writer = match rkv.write() {
|
|
|
|
Ok(writer) => writer,
|
|
|
|
Err(e) => { error!("LMDB writer transaction begin failed: {}", e); break }
|
|
|
|
};
|
|
|
|
|
|
|
|
match update::update_task(&mut writer, index.clone(), None as Option::<fn(_)>) {
|
|
|
|
Ok(true) => if let Err(e) = writer.commit() { error!("update transaction failed: {}", e) },
|
|
|
|
// no more updates to handle for now
|
2019-10-08 20:53:35 +08:00
|
|
|
Ok(false) => { debug!("no more updates"); writer.abort(); break },
|
2019-10-07 22:16:04 +08:00
|
|
|
Err(e) => { error!("update task failed: {}", e); writer.abort() },
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-10-04 22:49:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
impl Database {
    /// Opens the database stored at `path`, creating the directory and the
    /// LMDB environment when missing, then re-opens every index recorded in
    /// the "indexes" store and spawns one `update_awaiter` thread per index.
    pub fn open_or_create(path: impl AsRef<Path>) -> MResult<Database> {
        // Write-lock the shared rkv Manager to get or create the
        // environment for this path.
        let manager = rkv::Manager::singleton();
        let mut rkv_write = manager.write().unwrap();

        fs::create_dir_all(path.as_ref())?;

        let rkv = rkv_write
            .get_or_create(path.as_ref(), |path| {
                let mut builder = rkv::Rkv::environment_builder();
                builder.set_max_dbs(3000).set_map_size(10 * 1024 * 1024 * 1024); // 10GB
                rkv::Rkv::from_env(path, builder)
            })?;

        // Release the Manager lock as early as possible.
        drop(rkv_write);

        let rkv_read = rkv.read().unwrap();
        let create_options = rkv::store::Options::create();
        let main_store = rkv_read.open_single("main", create_options)?;
        let indexes_store = rkv_read.open_single("indexes", create_options)?;

        // list all indexes that needs to be opened
        let mut must_open = Vec::new();
        let reader = rkv_read.read()?;
        for result in indexes_store.iter_start(&reader)? {
            let (key, _) = result?;
            // keys are index names; non-UTF-8 keys are silently skipped
            if let Ok(index_name) = std::str::from_utf8(key) {
                must_open.push(index_name.to_owned());
            }
        }

        // End the read transaction before opening the indexes.
        drop(reader);

        // open the previously aggregated indexes
        let mut indexes = HashMap::new();
        for index_name in must_open {

            // Bounded wake-up channel between the index and its awaiter.
            let (sender, receiver) = crossbeam_channel::bounded(100);
            let index = store::open(&rkv_read, &index_name, sender.clone())?;
            let rkv_clone = rkv.clone();
            let index_clone = index.clone();
            let handle = thread::spawn(move || update_awaiter(receiver, rkv_clone, index_clone));

            // send an update notification to make sure that
            // possible previous boot updates are consumed
            sender.send(()).unwrap();

            let result = indexes.insert(index_name, (index, handle));
            assert!(result.is_none(), "The index should not have been already open");
        }

        drop(rkv_read);

        Ok(Database { rkv, main_store, indexes_store, indexes: RwLock::new(indexes) })
    }

    /// Returns the index named `name`, creating it — and persisting its
    /// name in the "indexes" store — when it does not exist yet.
    pub fn open_index(&self, name: impl Into<String>) -> MResult<Index> {
        let indexes_lock = self.indexes.read().unwrap();
        let name = name.into();

        match indexes_lock.get(&name) {
            // Fast path: the index is already open, hand out a clone.
            Some((index, _)) => Ok(index.clone()),
            None => {
                // NOTE(review): another thread may create the same index
                // between this drop and the write lock taken below; the
                // entry() call keeps the map insertion idempotent, but the
                // losing thread's store::create side effects are not undone
                // — confirm this is acceptable.
                drop(indexes_lock),;

                let rkv_lock = self.rkv.read().unwrap();
                let (sender, receiver) = crossbeam_channel::bounded(100);
                let index = store::create(&rkv_lock, &name, sender)?;

                // Durably record the index name; the value is an unused
                // empty blob — only the key matters.
                let mut writer = rkv_lock.write()?;
                let value = rkv::Value::Blob(&[]);
                self.indexes_store.put(&mut writer, &name, &value)?;

                {
                    let mut indexes_write = self.indexes.write().unwrap();
                    // Spawn the awaiter thread only if we won the race to
                    // register this index in the map.
                    indexes_write.entry(name).or_insert_with(|| {
                        let rkv_clone = self.rkv.clone();
                        let index_clone = index.clone();
                        let handle = thread::spawn(move || update_awaiter(receiver, rkv_clone, index_clone));
                        (index.clone(), handle)
                    });
                }

                // Commit the "indexes" store write after the in-memory
                // registration above.
                writer.commit()?;

                Ok(index)
            },
        }
    }

    /// Names of every currently opened index.
    pub fn indexes_names(&self) -> MResult<Vec<String>> {
        let indexes = self.indexes.read().unwrap();
        Ok(indexes.keys().cloned().collect())
    }

    /// Handle on the "main" single store, returned by value.
    pub fn main_store(&self) -> rkv::SingleStore {
        self.main_store
    }
}
|