mod v1;
mod v2;

use std::{collections::HashSet, fs::File, path::{Path, PathBuf}, sync::Arc};

use anyhow::bail;
use chrono::Utc;
use heed::EnvOpenOptions;
use log::{error, info};
use milli::update::{IndexDocumentsMethod, UpdateBuilder, UpdateFormat};
use serde::{Deserialize, Serialize};
use tempfile::TempDir;
use tokio::task::spawn_blocking;
use tokio::fs;
use uuid::Uuid;

use super::{IndexController, IndexMetadata, update_actor::UpdateActorHandle, uuid_resolver::UuidResolverHandle};
use crate::index::Index;
use crate::index_controller::uuid_resolver;
use crate::helpers::compression;

#[derive(Debug, Serialize, Deserialize, Copy, Clone)]
enum DumpVersion {
    V1,
    V2,
}

impl DumpVersion {
    const CURRENT: Self = Self::V2;

    /// Select the import function matching the `DumpVersion` of the dump metadata.
    pub fn import_index(self, size: usize, dump_path: &Path, index_path: &Path) -> anyhow::Result<()> {
        match self {
            Self::V1 => v1::import_index(size, dump_path, index_path),
            Self::V2 => v2::import_index(size, dump_path, index_path),
        }
    }
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    indexes: Vec<IndexMetadata>,
    db_version: String,
    dump_version: DumpVersion,
}
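
// For illustration, the `metadata.json` serialized from this struct looks roughly
// like the following (the index entry and version numbers are hypothetical):
//
// {
//   "indexes": [{ "uid": "movies", ... }],
//   "dbVersion": "0.21.0",
//   "dumpVersion": "V2"
// }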

impl Metadata {
    /// Create a `Metadata` with the current dump version of meilisearch.
    pub fn new(indexes: Vec<IndexMetadata>, db_version: String) -> Self {
        Metadata {
            indexes,
            db_version,
            dump_version: DumpVersion::CURRENT,
        }
    }

    /// Read a `Metadata` from the `metadata.json` file located in the provided `dir_path`.
    fn from_path(dir_path: &Path) -> anyhow::Result<Self> {
        let path = dir_path.join("metadata.json");
        let file = File::open(path)?;
        let reader = std::io::BufReader::new(file);
        let metadata = serde_json::from_reader(reader)?;

        Ok(metadata)
    }

    /// Write this `Metadata` to a `metadata.json` file in the provided `dir_path`.
    pub async fn to_path(&self, dir_path: &Path) -> anyhow::Result<()> {
        let path = dir_path.join("metadata.json");
        tokio::fs::write(path, serde_json::to_string(self)?).await?;

        Ok(())
    }
}

/// Generate a dump uid from the current date and time.
fn generate_uid() -> String {
    Utc::now().format("%Y%m%d-%H%M%S%3f").to_string()
}
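
// With the format above, a uid looks like "20210505-201156123" (hypothetical
// timestamp): 8 date digits, a dash, then hours, minutes, seconds and
// milliseconds, so uids sort lexicographically in creation order.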
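
/// Perform a dump: write the dump metadata and one directory per index into a
/// temporary directory, ask the update handle to dump every index into it,
/// compress it all into a `<uid>.dump` archive under `dump_path`, and return
/// the dump uid.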
pub async fn perform_dump(index_controller: &IndexController, dump_path: PathBuf) -> anyhow::Result<String> {
    info!("Performing dump.");

    let dump_dir = dump_path.clone();
    let uid = generate_uid();
    fs::create_dir_all(&dump_dir).await?;
    let temp_dump_dir = spawn_blocking(move || tempfile::tempdir_in(dump_dir)).await??;
    let temp_dump_path = temp_dump_dir.path().to_owned();

    let uuids = index_controller.uuid_resolver.list().await?;
    // maybe we could just keep the vec as-is
    let uuids: HashSet<(String, Uuid)> = uuids.into_iter().collect();

    if uuids.is_empty() {
        return Ok(uid);
    }

    let indexes = index_controller.list_indexes().await?;

    // create one directory per index
    for meta in indexes.iter() {
        tokio::fs::create_dir(temp_dump_path.join(&meta.uid)).await?;
    }

    let metadata = Metadata::new(indexes, env!("CARGO_PKG_VERSION").to_string());
    metadata.to_path(&temp_dump_path).await?;

    index_controller.update_handle.dump(uuids, temp_dump_path.clone()).await?;
    let dump_dir = dump_path.clone();
    let dump_path = dump_path.join(format!("{}.dump", uid));
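
    // compress into a named temporary file first and only then persist (rename) it
    // to `<uid>.dump`, so a partially written archive never appears under its final name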
    let dump_path = spawn_blocking(move || -> anyhow::Result<PathBuf> {
        let temp_dump_file = tempfile::NamedTempFile::new_in(dump_dir)?;
        let temp_dump_file_path = temp_dump_file.path().to_owned();
        compression::to_tar_gz(temp_dump_path, temp_dump_file_path)?;
        temp_dump_file.persist(&dump_path)?;
        Ok(dump_path)
    })
    .await??;

    info!("Created dump in {:?}.", dump_path);

    Ok(uid)
}

/*
/// Write `Settings` to a `settings.json` file in the provided `dir_path`.
fn settings_to_path(settings: &Settings, dir_path: &Path) -> anyhow::Result<()> {
    let path = dir_path.join("settings.json");
    let file = File::create(path)?;

    serde_json::to_writer(file, settings)?;

    Ok(())
}
*/
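
/// Load a dump: extract the archive at `dump_path` into a temporary directory,
/// then recreate every index it contains and import their content into the
/// database at `db_path`.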
pub fn load_dump(
    db_path: impl AsRef<Path>,
    dump_path: impl AsRef<Path>,
    size: usize,
) -> anyhow::Result<()> {
    info!("Importing dump from {}...", dump_path.as_ref().display());
    let db_path = db_path.as_ref();
    let dump_path = dump_path.as_ref();
    let uuid_resolver = uuid_resolver::HeedUuidStore::new(&db_path)?;

    // extract the dump in a temporary directory
    let tmp_dir = TempDir::new_in(db_path)?;
    let tmp_dir_path = tmp_dir.path();
    compression::from_tar_gz(dump_path, tmp_dir_path)?;

    // read the dump metadata
    let metadata = Metadata::from_path(&tmp_dir_path)?;

    // delete the indexes that share a uid with an index from the dump, and
    // register the dump indexes that do not exist yet
    let existing_index_uids = uuid_resolver.list()?;

    info!("Deleting indexes already present in the db and provided in the dump...");
    for idx in &metadata.indexes {
        if let Some((_, uuid)) = existing_index_uids.iter().find(|(s, _)| s == &idx.uid) {
            // the index is registered in the `uuid_resolver`, so it is supposed to
            // exist on the file system: delete it before re-importing
            let path = db_path.join(&format!("indexes/index-{}", uuid));
            info!("Deleting {}", path.display());
            use std::io::ErrorKind::*;
            match std::fs::remove_dir_all(path) {
                Ok(()) => (),
                // an index present in the metadata but missing from the fs can be
                // ignored, since the import below recreates it
                Err(e) if e.kind() == NotFound => (),
                Err(e) => bail!(e),
            }
        } else {
            // the index is not registered in the `uuid_resolver` yet: create it
            uuid_resolver.create_uuid(idx.uid.clone(), false)?;
        }
    }

    // import the content of each index
    for idx in metadata.indexes {
        let dump_path = tmp_dir_path.join(&idx.uid);
        // this cannot fail since we created all the missing uuids in the previous loop
        let uuid = uuid_resolver.get_uuid(idx.uid)?.unwrap();
        let index_path = db_path.join(&format!("indexes/index-{}", uuid));
        // let update_path = db_path.join(&format!("updates/updates-{}", uuid)); // TODO: add the update db

        info!("Importing dump from {} into {}...", dump_path.display(), index_path.display());
        metadata.dump_version.import_index(size, &dump_path, &index_path)?;
        info!("Dump importation from {} succeeded.", dump_path.display());
    }

    info!("Dump importation from {} succeeded.", dump_path.display());
    Ok(())
}
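
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal, illustrative check of the shape of the uids produced by
    // `generate_uid`; this test is an addition, not part of the original module.
    #[test]
    fn generate_uid_has_expected_shape() {
        let uid = generate_uid();
        // "%Y%m%d-%H%M%S%3f": 8 date digits, a dash, then 9 time digits
        // (hours, minutes, seconds, milliseconds)
        assert_eq!(uid.len(), 18);
        assert_eq!(uid.as_bytes()[8], b'-');
        assert!(uid[..8].chars().all(|c| c.is_ascii_digit()));
        assert!(uid[9..].chars().all(|c| c.is_ascii_digit()));
    }
}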