mirror of https://github.com/meilisearch/meilisearch.git, synced 2024-11-22 18:17:39 +08:00

Reintroduce the versioning functions

This commit is contained in:
parent 89e127e4f4
commit 4cafc63561

Cargo.lock (generated)
@@ -2337,6 +2337,7 @@ name = "meilisearch-types"
 version = "0.29.1"
 dependencies = [
  "actix-web",
+ "anyhow",
  "csv",
  "either",
  "enum-iterator",
@@ -33,7 +33,7 @@ use meilisearch_types::milli::update::{
 use meilisearch_types::milli::{self, BEU32};
 use meilisearch_types::settings::{apply_settings_to_builder, Settings, Unchecked};
 use meilisearch_types::tasks::{Details, Kind, KindWithContent, Status, Task};
-use meilisearch_types::Index;
+use meilisearch_types::{Index, VERSION_FILE_NAME};
 use roaring::RoaringBitmap;
 use time::OffsetDateTime;
 use uuid::Uuid;
@@ -559,9 +559,8 @@ impl IndexScheduler {

             // 1. Snapshot the version file.
             // TODO where can I find the path of this file and do we create it anyway?
-            // let dst = temp_snapshot_dir.path().join(VERSION_FILE_NAME);
-            // let src = self.base_path.join(VERSION_FILE_NAME);
-            // fs::copy(src, dst)?;
+            let dst = temp_snapshot_dir.path().join(VERSION_FILE_NAME);
+            fs::copy(&self.version_file_path, dst)?;

             // TODO what is a meta-env in the previous version of the scheduler?

@@ -601,7 +600,7 @@ impl IndexScheduler {
             // 3. Snapshot every indexes
             // TODO we are opening all of the indexes it can be too much we should unload all
             // of the indexes we are trying to open. It would be even better to only unload
-            // the one that were opened by us. Or maybe use a LRU in the index mapper.
+            // the ones that were opened by us. Or maybe use a LRU in the index mapper.
             for result in self.index_mapper.index_mapping.iter(&rtxn)? {
                 let (name, uuid) = result?;
                 let index = self.index_mapper.index(&rtxn, name)?;
@@ -618,7 +617,8 @@ impl IndexScheduler {
             // 4. Snapshot the auth LMDB env
             let dst = temp_snapshot_dir.path().join("auth").join("data.mdb");
             fs::create_dir_all(&dst)?;
-            let auth = milli::heed::EnvOpenOptions::new().open(&self.auth_path)?;
+            let src = self.auth_path.join("data.mdb");
+            let auth = milli::heed::EnvOpenOptions::new().open(src)?;
             auth.copy_to_path(dst, CompactionOption::Enabled)?;

             todo!("tar-gz and append .snapshot at the end of the file");
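The `todo!` above marks the packaging step that the commit leaves unfinished. A rough sketch of what it could look like, compressing the temporary snapshot directory into a `<name>.snapshot` tarball; the `pack_snapshot` helper and the use of the `tar` and `flate2` crates are assumptions, not part of this diff:

```rust
use std::fs::File;
use std::path::Path;

use flate2::write::GzEncoder;
use flate2::Compression;

/// Hypothetical helper: bundle the temporary snapshot directory into
/// `<snapshots_path>/<name>.snapshot` as a gzip-compressed tar archive.
fn pack_snapshot(temp_snapshot_dir: &Path, snapshots_path: &Path, name: &str) -> anyhow::Result<()> {
    let dst = snapshots_path.join(format!("{name}.snapshot"));
    let gz = GzEncoder::new(File::create(dst)?, Compression::default());
    let mut tar = tar::Builder::new(gz);
    // Archive the directory contents at the root of the tarball.
    tar.append_dir_all(".", temp_snapshot_dir)?;
    // Finish the tar stream, then the gzip stream.
    tar.into_inner()?.finish()?;
    Ok(())
}
```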
@@ -28,6 +28,7 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
         dumps_path: _,
         snapshots_path: _,
         auth_path: _,
+        version_file_path: _,
         test_breakpoint_sdr: _,
         planned_failures: _,
         run_loop_iteration: _,
@@ -248,6 +248,9 @@ pub struct IndexScheduler {
     /// The path to the folder containing the auth LMDB env.
     pub(crate) auth_path: PathBuf,

+    /// The path to the version file of Meilisearch.
+    pub(crate) version_file_path: PathBuf,
+
     // ================= test
     // The next entry is dedicated to the tests.
     /// Provide a way to set a breakpoint in multiple part of the scheduler.
@@ -286,6 +289,7 @@ impl IndexScheduler {
             snapshots_path: self.snapshots_path.clone(),
             dumps_path: self.dumps_path.clone(),
             auth_path: self.auth_path.clone(),
+            version_file_path: self.version_file_path.clone(),
             #[cfg(test)]
             test_breakpoint_sdr: self.test_breakpoint_sdr.clone(),
             #[cfg(test)]
@@ -314,6 +318,7 @@ impl IndexScheduler {
     /// Create an index scheduler and start its run loop.
     ///
     /// ## Arguments
+    /// - `version_file_path`: the path to the version file of Meilisearch
     /// - `auth_path`: the path to the folder containing the auth LMDB env
     /// - `tasks_path`: the path to the folder containing the task databases
     /// - `update_file_path`: the path to the file store containing the files associated to the tasks
@@ -326,6 +331,7 @@ impl IndexScheduler {
     /// together, to process multiple tasks at once.
     #[allow(clippy::too_many_arguments)]
     pub fn new(
+        version_file_path: PathBuf,
         auth_path: PathBuf,
         tasks_path: PathBuf,
         update_file_path: PathBuf,
@@ -371,6 +377,7 @@ impl IndexScheduler {
             dumps_path,
             snapshots_path,
             auth_path,
+            version_file_path,

             #[cfg(test)]
             test_breakpoint_sdr,
@@ -975,6 +982,8 @@ mod tests {
        let (sender, receiver) = crossbeam::channel::bounded(0);

        let index_scheduler = Self::new(
+            tempdir.path().join(VERSION_FILE_NAME),
+            tempdir.path().join("auth"),
            tempdir.path().join("db_path"),
            tempdir.path().join("file_store"),
            tempdir.path().join("indexes"),
@@ -34,8 +34,8 @@ use index_scheduler::IndexScheduler;
 use meilisearch_auth::AuthController;
 use meilisearch_types::milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
 use meilisearch_types::milli::update::{IndexDocumentsConfig, IndexDocumentsMethod};
-use meilisearch_types::milli::{self};
 use meilisearch_types::settings::apply_settings_to_builder;
+use meilisearch_types::{milli, VERSION_FILE_NAME};
 pub use option::Opt;

 use crate::error::MeilisearchHttpError;
@@ -110,7 +110,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(IndexScheduler, AuthContr
     let auth_controller_builder = || AuthController::new(&opt.db_path, &opt.master_key);
     let index_scheduler_builder = || {
         IndexScheduler::new(
-            // TODO find a better way to have the path of the auth store
+            opt.db_path.join(VERSION_FILE_NAME),
             opt.db_path.join("auth"),
             opt.db_path.join("tasks"),
             opt.db_path.join("update_files"),
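The hunk above wires `opt.db_path.join(VERSION_FILE_NAME)` into the scheduler but does not show where the reintroduced `create_version_file`/`check_version_file` helpers get called. A minimal sketch of how a startup path could use them; the `ensure_version_file` helper and the fresh-database check are assumptions, not something this commit shows:

```rust
use std::path::Path;

use meilisearch_types::versioning::{check_version_file, create_version_file, VERSION_FILE_NAME};

/// Hypothetical startup helper: create the VERSION file for a fresh database,
/// otherwise verify that the on-disk version matches the running binary.
fn ensure_version_file(db_path: &Path) -> anyhow::Result<()> {
    if db_path.join(VERSION_FILE_NAME).exists() {
        // Existing database: refuse to start on a major/minor mismatch.
        check_version_file(db_path)?;
    } else {
        // Fresh database: persist the current binary version.
        create_version_file(db_path)?;
    }
    Ok(())
}
```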
@@ -6,6 +6,7 @@ edition = "2021"

 [dependencies]
 actix-web = { version = "4.2.1", default-features = false }
+anyhow = "1.0.65"
 csv = "1.1.6"
 either = { version = "1.6.1", features = ["serde"] }
 enum-iterator = "1.1.3"
@@ -5,10 +5,12 @@ pub mod keys;
 pub mod settings;
 pub mod star_or;
 pub mod tasks;
+pub mod versioning;

 pub use milli;
 pub use milli::{heed, Index};
 use uuid::Uuid;
+pub use versioning::VERSION_FILE_NAME;

 pub type Document = serde_json::Map<String, serde_json::Value>;
 pub type InstanceUid = Uuid;
meilisearch-types/src/versioning.rs (new file, 61 lines)

@@ -0,0 +1,61 @@
+use std::fs;
+use std::io::{self, ErrorKind};
+use std::path::Path;
+
+/// The name of the file that contains the version of the database.
+pub const VERSION_FILE_NAME: &str = "VERSION";
+
+static VERSION_MAJOR: &str = env!("CARGO_PKG_VERSION_MAJOR");
+static VERSION_MINOR: &str = env!("CARGO_PKG_VERSION_MINOR");
+static VERSION_PATCH: &str = env!("CARGO_PKG_VERSION_PATCH");
+
+/// Persists the version of the current Meilisearch binary to a VERSION file
+pub fn create_version_file(db_path: &Path) -> io::Result<()> {
+    let version_path = db_path.join(VERSION_FILE_NAME);
+    fs::write(version_path, format!("{}.{}.{}", VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH))
+}
+
+/// Ensures Meilisearch version is compatible with the database, returns an error versions mismatch.
+pub fn check_version_file(db_path: &Path) -> anyhow::Result<()> {
+    let version_path = db_path.join(VERSION_FILE_NAME);
+
+    match fs::read_to_string(&version_path) {
+        Ok(version) => {
+            let version_components = version.split('.').collect::<Vec<_>>();
+            let (major, minor, patch) = match &version_components[..] {
+                [major, minor, patch] => (major.to_string(), minor.to_string(), patch.to_string()),
+                _ => return Err(VersionFileError::MalformedVersionFile.into()),
+            };
+
+            if major != VERSION_MAJOR || minor != VERSION_MINOR {
+                return Err(VersionFileError::VersionMismatch { major, minor, patch }.into());
+            }
+        }
+        Err(error) => {
+            return match error.kind() {
+                ErrorKind::NotFound => Err(VersionFileError::MissingVersionFile.into()),
+                _ => Err(error.into()),
+            }
+        }
+    }
+
+    Ok(())
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum VersionFileError {
+    #[error(
+        "Meilisearch (v{}) failed to infer the version of the database.
+To update Meilisearch please follow our guide on https://docs.meilisearch.com/learn/advanced/updating.html.",
+        env!("CARGO_PKG_VERSION").to_string()
+    )]
+    MissingVersionFile,
+    #[error("Version file is corrupted and thus Meilisearch is unable to determine the version of the database.")]
+    MalformedVersionFile,
+    #[error(
+        "Expected Meilisearch engine version: {major}.{minor}.{patch}, current engine version: {}.
+To update Meilisearch please follow our guide on https://docs.meilisearch.com/learn/advanced/updating.html.",
+        env!("CARGO_PKG_VERSION").to_string()
+    )]
+    VersionMismatch { major: String, minor: String, patch: String },
+}
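For illustration, a quick round-trip of the two new functions. This test is not part of the commit; it assumes the `tempfile` crate is available as a dev-dependency and that it lives in a test module of versioning.rs:

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn version_file_round_trip() {
        let dir = tempfile::tempdir().unwrap();
        // With no VERSION file yet, the check should fail with MissingVersionFile.
        assert!(check_version_file(dir.path()).is_err());
        // After persisting the current binary version, the check should pass.
        create_version_file(dir.path()).unwrap();
        check_version_file(dir.path()).unwrap();
    }
}
```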