diff --git a/Cargo.lock b/Cargo.lock
index d8e1e1899..41a6fab29 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2670,6 +2670,7 @@ dependencies = [
  "walkdir",
  "yaup",
  "zip",
+ "zookeeper-client",
 ]
 
 [[package]]
diff --git a/meilisearch-auth/Cargo.toml b/meilisearch-auth/Cargo.toml
index 965a4f1ce..bb9439622 100644
--- a/meilisearch-auth/Cargo.toml
+++ b/meilisearch-auth/Cargo.toml
@@ -24,4 +24,4 @@ sha2 = "0.10.6"
 thiserror = "1.0.40"
 time = { version = "0.3.20", features = ["serde-well-known", "formatting", "parsing", "macros"] }
 uuid = { version = "1.3.1", features = ["serde", "v4"] }
-zookeeper-client = "0.4.0"
\ No newline at end of file
+zookeeper-client = "0.4.0"
diff --git a/meilisearch-auth/src/lib.rs b/meilisearch-auth/src/lib.rs
index e74f1707c..861e47fa2 100644
--- a/meilisearch-auth/src/lib.rs
+++ b/meilisearch-auth/src/lib.rs
@@ -16,22 +16,28 @@ pub use store::open_auth_store_env;
 use store::{generate_key_as_hexa, HeedAuthStore};
 use time::OffsetDateTime;
 use uuid::Uuid;
+use zookeeper_client as zk;
 
 #[derive(Clone)]
 pub struct AuthController {
     store: Arc<HeedAuthStore>,
     master_key: Option<String>,
+    zk: Option<zk::Client>,
 }
 
 impl AuthController {
-    pub fn new(db_path: impl AsRef<Path>, master_key: &Option<String>) -> Result<Self> {
+    pub fn new(
+        db_path: impl AsRef<Path>,
+        master_key: &Option<String>,
+        zk: Option<zk::Client>,
+    ) -> Result<Self> {
         let store = HeedAuthStore::new(db_path)?;
 
         if store.is_empty()? {
             generate_default_keys(&store)?;
         }
 
-        Ok(Self { store: Arc::new(store), master_key: master_key.clone() })
+        Ok(Self { store: Arc::new(store), master_key: master_key.clone(), zk })
     }
 
     /// Return `Ok(())` if the auth controller is able to access one of its database.
diff --git a/meilisearch/Cargo.toml b/meilisearch/Cargo.toml
index d33f53906..76a4e4979 100644
--- a/meilisearch/Cargo.toml
+++ b/meilisearch/Cargo.toml
@@ -105,6 +105,7 @@ walkdir = "2.3.3"
 yaup = "0.2.1"
 serde_urlencoded = "0.7.1"
 termcolor = "1.2.0"
+zookeeper-client = "0.4.0"
 
 [dev-dependencies]
 actix-rt = "2.8.0"
diff --git a/meilisearch/src/lib.rs b/meilisearch/src/lib.rs
index ca775559c..96fdc3639 100644
--- a/meilisearch/src/lib.rs
+++ b/meilisearch/src/lib.rs
@@ -137,14 +137,17 @@ enum OnFailure {
     KeepDb,
 }
 
-pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
+pub fn setup_meilisearch(
+    opt: &Opt,
+    zk: Option<zk::Client>,
+) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
     let empty_db = is_empty_db(&opt.db_path);
     let (index_scheduler, auth_controller) = if let Some(ref snapshot_path) = opt.import_snapshot {
         let snapshot_path_exists = snapshot_path.exists();
         // the db is empty and the snapshot exists, import it
         if empty_db && snapshot_path_exists {
             match compression::from_tar_gz(snapshot_path, &opt.db_path) {
-                Ok(()) => open_or_create_database_unchecked(opt, OnFailure::RemoveDb)?,
+                Ok(()) => open_or_create_database_unchecked(opt, OnFailure::RemoveDb, zk)?,
                 Err(e) => {
                     std::fs::remove_dir_all(&opt.db_path)?;
                     return Err(e);
@@ -161,14 +164,14 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
             bail!("snapshot doesn't exist at {}", snapshot_path.display())
         // the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
         } else {
-            open_or_create_database(opt, empty_db)?
+            open_or_create_database(opt, empty_db, zk)?
         }
     } else if let Some(ref path) = opt.import_dump {
         let src_path_exists = path.exists();
         // the db is empty and the dump exists, import it
         if empty_db && src_path_exists {
             let (mut index_scheduler, mut auth_controller) =
-                open_or_create_database_unchecked(opt, OnFailure::RemoveDb)?;
+                open_or_create_database_unchecked(opt, OnFailure::RemoveDb, zk)?;
             match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
                 Ok(()) => (index_scheduler, auth_controller),
                 Err(e) => {
@@ -188,10 +191,10 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
         // the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
         // or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
         } else {
-            open_or_create_database(opt, empty_db)?
+            open_or_create_database(opt, empty_db, zk)?
         }
     } else {
-        open_or_create_database(opt, empty_db)?
+        open_or_create_database(opt, empty_db, zk)?
     };
 
     // We create a loop in a thread that registers snapshotCreation tasks
@@ -218,11 +221,11 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
 fn open_or_create_database_unchecked(
     opt: &Opt,
     on_failure: OnFailure,
+    zk: Option<zk::Client>,
 ) -> anyhow::Result<(IndexScheduler, AuthController)> {
     // we don't want to create anything in the data.ms yet, thus we
     // wrap our two builders in a closure that'll be executed later.
-    let zk_client = zk::Client::connect(&opt.cluster).await.unwrap();
-    let auth_controller = AuthController::new(&opt.db_path, &opt.master_key, zk_client.clone());
+    let auth_controller = AuthController::new(&opt.db_path, &opt.master_key, zk);
     let instance_features = opt.to_instance_features();
     let index_scheduler_builder = || -> anyhow::Result<_> {
         Ok(IndexScheduler::new(IndexSchedulerOptions {
@@ -264,12 +267,13 @@ fn open_or_create_database_unchecked(
 fn open_or_create_database(
     opt: &Opt,
     empty_db: bool,
+    zk: Option<zk::Client>,
 ) -> anyhow::Result<(IndexScheduler, AuthController)> {
     if !empty_db {
         check_version_file(&opt.db_path)?;
     }
 
-    open_or_create_database_unchecked(opt, OnFailure::KeepDb)
+    open_or_create_database_unchecked(opt, OnFailure::KeepDb, zk)
 }
 
 fn import_dump(
diff --git a/meilisearch/src/main.rs b/meilisearch/src/main.rs
index a3905d451..beea1a2ba 100644
--- a/meilisearch/src/main.rs
+++ b/meilisearch/src/main.rs
@@ -12,6 +12,7 @@ use meilisearch::analytics::Analytics;
 use meilisearch::{analytics, create_app, prototype_name, setup_meilisearch, Opt};
 use meilisearch_auth::{generate_master_key, AuthController, MASTER_KEY_MIN_SIZE};
 use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
+use zookeeper_client as zk;
 
 #[global_allocator]
 static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
@@ -63,7 +64,11 @@ async fn main() -> anyhow::Result<()> {
         _ => (),
     }
 
-    let (index_scheduler, auth_controller) = setup_meilisearch(&opt)?;
+    let zk = match opt.zk_url {
+        Some(ref url) => Some(zk::Client::connect(url).await.unwrap()),
+        None => None,
+    };
+    let (index_scheduler, auth_controller) = setup_meilisearch(&opt, zk)?;
 
     #[cfg(all(not(debug_assertions), feature = "analytics"))]
     let analytics = if !opt.no_analytics {
diff --git a/meilisearch/src/option.rs b/meilisearch/src/option.rs
index eaf96ee0c..86c3d3493 100644
--- a/meilisearch/src/option.rs
+++ b/meilisearch/src/option.rs
@@ -28,6 +28,7 @@ const MEILI_DB_PATH: &str = "MEILI_DB_PATH";
 const MEILI_HTTP_ADDR: &str = "MEILI_HTTP_ADDR";
 const MEILI_MASTER_KEY: &str = "MEILI_MASTER_KEY";
 const MEILI_ENV: &str = "MEILI_ENV";
+const MEILI_ZK_URL: &str = "MEILI_ZK_URL";
 #[cfg(all(not(debug_assertions), feature = "analytics"))]
 const MEILI_NO_ANALYTICS: &str = "MEILI_NO_ANALYTICS";
 const MEILI_HTTP_PAYLOAD_SIZE_LIMIT: &str = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT";
@@ -154,6 +155,11 @@ pub struct Opt {
     #[serde(default = "default_env")]
     pub env: String,
 
+    /// Sets the HTTP address and port used to communicate with the zookeeper cluster.
+    /// If run locally, the default url is `http://localhost:2181/`.
+    #[clap(long, env = MEILI_ZK_URL)]
+    pub zk_url: Option<String>,
+
     /// Deactivates Meilisearch's built-in telemetry when provided.
     ///
     /// Meilisearch automatically collects data from all instances that do not opt out using this flag.
@@ -368,6 +374,7 @@ impl Opt {
             http_addr,
             master_key,
             env,
+            zk_url,
             max_index_size: _,
             max_task_db_size: _,
             http_payload_size_limit,
@@ -401,6 +408,9 @@ impl Opt {
             export_to_env_if_not_present(MEILI_MASTER_KEY, master_key);
         }
         export_to_env_if_not_present(MEILI_ENV, env);
+        if let Some(zk_url) = zk_url {
+            export_to_env_if_not_present(MEILI_ZK_URL, zk_url);
+        }
         #[cfg(all(not(debug_assertions), feature = "analytics"))]
         {
             export_to_env_if_not_present(MEILI_NO_ANALYTICS, no_analytics.to_string());
@@ -715,6 +725,10 @@ fn default_env() -> String {
     DEFAULT_ENV.to_string()
 }
 
+pub fn default_zk_url() -> String {
+    DEFAULT_HTTP_ADDR.to_string()
+}
+
 fn default_max_index_size() -> Byte {
     Byte::from_bytes(INDEX_SIZE)
 }