Makes zk available inside the auth-controller, with its config coming from the CLI; it compiles.

Tamo 2023-08-02 13:17:40 +02:00
parent dc38da95c4
commit 97e3dfd99d
7 changed files with 44 additions and 13 deletions

Cargo.lock (generated)

@@ -2670,6 +2670,7 @@ dependencies = [
  "walkdir",
  "yaup",
  "zip",
+ "zookeeper-client",
 ]

 [[package]]


@@ -24,4 +24,4 @@ sha2 = "0.10.6"
 thiserror = "1.0.40"
 time = { version = "0.3.20", features = ["serde-well-known", "formatting", "parsing", "macros"] }
 uuid = { version = "1.3.1", features = ["serde", "v4"] }
 zookeeper-client = "0.4.0"


@@ -16,22 +16,28 @@ pub use store::open_auth_store_env;
 use store::{generate_key_as_hexa, HeedAuthStore};
 use time::OffsetDateTime;
 use uuid::Uuid;
+use zookeeper_client as zk;

 #[derive(Clone)]
 pub struct AuthController {
     store: Arc<HeedAuthStore>,
     master_key: Option<String>,
+    zk: Option<zk::Client>,
 }

 impl AuthController {
-    pub fn new(db_path: impl AsRef<Path>, master_key: &Option<String>) -> Result<Self> {
+    pub fn new(
+        db_path: impl AsRef<Path>,
+        master_key: &Option<String>,
+        zk: Option<zk::Client>,
+    ) -> Result<Self> {
         let store = HeedAuthStore::new(db_path)?;

         if store.is_empty()? {
             generate_default_keys(&store)?;
         }

-        Ok(Self { store: Arc::new(store), master_key: master_key.clone() })
+        Ok(Self { store: Arc::new(store), master_key: master_key.clone(), zk })
     }

     /// Return `Ok(())` if the auth controller is able to access one of its database.
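For orientation, a small sketch (not part of the diff) of how a caller might use the new constructor: the ZooKeeper client is connected only when a URL is configured, then handed in as an `Option`. The `./auth.ms` path and the unwraps below are placeholders, not real defaults.

use meilisearch_auth::AuthController;
use zookeeper_client as zk;

// Hypothetical caller sketch: connect to ZooKeeper only when a URL is
// configured, then pass the optional client to the new constructor.
async fn build_auth_controller(zk_url: Option<&str>) -> AuthController {
    let zk = match zk_url {
        Some(url) => Some(zk::Client::connect(url).await.unwrap()),
        None => None,
    };
    // No master key and a placeholder db path, purely for illustration.
    AuthController::new("./auth.ms", &None, zk).unwrap()
}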


@@ -105,6 +105,7 @@ walkdir = "2.3.3"
 yaup = "0.2.1"
 serde_urlencoded = "0.7.1"
 termcolor = "1.2.0"
+zookeeper-client = "0.4.0"

 [dev-dependencies]
 actix-rt = "2.8.0"


@@ -137,14 +137,17 @@ enum OnFailure {
     KeepDb,
 }

-pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
+pub fn setup_meilisearch(
+    opt: &Opt,
+    zk: Option<zk::Client>,
+) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
     let empty_db = is_empty_db(&opt.db_path);
     let (index_scheduler, auth_controller) = if let Some(ref snapshot_path) = opt.import_snapshot {
         let snapshot_path_exists = snapshot_path.exists();
         // the db is empty and the snapshot exists, import it
         if empty_db && snapshot_path_exists {
             match compression::from_tar_gz(snapshot_path, &opt.db_path) {
-                Ok(()) => open_or_create_database_unchecked(opt, OnFailure::RemoveDb)?,
+                Ok(()) => open_or_create_database_unchecked(opt, OnFailure::RemoveDb, zk)?,
                 Err(e) => {
                     std::fs::remove_dir_all(&opt.db_path)?;
                     return Err(e);
@@ -161,14 +164,14 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
             bail!("snapshot doesn't exist at {}", snapshot_path.display())
         // the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
         } else {
-            open_or_create_database(opt, empty_db)?
+            open_or_create_database(opt, empty_db, zk)?
         }
     } else if let Some(ref path) = opt.import_dump {
         let src_path_exists = path.exists();
         // the db is empty and the dump exists, import it
         if empty_db && src_path_exists {
             let (mut index_scheduler, mut auth_controller) =
-                open_or_create_database_unchecked(opt, OnFailure::RemoveDb)?;
+                open_or_create_database_unchecked(opt, OnFailure::RemoveDb, zk)?;
             match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
                 Ok(()) => (index_scheduler, auth_controller),
                 Err(e) => {
@@ -188,10 +191,10 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
         // the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
         // or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
         } else {
-            open_or_create_database(opt, empty_db)?
+            open_or_create_database(opt, empty_db, zk)?
         }
     } else {
-        open_or_create_database(opt, empty_db)?
+        open_or_create_database(opt, empty_db, zk)?
     };

     // We create a loop in a thread that registers snapshotCreation tasks
@@ -218,11 +221,11 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, Arc<
 fn open_or_create_database_unchecked(
     opt: &Opt,
     on_failure: OnFailure,
+    zk: Option<zk::Client>,
 ) -> anyhow::Result<(IndexScheduler, AuthController)> {
     // we don't want to create anything in the data.ms yet, thus we
     // wrap our two builders in a closure that'll be executed later.
-    let zk_client = zk::Client::connect(&opt.cluster).await.unwrap();
-    let auth_controller = AuthController::new(&opt.db_path, &opt.master_key, zk_client.clone());
+    let auth_controller = AuthController::new(&opt.db_path, &opt.master_key, zk);
     let instance_features = opt.to_instance_features();
     let index_scheduler_builder = || -> anyhow::Result<_> {
         Ok(IndexScheduler::new(IndexSchedulerOptions {
@@ -264,12 +267,13 @@ fn open_or_create_database_unchecked(
 fn open_or_create_database(
     opt: &Opt,
     empty_db: bool,
+    zk: Option<zk::Client>,
 ) -> anyhow::Result<(IndexScheduler, AuthController)> {
     if !empty_db {
         check_version_file(&opt.db_path)?;
     }

-    open_or_create_database_unchecked(opt, OnFailure::KeepDb)
+    open_or_create_database_unchecked(opt, OnFailure::KeepDb, zk)
 }

 fn import_dump(
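Note that `zk` (an `Option<zk::Client>`) is moved, not cloned, even though it appears in several call sites inside `setup_meilisearch`: the call sites live in mutually exclusive branches of the if/else chain, so only one of them ever consumes it. A minimal, standalone illustration of that pattern (using `String` as a stand-in for the client type, not Meilisearch code):

// Only one branch runs, so the value can be moved in each of them
// without a `.clone()` or a `Clone` bound.
fn open(with_snapshot: bool, zk: Option<String>) -> usize {
    if with_snapshot {
        consume(zk) // `zk` moved here...
    } else {
        consume(zk) // ...or here, never both
    }
}

fn consume(zk: Option<String>) -> usize {
    zk.map_or(0, |url| url.len())
}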


@@ -12,6 +12,7 @@ use meilisearch::analytics::Analytics;
 use meilisearch::{analytics, create_app, prototype_name, setup_meilisearch, Opt};
 use meilisearch_auth::{generate_master_key, AuthController, MASTER_KEY_MIN_SIZE};
 use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
+use zookeeper_client as zk;

 #[global_allocator]
 static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
@@ -63,7 +64,11 @@ async fn main() -> anyhow::Result<()> {
         _ => (),
     }

-    let (index_scheduler, auth_controller) = setup_meilisearch(&opt)?;
+    let zk = match opt.zk_url {
+        Some(ref url) => Some(zk::Client::connect(url).await.unwrap()),
+        None => None,
+    };
+    let (index_scheduler, auth_controller) = setup_meilisearch(&opt, zk)?;

     #[cfg(all(not(debug_assertions), feature = "analytics"))]
     let analytics = if !opt.no_analytics {
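A `match` is used here rather than `Option::map` because `.await` cannot appear inside a plain closure. A hedged variant of the same step that propagates the connection error with `?` instead of unwrapping (assuming the zookeeper-client error type converts into `anyhow::Error`):

use meilisearch::{setup_meilisearch, Opt};
use zookeeper_client as zk;

// Error-propagating sketch of the startup step shown in the diff above.
async fn connect_and_setup(opt: &Opt) -> anyhow::Result<()> {
    let zk = match opt.zk_url {
        Some(ref url) => Some(zk::Client::connect(url).await?),
        None => None,
    };
    let (_index_scheduler, _auth_controller) = setup_meilisearch(opt, zk)?;
    Ok(())
}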


@@ -28,6 +28,7 @@ const MEILI_DB_PATH: &str = "MEILI_DB_PATH";
 const MEILI_HTTP_ADDR: &str = "MEILI_HTTP_ADDR";
 const MEILI_MASTER_KEY: &str = "MEILI_MASTER_KEY";
 const MEILI_ENV: &str = "MEILI_ENV";
+const MEILI_ZK_URL: &str = "MEILI_ZK_URL";
 #[cfg(all(not(debug_assertions), feature = "analytics"))]
 const MEILI_NO_ANALYTICS: &str = "MEILI_NO_ANALYTICS";
 const MEILI_HTTP_PAYLOAD_SIZE_LIMIT: &str = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT";
@@ -154,6 +155,11 @@ pub struct Opt {
     #[serde(default = "default_env")]
     pub env: String,

+    /// Sets the HTTP address and port used to communicate with the zookeeper cluster.
+    /// If ran locally, the default url is `http://localhost:2181/`.
+    #[clap(long, env = MEILI_ZK_URL)]
+    pub zk_url: Option<String>,
+
     /// Deactivates Meilisearch's built-in telemetry when provided.
     ///
     /// Meilisearch automatically collects data from all instances that do not opt out using this flag.
@@ -368,6 +374,7 @@ impl Opt {
             http_addr,
             master_key,
             env,
+            zk_url,
             max_index_size: _,
             max_task_db_size: _,
             http_payload_size_limit,
@@ -401,6 +408,9 @@ impl Opt {
             export_to_env_if_not_present(MEILI_MASTER_KEY, master_key);
         }
         export_to_env_if_not_present(MEILI_ENV, env);
+        if let Some(zk_url) = zk_url {
+            export_to_env_if_not_present(MEILI_ZK_URL, zk_url);
+        }
         #[cfg(all(not(debug_assertions), feature = "analytics"))]
         {
             export_to_env_if_not_present(MEILI_NO_ANALYTICS, no_analytics.to_string());
@@ -715,6 +725,10 @@ fn default_env() -> String {
     DEFAULT_ENV.to_string()
 }

+pub fn default_zk_url() -> String {
+    DEFAULT_HTTP_ADDR.to_string()
+}
+
 fn default_max_index_size() -> Byte {
     Byte::from_bytes(INDEX_SIZE)
 }
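With the `#[clap(long, env = MEILI_ZK_URL)]` attribute above, clap should expose the option both as a command-line flag (by its default kebab-case renaming, `--zk-url`) and through the `MEILI_ZK_URL` environment variable; when neither is set, `zk_url` stays `None` and Meilisearch starts without ZooKeeper. A hedged sketch of how that surfaces at runtime, assuming `Opt` derives `clap::Parser` as the attribute implies:

use clap::Parser;
use meilisearch::Opt;

// Reads the parsed options and reports whether clustering was requested.
fn describe_cluster_config() {
    let opt = Opt::parse();
    match opt.zk_url.as_deref() {
        Some(url) => println!("clustering enabled, ZooKeeper at {url}"),
        None => println!("running as a standalone instance"),
    }
}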