Mirror of https://github.com/meilisearch/meilisearch.git

Refactor default values for clap and serde

Commit: ef3fa92536
Parent: 6520d3c474
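The pattern applied throughout the diff below: each default value lives in a named constant, a small `default_*()` function returns it, and that one function is wired into both clap (via `default_value_t` / `default_value_os_t`) and serde (via `#[serde(default = "...")]`), so the command line and any deserialized configuration share a single source of truth. A minimal sketch of the idea, assuming clap 3 with the derive feature plus serde and serde_json as dependencies; the `Config` struct below is illustrative, not code from this commit:

use clap::Parser;
use serde::Deserialize;
use std::path::PathBuf;

const DEFAULT_DB_PATH: &str = "./data.ms";

// One function feeds both the CLI default and the config-file default.
fn default_db_path() -> PathBuf {
    PathBuf::from(DEFAULT_DB_PATH)
}

#[derive(Debug, Parser, Deserialize)]
struct Config {
    /// Where the database is created.
    #[clap(long, default_value_os_t = default_db_path())]
    #[serde(default = "default_db_path")]
    db_path: PathBuf,
}

fn main() {
    // CLI: `--db-path` falls back to default_db_path() when omitted.
    let from_cli = Config::parse();
    println!("from CLI:  {:?}", from_cli.db_path);

    // Config file: a document that omits the field gets the same default.
    let from_file: Config = serde_json::from_str("{}").unwrap();
    println!("from file: {:?}", from_file.db_path);
}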
@@ -45,15 +45,28 @@ const MEILI_LOG_LEVEL: &str = "MEILI_LOG_LEVEL";
 #[cfg(feature = "metrics")]
 const MEILI_ENABLE_METRICS_ROUTE: &str = "MEILI_ENABLE_METRICS_ROUTE";

+const DEFAULT_DB_PATH: &str = "./data.ms";
+const DEFAULT_HTTP_ADDR: &str = "127.0.0.1:7700";
+const DEFAULT_ENV: &str = "development";
+const DEFAULT_MAX_INDEX_SIZE: &str = "100 GiB";
+const DEFAULT_MAX_TASK_DB_SIZE: &str = "100 GiB";
+const DEFAULT_HTTP_PAYLOAD_SIZE_LIMIT: &str = "100 MB";
+const DEFAULT_SNAPSHOT_DIR: &str = "snapshots/";
+const DEFAULT_SNAPSHOT_INTERVAL_SEC: u64 = 86400;
+const DEFAULT_DUMPS_DIR: &str = "dumps/";
+const DEFAULT_LOG_LEVEL: &str = "info";
+
 #[derive(Debug, Clone, Parser, Serialize, Deserialize)]
 #[clap(version)]
 pub struct Opt {
     /// The destination where the database must be created.
-    #[clap(long, env = MEILI_DB_PATH, default_value = "./data.ms")]
+    #[clap(long, env = MEILI_DB_PATH, default_value_os_t = default_db_path())]
+    #[serde(default = "default_db_path")]
     pub db_path: PathBuf,

     /// The address on which the http server will listen.
-    #[clap(long, env = MEILI_HTTP_ADDR, default_value = "127.0.0.1:7700")]
+    #[clap(long, env = MEILI_HTTP_ADDR, default_value_t = default_http_addr())]
+    #[serde(default = "default_http_addr")]
     pub http_addr: String,

     /// The master key allowing you to do everything on the server.
@@ -65,25 +78,29 @@ pub struct Opt {
     /// If the server is running in development mode more logs will be displayed,
     /// and the master key can be avoided which implies that there is no security on the updates routes.
     /// This is useful to debug when integrating the engine with another service.
-    #[clap(long, env = MEILI_ENV, default_value = "development", possible_values = &POSSIBLE_ENV)]
+    #[clap(long, env = MEILI_ENV, default_value_t = default_env(), possible_values = &POSSIBLE_ENV)]
+    #[serde(default = "default_env")]
     pub env: String,

     /// Do not send analytics to Meili.
     #[cfg(all(not(debug_assertions), feature = "analytics"))]
-    #[serde(skip_serializing)] // we can't send true
+    #[serde(skip_serializing, default)] // we can't send true
     #[clap(long, env = MEILI_NO_ANALYTICS)]
     pub no_analytics: bool,

     /// The maximum size, in bytes, of the main lmdb database directory
-    #[clap(long, env = MEILI_MAX_INDEX_SIZE, default_value = "100 GiB")]
+    #[clap(long, env = MEILI_MAX_INDEX_SIZE, default_value_t = default_max_index_size())]
+    #[serde(default = "default_max_index_size")]
     pub max_index_size: Byte,

     /// The maximum size, in bytes, of the update lmdb database directory
-    #[clap(long, env = MEILI_MAX_TASK_DB_SIZE, default_value = "100 GiB")]
+    #[clap(long, env = MEILI_MAX_TASK_DB_SIZE, default_value_t = default_max_task_db_size())]
+    #[serde(default = "default_max_task_db_size")]
     pub max_task_db_size: Byte,

     /// The maximum size, in bytes, of accepted JSON payloads
-    #[clap(long, env = MEILI_HTTP_PAYLOAD_SIZE_LIMIT, default_value = "100 MB")]
+    #[clap(long, env = MEILI_HTTP_PAYLOAD_SIZE_LIMIT, default_value_t = default_http_payload_size_limit())]
+    #[serde(default = "default_http_payload_size_limit")]
     pub http_payload_size_limit: Byte,

     /// Read server certificates from CERTFILE.
@@ -113,17 +130,17 @@ pub struct Opt {
     pub ssl_ocsp_path: Option<PathBuf>,

     /// Send a fatal alert if the client does not complete client authentication.
-    #[serde(skip_serializing)]
+    #[serde(skip_serializing, default)]
     #[clap(long, env = MEILI_SSL_REQUIRE_AUTH)]
     pub ssl_require_auth: bool,

     /// SSL support session resumption
-    #[serde(skip_serializing)]
+    #[serde(skip_serializing, default)]
     #[clap(long, env = MEILI_SSL_RESUMPTION)]
     pub ssl_resumption: bool,

     /// SSL support tickets.
-    #[serde(skip_serializing)]
+    #[serde(skip_serializing, default)]
     #[clap(long, env = MEILI_SSL_TICKETS)]
     pub ssl_tickets: bool,

@@ -139,6 +156,7 @@ pub struct Opt {
         env = "MEILI_IGNORE_MISSING_SNAPSHOT",
         requires = "import-snapshot"
     )]
+    #[serde(default)]
     pub ignore_missing_snapshot: bool,

     /// The engine will skip snapshot importation and not return an error in such case.
@@ -147,18 +165,23 @@ pub struct Opt {
         env = "MEILI_IGNORE_SNAPSHOT_IF_DB_EXISTS",
         requires = "import-snapshot"
     )]
+    #[serde(default)]
     pub ignore_snapshot_if_db_exists: bool,

     /// Defines the directory path where meilisearch will create snapshot each snapshot_time_gap.
-    #[clap(long, env = MEILI_SNAPSHOT_DIR, default_value = "snapshots/")]
+    #[clap(long, env = MEILI_SNAPSHOT_DIR, default_value_os_t = default_snapshot_dir())]
+    #[serde(default = "default_snapshot_dir")]
     pub snapshot_dir: PathBuf,

     /// Activate snapshot scheduling.
     #[clap(long, env = MEILI_SCHEDULE_SNAPSHOT)]
+    #[serde(default)]
     pub schedule_snapshot: bool,

     /// Defines time interval, in seconds, between each snapshot creation.
-    #[clap(long, env = MEILI_SNAPSHOT_INTERVAL_SEC, default_value = "86400")] // 24h
+    #[clap(long, env = MEILI_SNAPSHOT_INTERVAL_SEC, default_value_t = default_snapshot_interval_sec())]
+    #[serde(default = "default_snapshot_interval_sec")]
+    // 24h
     pub snapshot_interval_sec: u64,

     /// Import a dump from the specified path, must be a `.dump` file.
@@ -167,23 +190,28 @@ pub struct Opt {

     /// If the dump doesn't exists, load or create the database specified by `db-path` instead.
     #[clap(long, env = "MEILI_IGNORE_MISSING_DUMP", requires = "import-dump")]
+    #[serde(default)]
     pub ignore_missing_dump: bool,

     /// Ignore the dump if a database already exists, and load that database instead.
     #[clap(long, env = "MEILI_IGNORE_DUMP_IF_DB_EXISTS", requires = "import-dump")]
+    #[serde(default)]
     pub ignore_dump_if_db_exists: bool,

     /// Folder where dumps are created when the dump route is called.
-    #[clap(long, env = MEILI_DUMPS_DIR, default_value = "dumps/")]
+    #[clap(long, env = MEILI_DUMPS_DIR, default_value_os_t = default_dumps_dir())]
+    #[serde(default = "default_dumps_dir")]
     pub dumps_dir: PathBuf,

     /// Set the log level
-    #[clap(long, env = MEILI_LOG_LEVEL, default_value = "info")]
+    #[clap(long, env = MEILI_LOG_LEVEL, default_value_t = default_log_level())]
+    #[serde(default = "default_log_level")]
     pub log_level: String,

     /// Enables Prometheus metrics and /metrics route.
     #[cfg(feature = "metrics")]
     #[clap(long, env = MEILI_ENABLE_METRICS_ROUTE)]
+    #[serde(default)]
     pub enable_metrics_route: bool,

     #[serde(flatten)]
@@ -235,7 +263,7 @@ impl Opt {
         export_to_env_if_not_present(MEILI_ENV, self.env);
         #[cfg(all(not(debug_assertions), feature = "analytics"))]
         {
-            export_to_env_if_not_present(MEILI_NO_ANALYTICS, self.no_analytics);
+            export_to_env_if_not_present(MEILI_NO_ANALYTICS, self.no_analytics.to_string());
         }
         export_to_env_if_not_present(MEILI_MAX_INDEX_SIZE, self.max_index_size.to_string());
         export_to_env_if_not_present(MEILI_MAX_TASK_DB_SIZE, self.max_task_db_size.to_string());
@@ -375,6 +403,46 @@ fn load_ocsp(filename: &Option<PathBuf>) -> anyhow::Result<Vec<u8>> {
     Ok(ret)
 }

+fn default_db_path() -> PathBuf {
+    PathBuf::from(DEFAULT_DB_PATH)
+}
+
+fn default_http_addr() -> String {
+    DEFAULT_HTTP_ADDR.to_string()
+}
+
+fn default_env() -> String {
+    DEFAULT_ENV.to_string()
+}
+
+fn default_max_index_size() -> Byte {
+    Byte::from_str(DEFAULT_MAX_INDEX_SIZE).unwrap()
+}
+
+fn default_max_task_db_size() -> Byte {
+    Byte::from_str(DEFAULT_MAX_TASK_DB_SIZE).unwrap()
+}
+
+fn default_http_payload_size_limit() -> Byte {
+    Byte::from_str(DEFAULT_HTTP_PAYLOAD_SIZE_LIMIT).unwrap()
+}
+
+fn default_snapshot_dir() -> PathBuf {
+    PathBuf::from(DEFAULT_SNAPSHOT_DIR)
+}
+
+fn default_snapshot_interval_sec() -> u64 {
+    DEFAULT_SNAPSHOT_INTERVAL_SEC
+}
+
+fn default_dumps_dir() -> PathBuf {
+    PathBuf::from(DEFAULT_DUMPS_DIR)
+}
+
+fn default_log_level() -> String {
+    DEFAULT_LOG_LEVEL.to_string()
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
@@ -13,12 +13,14 @@ const MEILI_MAX_INDEXING_MEMORY: &str = "MEILI_MAX_INDEXING_MEMORY";
 const MEILI_MAX_INDEXING_THREADS: &str = "MEILI_MAX_INDEXING_THREADS";
 const DISABLE_AUTO_BATCHING: &str = "DISABLE_AUTO_BATCHING";

+const DEFAULT_LOG_EVERY_N: usize = 100000;
+
 #[derive(Debug, Clone, Parser, Serialize, Deserialize)]
 pub struct IndexerOpts {
     /// The amount of documents to skip before printing
     /// a log regarding the indexing advancement.
-    #[serde(skip_serializing)]
-    #[clap(long, default_value = "100000", hide = true)] // 100k
+    #[serde(skip_serializing, default = "default_log_every_n")]
+    #[clap(long, default_value_t = default_log_every_n(), hide = true)] // 100k
     pub log_every_n: usize,

     /// Grenad max number of chunks in bytes.
@@ -34,6 +36,7 @@ pub struct IndexerOpts {
     /// try to use the memory it needs but without real limit, this can lead to
     /// Out-Of-Memory issues and it is recommended to specify the amount of memory to use.
     #[clap(long, env = MEILI_MAX_INDEXING_MEMORY, default_value_t)]
+    #[serde(default)]
     pub max_indexing_memory: MaxMemory,

     /// The maximum number of threads the indexer will use.
@@ -42,6 +45,7 @@ pub struct IndexerOpts {
     ///
     /// It defaults to half of the available threads.
     #[clap(long, env = MEILI_MAX_INDEXING_THREADS, default_value_t)]
+    #[serde(default)]
     pub max_indexing_threads: MaxThreads,
 }

@@ -50,6 +54,7 @@ pub struct SchedulerConfig {
     /// The engine will disable task auto-batching,
     /// and will sequencialy compute each task one by one.
     #[clap(long, env = DISABLE_AUTO_BATCHING)]
+    #[serde(default)]
     pub disable_auto_batching: bool,
 }

@@ -194,3 +199,7 @@ impl Deref for MaxThreads {
         &self.0
     }
 }
+
+fn default_log_every_n() -> usize {
+    DEFAULT_LOG_EVERY_N
+}
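A note on the two serde forms used above: bare `#[serde(default)]` falls back to the field type's `Default` implementation (so the boolean flags deserialize to false when missing), while `#[serde(default = "some_fn")]` calls the named function, which is what lets a missing `snapshot_interval_sec` come out as 86400 rather than 0. A small sketch of the difference, assuming serde_json as the configuration format; `SnapshotOpts` is illustrative only, not a type from this commit:

use serde::Deserialize;

fn default_snapshot_interval_sec() -> u64 {
    86400 // 24h, mirroring DEFAULT_SNAPSHOT_INTERVAL_SEC above
}

#[derive(Debug, Deserialize)]
struct SnapshotOpts {
    // Bare `default`: a missing field becomes bool::default(), i.e. false.
    #[serde(default)]
    schedule_snapshot: bool,

    // Named `default`: a missing field becomes default_snapshot_interval_sec(), i.e. 86400.
    #[serde(default = "default_snapshot_interval_sec")]
    snapshot_interval_sec: u64,
}

fn main() {
    let opts: SnapshotOpts = serde_json::from_str("{}").unwrap();
    assert!(!opts.schedule_snapshot);
    assert_eq!(opts.snapshot_interval_sec, 86400);
    println!("{:?}", opts);
}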