2020-11-02 20:01:32 +08:00
|
|
|
use std::borrow::Cow;
|
2020-08-05 19:52:27 +08:00
|
|
|
use std::collections::HashSet;
|
2020-10-19 22:03:17 +08:00
|
|
|
use std::fs::{File, create_dir_all};
|
2020-05-31 23:48:13 +08:00
|
|
|
use std::net::SocketAddr;
|
|
|
|
use std::path::PathBuf;
|
|
|
|
use std::str::FromStr;
|
2020-10-19 22:03:17 +08:00
|
|
|
use std::sync::Arc;
|
2020-05-31 23:48:13 +08:00
|
|
|
use std::time::Instant;
|
2020-11-02 20:01:32 +08:00
|
|
|
use std::{mem, io};
|
2020-05-31 23:48:13 +08:00
|
|
|
|
2020-07-11 20:17:37 +08:00
|
|
|
use askama_warp::Template;
|
2020-10-24 22:23:08 +08:00
|
|
|
use flate2::read::GzDecoder;
|
2020-10-20 17:19:34 +08:00
|
|
|
use futures::stream;
|
2020-10-22 00:26:29 +08:00
|
|
|
use futures::{FutureExt, StreamExt};
|
2020-10-27 03:18:10 +08:00
|
|
|
use grenad::CompressionType;
|
2020-05-31 23:48:13 +08:00
|
|
|
use heed::EnvOpenOptions;
|
2020-11-03 02:11:22 +08:00
|
|
|
use once_cell::sync::OnceCell;
|
|
|
|
use rayon::ThreadPool;
|
2020-11-02 22:47:21 +08:00
|
|
|
use serde::{Serialize, Deserialize, Deserializer};
|
2020-11-05 20:58:07 +08:00
|
|
|
use serde_json::{Map, Value};
|
2020-05-31 23:48:13 +08:00
|
|
|
use structopt::StructOpt;
|
2020-10-19 22:03:17 +08:00
|
|
|
use tokio::fs::File as TFile;
|
|
|
|
use tokio::io::AsyncWriteExt;
|
2020-10-20 17:19:34 +08:00
|
|
|
use tokio::sync::broadcast;
|
2020-10-19 22:03:17 +08:00
|
|
|
use warp::filters::ws::Message;
|
2020-05-31 23:48:13 +08:00
|
|
|
use warp::{Filter, http::Response};
|
|
|
|
|
2020-11-05 18:16:39 +08:00
|
|
|
use milli::tokenizer::{simple_tokenizer, TokenType};
|
2020-11-11 20:14:16 +08:00
|
|
|
use milli::update::UpdateIndexingStep::*;
|
2020-11-05 18:16:39 +08:00
|
|
|
use milli::update::{UpdateBuilder, IndexDocumentsMethod, UpdateFormat};
|
2020-11-05 20:58:07 +08:00
|
|
|
use milli::{obkv_to_json, Index, UpdateStore, SearchResult};
|
2020-06-11 04:05:01 +08:00
|
|
|
|
2020-11-03 02:11:22 +08:00
|
|
|
// Rayon thread pool shared by all update processing; initialized once in `main`
// (sized from `--indexing-jobs`) and fetched inside the update-store closure.
static GLOBAL_THREAD_POOL: OnceCell<ThreadPool> = OnceCell::new();
|
|
|
|
|
2020-05-31 23:48:13 +08:00
|
|
|
#[derive(Debug, StructOpt)]
/// The HTTP main server of the milli project.
pub struct Opt {
    /// The database path where the LMDB database is located.
    /// It is created if it doesn't already exist.
    #[structopt(long = "db", parse(from_os_str))]
    database: PathBuf,

    /// The maximum size the database can take on disk. It is recommended to specify
    /// the whole disk space (value must be a multiple of a page size).
    #[structopt(long = "db-size", default_value = "107374182400")] // 100 GB
    database_size: usize,

    /// The maximum size the database that stores the updates can take on disk. It is recommended
    /// to specify the whole disk space (value must be a multiple of a page size).
    #[structopt(long = "udb-size", default_value = "10737418240")] // 10 GB
    update_database_size: usize,

    /// Disable document highlighting on the dashboard.
    #[structopt(long)]
    disable_highlighting: bool,

    /// Verbose mode (-v, -vv, -vvv, etc.)
    #[structopt(short, long, parse(from_occurrences))]
    verbose: usize,

    /// The ip and port on which the database will listen for HTTP requests.
    #[structopt(short = "l", long, default_value = "127.0.0.1:9700")]
    http_listen_addr: String,

    // Indexing-related options, merged into this command line (see `IndexerOpt`).
    #[structopt(flatten)]
    indexer: IndexerOpt,
}
|
|
|
|
|
2020-10-27 03:18:10 +08:00
|
|
|
#[derive(Debug, Clone, StructOpt)]
|
|
|
|
pub struct IndexerOpt {
|
|
|
|
/// The amount of documents to skip before printing
|
|
|
|
/// a log regarding the indexing advancement.
|
2020-11-10 00:34:52 +08:00
|
|
|
#[structopt(long, default_value = "100000")] // 100k
|
2020-10-27 03:18:10 +08:00
|
|
|
pub log_every_n: usize,
|
|
|
|
|
|
|
|
/// MTBL max number of chunks in bytes.
|
|
|
|
#[structopt(long)]
|
|
|
|
pub max_nb_chunks: Option<usize>,
|
|
|
|
|
|
|
|
/// The maximum amount of memory to use for the MTBL buffer. It is recommended
|
|
|
|
/// to use something like 80%-90% of the available memory.
|
|
|
|
///
|
|
|
|
/// It is automatically split by the number of jobs e.g. if you use 7 jobs
|
|
|
|
/// and 7 GB of max memory, each thread will use a maximum of 1 GB.
|
|
|
|
#[structopt(long, default_value = "7516192768")] // 7 GB
|
|
|
|
pub max_memory: usize,
|
|
|
|
|
|
|
|
/// Size of the linked hash map cache when indexing.
|
|
|
|
/// The bigger it is, the faster the indexing is but the more memory it takes.
|
|
|
|
#[structopt(long, default_value = "500")]
|
|
|
|
pub linked_hash_map_size: usize,
|
|
|
|
|
|
|
|
/// The name of the compression algorithm to use when compressing intermediate
|
|
|
|
/// chunks during indexing documents.
|
|
|
|
///
|
|
|
|
/// Choosing a fast algorithm will make the indexing faster but may consume more memory.
|
|
|
|
#[structopt(long, default_value = "snappy", possible_values = &["snappy", "zlib", "lz4", "lz4hc", "zstd"])]
|
|
|
|
pub chunk_compression_type: CompressionType,
|
|
|
|
|
|
|
|
/// The level of compression of the chosen algorithm.
|
|
|
|
#[structopt(long, requires = "chunk-compression-type")]
|
|
|
|
pub chunk_compression_level: Option<u32>,
|
|
|
|
|
|
|
|
/// The number of bytes to remove from the begining of the chunks while reading/sorting
|
|
|
|
/// or merging them.
|
|
|
|
///
|
|
|
|
/// File fusing must only be enable on file systems that support the `FALLOC_FL_COLLAPSE_RANGE`,
|
|
|
|
/// (i.e. ext4 and XFS). File fusing will only work if the `enable-chunk-fusing` is set.
|
|
|
|
#[structopt(long, default_value = "4294967296")] // 4 GB
|
|
|
|
pub chunk_fusing_shrink_size: u64,
|
|
|
|
|
|
|
|
/// Enable the chunk fusing or not, this reduces the amount of disk used by a factor of 2.
|
|
|
|
#[structopt(long)]
|
|
|
|
pub enable_chunk_fusing: bool,
|
|
|
|
|
|
|
|
/// Number of parallel jobs for indexing, defaults to # of CPUs.
|
|
|
|
#[structopt(long)]
|
|
|
|
pub indexing_jobs: Option<usize>,
|
|
|
|
}
|
|
|
|
|
2020-11-05 20:58:07 +08:00
|
|
|
fn highlight_record(
|
|
|
|
object: &mut Map<String, Value>,
|
|
|
|
words_to_highlight: &HashSet<String>,
|
|
|
|
attributes_to_highlight: &HashSet<String>,
|
|
|
|
) {
|
|
|
|
// TODO do we need to create a string for element that are not and needs to be highlight?
|
|
|
|
fn highlight_value(value: Value, words_to_highlight: &HashSet<String>) -> Value {
|
|
|
|
match value {
|
|
|
|
Value::Null => Value::Null,
|
|
|
|
Value::Bool(boolean) => Value::Bool(boolean),
|
|
|
|
Value::Number(number) => Value::Number(number),
|
|
|
|
Value::String(old_string) => {
|
|
|
|
let mut string = String::new();
|
|
|
|
for (token_type, token) in simple_tokenizer(&old_string) {
|
|
|
|
if token_type == TokenType::Word {
|
|
|
|
let lowercase_token = token.to_lowercase();
|
|
|
|
let to_highlight = words_to_highlight.contains(&lowercase_token);
|
|
|
|
if to_highlight { string.push_str("<mark>") }
|
|
|
|
string.push_str(token);
|
|
|
|
if to_highlight { string.push_str("</mark>") }
|
|
|
|
} else {
|
|
|
|
string.push_str(token);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Value::String(string)
|
|
|
|
},
|
|
|
|
Value::Array(values) => {
|
|
|
|
Value::Array(values.into_iter()
|
|
|
|
.map(|v| highlight_value(v, words_to_highlight))
|
|
|
|
.collect())
|
|
|
|
},
|
|
|
|
Value::Object(object) => {
|
|
|
|
Value::Object(object.into_iter()
|
|
|
|
.map(|(k, v)| (k, highlight_value(v, words_to_highlight)))
|
|
|
|
.collect())
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (key, value) in object.iter_mut() {
|
|
|
|
if attributes_to_highlight.contains(key) {
|
|
|
|
let old_value = mem::take(value);
|
|
|
|
*value = highlight_value(old_value, words_to_highlight);
|
2020-08-31 03:50:30 +08:00
|
|
|
}
|
2020-08-05 19:52:27 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-11 20:17:37 +08:00
|
|
|
/// Context rendered by the `index.html` askama template (the search dashboard).
#[derive(Template)]
#[template(path = "index.html")]
struct IndexTemplate {
    /// Database name, derived from the database path's file stem.
    db_name: String,
    /// Size in bytes of the LMDB `data.mdb` file.
    db_size: usize,
    /// Number of documents currently stored in the index.
    docs_count: usize,
}
|
|
|
|
|
2020-10-20 01:57:15 +08:00
|
|
|
/// Context rendered by the `updates.html` askama template (the updates dashboard).
///
/// `M`, `P` and `N` are the meta types of the pending, progressing and
/// processed update statuses respectively; they must be serializable so the
/// template can embed them in the page.
#[derive(Template)]
#[template(path = "updates.html")]
struct UpdatesTemplate<M: Serialize + Send, P: Serialize + Send, N: Serialize + Send> {
    /// Database name, derived from the database path's file stem.
    db_name: String,
    /// Size in bytes of the LMDB `data.mdb` file.
    db_size: usize,
    /// Number of documents currently stored in the index.
    docs_count: usize,
    /// Updates to display (the caller reverses the list before rendering).
    updates: Vec<UpdateStatus<M, P, N>>,
}
|
|
|
|
|
|
|
|
/// Status of an update, broadcast to subscribers through the update status
/// channel; serialized to JSON with a `"type"` tag carrying the variant name.
#[derive(Debug, Clone, Serialize)]
#[serde(tag = "type")]
enum UpdateStatus<M, P, N> {
    /// Registered in the update store but not processed yet.
    Pending { update_id: u64, meta: M },
    /// Currently being processed; `meta` carries progress information.
    Progressing { update_id: u64, meta: P },
    /// Processing finished; `meta` carries the outcome description.
    Processed { update_id: u64, meta: N },
}
|
|
|
|
|
|
|
|
/// Kind of update registered in the update store; serialized to JSON with a
/// `"type"` tag carrying the variant name.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
enum UpdateMeta {
    /// Add documents to the index. `method` is `"replace"` or `"update"` and
    /// `format` is `"csv"`, `"json"` or `"json-stream"` (both are matched as
    /// strings when the update is processed).
    DocumentsAddition { method: String, format: String },
    /// Remove every document from the index.
    ClearDocuments,
    /// Apply (or reset) index settings.
    Settings(Settings),
}
|
|
|
|
|
|
|
|
/// Progress payload attached to an `UpdateStatus::Progressing` message.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
enum UpdateMetaProgress {
    DocumentsAddition {
        /// Current indexing step, as reported by the update builder.
        step: usize,
        /// Total number of steps of the indexing process.
        total_steps: usize,
        /// Number of items processed so far within the current step.
        current: usize,
        /// Total number of items of the current step, when known.
        total: Option<usize>,
    },
}
|
|
|
|
|
2020-11-02 22:47:21 +08:00
|
|
|
/// Index settings as received in the JSON payload of a settings update.
///
/// Each field is a double `Option`: the outer one distinguishes "field absent
/// from the payload" (`None`, via `default`, meaning leave the setting
/// untouched) from "field present" (`Some`, via `deserialize_some`), while the
/// inner one distinguishes an explicit `null` (`None`, meaning reset the
/// setting) from an actual list of attribute names.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct Settings {
    #[serde(
        default,
        deserialize_with = "deserialize_some",
        skip_serializing_if = "Option::is_none",
    )]
    displayed_attributes: Option<Option<Vec<String>>>,

    #[serde(
        default,
        deserialize_with = "deserialize_some",
        skip_serializing_if = "Option::is_none",
    )]
    searchable_attributes: Option<Option<Vec<String>>>,
}
|
|
|
|
|
|
|
|
// Any value that is present is considered Some value, including null.
|
|
|
|
fn deserialize_some<'de, T, D>(deserializer: D) -> Result<Option<T>, D::Error>
|
|
|
|
where T: Deserialize<'de>,
|
|
|
|
D: Deserializer<'de>
|
|
|
|
{
|
|
|
|
Deserialize::deserialize(deserializer).map(Some)
|
|
|
|
}
|
|
|
|
|
2020-11-05 18:16:39 +08:00
|
|
|
#[tokio::main]
|
|
|
|
async fn main() -> anyhow::Result<()> {
|
|
|
|
let opt = Opt::from_args();
|
|
|
|
|
2020-07-12 17:04:35 +08:00
|
|
|
stderrlog::new()
|
|
|
|
.verbosity(opt.verbose)
|
|
|
|
.show_level(false)
|
|
|
|
.timestamp(stderrlog::Timestamp::Off)
|
|
|
|
.init()?;
|
|
|
|
|
2020-10-20 21:00:58 +08:00
|
|
|
create_dir_all(&opt.database)?;
|
2020-10-30 17:56:35 +08:00
|
|
|
let mut options = EnvOpenOptions::new();
|
|
|
|
options.map_size(opt.database_size);
|
2020-05-31 23:48:13 +08:00
|
|
|
|
2020-11-03 02:11:22 +08:00
|
|
|
// Setup the global thread pool
|
|
|
|
let jobs = opt.indexer.indexing_jobs.unwrap_or(0);
|
|
|
|
let pool = rayon::ThreadPoolBuilder::new().num_threads(jobs).build()?;
|
|
|
|
GLOBAL_THREAD_POOL.set(pool).unwrap();
|
|
|
|
|
2020-08-07 19:11:31 +08:00
|
|
|
// Open the LMDB database.
|
2020-10-30 17:56:35 +08:00
|
|
|
let index = Index::new(options, &opt.database)?;
|
2020-08-07 19:11:31 +08:00
|
|
|
|
2020-10-19 22:03:17 +08:00
|
|
|
// Setup the LMDB based update database.
|
|
|
|
let mut update_store_options = EnvOpenOptions::new();
|
|
|
|
update_store_options.map_size(opt.update_database_size);
|
|
|
|
|
|
|
|
let update_store_path = opt.database.join("updates.mdb");
|
|
|
|
create_dir_all(&update_store_path)?;
|
|
|
|
|
2020-10-20 17:19:34 +08:00
|
|
|
let (update_status_sender, _) = broadcast::channel(100);
|
2020-10-19 22:03:17 +08:00
|
|
|
let update_status_sender_cloned = update_status_sender.clone();
|
2020-10-20 21:00:58 +08:00
|
|
|
let index_cloned = index.clone();
|
|
|
|
let indexer_opt_cloned = opt.indexer.clone();
|
2020-10-19 22:03:17 +08:00
|
|
|
let update_store = UpdateStore::open(
|
|
|
|
update_store_options,
|
|
|
|
update_store_path,
|
2020-10-21 21:38:28 +08:00
|
|
|
move |update_id, meta, content| {
|
2020-10-27 03:18:10 +08:00
|
|
|
// We prepare the update by using the update builder.
|
|
|
|
let mut update_builder = UpdateBuilder::new();
|
|
|
|
if let Some(max_nb_chunks) = indexer_opt_cloned.max_nb_chunks {
|
|
|
|
update_builder.max_nb_chunks(max_nb_chunks);
|
|
|
|
}
|
|
|
|
if let Some(chunk_compression_level) = indexer_opt_cloned.chunk_compression_level {
|
|
|
|
update_builder.chunk_compression_level(chunk_compression_level);
|
|
|
|
}
|
2020-11-03 02:11:22 +08:00
|
|
|
update_builder.thread_pool(GLOBAL_THREAD_POOL.get().unwrap());
|
2020-10-27 03:18:10 +08:00
|
|
|
update_builder.log_every_n(indexer_opt_cloned.log_every_n);
|
|
|
|
update_builder.max_memory(indexer_opt_cloned.max_memory);
|
|
|
|
update_builder.linked_hash_map_size(indexer_opt_cloned.linked_hash_map_size);
|
|
|
|
update_builder.chunk_compression_type(indexer_opt_cloned.chunk_compression_type);
|
|
|
|
update_builder.chunk_fusing_shrink_size(indexer_opt_cloned.chunk_fusing_shrink_size);
|
|
|
|
|
|
|
|
// we extract the update type and execute the update itself.
|
|
|
|
let result: anyhow::Result<()> = match meta {
|
2020-11-01 00:48:24 +08:00
|
|
|
UpdateMeta::DocumentsAddition { method, format } => {
|
2020-10-24 22:23:08 +08:00
|
|
|
// We must use the write transaction of the update here.
|
2020-10-30 17:56:35 +08:00
|
|
|
let mut wtxn = index_cloned.write_txn()?;
|
2020-10-27 03:18:10 +08:00
|
|
|
let mut builder = update_builder.index_documents(&mut wtxn, &index_cloned);
|
|
|
|
|
2020-11-01 00:48:24 +08:00
|
|
|
match format.as_str() {
|
|
|
|
"csv" => builder.update_format(UpdateFormat::Csv),
|
2020-11-01 18:50:10 +08:00
|
|
|
"json" => builder.update_format(UpdateFormat::Json),
|
|
|
|
"json-stream" => builder.update_format(UpdateFormat::JsonStream),
|
2020-11-01 00:48:24 +08:00
|
|
|
otherwise => panic!("invalid update format {:?}", otherwise),
|
|
|
|
};
|
|
|
|
|
|
|
|
match method.as_str() {
|
|
|
|
"replace" => builder.index_documents_method(IndexDocumentsMethod::ReplaceDocuments),
|
|
|
|
"update" => builder.index_documents_method(IndexDocumentsMethod::UpdateDocuments),
|
|
|
|
otherwise => panic!("invalid indexing method {:?}", otherwise),
|
|
|
|
};
|
2020-10-24 22:23:08 +08:00
|
|
|
|
|
|
|
let gzipped = false;
|
|
|
|
let reader = if gzipped {
|
|
|
|
Box::new(GzDecoder::new(content))
|
|
|
|
} else {
|
|
|
|
Box::new(content) as Box<dyn io::Read>
|
2020-10-21 21:38:28 +08:00
|
|
|
};
|
|
|
|
|
2020-11-11 20:14:16 +08:00
|
|
|
let result = builder.execute(reader, |indexing_step| {
|
|
|
|
let (current, total) = match indexing_step {
|
|
|
|
TransformFromUserIntoGenericFormat { documents_seen } => (documents_seen, None),
|
|
|
|
ComputeIdsAndMergeDocuments { documents_seen, total_documents } => (documents_seen, Some(total_documents)),
|
|
|
|
IndexDocuments { documents_seen, total_documents } => (documents_seen, Some(total_documents)),
|
|
|
|
MergeDataIntoFinalDatabase { databases_seen, total_databases } => (databases_seen, Some(total_databases)),
|
|
|
|
};
|
2020-10-27 03:18:10 +08:00
|
|
|
let _ = update_status_sender_cloned.send(UpdateStatus::Progressing {
|
|
|
|
update_id,
|
|
|
|
meta: UpdateMetaProgress::DocumentsAddition {
|
2020-11-11 20:14:16 +08:00
|
|
|
step: indexing_step.step(),
|
|
|
|
total_steps: indexing_step.number_of_steps(),
|
|
|
|
current,
|
|
|
|
total,
|
2020-10-27 03:18:10 +08:00
|
|
|
}
|
|
|
|
});
|
|
|
|
});
|
|
|
|
|
|
|
|
match result {
|
|
|
|
Ok(()) => wtxn.commit().map_err(Into::into),
|
|
|
|
Err(e) => Err(e.into())
|
|
|
|
}
|
2020-10-24 22:23:08 +08:00
|
|
|
},
|
2020-10-30 20:12:55 +08:00
|
|
|
UpdateMeta::ClearDocuments => {
|
|
|
|
// We must use the write transaction of the update here.
|
|
|
|
let mut wtxn = index_cloned.write_txn()?;
|
|
|
|
let builder = update_builder.clear_documents(&mut wtxn, &index_cloned);
|
|
|
|
|
|
|
|
match builder.execute() {
|
|
|
|
Ok(_count) => wtxn.commit().map_err(Into::into),
|
|
|
|
Err(e) => Err(e.into())
|
|
|
|
}
|
2020-11-02 22:31:20 +08:00
|
|
|
},
|
2020-11-02 22:47:21 +08:00
|
|
|
UpdateMeta::Settings(settings) => {
|
|
|
|
// We must use the write transaction of the update here.
|
|
|
|
let mut wtxn = index_cloned.write_txn()?;
|
|
|
|
let mut builder = update_builder.settings(&mut wtxn, &index_cloned);
|
|
|
|
|
2020-11-04 02:35:55 +08:00
|
|
|
// We transpose the settings JSON struct into a real setting update.
|
|
|
|
if let Some(names) = settings.searchable_attributes {
|
|
|
|
match names {
|
|
|
|
Some(names) => builder.set_searchable_fields(names),
|
|
|
|
None => builder.reset_searchable_fields(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-02 22:47:21 +08:00
|
|
|
// We transpose the settings JSON struct into a real setting update.
|
|
|
|
if let Some(names) = settings.displayed_attributes {
|
|
|
|
match names {
|
|
|
|
Some(names) => builder.set_displayed_fields(names),
|
|
|
|
None => builder.reset_displayed_fields(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-11 20:14:16 +08:00
|
|
|
let result = builder.execute(|indexing_step| {
|
|
|
|
let (current, total) = match indexing_step {
|
|
|
|
TransformFromUserIntoGenericFormat { documents_seen } => (documents_seen, None),
|
|
|
|
ComputeIdsAndMergeDocuments { documents_seen, total_documents } => (documents_seen, Some(total_documents)),
|
|
|
|
IndexDocuments { documents_seen, total_documents } => (documents_seen, Some(total_documents)),
|
|
|
|
MergeDataIntoFinalDatabase { databases_seen, total_databases } => (databases_seen, Some(total_databases)),
|
|
|
|
};
|
2020-11-03 20:20:11 +08:00
|
|
|
let _ = update_status_sender_cloned.send(UpdateStatus::Progressing {
|
|
|
|
update_id,
|
|
|
|
meta: UpdateMetaProgress::DocumentsAddition {
|
2020-11-11 20:14:16 +08:00
|
|
|
step: indexing_step.step(),
|
|
|
|
total_steps: indexing_step.number_of_steps(),
|
|
|
|
current,
|
|
|
|
total,
|
2020-11-03 20:20:11 +08:00
|
|
|
}
|
|
|
|
});
|
|
|
|
});
|
|
|
|
|
|
|
|
match result {
|
2020-11-02 22:47:21 +08:00
|
|
|
Ok(_count) => wtxn.commit().map_err(Into::into),
|
|
|
|
Err(e) => Err(e.into())
|
|
|
|
}
|
2020-10-21 21:38:28 +08:00
|
|
|
}
|
|
|
|
};
|
2020-10-20 21:00:58 +08:00
|
|
|
|
|
|
|
let meta = match result {
|
|
|
|
Ok(()) => format!("valid update content"),
|
2020-10-28 18:17:36 +08:00
|
|
|
Err(e) => format!("error while processing update content: {:?}", e),
|
2020-10-20 21:00:58 +08:00
|
|
|
};
|
2020-10-20 18:28:10 +08:00
|
|
|
|
2020-10-20 18:09:38 +08:00
|
|
|
let processed = UpdateStatus::Processed { update_id, meta: meta.clone() };
|
|
|
|
let _ = update_status_sender_cloned.send(processed);
|
2020-10-20 21:00:58 +08:00
|
|
|
|
2020-10-19 22:03:17 +08:00
|
|
|
Ok(meta)
|
|
|
|
})?;
|
|
|
|
|
2020-10-20 21:00:58 +08:00
|
|
|
// The database name will not change.
|
2020-07-11 20:17:37 +08:00
|
|
|
let db_name = opt.database.file_stem().and_then(|s| s.to_str()).unwrap_or("").to_string();
|
2020-10-20 21:00:58 +08:00
|
|
|
let lmdb_path = opt.database.join("data.mdb");
|
2020-07-11 20:17:37 +08:00
|
|
|
|
2020-05-31 23:48:13 +08:00
|
|
|
// We run and wait on the HTTP server
|
|
|
|
|
|
|
|
// Expose an HTML page to debug the search in a browser
|
2020-10-20 01:57:15 +08:00
|
|
|
let db_name_cloned = db_name.clone();
|
2020-10-20 21:00:58 +08:00
|
|
|
let lmdb_path_cloned = lmdb_path.clone();
|
|
|
|
let index_cloned = index.clone();
|
2020-05-31 23:48:13 +08:00
|
|
|
let dash_html_route = warp::filters::method::get()
|
|
|
|
.and(warp::filters::path::end())
|
2020-10-20 21:00:58 +08:00
|
|
|
.map(move || {
|
|
|
|
// We retrieve the database size.
|
|
|
|
let db_size = File::open(lmdb_path_cloned.clone())
|
|
|
|
.unwrap()
|
|
|
|
.metadata()
|
|
|
|
.unwrap()
|
|
|
|
.len() as usize;
|
|
|
|
|
|
|
|
// And the number of documents in the database.
|
2020-10-30 18:42:00 +08:00
|
|
|
let rtxn = index_cloned.read_txn().unwrap();
|
2020-10-20 21:00:58 +08:00
|
|
|
let docs_count = index_cloned.clone().number_of_documents(&rtxn).unwrap() as usize;
|
|
|
|
|
|
|
|
IndexTemplate { db_name: db_name_cloned.clone(), db_size, docs_count }
|
|
|
|
});
|
2020-10-20 01:57:15 +08:00
|
|
|
|
|
|
|
let update_store_cloned = update_store.clone();
|
2020-10-20 21:00:58 +08:00
|
|
|
let lmdb_path_cloned = lmdb_path.clone();
|
|
|
|
let index_cloned = index.clone();
|
2020-10-20 01:57:15 +08:00
|
|
|
let updates_list_or_html_route = warp::filters::method::get()
|
|
|
|
.and(warp::header("Accept"))
|
|
|
|
.and(warp::path!("updates"))
|
|
|
|
.map(move |header: String| {
|
|
|
|
let update_store = update_store_cloned.clone();
|
|
|
|
let mut updates = update_store.iter_metas(|processed, pending| {
|
2020-10-21 21:38:28 +08:00
|
|
|
let mut updates = Vec::<UpdateStatus<_, UpdateMetaProgress, _>>::new();
|
2020-10-20 01:57:15 +08:00
|
|
|
for result in processed {
|
2020-10-20 18:09:38 +08:00
|
|
|
let (uid, meta) = result?;
|
|
|
|
updates.push(UpdateStatus::Processed { update_id: uid.get(), meta });
|
2020-10-20 01:57:15 +08:00
|
|
|
}
|
|
|
|
for result in pending {
|
2020-10-20 18:09:38 +08:00
|
|
|
let (uid, meta) = result?;
|
|
|
|
updates.push(UpdateStatus::Pending { update_id: uid.get(), meta });
|
2020-10-20 01:57:15 +08:00
|
|
|
}
|
|
|
|
Ok(updates)
|
|
|
|
}).unwrap();
|
|
|
|
|
|
|
|
if header.contains("text/html") {
|
|
|
|
updates.reverse();
|
2020-10-20 21:00:58 +08:00
|
|
|
|
|
|
|
// We retrieve the database size.
|
|
|
|
let db_size = File::open(lmdb_path_cloned.clone())
|
|
|
|
.unwrap()
|
|
|
|
.metadata()
|
|
|
|
.unwrap()
|
|
|
|
.len() as usize;
|
|
|
|
|
|
|
|
// And the number of documents in the database.
|
2020-10-30 18:42:00 +08:00
|
|
|
let rtxn = index_cloned.read_txn().unwrap();
|
2020-10-20 21:00:58 +08:00
|
|
|
let docs_count = index_cloned.clone().number_of_documents(&rtxn).unwrap() as usize;
|
|
|
|
|
2020-10-20 18:09:38 +08:00
|
|
|
let template = UpdatesTemplate {
|
|
|
|
db_name: db_name.clone(),
|
|
|
|
db_size,
|
|
|
|
docs_count,
|
|
|
|
updates,
|
|
|
|
};
|
2020-10-20 01:57:15 +08:00
|
|
|
Box::new(template) as Box<dyn warp::Reply>
|
|
|
|
} else {
|
|
|
|
Box::new(warp::reply::json(&updates))
|
|
|
|
}
|
|
|
|
});
|
2020-05-31 23:48:13 +08:00
|
|
|
|
|
|
|
let dash_bulma_route = warp::filters::method::get()
|
|
|
|
.and(warp::path!("bulma.min.css"))
|
|
|
|
.map(|| Response::builder()
|
|
|
|
.header("content-type", "text/css; charset=utf-8")
|
2020-11-05 18:16:39 +08:00
|
|
|
.body(include_str!("../public/bulma.min.css"))
|
2020-05-31 23:48:13 +08:00
|
|
|
);
|
|
|
|
|
2020-07-14 05:51:41 +08:00
|
|
|
let dash_bulma_dark_route = warp::filters::method::get()
|
|
|
|
.and(warp::path!("bulma-prefers-dark.min.css"))
|
|
|
|
.map(|| Response::builder()
|
|
|
|
.header("content-type", "text/css; charset=utf-8")
|
2020-11-05 18:16:39 +08:00
|
|
|
.body(include_str!("../public/bulma-prefers-dark.min.css"))
|
2020-07-14 05:51:41 +08:00
|
|
|
);
|
|
|
|
|
2020-07-11 17:48:27 +08:00
|
|
|
let dash_style_route = warp::filters::method::get()
|
|
|
|
.and(warp::path!("style.css"))
|
|
|
|
.map(|| Response::builder()
|
|
|
|
.header("content-type", "text/css; charset=utf-8")
|
2020-11-05 18:16:39 +08:00
|
|
|
.body(include_str!("../public/style.css"))
|
2020-07-11 17:48:27 +08:00
|
|
|
);
|
|
|
|
|
2020-05-31 23:48:13 +08:00
|
|
|
let dash_jquery_route = warp::filters::method::get()
|
|
|
|
.and(warp::path!("jquery-3.4.1.min.js"))
|
|
|
|
.map(|| Response::builder()
|
|
|
|
.header("content-type", "application/javascript; charset=utf-8")
|
2020-11-05 18:16:39 +08:00
|
|
|
.body(include_str!("../public/jquery-3.4.1.min.js"))
|
2020-05-31 23:48:13 +08:00
|
|
|
);
|
|
|
|
|
2020-07-11 20:17:37 +08:00
|
|
|
let dash_filesize_route = warp::filters::method::get()
|
|
|
|
.and(warp::path!("filesize.min.js"))
|
|
|
|
.map(|| Response::builder()
|
|
|
|
.header("content-type", "application/javascript; charset=utf-8")
|
2020-11-05 18:16:39 +08:00
|
|
|
.body(include_str!("../public/filesize.min.js"))
|
2020-07-11 20:17:37 +08:00
|
|
|
);
|
|
|
|
|
2020-07-11 17:48:27 +08:00
|
|
|
let dash_script_route = warp::filters::method::get()
|
|
|
|
.and(warp::path!("script.js"))
|
|
|
|
.map(|| Response::builder()
|
|
|
|
.header("content-type", "application/javascript; charset=utf-8")
|
2020-11-05 18:16:39 +08:00
|
|
|
.body(include_str!("../public/script.js"))
|
2020-07-11 17:48:27 +08:00
|
|
|
);
|
|
|
|
|
2020-10-20 01:57:15 +08:00
|
|
|
let updates_script_route = warp::filters::method::get()
|
|
|
|
.and(warp::path!("updates-script.js"))
|
|
|
|
.map(|| Response::builder()
|
|
|
|
.header("content-type", "application/javascript; charset=utf-8")
|
2020-11-05 18:16:39 +08:00
|
|
|
.body(include_str!("../public/updates-script.js"))
|
2020-10-20 01:57:15 +08:00
|
|
|
);
|
|
|
|
|
2020-07-16 05:51:12 +08:00
|
|
|
let dash_logo_white_route = warp::filters::method::get()
|
|
|
|
.and(warp::path!("logo-white.svg"))
|
|
|
|
.map(|| Response::builder()
|
|
|
|
.header("content-type", "image/svg+xml")
|
2020-11-05 18:16:39 +08:00
|
|
|
.body(include_str!("../public/logo-white.svg"))
|
2020-07-16 05:51:12 +08:00
|
|
|
);
|
|
|
|
|
|
|
|
let dash_logo_black_route = warp::filters::method::get()
|
|
|
|
.and(warp::path!("logo-black.svg"))
|
|
|
|
.map(|| Response::builder()
|
|
|
|
.header("content-type", "image/svg+xml")
|
2020-11-05 18:16:39 +08:00
|
|
|
.body(include_str!("../public/logo-black.svg"))
|
2020-07-16 05:51:12 +08:00
|
|
|
);
|
|
|
|
|
2020-05-31 23:48:13 +08:00
|
|
|
#[derive(Deserialize)]
|
|
|
|
struct QueryBody {
|
2020-10-06 20:52:05 +08:00
|
|
|
query: Option<String>,
|
2020-05-31 23:48:13 +08:00
|
|
|
}
|
|
|
|
|
2020-07-14 17:27:46 +08:00
|
|
|
let disable_highlighting = opt.disable_highlighting;
|
2020-11-11 00:00:38 +08:00
|
|
|
let index_cloned = index.clone();
|
2020-05-31 23:48:13 +08:00
|
|
|
let query_route = warp::filters::method::post()
|
|
|
|
.and(warp::path!("query"))
|
|
|
|
.and(warp::body::json())
|
|
|
|
.map(move |query: QueryBody| {
|
|
|
|
let before_search = Instant::now();
|
2020-11-11 00:00:38 +08:00
|
|
|
let index = index_cloned.clone();
|
2020-10-30 17:56:35 +08:00
|
|
|
let rtxn = index.read_txn().unwrap();
|
2020-05-31 23:48:13 +08:00
|
|
|
|
2020-10-06 20:52:05 +08:00
|
|
|
let mut search = index.search(&rtxn);
|
|
|
|
if let Some(query) = query.query {
|
|
|
|
search.query(query);
|
|
|
|
}
|
|
|
|
|
|
|
|
let SearchResult { found_words, documents_ids } = search.execute().unwrap();
|
2020-05-31 23:48:13 +08:00
|
|
|
|
2020-10-22 00:26:29 +08:00
|
|
|
let mut documents = Vec::new();
|
2020-10-26 01:32:01 +08:00
|
|
|
let fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
2020-11-02 20:01:32 +08:00
|
|
|
let displayed_fields = match index.displayed_fields(&rtxn).unwrap() {
|
|
|
|
Some(fields) => Cow::Borrowed(fields),
|
|
|
|
None => Cow::Owned(fields_ids_map.iter().map(|(id, _)| id).collect()),
|
|
|
|
};
|
2020-11-05 20:58:07 +08:00
|
|
|
let attributes_to_highlight = match index.searchable_fields(&rtxn).unwrap() {
|
|
|
|
Some(fields) => fields.iter().flat_map(|id| fields_ids_map.name(*id)).map(ToOwned::to_owned).collect(),
|
|
|
|
None => fields_ids_map.iter().map(|(_, name)| name).map(ToOwned::to_owned).collect(),
|
|
|
|
};
|
2020-10-22 20:23:33 +08:00
|
|
|
|
2020-11-05 20:58:07 +08:00
|
|
|
for (_id, obkv) in index.documents(&rtxn, documents_ids).unwrap() {
|
|
|
|
let mut object = obkv_to_json(&displayed_fields, &fields_ids_map, obkv).unwrap();
|
2020-10-22 20:23:33 +08:00
|
|
|
if !disable_highlighting {
|
2020-11-05 20:58:07 +08:00
|
|
|
highlight_record(&mut object, &found_words, &attributes_to_highlight);
|
2020-10-22 00:26:29 +08:00
|
|
|
}
|
2020-10-22 20:23:33 +08:00
|
|
|
|
2020-11-05 20:58:07 +08:00
|
|
|
documents.push(object);
|
2020-10-22 00:26:29 +08:00
|
|
|
}
|
2020-05-31 23:48:13 +08:00
|
|
|
|
|
|
|
Response::builder()
|
2020-10-22 00:26:29 +08:00
|
|
|
.header("Content-Type", "application/json")
|
2020-05-31 23:48:13 +08:00
|
|
|
.header("Time-Ms", before_search.elapsed().as_millis().to_string())
|
2020-10-22 00:26:29 +08:00
|
|
|
.body(serde_json::to_string(&documents).unwrap())
|
2020-05-31 23:48:13 +08:00
|
|
|
});
|
|
|
|
|
2020-11-11 00:00:38 +08:00
|
|
|
let index_cloned = index.clone();
|
|
|
|
let document_route = warp::filters::method::get()
|
|
|
|
.and(warp::path!("document" / String))
|
|
|
|
.map(move |id: String| {
|
|
|
|
let index = index_cloned.clone();
|
|
|
|
let rtxn = index.read_txn().unwrap();
|
|
|
|
|
|
|
|
let users_ids_documents_ids = index.users_ids_documents_ids(&rtxn).unwrap();
|
|
|
|
let fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
|
|
|
let displayed_fields = match index.displayed_fields(&rtxn).unwrap() {
|
|
|
|
Some(fields) => Cow::Borrowed(fields),
|
|
|
|
None => Cow::Owned(fields_ids_map.iter().map(|(id, _)| id).collect()),
|
|
|
|
};
|
|
|
|
|
|
|
|
match users_ids_documents_ids.get(&id) {
|
|
|
|
Some(document_id) => {
|
|
|
|
let document_id = document_id as u32;
|
|
|
|
let (_, obkv) = index.documents(&rtxn, Some(document_id)).unwrap().pop().unwrap();
|
|
|
|
let document = obkv_to_json(&displayed_fields, &fields_ids_map, obkv).unwrap();
|
|
|
|
|
|
|
|
Response::builder()
|
|
|
|
.header("Content-Type", "application/json")
|
|
|
|
.body(serde_json::to_string(&document).unwrap())
|
|
|
|
},
|
|
|
|
None => {
|
|
|
|
Response::builder()
|
|
|
|
.status(404)
|
|
|
|
.body(format!("Document with id {:?} not found.", id))
|
|
|
|
},
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
2020-10-19 22:03:17 +08:00
|
|
|
async fn buf_stream(
|
2020-10-21 21:38:28 +08:00
|
|
|
update_store: Arc<UpdateStore<UpdateMeta, String>>,
|
|
|
|
update_status_sender: broadcast::Sender<UpdateStatus<UpdateMeta, UpdateMetaProgress, String>>,
|
2020-11-01 00:48:24 +08:00
|
|
|
update_method: Option<String>,
|
|
|
|
update_format: UpdateFormat,
|
2020-10-19 22:03:17 +08:00
|
|
|
mut stream: impl futures::Stream<Item=Result<impl bytes::Buf, warp::Error>> + Unpin,
|
|
|
|
) -> Result<impl warp::Reply, warp::Rejection>
|
|
|
|
{
|
|
|
|
let file = tokio::task::block_in_place(tempfile::tempfile).unwrap();
|
|
|
|
let mut file = TFile::from_std(file);
|
|
|
|
|
|
|
|
while let Some(result) = stream.next().await {
|
|
|
|
let bytes = result.unwrap().to_bytes();
|
|
|
|
file.write_all(&bytes[..]).await.unwrap();
|
|
|
|
}
|
|
|
|
|
|
|
|
let file = file.into_std().await;
|
|
|
|
let mmap = unsafe { memmap::Mmap::map(&file).unwrap() };
|
|
|
|
|
2020-11-01 00:48:24 +08:00
|
|
|
let method = match update_method.as_deref() {
|
|
|
|
Some("replace") => String::from("replace"),
|
|
|
|
Some("update") => String::from("update"),
|
|
|
|
_ => String::from("replace"),
|
|
|
|
};
|
|
|
|
|
|
|
|
let format = match update_format {
|
|
|
|
UpdateFormat::Csv => String::from("csv"),
|
|
|
|
UpdateFormat::Json => String::from("json"),
|
2020-11-01 18:50:10 +08:00
|
|
|
UpdateFormat::JsonStream => String::from("json-stream"),
|
2020-11-05 18:16:39 +08:00
|
|
|
_ => panic!("Unknown update format"),
|
2020-11-01 00:48:24 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
let meta = UpdateMeta::DocumentsAddition { method, format };
|
2020-10-20 18:09:38 +08:00
|
|
|
let update_id = update_store.register_update(&meta, &mmap[..]).unwrap();
|
2020-10-20 21:14:06 +08:00
|
|
|
let _ = update_status_sender.send(UpdateStatus::Pending { update_id, meta });
|
2020-10-20 18:09:38 +08:00
|
|
|
eprintln!("update {} registered", update_id);
|
2020-10-19 22:03:17 +08:00
|
|
|
|
|
|
|
Ok(warp::reply())
|
|
|
|
}
|
|
|
|
|
2020-11-01 00:48:24 +08:00
|
|
|
/// Query-string parameters accepted by the document-indexing routes
/// (`POST /documents` for CSV, JSON and NDJSON payloads).
#[derive(Deserialize)]
struct QueryUpdate {
    // Document addition method, read from `?method=...`.
    // The handler matches on `"replace"` and `"update"`; any other value,
    // or an absent parameter, falls back to `"replace"` (see the
    // `update_method` match in the stream-buffering handler above).
    method: Option<String>,
}
|
|
|
|
|
2020-10-19 22:03:17 +08:00
|
|
|
let update_store_cloned = update_store.clone();
|
2020-10-20 17:19:34 +08:00
|
|
|
let update_status_sender_cloned = update_status_sender.clone();
|
2020-11-02 22:30:29 +08:00
|
|
|
let indexing_csv_route = warp::filters::method::post()
|
2020-10-19 22:03:17 +08:00
|
|
|
.and(warp::path!("documents"))
|
2020-10-21 21:38:28 +08:00
|
|
|
.and(warp::header::exact_ignore_case("content-type", "text/csv"))
|
2020-11-01 00:48:24 +08:00
|
|
|
.and(warp::filters::query::query())
|
|
|
|
.and(warp::body::stream())
|
|
|
|
.and_then(move |params: QueryUpdate, stream| {
|
|
|
|
buf_stream(
|
|
|
|
update_store_cloned.clone(),
|
|
|
|
update_status_sender_cloned.clone(),
|
|
|
|
params.method,
|
|
|
|
UpdateFormat::Csv,
|
|
|
|
stream,
|
|
|
|
)
|
|
|
|
});
|
|
|
|
|
|
|
|
let update_store_cloned = update_store.clone();
|
|
|
|
let update_status_sender_cloned = update_status_sender.clone();
|
2020-11-02 22:30:29 +08:00
|
|
|
let indexing_json_route = warp::filters::method::post()
|
2020-11-01 00:48:24 +08:00
|
|
|
.and(warp::path!("documents"))
|
|
|
|
.and(warp::header::exact_ignore_case("content-type", "application/json"))
|
|
|
|
.and(warp::filters::query::query())
|
2020-10-19 22:03:17 +08:00
|
|
|
.and(warp::body::stream())
|
2020-11-01 00:48:24 +08:00
|
|
|
.and_then(move |params: QueryUpdate, stream| {
|
|
|
|
buf_stream(
|
|
|
|
update_store_cloned.clone(),
|
|
|
|
update_status_sender_cloned.clone(),
|
|
|
|
params.method,
|
|
|
|
UpdateFormat::Json,
|
|
|
|
stream,
|
|
|
|
)
|
2020-10-19 22:03:17 +08:00
|
|
|
});
|
|
|
|
|
2020-11-01 18:50:10 +08:00
|
|
|
let update_store_cloned = update_store.clone();
|
|
|
|
let update_status_sender_cloned = update_status_sender.clone();
|
2020-11-02 22:30:29 +08:00
|
|
|
let indexing_json_stream_route = warp::filters::method::post()
|
2020-11-01 18:50:10 +08:00
|
|
|
.and(warp::path!("documents"))
|
|
|
|
.and(warp::header::exact_ignore_case("content-type", "application/x-ndjson"))
|
|
|
|
.and(warp::filters::query::query())
|
|
|
|
.and(warp::body::stream())
|
|
|
|
.and_then(move |params: QueryUpdate, stream| {
|
|
|
|
buf_stream(
|
|
|
|
update_store_cloned.clone(),
|
|
|
|
update_status_sender_cloned.clone(),
|
|
|
|
params.method,
|
|
|
|
UpdateFormat::JsonStream,
|
|
|
|
stream,
|
|
|
|
)
|
|
|
|
});
|
|
|
|
|
2020-11-02 22:30:29 +08:00
|
|
|
let update_store_cloned = update_store.clone();
|
2020-10-21 21:38:28 +08:00
|
|
|
let update_status_sender_cloned = update_status_sender.clone();
|
2020-10-30 20:12:55 +08:00
|
|
|
let clearing_route = warp::filters::method::post()
|
|
|
|
.and(warp::path!("clear-documents"))
|
|
|
|
.map(move || {
|
|
|
|
let meta = UpdateMeta::ClearDocuments;
|
2020-11-02 22:47:21 +08:00
|
|
|
let update_id = update_store_cloned.register_update(&meta, &[]).unwrap();
|
|
|
|
let _ = update_status_sender_cloned.send(UpdateStatus::Pending { update_id, meta });
|
|
|
|
eprintln!("update {} registered", update_id);
|
|
|
|
Ok(warp::reply())
|
|
|
|
});
|
|
|
|
|
|
|
|
let update_store_cloned = update_store.clone();
|
|
|
|
let update_status_sender_cloned = update_status_sender.clone();
|
|
|
|
let change_settings_route = warp::filters::method::post()
|
|
|
|
.and(warp::path!("settings"))
|
|
|
|
.and(warp::body::json())
|
|
|
|
.map(move |settings: Settings| {
|
|
|
|
let meta = UpdateMeta::Settings(settings);
|
|
|
|
let update_id = update_store_cloned.register_update(&meta, &[]).unwrap();
|
2020-10-21 21:38:28 +08:00
|
|
|
let _ = update_status_sender_cloned.send(UpdateStatus::Pending { update_id, meta });
|
|
|
|
eprintln!("update {} registered", update_id);
|
|
|
|
Ok(warp::reply())
|
|
|
|
});
|
|
|
|
|
2020-10-19 22:03:17 +08:00
|
|
|
let update_ws_route = warp::ws()
|
|
|
|
.and(warp::path!("updates" / "ws"))
|
|
|
|
.map(move |ws: warp::ws::Ws| {
|
|
|
|
// And then our closure will be called when it completes...
|
2020-10-20 17:19:34 +08:00
|
|
|
let update_status_receiver = update_status_sender.subscribe();
|
2020-10-19 22:03:17 +08:00
|
|
|
ws.on_upgrade(|websocket| {
|
|
|
|
// Just echo all updates messages...
|
2020-10-20 17:19:34 +08:00
|
|
|
update_status_receiver
|
|
|
|
.into_stream()
|
|
|
|
.flat_map(|result| {
|
2020-10-20 18:09:38 +08:00
|
|
|
match result {
|
|
|
|
Ok(status) => {
|
|
|
|
let msg = serde_json::to_string(&status).unwrap();
|
|
|
|
stream::iter(Some(Ok(Message::text(msg))))
|
|
|
|
},
|
2020-10-20 17:19:34 +08:00
|
|
|
Err(e) => {
|
|
|
|
eprintln!("channel error: {:?}", e);
|
|
|
|
stream::iter(None)
|
|
|
|
},
|
|
|
|
}
|
|
|
|
})
|
2020-10-19 22:03:17 +08:00
|
|
|
.forward(websocket)
|
|
|
|
.map(|result| {
|
|
|
|
if let Err(e) = result {
|
|
|
|
eprintln!("websocket error: {:?}", e);
|
|
|
|
}
|
|
|
|
})
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2020-05-31 23:48:13 +08:00
|
|
|
let routes = dash_html_route
|
2020-10-20 01:57:15 +08:00
|
|
|
.or(updates_list_or_html_route)
|
2020-05-31 23:48:13 +08:00
|
|
|
.or(dash_bulma_route)
|
2020-07-14 05:51:41 +08:00
|
|
|
.or(dash_bulma_dark_route)
|
2020-07-11 17:48:27 +08:00
|
|
|
.or(dash_style_route)
|
2020-05-31 23:48:13 +08:00
|
|
|
.or(dash_jquery_route)
|
2020-07-11 20:17:37 +08:00
|
|
|
.or(dash_filesize_route)
|
2020-07-11 17:48:27 +08:00
|
|
|
.or(dash_script_route)
|
2020-10-20 01:57:15 +08:00
|
|
|
.or(updates_script_route)
|
2020-07-16 05:51:12 +08:00
|
|
|
.or(dash_logo_white_route)
|
|
|
|
.or(dash_logo_black_route)
|
2020-10-19 22:03:17 +08:00
|
|
|
.or(query_route)
|
2020-11-11 00:00:38 +08:00
|
|
|
.or(document_route)
|
2020-11-02 22:30:29 +08:00
|
|
|
.or(indexing_csv_route)
|
|
|
|
.or(indexing_json_route)
|
|
|
|
.or(indexing_json_stream_route)
|
2020-10-30 20:12:55 +08:00
|
|
|
.or(clearing_route)
|
2020-11-02 22:30:29 +08:00
|
|
|
.or(change_settings_route)
|
2020-10-20 01:57:15 +08:00
|
|
|
.or(update_ws_route);
|
2020-05-31 23:48:13 +08:00
|
|
|
|
2020-10-19 19:44:17 +08:00
|
|
|
let addr = SocketAddr::from_str(&opt.http_listen_addr)?;
|
2020-11-05 18:16:39 +08:00
|
|
|
Ok(warp::serve(routes).run(addr).await)
|
2020-05-31 23:48:13 +08:00
|
|
|
}
|