use std::collections::hash_map::Entry;
use std::collections::{HashMap, BTreeSet};
use std::convert::{TryFrom, TryInto};
use std::io;
use std::iter::FromIterator;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};

use anyhow::Context;
use cow_utils::CowUtils;
use fst::Streamer;
use heed::EnvOpenOptions;
use heed::types::*;
use roaring::RoaringBitmap;
use slice_group_by::StrGroupBy;
use structopt::StructOpt;

use mega_mini_indexer::{BEU32, Index, DocumentId};

// A word position is packed as `attribute * MAX_POSITION + position_in_attribute`,
// so MAX_ATTRIBUTES bounds the attribute index to keep the packed value in a u32.
const MAX_POSITION: usize = 1000;
const MAX_ATTRIBUTES: usize = u32::max_value() as usize / MAX_POSITION;

#[cfg(target_os = "linux")]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;

static ID_GENERATOR: AtomicUsize = AtomicUsize::new(0); // AtomicU32 ?
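
// A tiny illustrative check of the position packing described above; the values
// (attribute 2, token position 15) are placeholders chosen for the example.
#[cfg(test)]
mod position_packing_tests {
    use super::MAX_POSITION;

    #[test]
    fn packs_and_unpacks_attribute_and_position() {
        let (attr, pos) = (2usize, 15usize);
        let packed = (attr * MAX_POSITION + pos) as u32;
        assert_eq!(packed, 2015);
        assert_eq!(packed / MAX_POSITION as u32, 2); // the attribute index
        assert_eq!(packed as usize % MAX_POSITION, 15); // the position within the attribute
    }
}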

/// Splits a string into groups of consecutive characters and only keeps
/// the groups that start with an alphanumeric character.
pub fn simple_alphanumeric_tokens(string: &str) -> impl Iterator<Item = &str> {
    let is_alphanumeric = |s: &&str| s.chars().next().map_or(false, char::is_alphanumeric);
    string.linear_group_by_key(|c| c.is_alphanumeric()).filter(is_alphanumeric)
}
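
// An illustrative sketch of the tokenizer behaviour, assuming the documented
// semantics of `linear_group_by_key` from the slice_group_by crate: alphanumeric
// runs are kept, punctuation and whitespace runs are dropped.
#[cfg(test)]
mod simple_tokenizer_tests {
    use super::simple_alphanumeric_tokens;

    #[test]
    fn keeps_only_alphanumeric_groups() {
        let tokens: Vec<_> = simple_alphanumeric_tokens("hello, world 42!").collect();
        assert_eq!(tokens, vec!["hello", "world", "42"]);
    }
}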

#[derive(Debug, StructOpt)]
#[structopt(name = "mm-indexer", about = "The indexer side of the MMI project.")]
struct Opt {
    /// The path where the database is located.
    /// It is created if it doesn't already exist.
    #[structopt(long = "db", parse(from_os_str))]
    database: PathBuf,

    /// The CSV file to index.
    csv_file: Option<PathBuf>,
}
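
// Illustrative invocations (the binary name is assumed from the structopt `name`
// attribute above; the paths are placeholders):
//
//     mm-indexer --db my-index.mdb documents.csv
//     cat documents.csv | mm-indexer --db my-index.mdb
//
// When no CSV file is given on the command line, documents are read from stdin.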

fn index_csv<R: io::Read>(wtxn: &mut heed::RwTxn, mut rdr: csv::Reader<R>, index: &Index) -> anyhow::Result<()> {
    eprintln!("Indexing into LMDB...");

    // Write the headers into a Vec of bytes.
    let headers = rdr.headers()?;
    let mut writer = csv::WriterBuilder::new().has_headers(false).from_writer(Vec::new());
    writer.write_byte_record(headers.as_byte_record())?;
    let headers = writer.into_inner()?;

    let mut document = csv::StringRecord::new();

    while rdr.read_record(&mut document)? {
        let document_id = ID_GENERATOR.fetch_add(1, Ordering::SeqCst);
        let document_id = DocumentId::try_from(document_id).context("Generated id is too big")?;

        for (attr, content) in document.iter().enumerate().take(MAX_ATTRIBUTES) {
            for (pos, word) in simple_alphanumeric_tokens(&content).enumerate().take(MAX_POSITION) {
                if !word.is_empty() && word.len() < 500 { // LMDB limits
                    let word = word.cow_to_lowercase();
                    let position = (attr * MAX_POSITION + pos) as u32;

                    // ------ merge word positions --------

                    let ids = match index.word_positions.get(wtxn, &word)? {
                        Some(mut ids) => { ids.insert(position); ids },
                        None => RoaringBitmap::from_iter(Some(position)),
                    };

                    index.word_positions.put(wtxn, &word, &ids)?;

                    // ------ merge word position documents ids --------

                    // The key is the word bytes followed by the 4-byte big-endian position.
                    let mut key = word.as_bytes().to_vec();
                    key.extend_from_slice(&position.to_be_bytes());

                    let ids = match index.word_position_docids.get(wtxn, &key)? {
                        Some(mut ids) => { ids.insert(document_id); ids },
                        None => RoaringBitmap::from_iter(Some(document_id)),
                    };

                    index.word_position_docids.put(wtxn, &key, &ids)?;
                }
            }
        }

        // We write the document in the database.
        let mut writer = csv::WriterBuilder::new().has_headers(false).from_writer(Vec::new());
        writer.write_byte_record(document.as_byte_record())?;
        let document = writer.into_inner()?;
        index.documents.put(wtxn, &BEU32::new(document_id), &document)?;
    }

    // We store the words from the postings.
    let mut new_words = BTreeSet::default();
    let iter = index.word_positions.as_polymorph().iter::<_, Str, DecodeIgnore>(wtxn)?;
    for result in iter {
        let (word, ()) = result?;
        new_words.insert(word.to_owned());
    }

    let new_words_fst = fst::Set::from_iter(new_words)?;

    index.put_fst(wtxn, &new_words_fst)?;
    index.put_headers(wtxn, &headers)?;

    Ok(())
}
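
// Illustrative only: how a `word_position_docids` key is laid out and split back
// apart, mirroring the `split_at(key.len() - 4)` logic used in
// `compute_words_attributes_docids` below. The word and position are placeholders.
#[cfg(test)]
mod word_position_key_tests {
    use std::convert::TryInto;

    #[test]
    fn word_position_key_roundtrip() {
        let (word, position) = ("hello", 2015u32);
        let mut key = word.as_bytes().to_vec();
        key.extend_from_slice(&position.to_be_bytes());

        let (key_word, key_pos) = key.split_at(key.len() - 4);
        assert_eq!(key_word, word.as_bytes());
        assert_eq!(u32::from_be_bytes(key_pos.try_into().unwrap()), position);
    }
}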

fn compute_words_attributes_docids(wtxn: &mut heed::RwTxn, index: &Index) -> anyhow::Result<()> {
    eprintln!("Computing the attribute document ids...");

    let fst = match index.fst(&wtxn)? {
        Some(fst) => fst.map_data(|s| s.to_vec())?,
        None => return Ok(()),
    };

    let mut word_attributes = HashMap::new();
    let mut stream = fst.stream();
    while let Some(word) = stream.next() {
        word_attributes.clear();

        // Loop on the word attributes and union all the document ids by attribute.
        for result in index.word_position_docids.prefix_iter(wtxn, word)? {
            let (key, docids) = result?;
            let (_key_word, key_pos) = key.split_at(key.len() - 4);
            let key_pos = key_pos.try_into().map(u32::from_be_bytes)?;
            // If the key corresponds to this exact word (i.e. the word followed by a 4-byte position).
            if key.len() == word.len() + 4 {
                let attribute = key_pos / MAX_POSITION as u32;
                match word_attributes.entry(attribute) {
                    Entry::Vacant(entry) => { entry.insert(docids); },
                    Entry::Occupied(mut entry) => entry.get_mut().union_with(&docids),
                }
            }
        }

        // Write this word's attribute unions into LMDB.
        let mut key = word.to_vec();
        for (attribute, docids) in word_attributes.drain() {
            key.truncate(word.len());
            key.extend_from_slice(&attribute.to_be_bytes());
            index.word_attribute_docids.put(wtxn, &key, &docids)?;
        }
    }

    Ok(())
}

fn main() -> anyhow::Result<()> {
    let opt = Opt::from_args();

    std::fs::create_dir_all(&opt.database)?;
    let env = EnvOpenOptions::new()
        .map_size(100 * 1024 * 1024 * 1024) // 100 GB
        .max_readers(10)
        .max_dbs(10)
        .open(opt.database)?;

    let index = Index::new(&env)?;

    let mut wtxn = env.write_txn()?;

    match opt.csv_file {
        Some(path) => {
            let rdr = csv::Reader::from_path(path)?;
            index_csv(&mut wtxn, rdr, &index)?;
        },
        None => {
            let rdr = csv::Reader::from_reader(io::stdin());
            index_csv(&mut wtxn, rdr, &index)?;
        }
    };

    compute_words_attributes_docids(&mut wtxn, &index)?;
    let count = index.documents.len(&wtxn)?;

    wtxn.commit()?;

    eprintln!("Wrote {} documents into LMDB", count);

    Ok(())
}