diff --git a/src/bin/indexer.rs b/src/bin/indexer.rs
index f7d68ef08..5406d9d80 100644
--- a/src/bin/indexer.rs
+++ b/src/bin/indexer.rs
@@ -1,4 +1,4 @@
-use std::collections::BTreeSet;
+use std::collections::{BTreeSet, BTreeMap};
 use std::convert::TryFrom;
 use std::fs::File;
 use std::path::PathBuf;
@@ -15,7 +15,10 @@ use roaring::RoaringBitmap;
 use slice_group_by::StrGroupBy;
 use structopt::StructOpt;

-use mega_mini_indexer::{FastMap4, SmallVec32, Index, DocumentId};
+use mega_mini_indexer::{FastMap4, SmallVec32, Index, DocumentId, AttributeId};
+
+const MAX_POSITION: usize = 1000;
+const MAX_ATTRIBUTES: usize = u32::max_value() as usize / MAX_POSITION;

 #[cfg(target_os = "linux")]
 #[global_allocator]
@@ -42,8 +45,9 @@ struct Opt {

 struct Indexed {
     fst: fst::Set<Vec<u8>>,
-    postings_ids: FastMap4<SmallVec32, RoaringBitmap>,
-    prefix_postings_ids: FastMap4<SmallVec32, RoaringBitmap>,
+    postings_attrs: FastMap4<SmallVec32, RoaringBitmap>,
+    postings_ids: FastMap4<SmallVec32, FastMap4<AttributeId, RoaringBitmap>>,
+    prefix_postings_ids: FastMap4<SmallVec32, FastMap4<AttributeId, RoaringBitmap>>,
     headers: Vec<u8>,
     documents: Vec<(DocumentId, Vec<u8>)>,
 }
@@ -62,36 +66,76 @@ impl MtblKvStore {
         out.add(b"\0words-fst", indexed.fst.as_fst().as_bytes())?;

         // postings ids keys are all prefixed by a '1'
-        let mut key = vec![1];
+        let mut key = vec![0];
         let mut buffer = Vec::new();
+
+        // We must write the postings attrs
+        key[0] = 1;
+        // We must write the postings attrs in order for mtbl therefore
+        // we iterate over the fst to read the words in order
+        let mut stream = indexed.fst.stream();
+        while let Some(word) = stream.next() {
+            if let Some(attrs) = indexed.postings_attrs.remove(word) {
+                key.truncate(1);
+                key.extend_from_slice(word);
+                // We serialize the attrs ids into a buffer
+                buffer.clear();
+                attrs.serialize_into(&mut buffer)?;
+                // that we write under the generated key into MTBL
+                out.add(&key, &buffer).unwrap();
+            }
+        }
+
+        // We must write the postings ids
+        key[0] = 2;
         // We must write the postings ids in order for mtbl therefore
         // we iterate over the fst to read the words in order
         let mut stream = indexed.fst.stream();
         while let Some(word) = stream.next() {
             key.truncate(1);
             key.extend_from_slice(word);
-            if let Some(ids) = indexed.postings_ids.remove(word) {
-                buffer.clear();
-                ids.serialize_into(&mut buffer)?;
-                out.add(&key, &buffer).unwrap();
+            if let Some(attrs) = indexed.postings_ids.remove(word) {
+                let attrs: BTreeMap<_, _> = attrs.into_iter().collect();
+                // We iterate over all the attributes containing the document ids
+                for (attr, ids) in attrs {
+                    // we postfix the word by the attribute id
+                    key.extend_from_slice(&attr.to_be_bytes());
+                    // We serialize the document ids into a buffer
+                    buffer.clear();
+                    ids.serialize_into(&mut buffer)?;
+                    // that we write under the generated key into MTBL
+                    out.add(&key, &buffer).unwrap();
+                    // And clean up the attribute id afterward (u32 = 4 * u8)
+                    key.truncate(key.len() - 4);
+                }
             }
         }

         // We must write the prefix postings ids
-        key[0] = 2;
+        key[0] = 3;
         let mut stream = indexed.fst.stream();
         while let Some(prefix) = stream.next() {
             key.truncate(1);
             key.extend_from_slice(prefix);
-            if let Some(ids) = indexed.prefix_postings_ids.remove(prefix) {
-                buffer.clear();
-                ids.serialize_into(&mut buffer)?;
-                out.add(&key, &buffer).unwrap();
+            if let Some(attrs) = indexed.prefix_postings_ids.remove(prefix) {
+                let attrs: BTreeMap<_, _> = attrs.into_iter().collect();
+                // We iterate over all the attributes containing the document ids
+                for (attr, ids) in attrs {
+                    // we postfix the word by the attribute id
+                    key.extend_from_slice(&attr.to_be_bytes());
+                    // We serialize the document ids into a buffer
+                    buffer.clear();
+                    ids.serialize_into(&mut buffer)?;
+                    // that we write under the generated key into MTBL
+                    out.add(&key, &buffer).unwrap();
+                    // And clean up the attribute id afterward (u32 = 4 * u8)
+                    key.truncate(key.len() - 4);
+                }
            }
         }

-        // postings ids keys are all prefixed by a '2'
-        key[0] = 3;
+        // documents are all prefixed by a '4'
+        key[0] = 4;
         indexed.documents.sort_unstable();
         for (id, content) in indexed.documents {
             key.truncate(1);
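
The key layout above is easiest to read as one tag byte, the word bytes, and, for the per-attribute postings, a big-endian attribute id appended at the end. A minimal sketch of that layout (the `postings_key` helper is illustrative, not part of the patch):

```rust
/// Builds an MTBL key: one tag byte, the word bytes and, for per-attribute
/// postings, the attribute id appended in big-endian.
fn postings_key(tag: u8, word: &[u8], attr: Option<u32>) -> Vec<u8> {
    let mut key = Vec::with_capacity(1 + word.len() + 4);
    key.push(tag); // 1 = postings attrs, 2 = postings ids, 3 = prefix postings ids, 4 = documents
    key.extend_from_slice(word);
    if let Some(attr) = attr {
        // Big-endian keeps the keys for one word sorted by attribute id,
        // matching the ascending order produced by the BTreeMap above.
        key.extend_from_slice(&attr.to_be_bytes());
    }
    key
}

fn main() {
    let key = postings_key(2, b"hello", Some(1));
    assert_eq!(key, [2, b'h', b'e', b'l', b'l', b'o', 0, 0, 0, 1]);
}
```

This is also why each `FastMap4` is drained into a `BTreeMap` first: MTBL only accepts keys in sorted order, so the attribute ids of a word must come out ascending.
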
@@ -122,7 +166,8 @@ impl MtblKvStore {
             assert!(values.windows(2).all(|vs| vs[0] == vs[1]));
             Some(values[0].to_vec())
         }
-        else if key.starts_with(&[1]) || key.starts_with(&[2]) {
+        // We either merge postings attrs, prefix postings or postings ids.
+        else if key.starts_with(&[1]) || key.starts_with(&[2]) || key.starts_with(&[3]) {
             let mut first = RoaringBitmap::deserialize_from(values[0].as_slice()).unwrap();

             for value in &values[1..] {
@@ -134,7 +179,7 @@ impl MtblKvStore {
             first.serialize_into(&mut vec).unwrap();
             Some(vec)
         }
-        else if key.starts_with(&[3]) {
+        else if key.starts_with(&[4]) {
             assert!(values.windows(2).all(|vs| vs[0] == vs[1]));
             Some(values[0].to_vec())
         }
@@ -172,10 +217,8 @@ impl MtblKvStore {
 fn index_csv(mut rdr: csv::Reader<File>) -> anyhow::Result<MtblKvStore> {
     eprintln!("{:?}: Indexing into an Indexed...", rayon::current_thread_index());

-    const MAX_POSITION: usize = 1000;
-    const MAX_ATTRIBUTES: usize = u32::max_value() as usize / MAX_POSITION;
-
     let mut document = csv::StringRecord::new();
+    let mut postings_attrs = FastMap4::default();
     let mut postings_ids = FastMap4::default();
     let mut prefix_postings_ids = FastMap4::default();
     let mut documents = Vec::new();
@@ -190,18 +233,23 @@ fn index_csv(mut rdr: csv::Reader<File>) -> anyhow::Result<MtblKvStore> {
         let document_id = ID_GENERATOR.fetch_add(1, Ordering::SeqCst);
         let document_id = DocumentId::try_from(document_id).context("Generated id is too big")?;

-        for (_attr, content) in document.iter().enumerate().take(MAX_ATTRIBUTES) {
+        for (attr, content) in document.iter().enumerate().take(MAX_ATTRIBUTES) {
             for (_pos, word) in simple_alphanumeric_tokens(&content).enumerate().take(MAX_POSITION) {
                 if !word.is_empty() && word.len() < 500 { // LMDB limits
                     let word = word.cow_to_lowercase();
+
+                    postings_attrs.entry(SmallVec32::from(word.as_bytes()))
+                        .or_insert_with(RoaringBitmap::new).insert(attr as u32); // attributes ids
+
                     postings_ids.entry(SmallVec32::from(word.as_bytes()))
-                        .or_insert_with(RoaringBitmap::new)
-                        .insert(document_id);
+                        .or_insert_with(FastMap4::default).entry(attr as u32) // attributes
+                        .or_insert_with(RoaringBitmap::new).insert(document_id); // document ids
+
                     if let Some(prefix) = word.as_bytes().get(0..word.len().min(5)) {
                         for i in 0..=prefix.len() {
                             prefix_postings_ids.entry(SmallVec32::from(&prefix[..i]))
-                                .or_insert_with(RoaringBitmap::new)
-                                .insert(document_id);
+                                .or_insert_with(FastMap4::default).entry(attr as u32) // attributes
+                                .or_insert_with(RoaringBitmap::new).insert(document_id); // document ids
                         }
                     }
                 }
@@ -223,7 +271,7 @@ fn index_csv(mut rdr: csv::Reader<File>) -> anyhow::Result<MtblKvStore> {

     let new_words_fst = fst::Set::from_iter(new_words.iter().map(SmallVec32::as_ref))?;

-    let indexed = Indexed { fst: new_words_fst, headers, postings_ids, prefix_postings_ids, documents };
+    let indexed = Indexed { fst: new_words_fst, headers, postings_attrs, postings_ids, prefix_postings_ids, documents };
     eprintln!("{:?}: Indexed created!", rayon::current_thread_index());

     MtblKvStore::from_indexed(indexed)
@@ -241,15 +289,20 @@ fn writer(wtxn: &mut heed::RwTxn, index: &Index, key: &[u8], val: &[u8]) -> anyh
     }
     else if key.starts_with(&[1]) {
         // Write the postings lists
-        index.postings_ids.as_polymorph()
+        index.postings_attrs.as_polymorph()
             .put::<_, ByteSlice, ByteSlice>(wtxn, &key[1..], val)?;
     }
     else if key.starts_with(&[2]) {
+        // Write the postings lists
+        index.postings_ids.as_polymorph()
+            .put::<_, ByteSlice, ByteSlice>(wtxn, &key[1..], val)?;
+    }
+    else if key.starts_with(&[3]) {
         // Write the prefix postings lists
         index.prefix_postings_ids.as_polymorph()
             .put::<_, ByteSlice, ByteSlice>(wtxn, &key[1..], val)?;
     }
-    else if key.starts_with(&[3]) {
+    else if key.starts_with(&[4]) {
         // Write the documents
         index.documents.as_polymorph()
             .put::<_, ByteSlice, ByteSlice>(wtxn, &key[1..], val)?;
@@ -284,6 +337,7 @@ fn main() -> anyhow::Result<()> {
     eprintln!("We are writing into LMDB...");
     let mut wtxn = env.write_txn()?;
     MtblKvStore::from_many(stores, |k, v| writer(&mut wtxn, &index, k, v))?;
+    // FIXME Why is this count wrong? (it indicates 99 when it should be 100)
    let count = index.documents.len(&wtxn)?;
     wtxn.commit()?;
     eprintln!("Wrote {} documents into LMDB", count);
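
The merge function above now treats every bitmap-valued key space (tag bytes 1, 2 and 3) the same way: equal keys coming from different MTBL stores have their roaring bitmaps unioned. A standalone sketch of that rule, assuming only the `roaring` and `anyhow` crates:

```rust
use anyhow::Result;
use roaring::RoaringBitmap;

/// Unions the serialized bitmaps stored under the same key in several stores.
fn merge_bitmaps(values: &[Vec<u8>]) -> Result<Vec<u8>> {
    let mut first = RoaringBitmap::deserialize_from(values[0].as_slice())?;
    for value in &values[1..] {
        let bitmap = RoaringBitmap::deserialize_from(value.as_slice())?;
        first.union_with(&bitmap);
    }
    let mut out = Vec::new();
    first.serialize_into(&mut out)?;
    Ok(out)
}

fn main() -> Result<()> {
    let a: RoaringBitmap = (0u32..3).collect();
    let b: RoaringBitmap = (2u32..5).collect();
    let (mut va, mut vb) = (Vec::new(), Vec::new());
    a.serialize_into(&mut va)?;
    b.serialize_into(&mut vb)?;
    let merged = RoaringBitmap::deserialize_from(merge_bitmaps(&[va, vb])?.as_slice())?;
    assert_eq!(merged.len(), 5); // {0, 1, 2, 3, 4}
    Ok(())
}
```
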
diff --git a/src/bin/stats.rs b/src/bin/stats.rs
new file mode 100644
index 000000000..91632b437
--- /dev/null
+++ b/src/bin/stats.rs
@@ -0,0 +1,64 @@
+use std::io;
+use std::str::FromStr;
+
+use anyhow::Context;
+use cow_utils::CowUtils;
+use roaring::RoaringBitmap;
+use slice_group_by::StrGroupBy;
+
+use mega_mini_indexer::{FastMap4, DocumentId, SmallString32};
+
+const MAX_POSITION: usize = 1000;
+const MAX_ATTRIBUTES: usize = u32::max_value() as usize / MAX_POSITION;
+
+fn simple_alphanumeric_tokens(string: &str) -> impl Iterator<Item = &str> {
+    let is_alphanumeric = |s: &&str| s.chars().next().map_or(false, char::is_alphanumeric);
+    string.linear_group_by_key(|c| c.is_alphanumeric()).filter(is_alphanumeric)
+}
+
+fn main() -> anyhow::Result<()> {
+    let mut rdr = csv::Reader::from_reader(io::stdin());
+
+    let mut document = csv::StringRecord::new();
+    let mut postings_positions = FastMap4::default();
+
+    let headers = rdr.headers()?;
+    let mut writer = csv::WriterBuilder::new().has_headers(false).from_writer(Vec::new());
+    writer.write_byte_record(headers.as_byte_record())?;
+    let id_pos = headers.iter().position(|h| h == "id").context("missing 'id' header")?;
+
+    while rdr.read_record(&mut document)? {
+        let document_id = document.get(id_pos).unwrap();
+        let document_id = DocumentId::from_str(document_id).context("invalid document id")?;
+
+        for (attr, content) in document.iter().enumerate().take(MAX_ATTRIBUTES) {
+            if attr == id_pos { continue }
+
+            for (pos, word) in simple_alphanumeric_tokens(&content).enumerate().take(MAX_POSITION) {
+                if !word.is_empty() && word.len() < 500 { // LMDB limits
+                    let word = word.cow_to_lowercase();
+                    let position = (attr * MAX_POSITION + pos) as u32;
+
+                    postings_positions.entry(SmallString32::from(word.as_ref()))
+                        .or_insert_with(FastMap4::default).entry(position)
+                        .or_insert_with(RoaringBitmap::new).insert(document_id);
+                }
+            }
+        }
+    }
+
+    // Write the stats to stdout
+    let mut wrt = csv::Writer::from_writer(io::stdout());
+    wrt.write_record(&["word", "position", "count"])?;
+
+    for (word, positions) in postings_positions {
+        let word = word.as_str();
+        for (pos, ids) in positions {
+            let pos = pos.to_string();
+            let count = ids.len().to_string();
+            wrt.write_record(&[word, &pos, &count])?;
+        }
+    }
+
+    Ok(())
+}
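
The packed position written by the stats tool is `attr * MAX_POSITION + pos`, which is also why `MAX_ATTRIBUTES` is derived as `u32::max_value() / MAX_POSITION`: the pair must round-trip inside a single `u32`. A small sketch of the encoding (the `decode` helper is hypothetical, added only to show the round trip):

```rust
const MAX_POSITION: usize = 1000;

/// Packs an (attribute, in-attribute position) pair into one u32.
fn encode(attr: usize, pos: usize) -> u32 {
    (attr * MAX_POSITION + pos) as u32
}

/// Recovers the pair from a packed position (hypothetical helper).
fn decode(position: u32) -> (usize, usize) {
    let position = position as usize;
    (position / MAX_POSITION, position % MAX_POSITION)
}

fn main() {
    let position = encode(2, 345);
    assert_eq!(position, 2345);
    assert_eq!(decode(position), (2, 345));
}
```
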
diff --git a/src/lib.rs b/src/lib.rs
index 8ea767b28..8017aa911 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -11,37 +11,41 @@ use fxhash::FxHasher32;
 use heed::types::*;
 use heed::{PolyDatabase, Database};
 use levenshtein_automata::LevenshteinAutomatonBuilder as LevBuilder;
-use once_cell::sync::OnceCell;
+use once_cell::sync::Lazy;
 use roaring::RoaringBitmap;

 use self::query_tokens::{QueryTokens, QueryToken};

-static LEVDIST0: OnceCell<LevBuilder> = OnceCell::new();
-static LEVDIST1: OnceCell<LevBuilder> = OnceCell::new();
-static LEVDIST2: OnceCell<LevBuilder> = OnceCell::new();
+// Building these factories is not free.
+static LEVDIST0: Lazy<LevBuilder> = Lazy::new(|| LevBuilder::new(0, true));
+static LEVDIST1: Lazy<LevBuilder> = Lazy::new(|| LevBuilder::new(1, true));
+static LEVDIST2: Lazy<LevBuilder> = Lazy::new(|| LevBuilder::new(2, true));

 pub type FastMap4<K, V> = HashMap<K, V, BuildHasherDefault<FxHasher32>>;
 pub type SmallString32 = smallstr::SmallString<[u8; 32]>;
 pub type SmallVec32 = smallvec::SmallVec<[u8; 32]>;
 pub type BEU32 = heed::zerocopy::U32<heed::byteorder::BE>;
 pub type DocumentId = u32;
+pub type AttributeId = u32;

 #[derive(Clone)]
 pub struct Index {
     pub main: PolyDatabase,
-    pub postings_ids: Database<ByteSlice, ByteSlice>,
-    pub prefix_postings_ids: Database<ByteSlice, ByteSlice>,
+    pub postings_attrs: Database<ByteSlice, ByteSlice>,
+    pub postings_ids: Database<ByteSlice, ByteSlice>,
+    pub prefix_postings_ids: Database<ByteSlice, ByteSlice>,
     pub documents: Database<OwnedType<BEU32>, ByteSlice>,
 }

 impl Index {
     pub fn new(env: &heed::Env) -> heed::Result<Index> {
         let main = env.create_poly_database(None)?;
+        let postings_attrs = env.create_database(Some("postings-attrs"))?;
         let postings_ids = env.create_database(Some("postings-ids"))?;
         let prefix_postings_ids = env.create_database(Some("prefix-postings-ids"))?;
         let documents = env.create_database(Some("documents"))?;

-        Ok(Index { main, postings_ids, prefix_postings_ids, documents })
+        Ok(Index { main, postings_attrs, postings_ids, prefix_postings_ids, documents })
     }

     pub fn headers<'t>(&self, rtxn: &'t heed::RoTxn) -> heed::Result<Option<&'t [u8]>> {
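
The `OnceCell` to `Lazy` switch removes the `get_or_init` boilerplate from every call site: a `Lazy` static runs its closure exactly once, on first dereference. A minimal sketch assuming the `once_cell` crate (the `EXPENSIVE` static is illustrative):

```rust
use once_cell::sync::Lazy;

// The closure runs once, the first time the static is dereferenced.
static EXPENSIVE: Lazy<Vec<u32>> = Lazy::new(|| {
    eprintln!("built once");
    (0u32..1000).collect()
});

fn main() {
    // First access triggers the initializer; later accesses just deref.
    assert_eq!(EXPENSIVE.len(), 1000);
    assert_eq!(EXPENSIVE[42], 42);
}
```
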
@@ -54,56 +58,90 @@ impl Index {
             None => return Ok(Vec::new()),
         };

-        // Building these factories is not free.
-        let lev0 = LEVDIST0.get_or_init(|| LevBuilder::new(0, true));
-        let lev1 = LEVDIST1.get_or_init(|| LevBuilder::new(1, true));
-        let lev2 = LEVDIST2.get_or_init(|| LevBuilder::new(2, true));
+        let (lev0, lev1, lev2) = (&LEVDIST0, &LEVDIST1, &LEVDIST2);

         let words: Vec<_> = QueryTokens::new(query).collect();
         let ends_with_whitespace = query.chars().last().map_or(false, char::is_whitespace);
         let number_of_words = words.len();
-        let dfas = words.into_iter().enumerate().map(|(i, word)| {
+        let dfas: Vec<_> = words.into_iter().enumerate().map(|(i, word)| {
             let (word, quoted) = match word {
                 QueryToken::Free(word) => (word.cow_to_lowercase(), false),
                 QueryToken::Quoted(word) => (Cow::Borrowed(word), true),
             };
             let is_last = i + 1 == number_of_words;
             let is_prefix = is_last && !ends_with_whitespace && !quoted;
-            let dfa = match word.len() {
-                0..=4 => if is_prefix { lev0.build_prefix_dfa(&word) } else if quoted { lev0.build_dfa(&word) } else { lev0.build_dfa(&word) },
-                5..=8 => if is_prefix { lev1.build_prefix_dfa(&word) } else if quoted { lev0.build_dfa(&word) } else { lev1.build_dfa(&word) },
-                _     => if is_prefix { lev2.build_prefix_dfa(&word) } else if quoted { lev0.build_dfa(&word) } else { lev2.build_dfa(&word) },
+            let lev = match word.len() {
+                0..=4 => if quoted { lev0 } else { lev0 },
+                5..=8 => if quoted { lev0 } else { lev1 },
+                _     => if quoted { lev0 } else { lev2 },
             };

-            (word, is_prefix, dfa)
-        });
-        let mut intersect_result: Option<RoaringBitmap> = None;
-        for (word, is_prefix, dfa) in dfas {
-            let before = Instant::now();
-
-            let mut union_result = RoaringBitmap::default();
-            let count = if word.len() <= 4 && is_prefix {
-                if let Some(ids) = self.prefix_postings_ids.get(rtxn, &word[..word.len().min(5)])? {
-                    union_result = RoaringBitmap::deserialize_from(ids)?;
-                }
-                1
+            let dfa = if is_prefix {
+                lev.build_prefix_dfa(&word)
             } else {
+                lev.build_dfa(&word)
+            };
+
+            (word, is_prefix, dfa)
+        })
+        .collect();
+
+        let mut intersect_attrs: Option<RoaringBitmap> = None;
+        for (_word, _is_prefix, dfa) in &dfas {
+            let mut union_result = RoaringBitmap::default();
+            let mut stream = fst.search(dfa).into_stream();
+            while let Some(word) = stream.next() {
+                let word = std::str::from_utf8(word)?;
+                if let Some(attrs) = self.postings_attrs.get(rtxn, word)? {
+                    let right = RoaringBitmap::deserialize_from(attrs)?;
+                    union_result.union_with(&right);
+                }
+            }
+
+            match &mut intersect_attrs {
+                Some(left) => left.intersect_with(&union_result),
+                None => intersect_attrs = Some(union_result),
+            }
+        }
+
+        eprintln!("we should only look for documents with attrs {:?}", intersect_attrs);
+
+        let mut intersect_docids: Option<RoaringBitmap> = None;
+        // TODO would be faster to store and use the words
+        //      seen in the previous attrs loop
+        for (word, is_prefix, dfa) in &dfas {
+            let mut union_result = RoaringBitmap::default();
+            for attr in intersect_attrs.as_ref().unwrap_or(&RoaringBitmap::default()) {
+                let before = Instant::now();
+
                 let mut count = 0;
-                let mut stream = fst.search(dfa).into_stream();
-                while let Some(word) = stream.next() {
-                    count += 1;
-                    let word = std::str::from_utf8(word)?;
-                    if let Some(ids) = self.postings_ids.get(rtxn, word)? {
+                if word.len() <= 4 && *is_prefix {
+                    let mut key = word.as_bytes()[..word.len().min(5)].to_vec();
+                    key.extend_from_slice(&attr.to_be_bytes());
+                    if let Some(ids) = self.prefix_postings_ids.get(rtxn, &key)? {
                         let right = RoaringBitmap::deserialize_from(ids)?;
                         union_result.union_with(&right);
+                        count = 1;
+                    }
+                } else {
+                    let mut stream = fst.search(dfa).into_stream();
+                    while let Some(word) = stream.next() {
+                        count += 1;
+                        let word = std::str::from_utf8(word)?;
+                        let mut key = word.as_bytes().to_vec();
+                        key.extend_from_slice(&attr.to_be_bytes());
+                        if let Some(ids) = self.postings_ids.get(rtxn, &key)? {
+                            let right = RoaringBitmap::deserialize_from(ids)?;
+                            union_result.union_with(&right);
+                        }
                     }
                 }
-                count
-            };

-            eprintln!("with {:?} words union for {:?} gives {:?} took {:.02?}",
-                count, word, union_result.len(), before.elapsed());
+                eprintln!("with {:?} similar words (for attr {}) union for {:?} gives {:?} took {:.02?}",
+                    count, attr, word, union_result.len(), before.elapsed());
+            }

-            match &mut intersect_result {
+            match &mut intersect_docids {
                 Some(left) => {
                     let before = Instant::now();
                     let left_len = left.len();
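
Both passes above share the same shape: per query word, union the bitmaps of every candidate (a derived word in pass one, an attribute lookup in pass two), then intersect those unions across words. A standalone sketch of that fold, assuming the `roaring` crate (the `intersection_of_unions` helper is illustrative, not part of the patch):

```rust
use roaring::RoaringBitmap;

/// For each group (one query word), unions its bitmaps, then intersects
/// the unions across groups. Returns None when there are no groups.
fn intersection_of_unions<I>(groups: I) -> Option<RoaringBitmap>
where
    I: IntoIterator<Item = Vec<RoaringBitmap>>,
{
    let mut result: Option<RoaringBitmap> = None;
    for group in groups {
        // Union the candidates belonging to one query word...
        let mut union = RoaringBitmap::new();
        for bitmap in group {
            union.union_with(&bitmap);
        }
        // ...then intersect with what the previous words allowed.
        match &mut result {
            Some(acc) => acc.intersect_with(&union),
            None => result = Some(union),
        }
    }
    result
}

fn main() {
    let word1 = vec![(0u32..4).collect(), (10u32..12).collect()];
    let word2 = vec![(2u32..11).collect()];
    let docs = intersection_of_unions(vec![word1, word2]).unwrap();
    assert_eq!(docs.iter().collect::<Vec<_>>(), vec![2, 3, 10]);
}
```
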
@@ -111,12 +149,12 @@ impl Index {
                     eprintln!("intersect between {:?} and {:?} gives {:?} took {:.02?}",
                         left_len, union_result.len(), left.len(), before.elapsed());
                 },
-                None => intersect_result = Some(union_result),
+                None => intersect_docids = Some(union_result),
             }
         }

-        eprintln!("{} candidates", intersect_result.as_ref().map_or(0, |r| r.len()));
+        eprintln!("{} candidates", intersect_docids.as_ref().map_or(0, |r| r.len()));

-        Ok(intersect_result.unwrap_or_default().iter().take(20).collect())
+        Ok(intersect_docids.unwrap_or_default().iter().take(20).collect())
     }
 }
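
On the read side, `search` rebuilds the writer's key layout by hand: the word bytes followed by the big-endian attribute id, with no tag byte, because `writer` strips the tag (`&key[1..]`) before `put` and each LMDB database owns its own key space. A sketch of that lookup key (the `lookup_key` helper is illustrative):

```rust
/// Builds the LMDB lookup key used by the search side:
/// the word bytes followed by the big-endian attribute id.
fn lookup_key(word: &str, attr: u32) -> Vec<u8> {
    let mut key = word.as_bytes().to_vec();
    key.extend_from_slice(&attr.to_be_bytes());
    key
}

fn main() {
    let key = lookup_key("hello", 1);
    assert_eq!(key, [b'h', b'e', b'l', b'l', b'o', 0, 0, 0, 1]);
}
```
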