use std::fmt::Write as _;
use std::path::PathBuf;
use std::{str, io, fmt};

use anyhow::Context;
use byte_unit::Byte;
use heed::EnvOpenOptions;
use milli::Index;
use structopt::StructOpt;

use Command::*;

#[cfg(target_os = "linux")]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;

const MAIN_DB_NAME: &str = "main";
const WORD_DOCIDS_DB_NAME: &str = "word-docids";
const WORD_PREFIX_DOCIDS_DB_NAME: &str = "word-prefix-docids";
const DOCID_WORD_POSITIONS_DB_NAME: &str = "docid-word-positions";
const WORD_PAIR_PROXIMITY_DOCIDS_DB_NAME: &str = "word-pair-proximity-docids";
const WORD_PREFIX_PAIR_PROXIMITY_DOCIDS_DB_NAME: &str = "word-prefix-pair-proximity-docids";
const WORD_LEVEL_POSITION_DOCIDS_DB_NAME: &str = "word-level-position-docids";
const FACET_FIELD_ID_VALUE_DOCIDS_DB_NAME: &str = "facet-field-id-value-docids";
const FIELD_ID_DOCID_FACET_VALUES_DB_NAME: &str = "field-id-docid-facet-values";
const DOCUMENTS_DB_NAME: &str = "documents";

const ALL_DATABASE_NAMES: &[&str] = &[
    MAIN_DB_NAME,
    WORD_DOCIDS_DB_NAME,
    WORD_PREFIX_DOCIDS_DB_NAME,
    DOCID_WORD_POSITIONS_DB_NAME,
    WORD_PAIR_PROXIMITY_DOCIDS_DB_NAME,
    WORD_PREFIX_PAIR_PROXIMITY_DOCIDS_DB_NAME,
    WORD_LEVEL_POSITION_DOCIDS_DB_NAME,
    FACET_FIELD_ID_VALUE_DOCIDS_DB_NAME,
    FIELD_ID_DOCID_FACET_VALUES_DB_NAME,
    DOCUMENTS_DB_NAME,
];

const POSTINGS_DATABASE_NAMES: &[&str] = &[
    WORD_DOCIDS_DB_NAME,
    WORD_PREFIX_DOCIDS_DB_NAME,
    DOCID_WORD_POSITIONS_DB_NAME,
    WORD_PAIR_PROXIMITY_DOCIDS_DB_NAME,
    WORD_PREFIX_PAIR_PROXIMITY_DOCIDS_DB_NAME,
];

#[derive(Debug, StructOpt)]
/// A stats fetcher for milli.
pub struct Opt {
    /// The database path where the database is located.
    /// It must already exist, an error is returned otherwise.
    #[structopt(long = "db", parse(from_os_str))]
    database: PathBuf,

    /// The maximum size the database can take on disk. It is recommended to specify
    /// the whole disk space (the value must be a multiple of a page size).
    #[structopt(long = "db-size", default_value = "100 GiB")]
    database_size: Byte,

    /// Verbose mode (-v, -vv, -vvv, etc.)
    #[structopt(short, long, parse(from_occurrences))]
    verbose: usize,

    #[structopt(subcommand)]
    command: Command,
}
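
// Example invocations (the binary name and paths are illustrative only):
//
//   infos --db ./my-index.mmdb most-common-words 20
//   infos --db ./my-index.mmdb size-of-database word-docids documents
//   infos --db ./my-index.mmdb database-stats word-pair-proximity-docids
//
// StructOpt derives the subcommand names from the variant names below
// (kebab-cased), so `MostCommonWords` becomes `most-common-words`.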

#[derive(Debug, StructOpt)]
enum Command {
    /// Outputs a CSV of the most frequent words of this index.
    ///
    /// Words are displayed and ordered by frequency.
    /// `document_frequency` defines the number of documents that contain the word.
    MostCommonWords {
        /// The maximum number of frequencies to return.
        #[structopt(default_value = "10")]
        limit: usize,
    },

    /// Outputs a CSV with the biggest entries of the database.
    BiggestValues {
        /// The maximum number of sizes to return.
        #[structopt(default_value = "10")]
        limit: usize,
    },

    /// Outputs a CSV with the documents ids where the given words appear.
    WordsDocids {
        /// Display the whole documents ids in detail.
        #[structopt(long)]
        full_display: bool,

        /// The words to display the documents ids of.
        words: Vec<String>,
    },

    /// Outputs a CSV with the documents ids where the given word prefixes appear.
    WordsPrefixesDocids {
        /// Display the whole documents ids in detail.
        #[structopt(long)]
        full_display: bool,

        /// The prefixes to display the documents ids of.
        prefixes: Vec<String>,
    },

    /// Outputs a CSV with the documents ids along with the facet values where they appear.
    FacetValuesDocids {
        /// Display the whole documents ids in detail.
        #[structopt(long)]
        full_display: bool,

        /// The field name in the document.
        field_name: String,
    },

    /// Outputs a CSV with the documents ids along with the word level positions where they appear.
    WordsLevelPositionsDocids {
        /// Display the whole documents ids in detail.
        #[structopt(long)]
        full_display: bool,

        /// The words to display the level positions and documents ids of.
        words: Vec<String>,
    },

    /// Outputs a CSV with the documents ids, words and the positions where these words appear.
    DocidsWordsPositions {
        /// Display the whole positions in detail.
        #[structopt(long)]
        full_display: bool,

        /// If defined, only retrieve the documents that correspond to these internal ids.
        internal_documents_ids: Vec<u32>,
    },

    /// Outputs some facets statistics for the given facet name.
    FacetStats {
        /// The field name in the document.
        field_name: String,
    },

    /// Outputs the average number of *different* words by document.
    AverageNumberOfWordsByDoc,

    /// Outputs the average number of positions for each document word.
    AverageNumberOfPositionsByWord,

    /// Outputs some statistics about the given database (e.g. median, quartiles,
    /// percentiles, minimum, maximum, average, key size, value size).
    DatabaseStats {
        #[structopt(possible_values = POSTINGS_DATABASE_NAMES)]
        database: String,
    },

    /// Outputs the size in bytes of the specified database names.
    SizeOfDatabase {
        /// The names of the databases to measure the size of; if none is specified
        /// it is equivalent to specifying all the database names.
        #[structopt(possible_values = ALL_DATABASE_NAMES)]
        databases: Vec<String>,
    },

    /// Outputs a CSV with the proximities for the two specified words and
    /// the documents ids where these relations appear.
    ///
    /// `word1`, `word2` define the word pair specified *in this specific order*.
    /// `proximity` defines the proximity between the two specified words.
    /// `documents_ids` defines the documents ids where the relation appears.
    WordPairProximitiesDocids {
        /// Display the whole documents ids in detail.
        #[structopt(long)]
        full_display: bool,

        /// First word of the word pair.
        word1: String,

        /// Second word of the word pair.
        word2: String,
    },

    /// Outputs the words FST to standard output.
    ///
    /// One can use the FST binary helper to dissect and analyze it;
    /// install it with `cargo install fst-bin`.
    ExportWordsFst,

    /// Outputs the words prefix FST to standard output.
    ///
    /// One can use the FST binary helper to dissect and analyze it;
    /// install it with `cargo install fst-bin`.
    ExportWordsPrefixFst,

    /// Outputs the documents as JSON lines to the standard output.
    ///
    /// All of the fields are extracted, not just the displayed ones.
    ExportDocuments {
        /// If defined, only retrieve the documents that correspond to these internal ids.
        internal_documents_ids: Vec<u32>,
    },
}

fn main() -> anyhow::Result<()> {
    let opt = Opt::from_args();

    stderrlog::new()
        .verbosity(opt.verbose)
        .show_level(false)
        .timestamp(stderrlog::Timestamp::Off)
        .init()?;

    let mut options = EnvOpenOptions::new();
    options.map_size(opt.database_size.get_bytes() as usize);
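    // LMDB wants the map size to be a multiple of the OS page size, hence
    // the advice given in the `--db-size` documentation above.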

    // Return an error if the database does not exist.
    if !opt.database.exists() {
        anyhow::bail!("The database ({}) does not exist.", opt.database.display());
    }

    // Open the LMDB database.
    let index = Index::new(options, opt.database)?;
    let rtxn = index.read_txn()?;

    match opt.command {
        MostCommonWords { limit } => most_common_words(&index, &rtxn, limit),
        BiggestValues { limit } => biggest_value_sizes(&index, &rtxn, limit),
        WordsDocids { full_display, words } => words_docids(&index, &rtxn, !full_display, words),
        WordsPrefixesDocids { full_display, prefixes } => {
            words_prefixes_docids(&index, &rtxn, !full_display, prefixes)
        },
        FacetValuesDocids { full_display, field_name } => {
            facet_values_docids(&index, &rtxn, !full_display, field_name)
        },
        WordsLevelPositionsDocids { full_display, words } => {
            words_level_positions_docids(&index, &rtxn, !full_display, words)
        },
        DocidsWordsPositions { full_display, internal_documents_ids } => {
            docids_words_positions(&index, &rtxn, !full_display, internal_documents_ids)
        },
        FacetStats { field_name } => facet_stats(&index, &rtxn, field_name),
        AverageNumberOfWordsByDoc => average_number_of_words_by_doc(&index, &rtxn),
        AverageNumberOfPositionsByWord => {
            average_number_of_positions_by_word(&index, &rtxn)
        },
        SizeOfDatabase { databases } => size_of_databases(&index, &rtxn, databases),
        DatabaseStats { database } => database_stats(&index, &rtxn, &database),
        WordPairProximitiesDocids { full_display, word1, word2 } => {
            word_pair_proximities_docids(&index, &rtxn, !full_display, word1, word2)
        },
        ExportWordsFst => export_words_fst(&index, &rtxn),
        ExportWordsPrefixFst => export_words_prefix_fst(&index, &rtxn),
        ExportDocuments { internal_documents_ids } => {
            export_documents(&index, &rtxn, internal_documents_ids)
        },
    }
}

fn most_common_words(index: &Index, rtxn: &heed::RoTxn, limit: usize) -> anyhow::Result<()> {
    use std::collections::BinaryHeap;
    use std::cmp::Reverse;

    let mut heap = BinaryHeap::with_capacity(limit + 1);
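    // Classic bounded top-k: `BinaryHeap` is a max-heap, so wrapping the
    // frequency in `Reverse` keeps the *smallest* entry on top. Pushing and
    // then popping whenever the heap exceeds `limit` evicts the smallest,
    // leaving the `limit` most frequent words.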
    for result in index.word_docids.iter(rtxn)? {
        if limit == 0 { break }
        let (word, docids) = result?;
        heap.push((Reverse(docids.len()), word));
        if heap.len() > limit { heap.pop(); }
    }

    let stdout = io::stdout();
    let mut wtr = csv::Writer::from_writer(stdout.lock());
    wtr.write_record(&["word", "document_frequency"])?;

    for (Reverse(document_frequency), word) in heap.into_sorted_vec() {
        wtr.write_record(&[word, &document_frequency.to_string()])?;
    }

    Ok(wtr.flush()?)
}

/// Helper function that converts the facet value key to a unique type
/// that can be used for logging or display purposes.
fn facet_values_iter<'txn, DC: 'txn, T>(
    rtxn: &'txn heed::RoTxn,
    db: heed::Database<heed::types::ByteSlice, DC>,
    field_id: u8,
    facet_type: milli::facet::FacetType,
    string_fn: impl Fn(&str) -> T + 'txn,
    float_fn: impl Fn(u8, f64, f64) -> T + 'txn,
) -> heed::Result<Box<dyn Iterator<Item=heed::Result<(T, DC::DItem)>> + 'txn>>
where
    DC: heed::BytesDecode<'txn>,
{
    use milli::facet::FacetType;
    use milli::heed_codec::facet::{FacetValueStringCodec, FacetLevelValueF64Codec};

    let iter = db.prefix_iter(&rtxn, &[field_id])?;
    match facet_type {
        FacetType::String => {
            let iter = iter.remap_key_type::<FacetValueStringCodec>()
                .map(move |r| r.map(|((_, key), value)| (string_fn(key), value)));
            Ok(Box::new(iter) as Box<dyn Iterator<Item=_>>)
        },
        FacetType::Number => {
            let iter = iter.remap_key_type::<FacetLevelValueF64Codec>()
                .map(move |r| r.map(|((_, level, left, right), value)| {
                    (float_fn(level, left, right), value)
                }));
            Ok(Box::new(iter))
        },
    }
}

fn facet_number_value_to_string<T: fmt::Debug>(level: u8, left: T, right: T) -> (u8, String) {
    if level == 0 {
        (level, format!("{:?}", left))
    } else {
        (level, format!("{:?} to {:?}", left, right))
    }
}

fn biggest_value_sizes(index: &Index, rtxn: &heed::RoTxn, limit: usize) -> anyhow::Result<()> {
    use std::cmp::Reverse;
    use std::collections::BinaryHeap;
    use heed::types::{Str, ByteSlice};

    let Index {
        env: _env,
        main,
        word_docids,
        word_prefix_docids,
        docid_word_positions,
        word_pair_proximity_docids,
        word_prefix_pair_proximity_docids,
        word_level_position_docids,
        facet_field_id_value_docids,
        field_id_docid_facet_values: _,
        documents,
    } = index;
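
    // Destructuring the whole `Index` makes the compiler complain whenever a
    // new database field is added to the struct, which acts as a reminder to
    // report its biggest values here too; fields that are intentionally
    // ignored are matched against `_`.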

    let main_name = "main";
    let word_docids_name = "word_docids";
    let word_prefix_docids_name = "word_prefix_docids";
    let docid_word_positions_name = "docid_word_positions";
    let word_prefix_pair_proximity_docids_name = "word_prefix_pair_proximity_docids";
    let word_pair_proximity_docids_name = "word_pair_proximity_docids";
    let facet_field_id_value_docids_name = "facet_field_id_value_docids";
    let documents_name = "documents";

    let mut heap = BinaryHeap::with_capacity(limit + 1);

    if limit > 0 {
        // Fetch the words FST
        let words_fst = index.words_fst(rtxn)?;
        let length = words_fst.as_fst().as_bytes().len();
        heap.push(Reverse((length, format!("words-fst"), main_name)));
        if heap.len() > limit { heap.pop(); }

        // Fetch the word prefix FST
        let words_prefixes_fst = index.words_prefixes_fst(rtxn)?;
        let length = words_prefixes_fst.as_fst().as_bytes().len();
        heap.push(Reverse((length, format!("words-prefixes-fst"), main_name)));
        if heap.len() > limit { heap.pop(); }

        if let Some(documents_ids) = main.get::<_, Str, ByteSlice>(rtxn, "documents-ids")? {
            heap.push(Reverse((documents_ids.len(), format!("documents-ids"), main_name)));
            if heap.len() > limit { heap.pop(); }
        }

        for result in word_docids.remap_data_type::<ByteSlice>().iter(rtxn)? {
            let (word, value) = result?;
            heap.push(Reverse((value.len(), word.to_string(), word_docids_name)));
            if heap.len() > limit { heap.pop(); }
        }

        for result in word_prefix_docids.remap_data_type::<ByteSlice>().iter(rtxn)? {
            let (word, value) = result?;
            heap.push(Reverse((value.len(), word.to_string(), word_prefix_docids_name)));
            if heap.len() > limit { heap.pop(); }
        }

        for result in docid_word_positions.remap_data_type::<ByteSlice>().iter(rtxn)? {
            let ((docid, word), value) = result?;
            let key = format!("{} {}", docid, word);
            heap.push(Reverse((value.len(), key, docid_word_positions_name)));
            if heap.len() > limit { heap.pop(); }
        }

        for result in word_pair_proximity_docids.remap_data_type::<ByteSlice>().iter(rtxn)? {
            let ((word1, word2, prox), value) = result?;
            let key = format!("{} {} {}", word1, word2, prox);
            heap.push(Reverse((value.len(), key, word_pair_proximity_docids_name)));
            if heap.len() > limit { heap.pop(); }
        }

        for result in word_prefix_pair_proximity_docids.remap_data_type::<ByteSlice>().iter(rtxn)? {
            let ((word, prefix, prox), value) = result?;
            let key = format!("{} {} {}", word, prefix, prox);
            heap.push(Reverse((value.len(), key, word_prefix_pair_proximity_docids_name)));
            if heap.len() > limit { heap.pop(); }
        }

        let faceted_fields = index.faceted_fields_ids(rtxn)?;
        let fields_ids_map = index.fields_ids_map(rtxn)?;
        for (field_id, field_type) in faceted_fields {
            let facet_name = fields_ids_map.name(field_id).unwrap();

            let db = facet_field_id_value_docids.remap_data_type::<ByteSlice>();
            let iter = facet_values_iter(
                rtxn,
                db,
                field_id,
                field_type,
                |key| key.to_owned(),
                |level, left, right| {
                    let mut output = facet_number_value_to_string(level, left, right).1;
                    let _ = write!(&mut output, " (level {})", level);
                    output
                },
            )?;

            for result in iter {
                let (fvalue, value) = result?;
                let key = format!("{} {}", facet_name, fvalue);
                heap.push(Reverse((value.len(), key, facet_field_id_value_docids_name)));
                if heap.len() > limit { heap.pop(); }
            }
        }

        for result in documents.remap_data_type::<ByteSlice>().iter(rtxn)? {
            let (id, value) = result?;
            heap.push(Reverse((value.len(), id.to_string(), documents_name)));
            if heap.len() > limit { heap.pop(); }
        }
    }

    let stdout = io::stdout();
    let mut wtr = csv::Writer::from_writer(stdout.lock());
    wtr.write_record(&["database_name", "key_name", "size"])?;

    for Reverse((size, key_name, database_name)) in heap.into_sorted_vec() {
        wtr.write_record(&[database_name.to_string(), key_name, size.to_string()])?;
    }

    Ok(wtr.flush()?)
}

fn words_docids(index: &Index, rtxn: &heed::RoTxn, debug: bool, words: Vec<String>) -> anyhow::Result<()> {
    let stdout = io::stdout();
    let mut wtr = csv::Writer::from_writer(stdout.lock());
    wtr.write_record(&["word", "documents_ids"])?;
    for word in words {
        if let Some(docids) = index.word_docids.get(rtxn, &word)? {
            let docids = if debug {
                format!("{:?}", docids)
            } else {
                format!("{:?}", docids.iter().collect::<Vec<_>>())
            };
            wtr.write_record(&[word, docids])?;
        }
    }

    Ok(wtr.flush()?)
}

fn words_prefixes_docids(
    index: &Index,
    rtxn: &heed::RoTxn,
    debug: bool,
    prefixes: Vec<String>,
) -> anyhow::Result<()>
{
    let stdout = io::stdout();
    let mut wtr = csv::Writer::from_writer(stdout.lock());
    wtr.write_record(&["prefix", "documents_ids"])?;

    if prefixes.is_empty() {
        for result in index.word_prefix_docids.iter(rtxn)? {
            let (prefix, docids) = result?;
            let docids = if debug {
                format!("{:?}", docids)
            } else {
                format!("{:?}", docids.iter().collect::<Vec<_>>())
            };
            wtr.write_record(&[prefix, &docids])?;
        }
    } else {
        for prefix in prefixes {
            if let Some(docids) = index.word_prefix_docids.get(rtxn, &prefix)? {
                let docids = if debug {
                    format!("{:?}", docids)
                } else {
                    format!("{:?}", docids.iter().collect::<Vec<_>>())
                };
                wtr.write_record(&[prefix, docids])?;
            }
        }
    }

    Ok(wtr.flush()?)
}

fn facet_values_docids(index: &Index, rtxn: &heed::RoTxn, debug: bool, field_name: String) -> anyhow::Result<()> {
    let fields_ids_map = index.fields_ids_map(&rtxn)?;
    let faceted_fields = index.faceted_fields_ids(&rtxn)?;

    let field_id = fields_ids_map.id(&field_name)
        .with_context(|| format!("field {} not found", field_name))?;
    let field_type = faceted_fields.get(&field_id)
        .with_context(|| format!("field {} is not faceted", field_name))?;

    let stdout = io::stdout();
    let mut wtr = csv::Writer::from_writer(stdout.lock());
    wtr.write_record(&["facet_value", "facet_level", "documents_count", "documents_ids"])?;

    let db = index.facet_field_id_value_docids;
    let iter = facet_values_iter(
        rtxn,
        db,
        field_id,
        *field_type,
        |key| (0, key.to_owned()),
        facet_number_value_to_string,
    )?;

    for result in iter {
        let ((level, value), docids) = result?;
        let count = docids.len();
        let docids = if debug {
            format!("{:?}", docids)
        } else {
            format!("{:?}", docids.iter().collect::<Vec<_>>())
        };
        wtr.write_record(&[value, level.to_string(), count.to_string(), docids])?;
    }

    Ok(wtr.flush()?)
}

fn words_level_positions_docids(
    index: &Index,
    rtxn: &heed::RoTxn,
    debug: bool,
    words: Vec<String>,
) -> anyhow::Result<()>
{
    let stdout = io::stdout();
    let mut wtr = csv::Writer::from_writer(stdout.lock());
    wtr.write_record(&["word", "level", "position_range", "documents_count", "documents_ids"])?;

    for word in words.iter().map(AsRef::as_ref) {
        let range = {
            let left = (word, 0, u32::min_value(), u32::min_value());
            let right = (word, u8::max_value(), u32::max_value(), u32::max_value());
            left..=right
        };
        for result in index.word_level_position_docids.range(rtxn, &range)? {
            let ((word, level, left, right), docids) = result?;
            let level = level.to_string();
            let count = docids.len().to_string();
            let docids = if debug {
                format!("{:?}", docids)
            } else {
                format!("{:?}", docids.iter().collect::<Vec<_>>())
            };
            let position_range = format!("{:?}", left..=right);
            wtr.write_record(&[word, &level, &position_range, &count, &docids])?;
        }
    }

    Ok(wtr.flush()?)
}

fn docids_words_positions(
    index: &Index,
    rtxn: &heed::RoTxn,
    debug: bool,
    internal_ids: Vec<u32>,
) -> anyhow::Result<()>
{
    let stdout = io::stdout();
    let mut wtr = csv::Writer::from_writer(stdout.lock());
    wtr.write_record(&["document_id", "word", "positions"])?;

    let iter: Box<dyn Iterator<Item = _>> = if internal_ids.is_empty() {
        Box::new(index.docid_word_positions.iter(rtxn)?)
    } else {
        let vec: heed::Result<Vec<_>> = internal_ids.into_iter().map(|id| {
            index.docid_word_positions.prefix_iter(rtxn, &(id, ""))
        }).collect();
        Box::new(vec?.into_iter().flatten())
    };

    for result in iter {
        let ((id, word), positions) = result?;
        let positions = if debug {
            format!("{:?}", positions)
        } else {
            format!("{:?}", positions.iter().collect::<Vec<_>>())
        };
        wtr.write_record(&[&id.to_string(), word, &positions])?;
    }

    Ok(wtr.flush()?)
}

fn facet_stats(index: &Index, rtxn: &heed::RoTxn, field_name: String) -> anyhow::Result<()> {
    let fields_ids_map = index.fields_ids_map(&rtxn)?;
    let faceted_fields = index.faceted_fields_ids(&rtxn)?;

    let field_id = fields_ids_map.id(&field_name)
        .with_context(|| format!("field {} not found", field_name))?;
    let field_type = faceted_fields.get(&field_id)
        .with_context(|| format!("field {} is not faceted", field_name))?;

    let db = index.facet_field_id_value_docids;
    let iter = facet_values_iter(
        rtxn,
        db,
        field_id,
        *field_type,
        |_key| 0u8,
        |level, _left, _right| level,
    )?;

    println!("The database {:?} facet stats", field_name);

    let mut level_size = 0;
    let mut current_level = None;
    for result in iter {
        let (level, _) = result?;
        if let Some(current) = current_level {
            if current != level {
                println!("\tnumber of groups at level {}: {}", current, level_size);
                level_size = 0;
            }
        }
        current_level = Some(level);
        level_size += 1;
    }

    if let Some(current) = current_level {
        println!("\tnumber of groups at level {}: {}", current, level_size);
    }

    Ok(())
}

fn export_words_fst(index: &Index, rtxn: &heed::RoTxn) -> anyhow::Result<()> {
    use std::io::Write as _;

    let mut stdout = io::stdout();
    let words_fst = index.words_fst(rtxn)?;
    stdout.write_all(words_fst.as_fst().as_bytes())?;

    Ok(())
}

fn export_words_prefix_fst(index: &Index, rtxn: &heed::RoTxn) -> anyhow::Result<()> {
    use std::io::Write as _;

    let mut stdout = io::stdout();
    let words_prefixes_fst = index.words_prefixes_fst(rtxn)?;
    stdout.write_all(words_prefixes_fst.as_fst().as_bytes())?;

    Ok(())
}

fn export_documents(index: &Index, rtxn: &heed::RoTxn, internal_ids: Vec<u32>) -> anyhow::Result<()> {
    use std::io::{BufWriter, Write as _};
    use milli::{BEU32, obkv_to_json};

    let stdout = io::stdout();
    let mut out = BufWriter::new(stdout);

    let fields_ids_map = index.fields_ids_map(rtxn)?;
    let displayed_fields: Vec<_> = fields_ids_map.iter().map(|(id, _name)| id).collect();

    let iter: Box<dyn Iterator<Item = _>> = if internal_ids.is_empty() {
        Box::new(index.documents.iter(rtxn)?.map(|result| {
            result.map(|(_id, obkv)| obkv)
        }))
    } else {
        Box::new(internal_ids.into_iter().flat_map(|id| {
            index.documents.get(rtxn, &BEU32::new(id)).transpose()
        }))
    };

    for result in iter {
        let obkv = result?;
        let document = obkv_to_json(&displayed_fields, &fields_ids_map, obkv)?;
        serde_json::to_writer(&mut out, &document)?;
        writeln!(&mut out)?;
    }

    out.into_inner()?;

    Ok(())
}

fn average_number_of_words_by_doc(index: &Index, rtxn: &heed::RoTxn) -> anyhow::Result<()> {
    use heed::types::DecodeIgnore;
    use milli::{DocumentId, BEU32StrCodec};

    let mut words_counts = Vec::new();
    let mut count = 0;
    let mut prev = None as Option<(DocumentId, u32)>;
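
    // The `docid_word_positions` keys are sorted by (document id, word), so
    // the number of distinct words per document can be computed in a single
    // run-length pass: bump the counter while the document id repeats and
    // flush it whenever a new document id starts.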
    let iter = index.docid_word_positions.as_polymorph().iter::<_, BEU32StrCodec, DecodeIgnore>(rtxn)?;
    for result in iter {
        let ((docid, _word), ()) = result?;

        match prev.as_mut() {
            Some((prev_docid, prev_count)) if docid == *prev_docid => {
                *prev_count += 1;
            },
            Some((prev_docid, prev_count)) => {
                words_counts.push(*prev_count);
                *prev_docid = docid;
                // This entry is the first word of the new document.
                *prev_count = 1;
                count += 1;
            },
            None => prev = Some((docid, 1)),
        }
    }

    if let Some((_, prev_count)) = prev.take() {
        words_counts.push(prev_count);
        count += 1;
    }

    let words_count = words_counts.into_iter().map(|c| c as usize).sum::<usize>() as f64;
    let count = count as f64;

    println!("average number of different words by document: {}", words_count / count);

    Ok(())
}

fn average_number_of_positions_by_word(index: &Index, rtxn: &heed::RoTxn) -> anyhow::Result<()> {
    use heed::types::DecodeIgnore;
    use milli::BoRoaringBitmapCodec;

    let mut values_length = Vec::new();
    let mut count = 0;

    let db = index.docid_word_positions.as_polymorph();
    for result in db.iter::<_, DecodeIgnore, BoRoaringBitmapCodec>(rtxn)? {
        let ((), val) = result?;
        values_length.push(val.len() as u32);
        count += 1;
    }

    let values_length_sum = values_length.into_iter().map(|c| c as usize).sum::<usize>() as f64;
    let count = count as f64;

    println!("average number of positions by word: {}", values_length_sum / count);

    Ok(())
}

fn size_of_databases(index: &Index, rtxn: &heed::RoTxn, names: Vec<String>) -> anyhow::Result<()> {
    use heed::types::ByteSlice;

    let names = if names.is_empty() {
        ALL_DATABASE_NAMES.iter().map(|s| s.to_string()).collect()
    } else {
        names
    };

    for name in names {
        let database = match name.as_str() {
            MAIN_DB_NAME => &index.main,
            WORD_PREFIX_DOCIDS_DB_NAME => index.word_prefix_docids.as_polymorph(),
            WORD_DOCIDS_DB_NAME => index.word_docids.as_polymorph(),
            DOCID_WORD_POSITIONS_DB_NAME => index.docid_word_positions.as_polymorph(),
            WORD_PAIR_PROXIMITY_DOCIDS_DB_NAME => index.word_pair_proximity_docids.as_polymorph(),
            WORD_PREFIX_PAIR_PROXIMITY_DOCIDS_DB_NAME => index.word_prefix_pair_proximity_docids.as_polymorph(),
            WORD_LEVEL_POSITION_DOCIDS_DB_NAME => index.word_level_position_docids.as_polymorph(),
            FACET_FIELD_ID_VALUE_DOCIDS_DB_NAME => index.facet_field_id_value_docids.as_polymorph(),
            FIELD_ID_DOCID_FACET_VALUES_DB_NAME => index.field_id_docid_facet_values.as_polymorph(),
            DOCUMENTS_DB_NAME => index.documents.as_polymorph(),
            unknown => anyhow::bail!("unknown database {:?}", unknown),
        };

        let mut key_size: u64 = 0;
        let mut val_size: u64 = 0;
        for result in database.iter::<_, ByteSlice, ByteSlice>(rtxn)? {
            let (k, v) = result?;
            key_size += k.len() as u64;
            val_size += v.len() as u64;
        }

        println!("The {} database weighs:", name);
        println!("\ttotal key size: {}", Byte::from(key_size).get_appropriate_unit(true));
        println!("\ttotal val size: {}", Byte::from(val_size).get_appropriate_unit(true));
        println!("\ttotal size: {}", Byte::from(key_size + val_size).get_appropriate_unit(true));
    }

    Ok(())
}

fn database_stats(index: &Index, rtxn: &heed::RoTxn, name: &str) -> anyhow::Result<()> {
    use heed::types::ByteSlice;
    use heed::{Error, BytesDecode};
    use roaring::RoaringBitmap;
    use milli::{BoRoaringBitmapCodec, CboRoaringBitmapCodec, RoaringBitmapCodec};

    fn compute_stats<'a, DC: BytesDecode<'a, DItem = RoaringBitmap>>(
        db: heed::PolyDatabase,
        rtxn: &'a heed::RoTxn,
        name: &str,
    ) -> anyhow::Result<()>
    {
        let mut key_size = 0u64;
        let mut val_size = 0u64;
        let mut values_length = Vec::new();

        for result in db.iter::<_, ByteSlice, ByteSlice>(rtxn)? {
            let (key, val) = result?;
            key_size += key.len() as u64;
            val_size += val.len() as u64;
            let val = DC::bytes_decode(val).ok_or(Error::Decoding)?;
            values_length.push(val.len() as u32);
        }

        values_length.sort_unstable();
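        // With the lengths sorted, the percentiles below are simple
        // nearest-rank lookups: the p-th percentile is the element at
        // index `len * p / 100` (`unwrap_or(&0)` covers empty databases).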
        let len = values_length.len();

        let twenty_five_percentile = values_length.get(len / 4).unwrap_or(&0);
        let fifty_percentile = values_length.get(len / 2).unwrap_or(&0);
        let seventy_five_percentile = values_length.get(len * 3 / 4).unwrap_or(&0);
        let ninety_percentile = values_length.get(len * 90 / 100).unwrap_or(&0);
        let ninety_five_percentile = values_length.get(len * 95 / 100).unwrap_or(&0);
        let ninety_nine_percentile = values_length.get(len * 99 / 100).unwrap_or(&0);
        let minimum = values_length.first().unwrap_or(&0);
        let maximum = values_length.last().unwrap_or(&0);
        let count = values_length.len();
        let sum = values_length.iter().map(|l| *l as u64).sum::<u64>();

        println!("The {} database stats on the lengths", name);
        println!("\tnumber of entries: {}", count);
        println!("\t25th percentile (first quartile): {}", twenty_five_percentile);
        println!("\t50th percentile (median): {}", fifty_percentile);
        println!("\t75th percentile (third quartile): {}", seventy_five_percentile);
        println!("\t90th percentile: {}", ninety_percentile);
        println!("\t95th percentile: {}", ninety_five_percentile);
        println!("\t99th percentile: {}", ninety_nine_percentile);
        println!("\tminimum: {}", minimum);
        println!("\tmaximum: {}", maximum);
        println!("\taverage: {}", sum as f64 / count as f64);
        println!("\ttotal key size: {}", Byte::from(key_size).get_appropriate_unit(true));
        println!("\ttotal val size: {}", Byte::from(val_size).get_appropriate_unit(true));
        println!("\ttotal size: {}", Byte::from(key_size + val_size).get_appropriate_unit(true));

        Ok(())
    }

    match name {
        WORD_DOCIDS_DB_NAME => {
            let db = index.word_docids.as_polymorph();
            compute_stats::<RoaringBitmapCodec>(*db, rtxn, name)
        },
        WORD_PREFIX_DOCIDS_DB_NAME => {
            let db = index.word_prefix_docids.as_polymorph();
            compute_stats::<RoaringBitmapCodec>(*db, rtxn, name)
        },
        DOCID_WORD_POSITIONS_DB_NAME => {
            let db = index.docid_word_positions.as_polymorph();
            compute_stats::<BoRoaringBitmapCodec>(*db, rtxn, name)
        },
        WORD_PAIR_PROXIMITY_DOCIDS_DB_NAME => {
            let db = index.word_pair_proximity_docids.as_polymorph();
            compute_stats::<CboRoaringBitmapCodec>(*db, rtxn, name)
        },
        WORD_PREFIX_PAIR_PROXIMITY_DOCIDS_DB_NAME => {
            let db = index.word_prefix_pair_proximity_docids.as_polymorph();
            compute_stats::<CboRoaringBitmapCodec>(*db, rtxn, name)
        },
        unknown => anyhow::bail!("unknown database {:?}", unknown),
    }
}

fn word_pair_proximities_docids(
    index: &Index,
    rtxn: &heed::RoTxn,
    debug: bool,
    word1: String,
    word2: String,
) -> anyhow::Result<()>
{
    use heed::types::ByteSlice;
    use milli::RoaringBitmapCodec;

    let stdout = io::stdout();
    let mut wtr = csv::Writer::from_writer(stdout.lock());
    wtr.write_record(&["word1", "word2", "proximity", "documents_ids"])?;

    // Create the prefix key with only the pair of words.
    let mut prefix = Vec::with_capacity(word1.len() + word2.len() + 1);
    prefix.extend_from_slice(word1.as_bytes());
    prefix.push(0);
    prefix.extend_from_slice(word2.as_bytes());
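
    // Keys in this database are laid out as `word1 \0 word2 proximity`,
    // where the proximity is a single trailing byte. Iterating on the
    // `word1 \0 word2` prefix therefore visits every proximity recorded
    // for this exact pair (plus pairs whose second word merely starts
    // with `word2`, which are filtered out below).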

    let db = index.word_pair_proximity_docids.as_polymorph();
    let iter = db.prefix_iter::<_, ByteSlice, RoaringBitmapCodec>(rtxn, &prefix)?;
    for result in iter {
        let (key, docids) = result?;

        // Skip keys that are longer than the requested one: a longer key
        // means that the stored second word only starts with the requested
        // `word2` instead of matching it exactly.
        if key.len() != prefix.len() + 1 { continue; }

        let proximity = key.last().unwrap();
        let docids = if debug {
            format!("{:?}", docids)
        } else {
            format!("{:?}", docids.iter().collect::<Vec<_>>())
        };
        wtr.write_record(&[&word1, &word2, &proximity.to_string(), &docids])?;
    }

    Ok(wtr.flush()?)
}
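
// A minimal sketch of the bounded top-k pattern used by `most_common_words`
// and `biggest_value_sizes` above, kept as a unit test. The sample data is
// made up for illustration; only the `Reverse` + `BinaryHeap` mechanics are
// being demonstrated.
#[cfg(test)]
mod tests {
    use std::cmp::Reverse;
    use std::collections::BinaryHeap;

    #[test]
    fn bounded_heap_keeps_the_biggest_entries() {
        let limit = 2;
        let entries = [("the", 42), ("quick", 7), ("fox", 19)];

        let mut heap = BinaryHeap::with_capacity(limit + 1);
        for (word, frequency) in entries.iter() {
            // `Reverse` turns the max-heap into a min-heap, so popping
            // past `limit` always evicts the least frequent word.
            heap.push((Reverse(*frequency), *word));
            if heap.len() > limit { heap.pop(); }
        }

        // `into_sorted_vec` yields ascending `Reverse`, i.e. most frequent first.
        let words: Vec<_> = heap.into_sorted_vec().into_iter().map(|(_, w)| w).collect();
        assert_eq!(words, vec!["the", "fox"]);
    }
}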