2020-11-18 04:19:25 +08:00
|
|
|
use std::fs::File;
|
2021-06-23 23:20:04 +08:00
|
|
|
use std::num::{NonZeroU8, NonZeroUsize};
|
|
|
|
use std::{cmp, mem};
|
2020-11-18 04:19:25 +08:00
|
|
|
|
2021-03-12 01:42:21 +08:00
|
|
|
use chrono::Utc;
|
2021-06-17 00:33:33 +08:00
|
|
|
use grenad::{CompressionType, FileFuse, Reader, Writer};
|
2020-11-18 04:19:25 +08:00
|
|
|
use heed::types::{ByteSlice, DecodeIgnore};
|
|
|
|
use heed::{BytesEncode, Error};
|
|
|
|
use log::debug;
|
|
|
|
use roaring::RoaringBitmap;
|
|
|
|
|
2021-06-14 22:46:19 +08:00
|
|
|
use crate::error::InternalError;
|
2021-06-23 23:20:04 +08:00
|
|
|
use crate::heed_codec::facet::{
|
|
|
|
FacetLevelValueF64Codec, FacetLevelValueU32Codec, FacetStringLevelZeroCodec,
|
2021-07-17 18:50:01 +08:00
|
|
|
FacetStringLevelZeroValueCodec, FacetStringZeroBoundsValueCodec,
|
2021-06-23 23:20:04 +08:00
|
|
|
};
|
2021-06-17 00:33:33 +08:00
|
|
|
use crate::heed_codec::CboRoaringBitmapCodec;
|
|
|
|
use crate::update::index_documents::{
|
|
|
|
create_writer, write_into_lmdb_database, writer_into_reader, WriteMethod,
|
|
|
|
};
|
2021-07-06 17:31:24 +08:00
|
|
|
use crate::{FieldId, Index, Result};
|
2020-11-18 04:19:25 +08:00
|
|
|
|
2020-11-23 20:08:57 +08:00
|
|
|
/// A builder in charge of (re)computing the per-field facet level databases
/// (the grouped "levels" above level 0) for both number and string facets.
pub struct Facets<'t, 'u, 'i> {
    // Write transaction used both to read level 0 and to write the computed levels back.
    wtxn: &'t mut heed::RwTxn<'i, 'u>,
    // The index whose facet databases are rebuilt.
    index: &'i Index,
    // Compression settings forwarded to the intermediate grenad writer.
    pub(crate) chunk_compression_type: CompressionType,
    pub(crate) chunk_compression_level: Option<u32>,
    pub(crate) chunk_fusing_shrink_size: Option<u64>,
    // Number of level-N entries grouped together into one level-N+1 entry.
    level_group_size: NonZeroUsize,
    // A level is only built while it would contain at least this many groups.
    min_level_size: NonZeroUsize,
    // Kept for logging/debugging symmetry with the other update builders; unused here.
    _update_id: u64,
}
|
|
|
|
|
2020-11-23 20:08:57 +08:00
|
|
|
impl<'t, 'u, 'i> Facets<'t, 'u, 'i> {
    /// Creates a facet-levels builder with default settings:
    /// no compression, groups of 4 entries and a minimum level size of 5.
    pub fn new(
        wtxn: &'t mut heed::RwTxn<'i, 'u>,
        index: &'i Index,
        update_id: u64,
    ) -> Facets<'t, 'u, 'i> {
        Facets {
            wtxn,
            index,
            chunk_compression_type: CompressionType::None,
            chunk_compression_level: None,
            chunk_fusing_shrink_size: None,
            level_group_size: NonZeroUsize::new(4).unwrap(),
            min_level_size: NonZeroUsize::new(5).unwrap(),
            _update_id: update_id,
        }
    }

    /// Sets the number of entries grouped together per level.
    /// Values below 2 are clamped to 2, otherwise levels could never shrink.
    pub fn level_group_size(&mut self, value: NonZeroUsize) -> &mut Self {
        self.level_group_size = NonZeroUsize::new(cmp::max(value.get(), 2)).unwrap();
        self
    }

    /// Sets the minimum number of groups a level must contain to be built at all.
    pub fn min_level_size(&mut self, value: NonZeroUsize) -> &mut Self {
        self.min_level_size = value;
        self
    }

    /// Recomputes, for every faceted field, the facet levels and the sets of
    /// faceted documents ids, then writes everything back into LMDB.
    ///
    /// For each field the steps are: clear the old levels, recompute the
    /// documents ids and the new levels into temporary grenad files, store the
    /// documents ids, and finally merge the grenad files into the databases.
    pub fn execute(self) -> Result<()> {
        self.index.set_updated_at(self.wtxn, &Utc::now())?;
        // We get the faceted fields to be able to create the facet levels.
        let faceted_fields = self.index.faceted_fields_ids(self.wtxn)?;

        debug!("Computing and writing the facet values levels docids into LMDB on disk...");

        for field_id in faceted_fields {
            // Clear the facet string levels.
            clear_field_string_levels(
                self.wtxn,
                self.index.facet_id_string_docids.remap_types::<ByteSlice, DecodeIgnore>(),
                field_id,
            )?;

            // Compute and store the faceted strings documents ids.
            let string_documents_ids = compute_faceted_strings_documents_ids(
                self.wtxn,
                self.index.facet_id_string_docids.remap_key_type::<ByteSlice>(),
                field_id,
            )?;

            // Compute the string levels into a grenad file; it cannot be written
            // directly because LMDB forbids writing while a cursor is open.
            let facet_string_levels = compute_facet_string_levels(
                self.wtxn,
                self.index.facet_id_string_docids,
                self.chunk_compression_type,
                self.chunk_compression_level,
                self.chunk_fusing_shrink_size,
                self.level_group_size,
                self.min_level_size,
                field_id,
            )?;

            // Clear the facet number levels.
            clear_field_number_levels(self.wtxn, self.index.facet_id_f64_docids, field_id)?;

            // Compute and store the faceted numbers documents ids.
            let number_documents_ids = compute_faceted_numbers_documents_ids(
                self.wtxn,
                self.index.facet_id_f64_docids.remap_key_type::<ByteSlice>(),
                field_id,
            )?;

            let facet_number_levels = compute_facet_number_levels(
                self.wtxn,
                self.index.facet_id_f64_docids,
                self.chunk_compression_type,
                self.chunk_compression_level,
                self.chunk_fusing_shrink_size,
                self.level_group_size,
                self.min_level_size,
                field_id,
            )?;

            self.index.put_string_faceted_documents_ids(
                self.wtxn,
                field_id,
                &string_documents_ids,
            )?;
            self.index.put_number_faceted_documents_ids(
                self.wtxn,
                field_id,
                &number_documents_ids,
            )?;

            // Transfer the computed levels from the grenad files into LMDB.
            // Keys are unique per level, so merging two keys is an internal error.
            write_into_lmdb_database(
                self.wtxn,
                *self.index.facet_id_f64_docids.as_polymorph(),
                facet_number_levels,
                |_, _| Err(InternalError::IndexingMergingKeys { process: "facet number levels" }),
                WriteMethod::GetMergePut,
            )?;

            write_into_lmdb_database(
                self.wtxn,
                *self.index.facet_id_string_docids.as_polymorph(),
                facet_string_levels,
                |_, _| Err(InternalError::IndexingMergingKeys { process: "facet string levels" }),
                WriteMethod::GetMergePut,
            )?;
        }

        Ok(())
    }
}
|
|
|
|
|
2021-06-17 00:33:33 +08:00
|
|
|
fn clear_field_number_levels<'t>(
|
2020-11-19 04:52:08 +08:00
|
|
|
wtxn: &'t mut heed::RwTxn,
|
2021-04-07 17:57:16 +08:00
|
|
|
db: heed::Database<FacetLevelValueF64Codec, CboRoaringBitmapCodec>,
|
2021-07-06 17:31:24 +08:00
|
|
|
field_id: FieldId,
|
2021-06-17 00:33:33 +08:00
|
|
|
) -> heed::Result<()> {
|
2021-04-07 17:57:16 +08:00
|
|
|
let left = (field_id, 1, f64::MIN, f64::MIN);
|
|
|
|
let right = (field_id, u8::MAX, f64::MAX, f64::MAX);
|
2020-11-19 04:52:08 +08:00
|
|
|
let range = left..=right;
|
2021-04-07 17:57:16 +08:00
|
|
|
db.delete_range(wtxn, &range).map(drop)
|
2020-11-18 04:19:25 +08:00
|
|
|
}
|
|
|
|
|
2021-04-07 17:57:16 +08:00
|
|
|
/// Builds the facet number levels of one field into a grenad file.
///
/// Each level groups `level_group_size^level` consecutive level-0 entries into
/// one `(field_id, level, left, right) -> docids` entry; levels are only built
/// while they would contain at least `min_level_size` groups.
fn compute_facet_number_levels<'t>(
    rtxn: &'t heed::RoTxn,
    db: heed::Database<FacetLevelValueF64Codec, CboRoaringBitmapCodec>,
    compression_type: CompressionType,
    compression_level: Option<u32>,
    shrink_size: Option<u64>,
    level_group_size: NonZeroUsize,
    min_level_size: NonZeroUsize,
    field_id: FieldId,
) -> Result<Reader<FileFuse>> {
    // Count the level-0 entries of this field, propagating any iteration error.
    let first_level_size = db
        .remap_key_type::<ByteSlice>()
        .prefix_iter(rtxn, &field_id.to_be_bytes())?
        .remap_types::<DecodeIgnore, DecodeIgnore>()
        .fold(Ok(0usize), |count, result| result.and(count).map(|c| c + 1))?;

    // It is forbidden to keep a cursor and write in a database at the same time with LMDB
    // therefore we write the facet levels entries into a grenad file before transfering them.
    let mut writer = tempfile::tempfile()
        .and_then(|file| create_writer(compression_type, compression_level, file))?;

    // Range covering every level-0 entry of this field (level byte fixed to 0).
    let level_0_range = {
        let left = (field_id, 0, f64::MIN, f64::MIN);
        let right = (field_id, 0, f64::MAX, f64::MAX);
        left..=right
    };

    // Groups sizes are always a power of the original level_group_size and therefore a group
    // always maps groups of the previous level and never splits previous levels groups in half.
    let group_size_iter = (1u8..)
        .map(|l| (l, level_group_size.get().pow(l as u32)))
        .take_while(|(_, s)| first_level_size / *s >= min_level_size.get());

    // Every level is rebuilt directly from level 0 with the appropriate group size.
    for (level, group_size) in group_size_iter {
        let mut left = 0.0;
        let mut right = 0.0;
        let mut group_docids = RoaringBitmap::new();

        for (i, result) in db.range(rtxn, &level_0_range)?.enumerate() {
            let ((_field_id, _level, value, _right), docids) = result?;

            if i == 0 {
                left = value;
            } else if i % group_size == 0 {
                // we found the first bound of the next group, we must store the left
                // and right bounds associated with the docids.
                write_number_entry(&mut writer, field_id, level, left, right, &group_docids)?;

                // We save the left bound for the new group and also reset the docids.
                group_docids = RoaringBitmap::new();
                left = value;
            }

            // The right bound is always the bound we run through.
            group_docids |= docids;
            right = value;
        }

        // Flush the trailing, possibly partial, group.
        if !group_docids.is_empty() {
            write_number_entry(&mut writer, field_id, level, left, right, &group_docids)?;
        }
    }

    writer_into_reader(writer, shrink_size)
}
|
|
|
|
|
2021-06-23 23:20:04 +08:00
|
|
|
fn write_number_entry(
|
|
|
|
writer: &mut Writer<File>,
|
|
|
|
field_id: FieldId,
|
|
|
|
level: u8,
|
|
|
|
left: f64,
|
|
|
|
right: f64,
|
|
|
|
ids: &RoaringBitmap,
|
|
|
|
) -> Result<()> {
|
|
|
|
let key = (field_id, level, left, right);
|
|
|
|
let key = FacetLevelValueF64Codec::bytes_encode(&key).ok_or(Error::Encoding)?;
|
|
|
|
let data = CboRoaringBitmapCodec::bytes_encode(&ids).ok_or(Error::Encoding)?;
|
|
|
|
writer.insert(&key, &data)?;
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-07-17 18:50:01 +08:00
|
|
|
fn compute_faceted_strings_documents_ids(
|
2020-11-23 20:08:57 +08:00
|
|
|
rtxn: &heed::RoTxn,
|
2021-07-17 18:50:01 +08:00
|
|
|
db: heed::Database<ByteSlice, FacetStringLevelZeroValueCodec<CboRoaringBitmapCodec>>,
|
2021-07-06 17:31:24 +08:00
|
|
|
field_id: FieldId,
|
2021-06-17 00:33:33 +08:00
|
|
|
) -> Result<RoaringBitmap> {
|
2020-11-23 20:08:57 +08:00
|
|
|
let mut documents_ids = RoaringBitmap::new();
|
2021-07-17 18:50:01 +08:00
|
|
|
for result in db.prefix_iter(rtxn, &field_id.to_be_bytes())? {
|
|
|
|
let (_key, (_original_value, docids)) = result?;
|
|
|
|
documents_ids |= docids;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(documents_ids)
|
|
|
|
}
|
2021-04-28 23:58:16 +08:00
|
|
|
|
2021-07-17 18:50:01 +08:00
|
|
|
fn compute_faceted_numbers_documents_ids(
|
|
|
|
rtxn: &heed::RoTxn,
|
|
|
|
db: heed::Database<ByteSlice, CboRoaringBitmapCodec>,
|
|
|
|
field_id: FieldId,
|
|
|
|
) -> Result<RoaringBitmap> {
|
|
|
|
let mut documents_ids = RoaringBitmap::new();
|
2021-07-06 17:31:24 +08:00
|
|
|
for result in db.prefix_iter(rtxn, &field_id.to_be_bytes())? {
|
2020-11-23 20:08:57 +08:00
|
|
|
let (_key, docids) = result?;
|
2021-04-28 23:58:16 +08:00
|
|
|
documents_ids |= docids;
|
2020-11-23 20:08:57 +08:00
|
|
|
}
|
2021-04-28 23:58:16 +08:00
|
|
|
|
2020-11-23 20:08:57 +08:00
|
|
|
Ok(documents_ids)
|
|
|
|
}
|
|
|
|
|
2021-06-23 23:20:04 +08:00
|
|
|
fn clear_field_string_levels<'t>(
|
|
|
|
wtxn: &'t mut heed::RwTxn,
|
|
|
|
db: heed::Database<ByteSlice, DecodeIgnore>,
|
|
|
|
field_id: FieldId,
|
|
|
|
) -> heed::Result<()> {
|
|
|
|
let left = (field_id, NonZeroU8::new(1).unwrap(), u32::MIN, u32::MIN);
|
|
|
|
let right = (field_id, NonZeroU8::new(u8::MAX).unwrap(), u32::MAX, u32::MAX);
|
|
|
|
let range = left..=right;
|
|
|
|
db.remap_key_type::<FacetLevelValueU32Codec>().delete_range(wtxn, &range).map(drop)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Builds the facet string levels of one field into a grenad file.
///
/// Groups are keyed by the positions (indices) of the level-0 entries they
/// cover; only level 1 additionally stores the bound string values themselves
/// (see `write_string_entry`).
fn compute_facet_string_levels<'t>(
    rtxn: &'t heed::RoTxn,
    db: heed::Database<
        FacetStringLevelZeroCodec,
        FacetStringLevelZeroValueCodec<CboRoaringBitmapCodec>,
    >,
    compression_type: CompressionType,
    compression_level: Option<u32>,
    shrink_size: Option<u64>,
    level_group_size: NonZeroUsize,
    min_level_size: NonZeroUsize,
    field_id: FieldId,
) -> Result<Reader<FileFuse>> {
    // Count the level-0 entries of this field, propagating any iteration error.
    let first_level_size = db
        .remap_key_type::<ByteSlice>()
        .prefix_iter(rtxn, &field_id.to_be_bytes())?
        .remap_types::<DecodeIgnore, DecodeIgnore>()
        .fold(Ok(0usize), |count, result| result.and(count).map(|c| c + 1))?;

    // It is forbidden to keep a cursor and write in a database at the same time with LMDB
    // therefore we write the facet levels entries into a grenad file before transfering them.
    let mut writer = tempfile::tempfile()
        .and_then(|file| create_writer(compression_type, compression_level, file))?;

    // Groups sizes are always a power of the original level_group_size and therefore a group
    // always maps groups of the previous level and never splits previous levels groups in half.
    let group_size_iter = (1u8..)
        .map(|l| (l, level_group_size.get().pow(l as u32)))
        .take_while(|(_, s)| first_level_size / *s >= min_level_size.get());

    for (level, group_size) in group_size_iter {
        let level = NonZeroU8::new(level).unwrap();
        // Bounds are (position, string value) pairs; position within the level-0 iteration.
        let mut left = (0, "");
        let mut right = (0, "");
        let mut group_docids = RoaringBitmap::new();

        // Because we know the size of the level 0 we can use a range iterator that starts
        // at the first value of the level and goes to the last by simply counting.
        for (i, result) in db.range(rtxn, &((field_id, "")..))?.take(first_level_size).enumerate() {
            let ((_field_id, value), (_original_value, docids)) = result?;

            if i == 0 {
                left = (i as u32, value);
            } else if i % group_size == 0 {
                // we found the first bound of the next group, we must store the left
                // and right bounds associated with the docids. We also reset the docids.
                let docids = mem::take(&mut group_docids);
                write_string_entry(&mut writer, field_id, level, left, right, docids)?;

                // We save the left bound for the new group.
                left = (i as u32, value);
            }

            // The right bound is always the bound we run through.
            group_docids |= docids;
            right = (i as u32, value);
        }

        // Flush the trailing, possibly partial, group.
        if !group_docids.is_empty() {
            let docids = mem::take(&mut group_docids);
            write_string_entry(&mut writer, field_id, level, left, right, docids)?;
        }
    }

    writer_into_reader(writer, shrink_size)
}
|
|
|
|
|
|
|
|
fn write_string_entry(
|
2020-11-18 04:19:25 +08:00
|
|
|
writer: &mut Writer<File>,
|
2021-07-06 17:31:24 +08:00
|
|
|
field_id: FieldId,
|
2021-06-23 23:20:04 +08:00
|
|
|
level: NonZeroU8,
|
|
|
|
(left_id, left_value): (u32, &str),
|
|
|
|
(right_id, right_value): (u32, &str),
|
|
|
|
docids: RoaringBitmap,
|
2021-06-17 00:33:33 +08:00
|
|
|
) -> Result<()> {
|
2021-06-23 23:20:04 +08:00
|
|
|
let key = (field_id, level, left_id, right_id);
|
|
|
|
let key = FacetLevelValueU32Codec::bytes_encode(&key).ok_or(Error::Encoding)?;
|
|
|
|
let data = match level.get() {
|
|
|
|
1 => (Some((left_value, right_value)), docids),
|
|
|
|
_ => (None, docids),
|
|
|
|
};
|
|
|
|
let data = FacetStringZeroBoundsValueCodec::<CboRoaringBitmapCodec>::bytes_encode(&data)
|
|
|
|
.ok_or(Error::Encoding)?;
|
2020-11-18 04:19:25 +08:00
|
|
|
writer.insert(&key, &data)?;
|
|
|
|
Ok(())
|
|
|
|
}
|