Add buffer to the obkv writer

ManyTheFish 2023-09-21 10:02:08 +02:00
parent 50ba751244
commit 8fb96b8274
2 changed files with 30 additions and 5 deletions

milli/src/update/index_documents/extract/extract_docid_word_positions.rs

@@ -155,7 +155,8 @@ fn extract_tokens_from_document(
     let tokens = process_tokens(tokenizer.tokenize(field))
         .take_while(|(p, _)| (*p as u32) < max_positions_per_attributes);

-    let mut writer = KvWriterU16::memory();
+    buffers.obkv_buffer.clear();
+    let mut writer = KvWriterU16::new(&mut buffers.obkv_buffer);
     for (index, token) in tokens {
         // if a language has been detected for the token, we update the counter.
         if let Some(language) = token.language {
@@ -293,4 +294,6 @@ struct Buffers {
     key_buffer: Vec<u8>,
     // the field buffer for each field's deserialization; it must be cleared between each field.
     field_buffer: String,
+    // buffer used to store the value data containing an obkv.
+    obkv_buffer: Vec<u8>,
 }
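This file's change removes a per-document allocation: `KvWriterU16::memory()` created a fresh `Vec<u8>` for every document, while the new code clears and reuses a buffer owned by the long-lived `Buffers` struct. Below is a self-contained sketch of that reuse pattern; `ToyKvWriter` is a hypothetical stand-in for milli's `KvWriterU16`, not the real obkv format.

    use std::io::{self, Write};

    // Hypothetical stand-in for milli's `KvWriterU16`: it writes
    // `key (u16) | value length (u16) | value` records into any `io::Write`.
    struct ToyKvWriter<W: Write>(W);

    impl<W: Write> ToyKvWriter<W> {
        fn new(writer: W) -> Self {
            ToyKvWriter(writer)
        }

        fn insert(&mut self, key: u16, value: &[u8]) -> io::Result<()> {
            self.0.write_all(&key.to_be_bytes())?;
            self.0.write_all(&(value.len() as u16).to_be_bytes())?;
            self.0.write_all(value)
        }
    }

    fn main() -> io::Result<()> {
        let documents = vec![
            vec![(0u16, b"hello".to_vec()), (1, b"world".to_vec())],
            vec![(0u16, b"second document".to_vec())],
        ];

        // One allocation up front, mirroring the `obkv_buffer: Vec<u8>`
        // field added to the `Buffers` struct.
        let mut obkv_buffer = Vec::new();

        for fields in &documents {
            // `clear()` keeps the capacity, so in steady state no allocation
            // happens here, whereas `KvWriterU16::memory()` allocated a
            // fresh `Vec<u8>` on every iteration.
            obkv_buffer.clear();
            let mut writer = ToyKvWriter::new(&mut obkv_buffer);
            for (key, value) in fields {
                writer.insert(*key, value)?;
            }
            drop(writer); // release the borrow before reading the buffer
            println!("serialized document of {} bytes", obkv_buffer.len());
        }
        Ok(())
    }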

milli/src/update/index_documents/extract/extract_word_position_docids.rs

@@ -1,3 +1,4 @@
+use std::collections::HashSet;
 use std::fs::File;
 use std::io;

@@ -33,18 +34,39 @@ pub fn extract_word_position_docids<R: io::Read + io::Seek>(
         max_memory,
     );

+    let mut word_positions: HashSet<(u16, Vec<u8>)> = HashSet::new();
+    let mut current_document_id = None;
     let mut key_buffer = Vec::new();
     let mut cursor = docid_word_positions.into_cursor()?;
     while let Some((key, value)) = cursor.move_on_next()? {
-        let (document_id_bytes, fid_bytes) = try_split_array_at(key)
+        let (document_id_bytes, _fid_bytes) = try_split_array_at(key)
             .ok_or(SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?;
         let document_id = DocumentId::from_be_bytes(document_id_bytes);

-        for (position, word_bytes) in KvReaderU16::new(&value).iter() {
-            let position = bucketed_position(position);
-            key_buffer.clear();
-            key_buffer.extend_from_slice(word_bytes);
-            key_buffer.push(0);
-            key_buffer.extend_from_slice(&position.to_be_bytes());
-            word_position_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?;
+        if current_document_id.map_or(false, |id| document_id != id) {
+            for (position, word_bytes) in word_positions.iter() {
+                key_buffer.clear();
+                key_buffer.extend_from_slice(word_bytes);
+                key_buffer.push(0);
+                key_buffer.extend_from_slice(&position.to_be_bytes());
+                word_position_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?;
+            }
+            word_positions.clear();
+        }
+
+        current_document_id = Some(document_id);
+
+        for (position, word_bytes) in KvReaderU16::new(&value).iter() {
+            let position = bucketed_position(position);
+            word_positions.insert((position, word_bytes.to_vec()));
+        }
+    }
+
+    if let Some(document_id) = current_document_id {
+        for (position, word_bytes) in word_positions {
+            key_buffer.clear();
+            key_buffer.extend_from_slice(&word_bytes);
+            key_buffer.push(0);
+            key_buffer.extend_from_slice(&position.to_be_bytes());
+            word_position_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?;
         }
     }
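This file's change alters the write pattern: instead of pushing one sorter entry per token occurrence, the new code accumulates deduplicated `(bucketed position, word)` pairs for the current document in a `HashSet`, flushing the set whenever the document id changes and once more after the loop (the extracted chunks are sorted, so each document's entries are contiguous). Below is a self-contained sketch of that group-and-flush pattern, where the input rows and the `flush` sink are hypothetical stand-ins for milli's cursor and sorter.

    use std::collections::HashSet;

    // Hypothetical stand-in for `word_position_docids_sorter.insert(...)`:
    // it just records the flushed (word, position, docid) triples.
    fn flush(
        document_id: u32,
        word_positions: &mut HashSet<(u16, Vec<u8>)>,
        out: &mut Vec<(Vec<u8>, u16, u32)>,
    ) {
        for (position, word_bytes) in word_positions.drain() {
            out.push((word_bytes, position, document_id));
        }
    }

    fn main() {
        // Rows sorted by document id, as the extracted chunks are. The
        // repeated row shows why the HashSet folds duplicates before they
        // reach the sorter.
        let rows = vec![
            (1u32, 0u16, b"hello".as_slice()),
            (1, 0, b"hello".as_slice()), // duplicate (same doc, position, word)
            (1, 3, b"world".as_slice()),
            (2, 0, b"goodbye".as_slice()),
        ];

        let mut word_positions: HashSet<(u16, Vec<u8>)> = HashSet::new();
        let mut current_document_id = None;
        let mut out = Vec::new();

        for (document_id, position, word_bytes) in rows {
            // A new document id means the previous document is complete, so
            // its deduplicated pairs are flushed under the previous id.
            if current_document_id.map_or(false, |id| document_id != id) {
                flush(current_document_id.unwrap(), &mut word_positions, &mut out);
            }
            current_document_id = Some(document_id);
            word_positions.insert((position, word_bytes.to_vec()));
        }

        // Final flush: the loop only flushes when the id *changes*, so the
        // last document's pairs are still in the set, like the trailing
        // `if let Some(document_id) = current_document_id` block in the diff.
        if let Some(document_id) = current_document_id {
            flush(document_id, &mut word_positions, &mut out);
        }

        assert_eq!(out.len(), 3); // the duplicate row was folded away
    }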