Keep the ZK flow when enqueuing tasks
This commit is contained in:
parent
c488a4a351
commit
2d1434da81
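
In short: `run` now keeps the `LeaderLatch` produced during election setup instead of dropping it, the scheduler thread only calls `tick()` when it holds leadership (or when no cluster is configured at all), and `register` only writes the task to the local DB and signals a tick when ZooKeeper is absent, so that in a cluster the task travels through ZK and comes back to every instance via the watcher.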
@@ -609,7 +609,8 @@ impl IndexScheduler {
        #[cfg(test)]
        run.breakpoint(Breakpoint::Init);

-        if let Some(zookeeper) = self.zookeeper.clone() {
+        let latch = match self.zookeeper.clone() {
+            Some(zookeeper) => {
                let id = Uuid::new_v4().to_string();
                let latch = LeaderLatch::new(zookeeper.clone(), id, "/election".to_string());
                latch.start().unwrap();
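
For context, a minimal sketch of how the LeaderLatch recipe used in this hunk is typically wired up, assuming the `zookeeper` crate's `ZooKeeper::connect` and `recipes::leader::LeaderLatch` APIs; the watcher, timeout, and function name below are illustrative, not the commit's:

    use std::sync::Arc;
    use std::time::Duration;

    use uuid::Uuid;
    use zookeeper::recipes::leader::LeaderLatch;
    use zookeeper::{WatchedEvent, Watcher, ZooKeeper};

    struct NoopWatcher;
    impl Watcher for NoopWatcher {
        fn handle(&self, _event: WatchedEvent) {}
    }

    fn start_election(zk_addr: &str) -> LeaderLatch {
        // Connect to the cluster; every instance gets a unique participant id.
        let zookeeper = Arc::new(
            ZooKeeper::connect(zk_addr, Duration::from_secs(5), NoopWatcher).unwrap(),
        );
        let id = Uuid::new_v4().to_string();

        // All instances race on the same "/election" path; exactly one holds
        // leadership at any time, and `has_leadership()` reflects it.
        let latch = LeaderLatch::new(zookeeper, id, "/election".to_string());
        latch.start().unwrap();
        latch
    }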
@@ -659,8 +660,12 @@ impl IndexScheduler {
            let dst = snapshot_dir.join("indexes");
            for result in std::fs::read_dir(&dst).unwrap() {
                let entry = result.unwrap();
-                let uuid =
-                    entry.file_name().as_os_str().to_str().unwrap().to_string();
+                let uuid = entry
+                    .file_name()
+                    .as_os_str()
+                    .to_str()
+                    .unwrap()
+                    .to_string();
                log::info!("\tDownloading the index {}", uuid.to_string());
                std::fs::copy(
                    dst.join(&uuid),
@@ -739,8 +744,12 @@ impl IndexScheduler {
                    let path = path.unwrap();
                    // Add raw task content in local DB
                    log::info!("Received a new task from the cluster at {}", path);
-                    let (data, _stat) =
-                        this.zookeeper.as_ref().unwrap().get_data(&path, false).unwrap();
+                    let (data, _stat) = this
+                        .zookeeper
+                        .as_ref()
+                        .unwrap()
+                        .get_data(&path, false)
+                        .unwrap();
                    let task = serde_json::from_slice(data.as_slice()).unwrap();
                    let mut wtxn = this.env.write_txn().unwrap();
                    this.register_raw_task(&mut wtxn, &task).unwrap();
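
The reflowed chain is the follower path: fetch the raw bytes stored under the task node and replay them locally. As a standalone sketch, where `get_data(&path, false)` is the real `zookeeper` crate call from the hunk and `RawTask` is a hypothetical stand-in for the scheduler's task type:

    use serde::Deserialize;
    use zookeeper::ZooKeeper;

    #[derive(Deserialize)]
    struct RawTask {
        uid: u32, // illustrative field, not the commit's schema
    }

    fn fetch_task(zookeeper: &ZooKeeper, path: &str) -> RawTask {
        log::info!("Received a new task from the cluster at {}", path);
        // `false` means: do not leave a watch armed on this node.
        let (data, _stat) = zookeeper.get_data(path, false).unwrap();
        serde_json::from_slice(data.as_slice()).unwrap()
    }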
@@ -757,16 +766,21 @@ impl IndexScheduler {
                    this.wake_up.signal();
                })
                .unwrap();

+                Some(latch)
            }
+            None => None,
+        };

        let this = self.private_clone();
        std::thread::spawn(move || {
            loop {
                // we're either a leader or not running in a cluster,
                // either way we should wait until we receive a task.
-                let wake_up = this.wake_up.clone();
-                let _ = wake_up.wait();
+                this.wake_up.wait();
+
+                // TODO watch the /election node and send a signal once it changes (be careful about atomics ordering)
+                if latch.as_ref().map_or(true, |latch| latch.has_leadership()) {
                    match this.tick() {
                        Ok(TickOutcome::TickAgain(n)) => {
                            // We must tick again.
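
The new gate is worth isolating. A minimal sketch, assuming the crate's `recipes::leader` module path: with no ZooKeeper configured, `latch` is `None` and `map_or(true, ...)` lets the instance tick; in a cluster, only the current leader drains the queue.

    use zookeeper::recipes::leader::LeaderLatch;

    fn should_tick(latch: &Option<LeaderLatch>) -> bool {
        latch.as_ref().map_or(true, |latch| latch.has_leadership())
    }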
@@ -849,7 +863,9 @@ impl IndexScheduler {
                        vec![],
                        None,
                    );
-                    log::info!("Notified everyone about the new snapshot {snapshot_id}");
+                    log::info!(
+                        "Notified everyone about the new snapshot {snapshot_id}"
+                    );

                    // We can now delete all the tasks that has been processed
                    let processed = this
@@ -878,6 +894,7 @@ impl IndexScheduler {
                    }
                }
            }
+            }
        });
    }

@@ -1264,12 +1281,14 @@ impl IndexScheduler {
        // (that it does not contain duplicate indexes).
        check_index_swap_validity(&task)?;

+        if self.zookeeper.is_none() {
            this.register_raw_task(&mut wtxn, &task)?;

            if let Err(e) = wtxn.commit() {
                this.delete_persisted_task_data(&task)?;
                return Err(e.into());
            }
+        }

        // If the registered task is a task cancelation
        // we inform the processing tasks to stop (if necessary).
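
Together with the hunk at -1281 below, this turns the enqueue path into a two-way switch. A sketch with a stand-in type; `Scheduler` and its helper methods are illustrative, not the commit's API, and only the branching on "is a cluster configured?" is the commit's:

    struct Scheduler {
        clustered: bool, // stands in for `self.zookeeper.is_some()`
    }

    impl Scheduler {
        fn enqueue(&self, raw_task: &[u8]) {
            if !self.clustered {
                // Standalone: register in the local DB and wake the tick loop.
                self.register_locally(raw_task);
                self.signal_tick();
            } else {
                // Clustered: publish to ZooKeeper instead; the watch callback
                // registers it locally on every instance, leader included.
                self.publish_to_zookeeper(raw_task);
            }
        }

        fn register_locally(&self, _raw: &[u8]) { /* heed write_txn + commit */ }
        fn signal_tick(&self) { /* wake_up.signal() */ }
        fn publish_to_zookeeper(&self, _raw: &[u8]) { /* create a sequential znode */ }
    }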
@@ -1281,8 +1300,10 @@ impl IndexScheduler {
            }
        }

+        if self.zookeeper.is_none() {
            // notify the scheduler loop to execute a new tick
            this.wake_up.signal();
+        }

        // TODO: send task to ZK in raw json.
        if let Some(zookeeper) = &self.zookeeper {
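
The page cuts the hunk off at this point, so how the "send task to ZK in raw json" TODO is resolved is not shown. Purely as a hypothetical illustration of that idea, using only calls that exist in the `zookeeper` crate; the "/tasks" path and node prefix are invented, not the commit's:

    use zookeeper::{Acl, CreateMode, ZooKeeper};

    fn publish_task(zookeeper: &ZooKeeper, task_json: Vec<u8>) -> zookeeper::ZkResult<String> {
        // Store the serialized task under a sequential persistent node that
        // followers watch; ZooKeeper appends a monotonic suffix to the name.
        zookeeper.create(
            "/tasks/task-",
            task_json,
            Acl::open_unsafe().clone(),
            CreateMode::PersistentSequential,
        )
    }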