diff --git a/lektor_nkdb/src/database/pool.rs b/lektor_nkdb/src/database/pool.rs
index 69736c802ff4a442faea96e014bf80c97b40b87f..8bb0584d3a28b6cd49b8c959819a54581c4ca1de 100644
--- a/lektor_nkdb/src/database/pool.rs
+++ b/lektor_nkdb/src/database/pool.rs
@@ -4,112 +4,137 @@
 //! representation for searching plus some mapping. A pool is common for all the epochs. For epoch
 //! specific things like the [u64] to [Kid] / [Kara] mappings, do that in the epoch struct.

-use crate::{EpochData, KId, Kara, RemoteKId, kara::status::KaraState, strings};
+use crate::{kara::status::KaraState, strings, EpochData, KId, Kara, KaraStatus, RemoteKId};
+use anyhow::Result;
 use futures::prelude::*;
-use hashbrown::HashMap;
+use lektor_utils::dkmap::DKMap;
 use serde::{Deserialize, Serialize};
+use std::ops::Deref;
 use std::sync::atomic::{AtomicU64, Ordering};
 use tokio::sync::RwLock;

-type IdMapping = HashMap<RemoteKId, KId>;
-type StateMapping = HashMap<KId, KaraState>;
+/// ID mapping from local to remote. Note that there is a one-to-one relation between
+/// [RemoteKId] and [KId], even with metadata updates or file update.
+// State of the karas: virtual or not, if physical: filesize, filehash,...
+type KaraMappingValue = (KId, RemoteKId, KaraState);
+type KaraMapping = DKMap<KId, RemoteKId, KaraMappingValue>;

 pub trait IntoMapping: Sized {
-    fn into_mapping(self) -> (KId, RemoteKId, KaraState);
+    fn into_mapping(self) -> KaraMappingValue;
 }

 impl IntoMapping for &'_ Kara {
-    fn into_mapping(self) -> (KId, RemoteKId, KaraState) {
-        todo!()
+    fn into_mapping(self) -> KaraMappingValue {
+        (
+            self.id,
+            self.remote.clone(),
+            match self.kara_status {
+                KaraStatus::Virtual => KaraState::Virtual,
+                // Don't try to discriminate PhysicalUnavailable and PhysicalAvailable here because
+                // it might take some time to resolve
+                _ => KaraState::PhysicalUnavailable,
+            },
+        )
     }
 }

 impl IntoMapping for (KId, RemoteKId, KaraState) {
-    fn into_mapping(self) -> (KId, RemoteKId, KaraState) {
+    fn into_mapping(self) -> KaraMappingValue {
         self
     }
 }

 impl IntoMapping for (RemoteKId, KaraState, KId) {
-    fn into_mapping(self) -> (KId, RemoteKId, KaraState) {
-        todo!()
+    fn into_mapping(self) -> KaraMappingValue {
+        (self.2, self.0, self.1)
     }
 }

 impl IntoMapping for (KaraState, KId, RemoteKId) {
-    fn into_mapping(self) -> (KId, RemoteKId, KaraState) {
-        todo!()
+    fn into_mapping(self) -> KaraMappingValue {
+        (self.1, self.2, self.0)
     }
 }

 impl IntoMapping for (KId, KaraState, RemoteKId) {
-    fn into_mapping(self) -> (KId, RemoteKId, KaraState) {
-        todo!()
+    fn into_mapping(self) -> KaraMappingValue {
+        (self.0, self.2, self.1)
     }
 }

 impl IntoMapping for (RemoteKId, KId, KaraState) {
-    fn into_mapping(self) -> (KId, RemoteKId, KaraState) {
-        todo!()
+    fn into_mapping(self) -> KaraMappingValue {
+        (self.1, self.0, self.2)
     }
 }

 impl IntoMapping for (KaraState, RemoteKId, KId) {
-    fn into_mapping(self) -> (KId, RemoteKId, KaraState) {
-        todo!()
+    fn into_mapping(self) -> KaraMappingValue {
+        (self.2, self.1, self.0)
     }
 }

-#[derive(Debug, Default, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize)]
 pub(crate) struct PoolSerDeProxy {
-    id_mapping: IdMapping,
-    state_mapping: StateMapping,
-    next_id: u64,
+    kara_mapping: KaraMapping,
 }

 /// A pool of all the available kara for every epochs. Contains the mapping for remote id to the
 /// local id representations.
-#[derive(Debug, Default)]
+#[derive(Debug)]
 pub(crate) struct Pool {
     /// ID mapping from local to remote. Note that there is a one-to-one relation between
     /// [RemoteKId] and [KId], even with metadata updates or file update.
-    id_mapping: RwLock<IdMapping>,
-    // State of the karas: virtual or not, if physical: filesize, filehash,...
-    state_mapping: RwLock<StateMapping>,
+    kara_mapping: RwLock<KaraMapping>,

     /// The next local ID.
     next_id: AtomicU64,
 }

 impl From<PoolSerDeProxy> for Pool {
-    fn from(value: PoolSerDeProxy) -> Self {
-        // TODO: Factorize returned value
-        todo!()
+    fn from(proxy: PoolSerDeProxy) -> Self {
+        let next_id = AtomicU64::new(
+            proxy
+                .kara_mapping
+                .clone()
+                .into_iter()
+                .map(|(KId(id), _, _)| id + 1)
+                .max()
+                .unwrap_or(1),
+        );
+
+        Self {
+            next_id,
+            kara_mapping: RwLock::new(proxy.kara_mapping),
+        }
     }
 }

 impl<M: IntoMapping> FromIterator<M> for Pool {
     fn from_iter<T: IntoIterator<Item = M>>(iter: T) -> Self {
-        let (id_mapping, kara_state): (IdMapping, StateMapping) = iter
+        let (mappings, ids): (Vec<KaraMappingValue>, Vec<u64>) = iter
             .into_iter()
-            .map(|mapping| {
-                let (kid, rkid, state) = mapping.into_mapping();
-                ((rkid, kid), (kid, state))
+            .map(|kara| {
+                let (KId(id), rkid, remaining_values) = kara.into_mapping();
+                ((KId(id), rkid, remaining_values), id)
             })
             .unzip();

+        let next_id = ids.into_iter().max().unwrap_or(0) + 1;
+
         Self {
-            next_id: AtomicU64::new(kara_state.keys().map(|KId(id)| *id + 1).max().unwrap_or(1)),
-            id_mapping: RwLock::new(id_mapping),
-            state_mapping: RwLock::new(kara_state),
+            next_id: AtomicU64::new(next_id),
+            kara_mapping: RwLock::new(DKMap::from_iter(mappings)),
         }
     }
 }

 impl Pool {
     pub(crate) async fn to_serialize_proxy(&self) -> PoolSerDeProxy {
-        todo!()
+        PoolSerDeProxy {
+            kara_mapping: self.kara_mapping.read().await.deref().clone(),
+        }
     }

     /// Get a new and unique [KId]
@@ -119,23 +144,43 @@ impl Pool {

     pub(crate) async fn add_mapping(&self, mapping: impl IntoMapping) {
         let (kid, rkid, state) = mapping.into_mapping();
-        self.id_mapping.write().await.insert(rkid, kid);
-        self.state_mapping.write().await.insert(kid, state);
+        self.kara_mapping.write().await.insert((kid, rkid, state));
     }

-    pub(crate) async fn update_state(&self, id: KId, state: KaraState) {
-        todo!()
+    pub(crate) async fn update_state_from_kid(&self, id: KId, state: KaraState) -> Result<()> {
+        if let Err(err) = self.kara_mapping.write().await.update_from_k1(&id, state) {
+            log::error!("pool: update_state: failed to set state for kara {id}: {err}");
+            Err(err)
+        } else {
+            Ok(())
+        }
+    }
+
+    pub(crate) async fn update_state_from_rkid(
+        &self,
+        rid: RemoteKId,
+        state: KaraState,
+    ) -> Result<()> {
+        if let Err(err) = self.kara_mapping.write().await.update_from_k2(&rid, state) {
+            log::error!("pool: update_state: failed to set state for kara {rid}: {err}");
+            Err(err)
+        } else {
+            Ok(())
+        }
     }

     pub(crate) async fn get_state(&self, kid: KId) -> Option<KaraState> {
-        self.state_mapping.read().await.get(&kid).copied()
+        Some(self.kara_mapping.read().await.get_k1(&kid)?.2)
     }

     /// Get the tuple (local, remote) from the string representation of the remote id. Id the local
     /// id is not present, returns none, else some(local_id).
-    pub(crate) async fn get_from_remote(&self, rkid: impl AsRef<str>) -> (Option<KId>, RemoteKId) {
-        let id = self.id_mapping.read().await.get(rkid.as_ref()).copied();
-        (id, RemoteKId(rkid.as_ref().into()))
+    pub(crate) async fn get_from_remote(&self, rkid: RemoteKId) -> (Option<KId>, RemoteKId) {
+        if let Some((id, _, _)) = self.kara_mapping.read().await.get_k2(&rkid) {
+            (Some(id), RemoteKId(rkid.as_ref().into()))
+        } else {
+            (None, RemoteKId(rkid.as_ref().into()))
+        }
     }

     /// Fatcorize the [Arc<str>] for an [EpochData] to have pointer equality. We do that at the
diff --git a/lektor_nkdb/src/database/update.rs b/lektor_nkdb/src/database/update.rs
index 9da56ec96ad1b1405d9958fb77ca4b25c0ec2fa0..ead9be4b9ad86239e3126e7a073884dcb4aaba4a 100644
--- a/lektor_nkdb/src/database/update.rs
+++ b/lektor_nkdb/src/database/update.rs
@@ -2,8 +2,8 @@
 use crate::kara::status::KaraState;
 use crate::*;
-use anyhow::Result;
 use anyhow::bail;
+use anyhow::Result;
 use futures::prelude::*;
 use kurisu_api::SHA256;
 use std::{cell::RefCell, marker};
@@ -82,10 +82,10 @@ impl<'a, Storage: DatabaseStorage> UpdateHandler<'a, Storage> {
         }
     }

-    pub async fn set_physical_available(&self, id: KId, hash: SHA256, filesize: u64) {
+    pub async fn set_physical_available(&self, id: KId, hash: SHA256, filesize: u64) -> Result<()> {
         self.pool
-            .update_state(id, KaraState::PhysicalAvailable { filesize, hash })
-            .await;
+            .update_state_from_kid(id, KaraState::PhysicalAvailable { filesize, hash })
+            .await
     }

     /// Add a kara. On success, if the kara needs to be downloaded returns the couple of IDs with
diff --git a/lektor_nkdb/src/lib.rs b/lektor_nkdb/src/lib.rs
index 2d3bec78eacd20d16ba0d0215957ce1809eed846..5be8d9de53682aa4b914975506a2003df8947b36 100644
--- a/lektor_nkdb/src/lib.rs
+++ b/lektor_nkdb/src/lib.rs
@@ -7,21 +7,21 @@ pub use crate::{
     },
     id::{KId, RemoteKId},
     kara::{
-        Kara,
         status::KaraStatus,
         tags::{TagKey, Tags},
         timestamps::KaraTimeStamps,
+        Kara,
     },
     playlists::{
-        PlaylistsHandle,
         playlist::{Playlist, PlaylistInfo},
+        PlaylistsHandle,
     },
     storage::{DatabaseDiskStorage, DatabaseStorage},
 };
-pub use kurisu_api::v2::{SONGORIGIN_LENGTH, SONGTYPE_LENGTH, SongOrigin, SongType};
+pub use kurisu_api::v2::{SongOrigin, SongType, SONGORIGIN_LENGTH, SONGTYPE_LENGTH};

 use crate::database::{epoch::EpochData, pool::Pool};
-use anyhow::{Context as _, anyhow};
+use anyhow::{anyhow, Context as _};
 use hashbrown::HashMap;
 use lektor_utils::pushvec::*;
 use playlists::Playlists;
diff --git a/lektor_nkdb/src/storage/disk_storage.rs b/lektor_nkdb/src/storage/disk_storage.rs
index 5099f76f2d1b8a69917c07f199073bbc675ab29a..78cb65c7c18abff93e60b9c57c54c8ab288b2031 100644
--- a/lektor_nkdb/src/storage/disk_storage.rs
+++ b/lektor_nkdb/src/storage/disk_storage.rs
@@ -16,7 +16,7 @@ use std::{
 use tokio::{
     fs,
     io::AsyncWriteExt as _,
-    sync::mpsc::{Receiver, Sender, channel},
+    sync::mpsc::{channel, Receiver, Sender},
 };

 /// Load the database from the disk.
@@ -168,7 +168,7 @@ impl DatabaseStorage for DatabaseDiskStorage {
             Ok(content) => serde_json::from_slice::<PoolSerDeProxy>(content)?.into(),
             //Ok(content) => Pool::from_iter(std::iter::empty()).await,
             // FIXME: display some meaningful message to say we start from scratch
-            Err(e) => Pool::default(),
+            Err(e) => Pool::from_iter(std::iter::empty::<(id::KId, id::RemoteKId, KaraState)>()),
         };

         // Here we will write the playlists one at a time, so no race conditions!
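The lektor_nkdb changes above replace the two one-way HashMaps with a single dual-keyed map, so a kara can be reached either through its local [KId] or through its [RemoteKId]. The sketch below is a minimal, self-contained illustration of that access pattern, assuming only the calls visible in the diff (insert, get_k1, get_k2, update_from_k1); it is a toy stand-in with u64, String and &str in place of KId, RemoteKId and KaraState, not the actual lektor_utils::dkmap::DKMap implementation.

// Toy stand-in for the dual-keyed lookup used by `kara_mapping`; not the real
// `lektor_utils::dkmap::DKMap`, only the shape of the calls made in the diff.
use std::collections::HashMap;
use std::hash::Hash;

#[derive(Default)]
struct DualKeyMap<K1: Eq + Hash + Clone, K2: Eq + Hash + Clone, V> {
    values: Vec<(K1, K2, V)>,
    by_k1: HashMap<K1, usize>,
    by_k2: HashMap<K2, usize>,
}

impl<K1: Eq + Hash + Clone, K2: Eq + Hash + Clone, V> DualKeyMap<K1, K2, V> {
    /// Store one entry, indexed by both keys.
    fn insert(&mut self, (k1, k2, v): (K1, K2, V)) {
        let idx = self.values.len();
        self.by_k1.insert(k1.clone(), idx);
        self.by_k2.insert(k2.clone(), idx);
        self.values.push((k1, k2, v));
    }

    /// Lookup through the first (local) key.
    fn get_k1(&self, k1: &K1) -> Option<&(K1, K2, V)> {
        self.by_k1.get(k1).map(|&i| &self.values[i])
    }

    /// Lookup through the second (remote) key.
    fn get_k2(&self, k2: &K2) -> Option<&(K1, K2, V)> {
        self.by_k2.get(k2).map(|&i| &self.values[i])
    }

    /// Replace the value of the entry addressed by the first key.
    fn update_from_k1(&mut self, k1: &K1, v: V) -> Result<(), String> {
        let i = *self.by_k1.get(k1).ok_or_else(|| "unknown key".to_string())?;
        self.values[i].2 = v;
        Ok(())
    }
}

fn main() {
    let mut pool: DualKeyMap<u64, String, &str> = DualKeyMap::default();
    pool.insert((1, "remote-abc".to_string(), "Virtual"));

    // Resolve a remote id to the local one, as `get_from_remote` does.
    assert_eq!(pool.get_k2(&"remote-abc".to_string()).map(|e| e.0), Some(1));

    // Flip the state through the local id, as `update_state_from_kid` does.
    pool.update_from_k1(&1, "PhysicalUnavailable").unwrap();
    assert_eq!(pool.get_k1(&1).map(|e| e.2), Some("PhysicalUnavailable"));
}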
diff --git a/lektor_repo/src/lib.rs b/lektor_repo/src/lib.rs
index 03cca6ea212c7b814b5c3e09adf70337dd6d0ddd..8a26b58d205447633a675190bff93ef1120a6a5b 100644
--- a/lektor_repo/src/lib.rs
+++ b/lektor_repo/src/lib.rs
@@ -119,8 +119,11 @@ impl Repo {
                 {
                     log::error!("error downloading karaoke {kid}: {err}")
                 } else {
-                    handler.set_physical_available(kid, hash, filesize).await;
-                    return Some(kid);
+                    return handler
+                        .set_physical_available(kid, hash, filesize)
+                        .await
+                        .and(Ok(kid))
+                        .ok();
                 }
             }
             Ok(_) => log::trace!("kara {kid} doesn't need to be downloaded"),
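With set_physical_available now returning a Result, the lektor_repo caller above keeps the kara id only when the pool update succeeded. The snippet below is a self-contained illustration of that Result-to-Option conversion; the function and variable names are illustrative, not taken from the crate.

// Illustration of the `.and(Ok(kid)).ok()` pattern used above: keep the id on
// success, fall back to `None` on failure. `keep_id_on_success` is a made-up
// name, not an API of lektor_repo.
fn keep_id_on_success(update: Result<(), String>, kid: u64) -> Option<u64> {
    // `.and(Ok(kid))` swaps the `()` success value for the id while keeping any
    // error; `.ok()` then turns `Err(_)` into `None`.
    update.and(Ok(kid)).ok()
}

fn main() {
    assert_eq!(keep_id_on_success(Ok(()), 42), Some(42));
    assert_eq!(keep_id_on_success(Err("update failed".into()), 42), None);
}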