diff --git a/lektor_nkdb/src/database/pool.rs b/lektor_nkdb/src/database/pool.rs
index 8bb0584d3a28b6cd49b8c959819a54581c4ca1de..35a2fdbac531f4a0e7bb9a0fc117faeb5cb47e41 100644
--- a/lektor_nkdb/src/database/pool.rs
+++ b/lektor_nkdb/src/database/pool.rs
@@ -4,7 +4,7 @@
 //! representation for searching plus some mapping. A pool is common to all epochs. For
 //! epoch-specific things like the [u64] to [KId] / [Kara] mappings, do that in the epoch struct.
 
-use crate::{kara::status::KaraState, strings, EpochData, KId, Kara, KaraStatus, RemoteKId};
+use crate::{EpochData, KId, Kara, KaraStatus, RemoteKId, kara::status::KaraState, strings};
 use anyhow::Result;
 use futures::prelude::*;
 use lektor_utils::dkmap::DKMap;
@@ -82,7 +82,7 @@ pub(crate) struct PoolSerDeProxy {
 /// A pool of all the available kara for every epoch. Contains the mapping from the remote ID to
 /// the local ID representations.
 #[derive(Debug)]
-pub(crate) struct Pool {
+pub struct Pool {
     /// ID mapping from local to remote. Note that there is a one-to-one relation between
 /// [RemoteKId] and [KId], even with metadata or file updates.
     // State of the karas: virtual or not, if physical: filesize, filehash,...
@@ -111,6 +111,12 @@ impl From<PoolSerDeProxy> for Pool {
     }
 }
 
+impl Default for Pool {
+    fn default() -> Self {
+        Self::from_iter(std::iter::empty::<(KId, RemoteKId, KaraState)>())
+    }
+}
+
 impl<M: IntoMapping> FromIterator<M> for Pool {
     fn from_iter<T: IntoIterator<Item = M>>(iter: T) -> Self {
         let (mappings, ids): (Vec<KaraMappingValue>, Vec<u64>) = iter
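The new `Default` impl simply delegates to the generic `FromIterator` impl above, using a turbofish on an empty iterator to pin the mapping type. A minimal sketch of the pattern, with hypothetical `Thing` / `Mapping` stand-ins for `Pool` and its `(KId, RemoteKId, KaraState)` tuple:

```rust
// Sketch of the delegation pattern; `Thing` and `Mapping` are
// hypothetical stand-ins for `Pool` and its mapping tuple.
struct Mapping(u64);

struct Thing {
    items: Vec<Mapping>,
}

impl FromIterator<Mapping> for Thing {
    fn from_iter<T: IntoIterator<Item = Mapping>>(iter: T) -> Self {
        Self { items: iter.into_iter().collect() }
    }
}

impl Default for Thing {
    fn default() -> Self {
        // The turbofish pins the item type: an empty iterator gives
        // the compiler nothing else to infer the item type from.
        Self::from_iter(std::iter::empty::<Mapping>())
    }
}

fn main() {
    assert!(Thing::default().items.is_empty());
}
```

Call sites that previously had to spell out `Pool::from_iter(std::iter::empty::<…>())` can now just write `Pool::default()`.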
diff --git a/lektor_nkdb/src/storage/disk_storage.rs b/lektor_nkdb/src/storage/disk_storage.rs
index 78cb65c7c18abff93e60b9c57c54c8ab288b2031..f31c02f4cc049bbc423fab405512fccfa8cc9fa6 100644
--- a/lektor_nkdb/src/storage/disk_storage.rs
+++ b/lektor_nkdb/src/storage/disk_storage.rs
@@ -16,7 +16,7 @@ use std::{
 use tokio::{
     fs,
     io::AsyncWriteExt as _,
-    sync::mpsc::{channel, Receiver, Sender},
+    sync::mpsc::{Receiver, Sender, channel},
 };
 
 /// Load the database from the disk.
@@ -122,16 +122,16 @@ impl DatabaseDiskStorage {
     }
 
     async fn get_kara_cached_checksum(&self, id: KId) -> anyhow::Result<SHA256> {
-        if fs::try_exists(self.path_from_root(format!("data/{id}.mkv"))).await? == false {
-            return Err(anyhow!("No mkv file for kara {id}"));
-        }
+        ensure!(
+            fs::try_exists(self.path_from_root(format!("data/{id}.mkv"))).await?,
+            "No mkv file for kara {id}"
+        );
 
-        Ok(
-            fs::read_to_string(self.path_from_root(format!("data/{id}.ok")))
-                .await
-                .context("failed to read `.ok` file")?
-                .parse::<SHA256>()?,
-        )
+        let checksum = fs::read_to_string(self.path_from_root(format!("data/{id}.ok")))
+            .await
+            .context("failed to read `.ok` file")?
+            .parse::<SHA256>()?;
+        Ok(checksum)
     }
 }
 
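`ensure!` is `anyhow`'s early-return guard: when the condition is false it returns `Err(anyhow!(…))` from the enclosing function. It has to be in scope, and this hunk does not show the matching import change, so that part is an assumption. A minimal standalone sketch of the behaviour:

```rust
use anyhow::{Result, ensure};

// `ensure!(cond, "msg")` expands to an early
// `return Err(anyhow!("msg"))` when `cond` is false.
fn check_mkv_exists(exists: bool, id: u64) -> Result<()> {
    ensure!(exists, "No mkv file for kara {id}");
    Ok(())
}

fn main() {
    assert!(check_mkv_exists(true, 42).is_ok());
    assert!(check_mkv_exists(false, 42).is_err());
}
```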
@@ -153,23 +153,13 @@ impl DatabaseStorage for DatabaseDiskStorage {
 
         // We find the maximal epoch number for this computer, to be sure we do not overwrite any
         // previous epoch.
-        let last_epoch = FolderReader::new(prefix.join("epoch"))
-            .await?
+        let epochs = (FolderReader::new(prefix.join("epoch")).await?)
             .unfold_entries::<u64>()
-            .collect::<FuturesUnordered<_>>()
-            .await
-            .into_iter()
-            .max()
-            .unwrap_or_default();
-
-        let pool_json = &fs::read(prefix.join("pool.json")).await;
-        let pool = match pool_json {
-            // FIXME: if deserialize error, then stop and display why
-            Ok(content) => serde_json::from_slice::<PoolSerDeProxy>(content)?.into(),
-            //Ok(content) => Pool::from_iter(std::iter::empty()).await,
-            // FIXME: display some meaningful message to say we start from scratch
-            Err(e) => Pool::from_iter(std::iter::empty::<(id::KId, id::RemoteKId, KaraState)>()),
-        };
+            .collect::<FuturesUnordered<_>>();
+        let last_epoch = epochs.await.into_iter().max().unwrap_or_default();
+
+        let content = fs::read(prefix.join("pool.json")).await?;
+        let pool: Pool = serde_json::from_slice::<PoolSerDeProxy>(&content)?.into();
 
         // Here we will write the playlists one at a time, so no race conditions!
         let (send_playlist, recv) = channel::<PlaylistWriteEvent>(10);
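Note the behaviour change here: the old code silently fell back to an empty pool whenever `pool.json` could not be read, while the new code propagates the error with `?`, which also surfaces deserialization failures (addressing the first `FIXME`). If a fresh install should still start from scratch, the new `Default` impl would make that distinction a one-liner; this is a sketch only, reusing the names from the hunk above, not what this patch does:

```rust
// Sketch: treat a missing/unreadable file as a fresh start, but keep
// failing loudly on a present-but-corrupt `pool.json`.
let pool: Pool = match fs::read(prefix.join("pool.json")).await {
    Ok(content) => serde_json::from_slice::<PoolSerDeProxy>(&content)?.into(),
    Err(_) => Pool::default(),
};
```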
@@ -253,14 +243,12 @@ impl DatabaseStorage for DatabaseDiskStorage {
 
     async fn write_pool(&self, data: &Pool) -> anyhow::Result<()> {
         // FIXME: here, self.pool is not updated to match what is in data
-        if let Ok(data) = serde_json::to_string(&data.to_serialize_proxy().await) {
-            self.pool_pipeline
-                .send(PoolWriteEvent::Write(data))
-                .await
-                .context("failed to send event")
-        } else {
-            Err(anyhow!("Error while serializing pool"))
-        }
+        let data = serde_json::to_string(&data.to_serialize_proxy().await)
+            .context("error while serializing pool")?;
+        self.pool_pipeline
+            .send(PoolWriteEvent::Write(data))
+            .await
+            .context("failed to send event")
     }
 
     async fn write_playlist(&self, playlist: &Playlist) -> anyhow::Result<()> {
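The `write_pool` rewrite swaps the `if let Ok … else Err(anyhow!(…))` shape for `anyhow::Context`, which attaches the message only on the error path and lets `?` do the early return. A minimal standalone sketch of the pattern, with a hypothetical `serialize` standing in for `serde_json::to_string`:

```rust
use anyhow::{Context, Result};

// Hypothetical stand-in for a fallible serializer.
fn serialize(ok: bool) -> Result<String, std::fmt::Error> {
    if ok { Ok("{}".into()) } else { Err(std::fmt::Error) }
}

fn write(ok: bool) -> Result<()> {
    // `.context(...)` wraps the error and `?` returns early, replacing
    // the old `if let Ok(..) { .. } else { Err(anyhow!(..)) }` shape.
    let _data = serialize(ok).context("error while serializing pool")?;
    Ok(())
}

fn main() {
    assert!(write(true).is_ok());
    assert!(write(false).is_err());
}
```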
@@ -329,17 +317,14 @@ impl DatabaseStorage for DatabaseDiskStorage {
 
     async fn is_kara_valid_file(&self, id: KId, status: KaraStatus) -> bool {
         match status {
-            KaraStatus::Physical => {
-                if let Ok(known_checksum) = self.get_kara_cached_checksum(id).await {
-                    if let Some(KaraState::PhysicalAvailable { filesize: _, hash }) =
-                        self.pool.get_state(id).await
-                    {
-                        return known_checksum == hash;
-                    }
-                }
-                false
-            }
             KaraStatus::Virtual => true,
+            KaraStatus::Physical => match self.get_kara_cached_checksum(id).await {
+                Ok(known) => match self.pool.get_state(id).await {
+                    Some(KaraState::PhysicalAvailable { hash, .. }) => known == hash,
+                    _ => false,
+                },
+                _ => false,
+            },
         }
     }
 }
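The nested `match` in `is_kara_valid_file` keeps the logic flat and puts the cheap `Virtual` arm first. An equivalent shape using `Option::is_some_and` plus a `matches!` guard is sketched below with hypothetical stand-in types (a `u64` hash instead of `SHA256`), purely to illustrate the alternative style:

```rust
// Hypothetical stand-in mirroring the variant matched above; the real
// code uses `SHA256` for the hash, `u64` is only for the sketch.
enum KaraState {
    Virtual,
    PhysicalAvailable { filesize: u64, hash: u64 },
}

// Same logic as the `Some(PhysicalAvailable { hash, .. }) => known == hash`
// arm: `None` and every other variant collapse to `false`.
fn checksum_matches(known: u64, state: Option<KaraState>) -> bool {
    state.is_some_and(|s| {
        matches!(s, KaraState::PhysicalAvailable { hash, .. } if known == hash)
    })
}

fn main() {
    let ok = Some(KaraState::PhysicalAvailable { filesize: 1, hash: 7 });
    assert!(checksum_matches(7, ok));
    assert!(!checksum_matches(7, Some(KaraState::Virtual)));
    assert!(!checksum_matches(7, None));
}
```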
diff --git a/lektor_nkdb/src/storage/mod.rs b/lektor_nkdb/src/storage/mod.rs
index 5fb297f53097c7e8c3d38cba19db53e2f69dc922..896b0df870429e4f49624fdab23ee0dd5fac3a31 100644
--- a/lektor_nkdb/src/storage/mod.rs
+++ b/lektor_nkdb/src/storage/mod.rs
@@ -79,6 +79,6 @@ pub trait DatabaseStorage: Sized + std::fmt::Debug {
     /// Get the path to a kara by its ID. Every file should have an [url::Url] where it is located.
     fn get_kara_uri(&self, id: KId) -> Result<url::Url>;
 
-    /// Get the path to a kara by its ID. Every file should have an [url::Url] where it is located.
+    /// Tells whether the kara identified by [KId] has a valid file for the given [KaraStatus].
     async fn is_kara_valid_file(&self, id: KId, status: KaraStatus) -> bool;
 }
diff --git a/lektor_nkdb/src/storage/test_storage.rs b/lektor_nkdb/src/storage/test_storage.rs
index 049ad384b25b663e7b2b198c3adc7259e26f764a..4e566e3952349fc3de8d2dd730dae302c71d5278 100644
--- a/lektor_nkdb/src/storage/test_storage.rs
+++ b/lektor_nkdb/src/storage/test_storage.rs
@@ -62,11 +62,11 @@ impl DatabaseStorage for DatabaseTestStorage {
         url::Url::parse(&format!("void://{id}")).with_context(|| format!("invalid url void://{id}"))
     }
 
-    async fn write_pool(&self, data: &crate::Pool) -> Result<()> {
+    async fn write_pool(&self, _: &crate::Pool) -> Result<()> {
         todo!()
     }
 
-    async fn is_kara_valid_file(&self, id: KId, status: crate::KaraStatus) -> bool {
+    async fn is_kara_valid_file(&self, _id: KId, _status: crate::KaraStatus) -> bool {
         todo!()
     }
 }