feat(event cache): add a way to clear a single room's persistent storage

This commit is contained in:
Benjamin Bouvier
2024-12-09 15:35:05 +01:00
parent 0783cf89ba
commit 4ee96aaffc
3 changed files with 157 additions and 0 deletions

View File

@@ -898,6 +898,16 @@ impl Room {
Ok(())
}
/// Wipe the event cache for the current room.
///
/// Removes every piece of event-cache data for this room, both the
/// in-memory copy and, when enabled, the persisted storage.
pub async fn clear_event_cache_storage(&self) -> Result<(), ClientError> {
    let (cache, _drop_handles) = self.inner.event_cache().await?;
    cache.clear().await?;
    Ok(())
}
}
/// Generates a `matrix.to` permalink to the given room alias.

View File

@@ -352,6 +352,13 @@ struct AllEventsCache {
relations: RelationsMap,
}
impl AllEventsCache {
    /// Empties both internal mappings, dropping every cached event and
    /// every recorded relation.
    fn clear(&mut self) {
        // The two maps are independent, so the order here doesn't matter.
        self.relations.clear();
        self.events.clear();
    }
}
struct EventCacheInner {
/// A weak reference to the inner client, useful when trying to get a handle
/// on the owning client.

View File

@@ -128,6 +128,27 @@ impl RoomEventCache {
}
}
/// Clear all the storage for this [`RoomEventCache`].
///
/// This will get rid of all the events from the linked chunk and persisted
/// storage.
pub async fn clear(&self) -> Result<()> {
    // Wipe the linked chunk and, when enabled, the persistent store.
    // Scope the write guard so it's released before we touch anything else.
    {
        let mut state = self.inner.state.write().await;
        state.clear().await?;
    }

    // Drop the transient event/relation mappings as well.
    self.inner.all_events.write().await.clear();

    // Put the paginator back into its pristine state.
    // TODO: properly stop any ongoing back-pagination.
    let _ = self.inner.paginator.set_idle_state(PaginatorState::Initial, None, None);

    // Let observers know everything was wiped out; it's fine if nobody is
    // listening, hence the ignored send result.
    let _ = self.inner.sender.send(RoomEventCacheUpdate::Clear);

    Ok(())
}
/// Looks for related event ids for the passed event id, and appends them to
/// the `results` parameter. Then it'll recursively get the related
/// event ids for those too.
@@ -602,6 +623,14 @@ mod private {
Ok(Self { room, store, events, waited_for_initial_prev_token: false })
}
/// Clear all cached content for this [`RoomEventCacheState`].
///
/// Resets the in-memory linked chunk, pushes the resulting updates to the
/// backing store, and forgets that we ever waited for an initial
/// previous-batch token.
pub async fn clear(&mut self) -> Result<(), EventCacheError> {
    // Empty the in-memory linked chunk; this records the removal as
    // pending updates on the chunk itself.
    self.events.reset();
    // Flush those pending updates down to the persistent store, if any.
    self.propagate_changes().await?;
    // With the events gone, the previous-batch token is meaningless too;
    // the next back-pagination will have to fetch a fresh one.
    self.waited_for_initial_prev_token = false;
    Ok(())
}
/// Removes the bundled relations from an event, if they were present.
///
/// Only replaces the present if it contained bundled relations.
@@ -1112,6 +1141,117 @@ mod tests {
assert!(chunks.next().is_none());
}
// End-to-end check of `RoomEventCache::clear`: seed the store directly with
// linked-chunk updates (bypassing sync), load them through a mock client,
// then clear and verify that memory, subscribers, and the store all agree
// the room is empty.
#[cfg(not(target_arch = "wasm32"))] // This uses the cross-process lock, so needs time support.
#[async_test]
async fn test_clear() {
    use crate::{assert_let_timeout, event_cache::RoomEventCacheUpdate};

    let room_id = room_id!("!galette:saucisse.bzh");
    let f = EventFactory::new().room(room_id).sender(user_id!("@ben:saucisse.bzh"));

    let event_cache_store = Arc::new(MemoryStore::new());

    let event_id1 = event_id!("$1");
    let event_id2 = event_id!("$2");

    let ev1 = f.text_msg("hello world").sender(*ALICE).event_id(event_id1).into_sync();
    let ev2 = f.text_msg("how's it going").sender(*BOB).event_id(event_id2).into_sync();

    // Prefill the store with some data.
    event_cache_store
        .handle_linked_chunk_updates(
            room_id,
            vec![
                // An empty items chunk.
                Update::NewItemsChunk {
                    previous: None,
                    new: ChunkIdentifier::new(0),
                    next: None,
                },
                // A gap chunk.
                Update::NewGapChunk {
                    previous: Some(ChunkIdentifier::new(0)),
                    // Chunk IDs aren't supposed to be ordered, so use a random value here.
                    new: ChunkIdentifier::new(42),
                    next: None,
                    gap: Gap { prev_token: "cheddar".to_owned() },
                },
                // Another items chunk, non-empty this time.
                Update::NewItemsChunk {
                    previous: Some(ChunkIdentifier::new(42)),
                    new: ChunkIdentifier::new(1),
                    next: None,
                },
                Update::PushItems {
                    at: Position::new(ChunkIdentifier::new(1), 0),
                    items: vec![ev1.clone()],
                },
                // And another items chunk, non-empty again.
                Update::NewItemsChunk {
                    previous: Some(ChunkIdentifier::new(1)),
                    new: ChunkIdentifier::new(2),
                    next: None,
                },
                Update::PushItems {
                    at: Position::new(ChunkIdentifier::new(2), 0),
                    items: vec![ev2.clone()],
                },
            ],
        )
        .await
        .unwrap();

    // Build a mock client backed by the pre-filled store, so the event cache
    // reloads the seeded chunks on startup.
    let client = MockClientBuilder::new("http://localhost".to_owned())
        .store_config(
            StoreConfig::new("hodlor".to_owned()).event_cache_store(event_cache_store.clone()),
        )
        .build()
        .await;

    let event_cache = client.event_cache();

    // Don't forget to subscribe and like^W enable storage!
    event_cache.subscribe().unwrap();
    event_cache.enable_storage().unwrap();

    client.base_client().get_or_create_room(room_id, matrix_sdk_base::RoomState::Joined);
    let room = client.get_room(room_id).unwrap();

    let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap();
    let (items, mut stream) = room_event_cache.subscribe().await.unwrap();

    // The rooms knows about the cached events.
    assert!(room_event_cache.event(event_id1).await.is_some());

    // The reloaded room must contain the two events.
    assert_eq!(items.len(), 2);
    assert_eq!(items[0].event_id().unwrap(), event_id1);
    assert_eq!(items[1].event_id().unwrap(), event_id2);

    // No updates should have been emitted yet.
    assert!(stream.is_empty());

    // After clearing,…
    room_event_cache.clear().await.unwrap();

    //…we get an update that the content has been cleared.
    assert_let_timeout!(Ok(RoomEventCacheUpdate::Clear) = stream.recv());

    // The room event cache has forgotten about the events.
    assert!(room_event_cache.event(event_id1).await.is_none());

    // A fresh subscription also sees no items.
    let (items, _) = room_event_cache.subscribe().await.unwrap();
    assert!(items.is_empty());

    // The event cache store too.
    let reloaded = event_cache_store.reload_linked_chunk(room_id).await.unwrap();
    // Note: while the event cache store could return `None` here, clearing it will
    // reset it to its initial form, maintaining the invariant that it
    // contains a single items chunk that's empty.
    let linked_chunk = reloaded.unwrap();
    assert_eq!(linked_chunk.num_items(), 0);
}
#[cfg(not(target_arch = "wasm32"))] // This uses the cross-process lock, so needs time support.
#[async_test]
async fn test_load_from_storage() {