feat(event cache): after a back-pagination, don't add a previous gap if all events were deduplicated

This commit is contained in:
Benjamin Bouvier
2024-12-17 16:15:45 +01:00
parent bcb9a86a00
commit 60f521cc23
2 changed files with 117 additions and 7 deletions

View File

@@ -209,14 +209,22 @@ impl RoomPagination {
};
// And insert the new gap if needs be.
if let Some(new_gap) = new_gap {
if let Some(new_pos) = insert_new_gap_pos {
room_events
.insert_gap_at(new_gap, new_pos)
.expect("events_chunk_pos represents a valid chunk position");
} else {
room_events.push_gap(new_gap);
//
// We only do this when at least one new, non-duplicated event has been added
// to the chunk. Otherwise, it means we've back-paginated all the
// known events.
if !add_event_report.deduplicated_all_new_events() {
if let Some(new_gap) = new_gap {
if let Some(new_pos) = insert_new_gap_pos {
room_events
.insert_gap_at(new_gap, new_pos)
.expect("events_chunk_pos represents a valid chunk position");
} else {
room_events.push_gap(new_gap);
}
}
} else {
debug!("not storing previous batch token, because we deduplicated all new back-paginated events");
}
BackPaginationOutcome { events, reached_start }

View File

@@ -1100,3 +1100,105 @@ async fn test_no_gap_stored_after_deduplicated_sync() {
assert_event_matches_msg(&events[2], "sup");
assert_eq!(events.len(), 3);
}
#[async_test]
async fn test_no_gap_stored_after_deduplicated_backpagination() {
    let server = MatrixMockServer::new().await;
    let client = server.client_builder().build().await;

    let event_cache = client.event_cache();

    // Subscribe the event cache to sync updates right away, and turn
    // persistent storage on.
    event_cache.subscribe().unwrap();
    event_cache.enable_storage().unwrap();

    let room_id = room_id!("!omelette:fromage.fr");
    let factory = EventFactory::new().room(room_id).sender(user_id!("@a:b.c"));

    // Seed the room with a single event, a limited timeline and a prev-batch
    // token.
    let room = server
        .sync_room(
            &client,
            JoinedRoomBuilder::new(room_id)
                .add_timeline_event(
                    factory.text_msg("sup").event_id(event_id!("$3")).into_raw_sync(),
                )
                .set_timeline_limited()
                .set_timeline_prev_batch("prev-batch".to_owned()),
        )
        .await;

    let (room_event_cache, _drop_handles) = room.event_cache().await.unwrap();

    let (initial_events, mut updates) = room_event_cache.subscribe().await.unwrap();
    if initial_events.is_empty() {
        // Wait for the sync above to be processed by the event cache.
        let update = updates.recv().await.expect("read error");
        assert_matches!(update, RoomEventCacheUpdate::AddTimelineEvents { .. });
    }
    drop(initial_events);

    // Simulate a sliding-sync timeline window expansion: the next sync returns
    // more items, another limited flag, and a fresh prev-batch token.
    server
        .sync_room(
            &client,
            JoinedRoomBuilder::new(room_id)
                .add_timeline_bulk(vec![
                    factory.text_msg("hello").event_id(event_id!("$1")).into_raw_sync(),
                    factory.text_msg("world").event_id(event_id!("$2")).into_raw_sync(),
                    factory.text_msg("sup").event_id(event_id!("$3")).into_raw_sync(),
                ])
                .set_timeline_limited()
                .set_timeline_prev_batch("prev-batch2".to_owned()),
        )
        .await;

    // Back-paginating from prev-batch2 returns nothing at all.
    server
        .mock_room_messages()
        .from("prev-batch2")
        .ok("start-token-unused".to_owned(), None, Vec::<Raw<AnyTimelineEvent>>::new(), Vec::new())
        .mock_once()
        .mount()
        .await;

    // Back-paginating from prev-batch returns two events we already know
    // about, along with yet another previous-batch token.
    server
        .mock_room_messages()
        .from("prev-batch")
        .ok(
            "start-token-unused".to_owned(),
            Some("prev-batch3".to_owned()),
            vec![
                // Reverse order, since this is a back-pagination response.
                factory.text_msg("world").event_id(event_id!("$2")).into_raw_timeline(),
                factory.text_msg("hello").event_id(event_id!("$1")).into_raw_timeline(),
            ],
            Vec::new(),
        )
        .mock_once()
        .mount()
        .await;

    let paginator = room_event_cache.pagination();

    // The first pagination consumes prev-batch2, the most recent token…
    paginator.run_backwards(20, once).await.unwrap();

    // …and the second one consumes prev-batch, the least recent token.
    paginator.run_backwards(20, once).await.unwrap();

    // The third pagination must be short-circuited and report that the start
    // has been reached: if a useless gap had been stored for prev-batch3, this
    // back-pagination would try to use it and fail.
    let outcome = paginator.run_backwards(20, once).await.unwrap();
    assert!(outcome.reached_start);

    // The room must contain exactly the three known events, in order.
    let (events, _stream) = room_event_cache.subscribe().await.unwrap();
    assert_event_matches_msg(&events[0], "hello");
    assert_event_matches_msg(&events[1], "world");
    assert_event_matches_msg(&events[2], "sup");
    assert_eq!(events.len(), 3);
}