From 032356566e4ebd3f6ea3834c670273745c4bf435 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Mon, 13 Mar 2023 14:07:36 +0100 Subject: [PATCH 01/43] chore(sdk): Rewrite a small part of the code to make it clearer. --- .../src/sliding_sync/list/request_generator.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index 76281e0b5..ff05512cf 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -65,17 +65,13 @@ impl SlidingSyncListRequestGenerator { batch_size: u32, limit: Option, ) -> v4::SyncRequestList { - let calculated_end = start + batch_size; + let maximum_end = start + batch_size; - let mut end = match limit { - Some(limit) => min(limit, calculated_end), - _ => calculated_end, - }; + let mut end = limit.map(|limit| min(limit, maximum_end)).unwrap_or(maximum_end); - end = match self.list.rooms_count() { - Some(total_room_count) => min(end, total_room_count - 1), - _ => end, - }; + if let Some(rooms_count) = self.list.rooms_count() { + end = min(end, rooms_count - 1); + } self.make_request_for_ranges(vec![(start.into(), end.into())]) } From c6c259144ae85c3adc9a510bbc61e26aa78ca4e1 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Mon, 13 Mar 2023 14:29:12 +0100 Subject: [PATCH 02/43] chore(sdk): Rename enum variants to be consistent across all the modules. 
--- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 8 +++++--- .../src/sliding_sync/list/request_generator.rs | 14 +++++++------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index af7c706d3..b0ce9344d 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -380,14 +380,16 @@ impl SlidingSyncList { pub(super) fn request_generator(&self) -> SlidingSyncListRequestGenerator { match &self.sync_mode { SlidingSyncMode::PagingFullSync => { - SlidingSyncListRequestGenerator::new_with_paging_syncup(self.clone()) + SlidingSyncListRequestGenerator::new_with_paging_full_sync(self.clone()) } SlidingSyncMode::GrowingFullSync => { - SlidingSyncListRequestGenerator::new_with_growing_syncup(self.clone()) + SlidingSyncListRequestGenerator::new_with_growing_full_sync(self.clone()) } - SlidingSyncMode::Selective => SlidingSyncListRequestGenerator::new_live(self.clone()), + SlidingSyncMode::Selective => { + SlidingSyncListRequestGenerator::new_selective(self.clone()) + } } } } diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index ff05512cf..54f407ed8 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -9,7 +9,7 @@ use super::{Error, SlidingSyncList, SlidingSyncState}; enum GeneratorKind { GrowingFullSync { position: u32, batch_size: u32, limit: Option, live: bool }, PagingFullSync { position: u32, batch_size: u32, limit: Option, live: bool }, - Live, + Selective, } pub(in super::super) struct SlidingSyncListRequestGenerator { @@ -19,7 +19,7 @@ pub(in super::super) struct SlidingSyncListRequestGenerator { } impl SlidingSyncListRequestGenerator { - pub(super) fn new_with_paging_syncup(list: SlidingSyncList) -> Self { + pub(super) fn 
new_with_paging_full_sync(list: SlidingSyncList) -> Self { let batch_size = list.batch_size; let limit = list.limit; let position = list @@ -37,7 +37,7 @@ impl SlidingSyncListRequestGenerator { } } - pub(super) fn new_with_growing_syncup(list: SlidingSyncList) -> Self { + pub(super) fn new_with_growing_full_sync(list: SlidingSyncList) -> Self { let batch_size = list.batch_size; let limit = list.limit; let position = list @@ -55,8 +55,8 @@ impl SlidingSyncListRequestGenerator { } } - pub(super) fn new_live(list: SlidingSyncList) -> Self { - Self { list, ranges: Default::default(), kind: GeneratorKind::Live } + pub(super) fn new_selective(list: SlidingSyncList) -> Self { + Self { list, ranges: Default::default(), kind: GeneratorKind::Selective } } fn prefetch_request( @@ -158,7 +158,7 @@ impl SlidingSyncListRequestGenerator { } } - GeneratorKind::Live => { + GeneratorKind::Selective => { Observable::update_eq(&mut self.list.state.write().unwrap(), |state| { *state = SlidingSyncState::Live; }); @@ -174,7 +174,7 @@ impl Iterator for SlidingSyncListRequestGenerator { match self.kind { GeneratorKind::PagingFullSync { live: true, .. } | GeneratorKind::GrowingFullSync { live: true, .. } - | GeneratorKind::Live => { + | GeneratorKind::Selective => { let ranges = self.list.ranges.read().unwrap().clone(); Some(self.make_request_for_ranges(ranges)) From a2dcfa905fe47880ceddf47f6c7bd4d89f441552 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 15 Mar 2023 16:57:29 +0100 Subject: [PATCH 03/43] fix(sdk): Fix, test, and clean up `SlidingSyncList`. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch should ideally be split into multiple smaller ones, but life is life. This main purpose of this patch is to fix and to test `SlidingSyncListRequestGenerator`. 
This quest has led me to rename multiple fields in `SlidingSyncList` and `SlidingSyncListBuilder`, like: * `rooms_count` becomes `maximum_number_of_rooms`, it's not something the _client_ counts, but it's a maximum number given by the server, * `batch_size` becomes `full_sync_batch_size`, so that now, it emphasizes that it's about full-sync only, * `limit` becomes `full_sync_maximum_number_of_rooms_to_fetch`, so that now, it also emphasizes that it's about full-sync only _and_ what the limit is about! This quest has continued with the renaming of the `SlidingSyncMode` variants. After a discussion with the ElementX team, we've agreed on the following renamings: * `Cold` becomes `NotLoaded`, * `Preload` becomes `Preloaded`, * `CatchingUp` becomes `PartiallyLoaded`, * `Live` becomes `FullyLoaded`. Finally, _le plat de résistance_. In `SlidingSyncListRequestGenerator`, the `make_request_for_ranges` has been renamed to `build_request` and no longer takes a `&mut self` but a simpler `&self`! It didn't make sense to me that something that makes/builds a request was modifying `Self`. Because the type of `SlidingSyncListRequestGenerator::ranges` has changed, all ranges now have a consistent type (within this module at least). Consequently, this method no longer needs to do a type conversion. Still on the same type, the `update_state` method is much more documented, and errors on range bounds (offset by 1) are now all fixed. The creation of new ranges happens in a new dedicated pure function, `create_range`. It returns an `Option` because it's possible to not be able to compute a range (previously, invalid ranges were considered valid). It's used in the `Iterator` implementation. This `Iterator` implementation contains a liiiittle bit more code, but at least now we understand what it does, and it's clear what `range_start` and `desired_size` we calculate.
By the way, the `prefetch_request` method has been removed: it's not a prefetch, it's a regular request; it was calculating the range. But now there is `create_range`, and since it's pure, we can unit test it! _Pour le dessert_, this patch adds multiple tests. It is now possible because of the previous refactoring. First off, we test the `create_range` in many configurations. It's pretty clear to understand, and since it's core to `SlidingSyncListRequestGenerator`, I'm pretty happy with how it ends. Second, we test paging-, growing- and selective- mode with a new macro: `assert_request_and_response`, which allows to “send” requests, and to “receive” responses. The design of `SlidingSync` allows to mimic requests and responses, that's great. We don't really care about the responses here, but we care about the requests' `ranges`, and the `SlidingSyncList.state` after a response is received. It also helps to see how ranges behaves when the state is `PartiallyLoaded` or `FullyLoaded`. --- bindings/matrix-sdk-ffi/src/sliding_sync.rs | 10 +- crates/matrix-sdk/src/sliding_sync/builder.rs | 5 +- .../src/sliding_sync/list/builder.rs | 52 +- .../matrix-sdk/src/sliding_sync/list/mod.rs | 134 ++-- .../sliding_sync/list/request_generator.rs | 643 +++++++++++++++--- crates/matrix-sdk/src/sliding_sync/mod.rs | 68 +- labs/jack-in/src/client/mod.rs | 7 +- labs/jack-in/src/client/state.rs | 2 +- .../sliding-sync-integration-test/src/lib.rs | 32 +- 9 files changed, 739 insertions(+), 214 deletions(-) diff --git a/bindings/matrix-sdk-ffi/src/sliding_sync.rs b/bindings/matrix-sdk-ffi/src/sliding_sync.rs index 4a70a2695..440066b4c 100644 --- a/bindings/matrix-sdk-ffi/src/sliding_sync.rs +++ b/bindings/matrix-sdk-ffi/src/sliding_sync.rs @@ -448,19 +448,19 @@ impl SlidingSyncListBuilder { pub fn batch_size(self: Arc, batch_size: u32) -> Arc { let mut builder = unwrap_or_clone_arc(self); - builder.inner = builder.inner.batch_size(batch_size); + builder.inner = 
builder.inner.full_sync_batch_size(batch_size); Arc::new(builder) } pub fn room_limit(self: Arc, limit: u32) -> Arc { let mut builder = unwrap_or_clone_arc(self); - builder.inner = builder.inner.limit(limit); + builder.inner = builder.inner.full_sync_maximum_number_of_rooms_to_fetch(limit); Arc::new(builder) } pub fn no_room_limit(self: Arc) -> Arc { let mut builder = unwrap_or_clone_arc(self); - builder.inner = builder.inner.limit(None); + builder.inner = builder.inner.full_sync_maximum_number_of_rooms_to_fetch(None); Arc::new(builder) } @@ -556,7 +556,7 @@ impl SlidingSyncList { &self, observer: Box, ) -> Arc { - let mut rooms_count_stream = self.inner.rooms_count_stream(); + let mut rooms_count_stream = self.inner.maximum_number_of_rooms_stream(); Arc::new(TaskHandle::new(RUNTIME.spawn(async move { loop { @@ -598,7 +598,7 @@ impl SlidingSyncList { /// Total of rooms matching the filter pub fn current_room_count(&self) -> Option { - self.inner.rooms_count() + self.inner.maximum_number_of_rooms() } /// The current timeline limit diff --git a/crates/matrix-sdk/src/sliding_sync/builder.rs b/crates/matrix-sdk/src/sliding_sync/builder.rs index 114787487..027c1f98c 100644 --- a/crates/matrix-sdk/src/sliding_sync/builder.rs +++ b/crates/matrix-sdk/src/sliding_sync/builder.rs @@ -248,8 +248,9 @@ impl SlidingSyncBuilder { { trace!(name, "frozen for list found"); - let FrozenSlidingSyncList { rooms_count, rooms_list, rooms } = frozen_list; - list.set_from_cold(rooms_count, rooms_list); + let FrozenSlidingSyncList { maximum_number_of_rooms, rooms_list, rooms } = + frozen_list; + list.set_from_cold(maximum_number_of_rooms, rooms_list); for (key, frozen_room) in rooms.into_iter() { rooms_found.entry(key).or_insert_with(|| { diff --git a/crates/matrix-sdk/src/sliding_sync/list/builder.rs b/crates/matrix-sdk/src/sliding_sync/list/builder.rs index 772e4d6fc..42e629e24 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/builder.rs +++ 
b/crates/matrix-sdk/src/sliding_sync/list/builder.rs @@ -22,14 +22,13 @@ pub struct SlidingSyncListBuilder { sync_mode: SlidingSyncMode, sort: Vec, required_state: Vec<(StateEventType, String)>, - batch_size: u32, + full_sync_batch_size: u32, + full_sync_maximum_number_of_rooms_to_fetch: Option, send_updates_for_items: bool, - limit: Option, filters: Option, timeline_limit: Option, name: Option, state: SlidingSyncState, - rooms_count: Option, rooms_list: Vector, ranges: Vec<(UInt, UInt)>, } @@ -43,14 +42,13 @@ impl SlidingSyncListBuilder { (StateEventType::RoomEncryption, "".to_owned()), (StateEventType::RoomTombstone, "".to_owned()), ], - batch_size: 20, + full_sync_batch_size: 20, + full_sync_maximum_number_of_rooms_to_fetch: None, send_updates_for_items: false, - limit: None, filters: None, timeline_limit: None, name: None, state: SlidingSyncState::default(), - rooms_count: None, rooms_list: Vector::new(), ranges: Vec::new(), } @@ -79,9 +77,20 @@ impl SlidingSyncListBuilder { self } - /// How many rooms request at a time when doing a full-sync catch up. - pub fn batch_size(mut self, value: u32) -> Self { - self.batch_size = value; + /// When doing a full-sync, this method defines the value by which ranges of + /// rooms will be extended. + pub fn full_sync_batch_size(mut self, value: u32) -> Self { + self.full_sync_batch_size = value; + self + } + + /// When doing a full-sync, this method defines the total limit of rooms to + /// load (it can be useful for gigantic account). + pub fn full_sync_maximum_number_of_rooms_to_fetch( + mut self, + value: impl Into>, + ) -> Self { + self.full_sync_maximum_number_of_rooms_to_fetch = value.into(); self } @@ -92,12 +101,6 @@ impl SlidingSyncListBuilder { self } - /// How many rooms request a total hen doing a full-sync catch up. - pub fn limit(mut self, value: impl Into>) -> Self { - self.limit = value.into(); - self - } - /// Any filters to apply to the query. 
pub fn filters(mut self, value: Option) -> Self { self.filters = value; @@ -123,31 +126,31 @@ impl SlidingSyncListBuilder { self } - /// Set the ranges to fetch + /// Set the ranges to fetch. pub fn ranges>(mut self, range: Vec<(U, U)>) -> Self { self.ranges = range.into_iter().map(|(a, b)| (a.into(), b.into())).collect(); self } - /// Set a single range fetch + /// Set a single range fetch. pub fn set_range>(mut self, from: U, to: U) -> Self { self.ranges = vec![(from.into(), to.into())]; self } - /// Set the ranges to fetch + /// Set the ranges to fetch. pub fn add_range>(mut self, from: U, to: U) -> Self { self.ranges.push((from.into(), to.into())); self } - /// Set the ranges to fetch + /// Set the ranges to fetch. pub fn reset_ranges(mut self) -> Self { - self.ranges = Default::default(); + self.ranges.clear(); self } - /// Build the list + /// Build the list. pub fn build(self) -> Result { let mut rooms_list = ObservableVector::new(); rooms_list.append(self.rooms_list); @@ -156,14 +159,15 @@ impl SlidingSyncListBuilder { sync_mode: self.sync_mode, sort: self.sort, required_state: self.required_state, - batch_size: self.batch_size, + full_sync_batch_size: self.full_sync_batch_size, send_updates_for_items: self.send_updates_for_items, - limit: self.limit, + full_sync_maximum_number_of_rooms_to_fetch: self + .full_sync_maximum_number_of_rooms_to_fetch, filters: self.filters, timeline_limit: Arc::new(StdRwLock::new(Observable::new(self.timeline_limit))), name: self.name.ok_or(Error::BuildMissingField("name"))?, state: Arc::new(StdRwLock::new(Observable::new(self.state))), - rooms_count: Arc::new(StdRwLock::new(Observable::new(self.rooms_count))), + maximum_number_of_rooms: Arc::new(StdRwLock::new(Observable::new(None))), rooms_list: Arc::new(StdRwLock::new(rooms_list)), ranges: Arc::new(StdRwLock::new(Observable::new(self.ranges))), is_cold: Arc::new(AtomicBool::new(false)), diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs 
b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index b0ce9344d..8bd338f6b 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -51,17 +51,19 @@ pub struct SlidingSyncList { /// Required states to return per room required_state: Vec<(StateEventType, String)>, - /// How many rooms request at a time when doing a full-sync catch up - batch_size: u32, + /// When doing a full-sync, the ranges of rooms to load are extended by this + /// `full_sync_batch_size` size. + full_sync_batch_size: u32, + + /// When doing a full-sync, it is possible to limit the total number of + /// rooms to load by using this field. + full_sync_maximum_number_of_rooms_to_fetch: Option, /// Whether the list should send `UpdatedAt`-Diff signals for rooms - /// that have changed + /// that have changed. send_updates_for_items: bool, - /// How many rooms request a total hen doing a full-sync catch up - limit: Option, - - /// Any filters to apply to the query + /// Any filters to apply to the query. filters: Option, /// The maximum number of timeline events to query for @@ -70,16 +72,20 @@ pub struct SlidingSyncList { /// Name of this list to easily recognize them pub name: String, - /// The state this list is in + /// The state this list is in. state: Arc>>, - /// The total known number of rooms, - rooms_count: Arc>>>, + /// The total number of rooms that is possible to interact with for the + /// given list. It's not the total rooms that have been fetched. The + /// server tells the client that it's possible to fetch this amount of + /// rooms maximum. Since this number can change according to the list + /// filters, it's observable. + maximum_number_of_rooms: Arc>>>, - /// The rooms in order + /// The rooms in order. rooms_list: Arc>>, - /// The ranges windows of the list + /// The ranges windows of the list. 
#[allow(clippy::type_complexity)] // temporarily ranges: Arc>>>, @@ -95,12 +101,15 @@ pub struct SlidingSyncList { impl SlidingSyncList { pub(crate) fn set_from_cold( &mut self, - rooms_count: Option, + maximum_number_of_rooms: Option, rooms_list: Vector, ) { - Observable::set(&mut self.state.write().unwrap(), SlidingSyncState::Preload); + Observable::set(&mut self.state.write().unwrap(), SlidingSyncState::Preloaded); self.is_cold.store(true, Ordering::SeqCst); - Observable::set(&mut self.rooms_count.write().unwrap(), rooms_count); + Observable::set( + &mut self.maximum_number_of_rooms.write().unwrap(), + maximum_number_of_rooms, + ); let mut lock = self.rooms_list.write().unwrap(); lock.clear(); @@ -119,7 +128,7 @@ impl SlidingSyncList { .sync_mode(self.sync_mode.clone()) .sort(self.sort.clone()) .required_state(self.required_state.clone()) - .batch_size(self.batch_size) + .full_sync_batch_size(self.full_sync_batch_size) .ranges(self.ranges.read().unwrap().clone()) } @@ -195,14 +204,15 @@ impl SlidingSyncList { ObservableVector::subscribe(&self.rooms_list.read().unwrap()) } - /// Get the current rooms count. - pub fn rooms_count(&self) -> Option { - **self.rooms_count.read().unwrap() + /// Get the maximum number of rooms. See [`Self::maximum_number_of_rooms`] + /// to learn more. + pub fn maximum_number_of_rooms(&self) -> Option { + **self.maximum_number_of_rooms.read().unwrap() } /// Get a stream of rooms count. - pub fn rooms_count_stream(&self) -> impl Stream> { - Observable::subscribe(&self.rooms_count.read().unwrap()) + pub fn maximum_number_of_rooms_stream(&self) -> impl Stream> { + Observable::subscribe(&self.maximum_number_of_rooms.read().unwrap()) } /// Find the current valid position of the room in the list `room_list`. 
@@ -283,26 +293,32 @@ impl SlidingSyncList { #[instrument(skip(self, ops), fields(name = self.name, ops_count = ops.len()))] pub(super) fn handle_response( &self, - rooms_count: u32, + maximum_number_of_rooms: u32, ops: &Vec, - ranges: &Vec<(usize, usize)>, - rooms: &Vec, + ranges: &Vec<(UInt, UInt)>, + updated_rooms: &Vec, ) -> Result { - let current_rooms_count = **self.rooms_count.read().unwrap(); + let ranges = ranges + .iter() + .map(|(start, end)| ((*start).try_into().unwrap(), (*end).try_into().unwrap())) + .collect::>(); - if current_rooms_count.is_none() - || current_rooms_count == Some(0) + let current_maximum_number_of_rooms = **self.maximum_number_of_rooms.read().unwrap(); + + if current_maximum_number_of_rooms.is_none() + || current_maximum_number_of_rooms == Some(0) || self.is_cold.load(Ordering::SeqCst) { debug!("first run, replacing rooms list"); // first response, we do that slightly differently let mut rooms_list = ObservableVector::new(); - rooms_list - .append(iter::repeat(RoomListEntry::Empty).take(rooms_count as usize).collect()); + rooms_list.append( + iter::repeat(RoomListEntry::Empty).take(maximum_number_of_rooms as usize).collect(), + ); // then we apply it - room_ops(&mut rooms_list, ops, ranges)?; + room_ops(&mut rooms_list, ops, &ranges)?; { let mut lock = self.rooms_list.write().unwrap(); @@ -310,7 +326,10 @@ impl SlidingSyncList { lock.append(rooms_list.into_inner()); } - Observable::set(&mut self.rooms_count.write().unwrap(), Some(rooms_count)); + Observable::set( + &mut self.maximum_number_of_rooms.write().unwrap(), + Some(maximum_number_of_rooms), + ); self.is_cold.store(false, Ordering::SeqCst); return Ok(true); @@ -318,7 +337,7 @@ impl SlidingSyncList { debug!("regular update"); - let mut missing = rooms_count + let mut missing = maximum_number_of_rooms .checked_sub(self.rooms_list.read().unwrap().len() as u32) .unwrap_or_default(); let mut changed = false; @@ -339,7 +358,7 @@ impl SlidingSyncList { let mut rooms_list = 
self.rooms_list.write().unwrap(); if !ops.is_empty() { - room_ops(&mut rooms_list, ops, ranges)?; + room_ops(&mut rooms_list, ops, &ranges)?; changed = true; } else { debug!("no rooms operations found"); @@ -347,16 +366,16 @@ impl SlidingSyncList { } { - let mut lock = self.rooms_count.write().unwrap(); + let mut lock = self.maximum_number_of_rooms.write().unwrap(); - if **lock != Some(rooms_count) { - Observable::set(&mut lock, Some(rooms_count)); + if **lock != Some(maximum_number_of_rooms) { + Observable::set(&mut lock, Some(maximum_number_of_rooms)); changed = true; } } - if self.send_updates_for_items && !rooms.is_empty() { - let found_lists = self.find_rooms_in_list(rooms); + if self.send_updates_for_items && !updated_rooms.is_empty() { + let found_lists = self.find_rooms_in_list(updated_rooms); if !found_lists.is_empty() { debug!("room details found"); @@ -396,8 +415,8 @@ impl SlidingSyncList { #[derive(Serialize, Deserialize)] pub(super) struct FrozenSlidingSyncList { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub(super) rooms_count: Option, + #[serde(default, rename = "rooms_count", skip_serializing_if = "Option::is_none")] + pub(super) maximum_number_of_rooms: Option, #[serde(default, skip_serializing_if = "Vector::is_empty")] pub(super) rooms_list: Vector, #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] @@ -428,7 +447,7 @@ impl FrozenSlidingSyncList { } FrozenSlidingSyncList { - rooms_count: **source_list.rooms_count.read().unwrap(), + maximum_number_of_rooms: **source_list.maximum_number_of_rooms.read().unwrap(), rooms_list, rooms, } @@ -601,24 +620,31 @@ fn room_ops( /// The state the [`SlidingSyncList`] is in. /// -/// The lifetime of a SlidingSync usually starts at a `Preload`, getting a fast -/// response for the first given number of Rooms, then switches into -/// `CatchingUp` during which the list fetches the remaining rooms, usually in -/// order, some times in batches. 
Once that is ready, it switches into `Live`. +/// The lifetime of a `SlidingSync` usually starts at a `Loading`, getting a +/// fast response for the first given number of rooms, then switches into +/// `PartiallyLoaded` during which the list fetches the remaining rooms, usually +/// in order, some times in batches. Once that is ready, it switches into +/// `Live`. /// /// If the client has been offline for a while, though, the SlidingSync might -/// return back to `CatchingUp` at any point. +/// return back to `PartiallyLoaded` at any point. #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum SlidingSyncState { - /// Hasn't started yet + /// Sliding Sync has not started to load anything yet. #[default] - Cold, - /// We are quickly preloading a preview of the most important rooms - Preload, - /// We are trying to load all remaining rooms, might be in batches - CatchingUp, - /// We are all caught up and now only sync the live responses. - Live, + #[serde(rename = "Cold")] + NotLoaded, + /// Sliding Sync has been preloaded, i.e. restored from a catch for example. + #[serde(rename = "Preload")] + Preloaded, + /// Updates are received from the loaded rooms, and new rooms are being + /// fetched in the background. + #[serde(rename = "CatchingUp")] + PartiallyLoaded, + /// Updates are received for all the loaded rooms, and all rooms have been + /// loaded! + #[serde(rename = "Live")] + FullyLoaded, } /// The mode by which the the [`SlidingSyncList`] is in fetching the data. diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index 54f407ed8..f3c1cbd3f 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -1,100 +1,157 @@ +//! The logic to generate Sliding Sync list request. +//! +//! Depending on the [`SlidingSyncMode`], the generated requests aren't the +//! same. +//! 
+//! In [`SlidingSyncMode::Selective`], it's pretty straighforward: +//! +//! * There is set of ranges, +//! * Each request asks to load the particular ranges. +//! +//! In [`SlidingSyncMode::PagingFullSync`]: +//! +//! * There is a `batch_size`, +//! * Each request asks to load a new successive range containing exactly +//! `batch_size` rooms. +//! +//! In [`SlidingSyncMode::GrowingFullSync]: +//! +//! * There is a `batch_size`, +//! * Each request asks to load a new range, always starting from 0, but where +//! the end is incremented by `batch_size` everytime. +//! +//! The number of rooms to load is capped by the +//! [`SlidingSyncList::maximum_number_of_rooms`], i.e. the real number of +//! rooms it is possible to load. This value comes from the server. +//! +//! The number of rooms to load can _also_ be capped by the +//! [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`], i.e. a +//! user-specified limit representing the maximum number of rooms the user +//! actually wants to load. + use std::cmp::min; use eyeball::unique::Observable; use ruma::{api::client::sync::sync_events::v4, assign, OwnedRoomId, UInt}; -use tracing::{error, instrument, trace}; +use tracing::{error, instrument}; use super::{Error, SlidingSyncList, SlidingSyncState}; +/// The kind of request generator. +#[derive(Debug)] enum GeneratorKind { - GrowingFullSync { position: u32, batch_size: u32, limit: Option, live: bool }, - PagingFullSync { position: u32, batch_size: u32, limit: Option, live: bool }, + // Growing-mode (see [`SlidingSyncMode`]). + GrowingFullSync { + // Number of fetched rooms. + number_of_fetched_rooms: u32, + // Size of the batch, used to grow the range to fetch more rooms. + batch_size: u32, + // Maximum number of rooms to fetch (see + // [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`]). + maximum_number_of_rooms_to_fetch: Option, + // Whether all rooms have been loaded. + fully_loaded: bool, + }, + + // Paging-mode (see [`SlidingSyncMode`]). 
+ PagingFullSync { + // Number of fetched rooms. + number_of_fetched_rooms: u32, + // Size of the batch, used to grow the range to fetch more rooms. + batch_size: u32, + // Maximum number of rooms to fetch (see + // [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`]). + maximum_number_of_rooms_to_fetch: Option, + // Whether all romms have been loaded. + fully_loaded: bool, + }, + + // Selective-mode (see [`SlidingSyncMode`]). Selective, } +/// A request generator for [`SlidingSyncList`]. +#[derive(Debug)] pub(in super::super) struct SlidingSyncListRequestGenerator { + /// The parent [`SlidingSyncList`] object that has created this request + /// generator. list: SlidingSyncList, - ranges: Vec<(usize, usize)>, + /// The current range used by this request generator. + ranges: Vec<(UInt, UInt)>, + /// The kind of request generator. kind: GeneratorKind, } impl SlidingSyncListRequestGenerator { + /// Create a new request generator configured for paging-mode. pub(super) fn new_with_paging_full_sync(list: SlidingSyncList) -> Self { - let batch_size = list.batch_size; - let limit = list.limit; - let position = list + let batch_size = list.full_sync_batch_size; + let maximum_number_of_rooms_to_fetch = list.full_sync_maximum_number_of_rooms_to_fetch; + // If a range exists, let's consider it's been used to load existing room. So + // let's start from the end of the range. It can be useful when we resume a sync + // for example. Otherwise let's use the default value. 
+ let number_of_fetched_rooms = list .ranges .read() .unwrap() .first() - .map(|(_start, end)| u32::try_from(*end).unwrap()) + .map(|(_start, end)| u32::try_from(*end).unwrap().saturating_add(1)) .unwrap_or_default(); Self { list, - ranges: Default::default(), - kind: GeneratorKind::PagingFullSync { position, batch_size, limit, live: false }, + ranges: Vec::new(), + kind: GeneratorKind::PagingFullSync { + number_of_fetched_rooms, + batch_size, + maximum_number_of_rooms_to_fetch, + fully_loaded: false, + }, } } + /// Create a new request generator configured for growing-mode. pub(super) fn new_with_growing_full_sync(list: SlidingSyncList) -> Self { - let batch_size = list.batch_size; - let limit = list.limit; - let position = list + let batch_size = list.full_sync_batch_size; + let maximum_number_of_rooms_to_fetch = list.full_sync_maximum_number_of_rooms_to_fetch; + // If a range exists, let's consider it's been used to load existing room. So + // let's start from the end of the range. It can be useful when we resume a sync + // for example. Otherwise let's use the default value. + let number_of_fetched_rooms = list .ranges .read() .unwrap() .first() - .map(|(_start, end)| u32::try_from(*end).unwrap()) + .map(|(_start, end)| u32::try_from(*end).unwrap().saturating_add(1)) .unwrap_or_default(); Self { list, - ranges: Default::default(), - kind: GeneratorKind::GrowingFullSync { position, batch_size, limit, live: false }, + ranges: Vec::new(), + kind: GeneratorKind::GrowingFullSync { + number_of_fetched_rooms, + batch_size, + maximum_number_of_rooms_to_fetch, + fully_loaded: false, + }, } } + /// Create a new request generator configured for selective-mode. 
pub(super) fn new_selective(list: SlidingSyncList) -> Self { - Self { list, ranges: Default::default(), kind: GeneratorKind::Selective } + Self { list, ranges: Vec::new(), kind: GeneratorKind::Selective } } - fn prefetch_request( - &mut self, - start: u32, - batch_size: u32, - limit: Option, - ) -> v4::SyncRequestList { - let maximum_end = start + batch_size; - - let mut end = limit.map(|limit| min(limit, maximum_end)).unwrap_or(maximum_end); - - if let Some(rooms_count) = self.list.rooms_count() { - end = min(end, rooms_count - 1); - } - - self.make_request_for_ranges(vec![(start.into(), end.into())]) - } - - #[instrument(skip(self), fields(name = self.list.name))] - fn make_request_for_ranges(&mut self, ranges: Vec<(UInt, UInt)>) -> v4::SyncRequestList { + /// Build a [`SyncRequestList`][v4::SyncRequestList]. + #[instrument(skip(self), fields(name = self.list.name, ranges = ?&self.ranges))] + fn build_request(&self) -> v4::SyncRequestList { let sort = self.list.sort.clone(); let required_state = self.list.required_state.clone(); let timeline_limit = **self.list.timeline_limit.read().unwrap(); let filters = self.list.filters.clone(); - self.ranges = ranges - .iter() - .map(|(a, b)| { - ( - usize::try_from(*a).expect("range is a valid u32"), - usize::try_from(*b).expect("range is a valid u32"), - ) - }) - .collect(); - assign!(v4::SyncRequestList::default(), { - ranges: ranges, + ranges: self.ranges.clone(), room_details: assign!(v4::RoomDetailsConfig::default(), { required_state, timeline_limit, @@ -104,67 +161,151 @@ impl SlidingSyncListRequestGenerator { }) } + // Handle the response from the server. 
#[instrument(skip_all, fields(name = self.list.name, rooms_count, has_ops = !ops.is_empty()))] pub(in super::super) fn handle_response( &mut self, - rooms_count: u32, + maximum_number_of_rooms: u32, ops: &Vec, - rooms: &Vec, + updated_rooms: &Vec, ) -> Result { - let response = self.list.handle_response(rooms_count, ops, &self.ranges, rooms)?; - self.update_state(rooms_count.saturating_sub(1)); // index is 0 based, count is 1 based + let response = + self.list.handle_response(maximum_number_of_rooms, ops, &self.ranges, updated_rooms)?; + + self.update_state(maximum_number_of_rooms); Ok(response) } - fn update_state(&mut self, max_index: u32) { - let Some((_start, range_end)) = self.ranges.first() else { - error!("Why don't we have any ranges?"); + /// Update the state of the generator. + fn update_state(&mut self, maximum_number_of_rooms: u32) { + let Some(range_end) = self.ranges.first().map(|(_start, end)| u32::try_from(*end).unwrap()) else { + error!(name = self.list.name, "The request generator must have a range."); return; }; - let end = if &(max_index as usize) < range_end { max_index } else { *range_end as u32 }; - - trace!(end, max_index, range_end, name = self.list.name, "updating state"); - match &mut self.kind { - GeneratorKind::PagingFullSync { position, live, limit, .. } - | GeneratorKind::GrowingFullSync { position, live, limit, .. } => { - let max = limit.map(|limit| min(limit, max_index)).unwrap_or(max_index); + GeneratorKind::PagingFullSync { + number_of_fetched_rooms, + fully_loaded, + maximum_number_of_rooms_to_fetch, + .. + } + | GeneratorKind::GrowingFullSync { + number_of_fetched_rooms, + fully_loaded, + maximum_number_of_rooms_to_fetch, + .. + } => { + // Calculate the maximum bound for the range. + // At this step, the server has given us a maximum number of rooms for this + // list. That's our `range_maximum`. 
+ let mut range_maximum = maximum_number_of_rooms; - trace!(end, max, name = self.list.name, "updating state"); + // But maybe the user has defined a maximum number of rooms to fetch? In this + // case, let's take the minimum of the two. + if let Some(maximum_number_of_rooms_to_fetch) = maximum_number_of_rooms_to_fetch { + range_maximum = min(range_maximum, *maximum_number_of_rooms_to_fetch); + } - if end >= max { - // Switching to live mode. + // Finally, ranges are inclusive! + range_maximum = range_maximum.saturating_sub(1); - trace!(name = self.list.name, "going live"); + // Now, we know what the maximum bound for the range is. - self.list.set_range(0, max); - *position = max; - *live = true; + // The current range hasn't reached its maximum, let's continue. + if range_end < range_maximum { + // Update the _list range_ to cover from 0 to `range_end`. + // The list range is different from the request generator (this) range. + self.list.set_range(0, range_end); + // Update the number of fetched rooms forward. Do not forget that ranges are + // inclusive, so let's add 1. + *number_of_fetched_rooms = range_end.saturating_add(1); + + // The list is still not fully loaded. + *fully_loaded = false; + + // Finally, let's update the list' state. Observable::update_eq(&mut self.list.state.write().unwrap(), |state| { - *state = SlidingSyncState::Live; + *state = SlidingSyncState::PartiallyLoaded; }); - } else { - *position = end; - *live = false; - self.list.set_range(0, end); + } + // Otherwise the current range has reached its maximum, we switched to `Live` mode. + else { + // The range is covering the entire list, from 0 to its maximum. + self.list.set_range(0, range_maximum); + // The number of fetched rooms is set to the maximum too. + *number_of_fetched_rooms = range_maximum; + + // And we update the `fully_loaded` marker. + *fully_loaded = true; + + // Finally, let's update the list' state. 
Observable::update_eq(&mut self.list.state.write().unwrap(), |state| { - *state = SlidingSyncState::CatchingUp; + *state = SlidingSyncState::FullyLoaded; }); } } GeneratorKind::Selective => { + // Selective mode always loads everything. Observable::update_eq(&mut self.list.state.write().unwrap(), |state| { - *state = SlidingSyncState::Live; + *state = SlidingSyncState::FullyLoaded; }); } } } + + #[cfg(test)] + fn is_fully_loaded(&self) -> bool { + match self.kind { + GeneratorKind::PagingFullSync { fully_loaded, .. } + | GeneratorKind::GrowingFullSync { fully_loaded, .. } => fully_loaded, + GeneratorKind::Selective => true, + } + } +} + +fn create_range( + start: u32, + desired_size: u32, + maximum_number_of_rooms_to_fetch: Option, + maximum_number_of_rooms: Option, +) -> Option<(UInt, UInt)> { + // Calculate the range. + // The `start` bound is given. Let's calculate the `end` bound. + + // The `end`, by default, is `start` + `desired_size`. + let mut end = start + desired_size; + + // But maybe the user has defined a maximum number of rooms to fetch? In this + // case, take the minimum of the two. + if let Some(maximum_number_of_rooms_to_fetch) = maximum_number_of_rooms_to_fetch { + end = min(end, maximum_number_of_rooms_to_fetch); + } + + // But there is more! The server can tell us what is the maximum number of rooms + // fulfilling a particular list. For example, if the server says there is 42 + // rooms for a particular list, with a `start` of 40 and a `batch_size` of 20, + // the range must be capped to `[40; 46]`; the range `[40; 60]` would be invalid + // and could be rejected by the server. + if let Some(maximum_number_of_rooms) = maximum_number_of_rooms { + end = min(end, maximum_number_of_rooms); + } + + // Finally, because the bounds of the range are inclusive, 1 is substracted. + end = end.saturating_sub(1); + + // Make sure `start` is smaller than `end`. 
It can happen if `start` is greater + // than `maximum_number_of_rooms_to_fetch` or `maximum_number_of_rooms`. + if start > end { + return None; + } + + Some((start.into(), end.into())) } impl Iterator for SlidingSyncListRequestGenerator { @@ -172,21 +313,357 @@ impl Iterator for SlidingSyncListRequestGenerator { fn next(&mut self) -> Option { match self.kind { - GeneratorKind::PagingFullSync { live: true, .. } - | GeneratorKind::GrowingFullSync { live: true, .. } + // Cases where all rooms have been fully loaded. + GeneratorKind::PagingFullSync { fully_loaded: true, .. } + | GeneratorKind::GrowingFullSync { fully_loaded: true, .. } | GeneratorKind::Selective => { - let ranges = self.list.ranges.read().unwrap().clone(); + // Let's copy all the ranges from the parent `SlidingSyncList`, and build a + // request for them. + self.ranges = self.list.ranges.read().unwrap().clone(); - Some(self.make_request_for_ranges(ranges)) + // Here we go. + Some(self.build_request()) } - GeneratorKind::PagingFullSync { position, batch_size, limit, .. } => { - Some(self.prefetch_request(position, batch_size, limit)) + GeneratorKind::PagingFullSync { + number_of_fetched_rooms, + batch_size, + maximum_number_of_rooms_to_fetch, + .. + } => { + // In paging-mode, range starts at the number of fetched rooms. Since ranges are + // inclusive, and since the number of fetched rooms starts at 1, + // not at 0, there is no need to add 1 here. + let range_start = number_of_fetched_rooms; + let range_desired_size = batch_size; + + // Create a new range, and use it as the current set of ranges. + self.ranges = vec![create_range( + range_start, + range_desired_size, + maximum_number_of_rooms_to_fetch, + self.list.maximum_number_of_rooms(), + )?]; + + // Here we go. + Some(self.build_request()) } - GeneratorKind::GrowingFullSync { position, batch_size, limit, .. 
} => { - Some(self.prefetch_request(0, position + batch_size, limit)) + GeneratorKind::GrowingFullSync { + number_of_fetched_rooms, + batch_size, + maximum_number_of_rooms_to_fetch, + .. + } => { + // In growing-mode, range always starts from 0. However, the end is growing by + // adding `batch_size` to the previous number of fetched rooms. + let range_start = 0; + let range_desired_size = number_of_fetched_rooms.saturating_add(batch_size); + + self.ranges = vec![create_range( + range_start, + range_desired_size, + maximum_number_of_rooms_to_fetch, + self.list.maximum_number_of_rooms(), + )?]; + + // Here we go. + Some(self.build_request()) } } } } + +#[cfg(test)] +mod tests { + use ruma::uint; + + use super::*; + + #[test] + fn test_create_range_from() { + // From 0, we want 100 items. + assert_eq!(create_range(0, 100, None, None), Some((uint!(0), uint!(99)))); + + // From 100, we want 100 items. + assert_eq!(create_range(100, 100, None, None), Some((uint!(100), uint!(199)))); + + // From 0, we want 100 items, but there is a maximum number of rooms to fetch + // defined at 50. + assert_eq!(create_range(0, 100, Some(50), None), Some((uint!(0), uint!(49)))); + + // From 49, we want 100 items, but there is a maximum number of rooms to fetch + // defined at 50. There is 1 item to load. + assert_eq!(create_range(49, 100, Some(50), None), Some((uint!(49), uint!(49)))); + + // From 50, we want 100 items, but there is a maximum number of rooms to fetch + // defined at 50. + assert_eq!(create_range(50, 100, Some(50), None), None); + + // From 0, we want 100 items, but there is a maximum number of rooms defined at + // 50. + assert_eq!(create_range(0, 100, None, Some(50)), Some((uint!(0), uint!(49)))); + + // From 49, we want 100 items, but there is a maximum number of rooms defined at + // 50. There is 1 item to load. 
+ assert_eq!(create_range(49, 100, None, Some(50)), Some((uint!(49), uint!(49)))); + + // From 50, we want 100 items, but there is a maximum number of rooms defined at + // 50. + assert_eq!(create_range(50, 100, None, Some(50)), None); + + // From 0, we want 100 items, but there is a maximum number of rooms to fetch + // defined at 75, and a maximum number of rooms defined at 50. + assert_eq!(create_range(0, 100, Some(75), Some(50)), Some((uint!(0), uint!(49)))); + + // From 0, we want 100 items, but there is a maximum number of rooms to fetch + // defined at 50, and a maximum number of rooms defined at 75. + assert_eq!(create_range(0, 100, Some(50), Some(75)), Some((uint!(0), uint!(49)))); + } + + macro_rules! assert_request_and_response { + ( + list = $list:ident, + generator = $generator:ident, + maximum_number_of_rooms = $maximum_number_of_rooms:expr, + $( + next => { + ranges = $( [ $range_start:literal ; $range_end:literal ] ),+ , + is_fully_loaded = $is_fully_loaded:expr, + list_state = $list_state:ident, + } + ),* + $(,)* + ) => { + // That's the initial state. + assert_eq!($list.state(), SlidingSyncState::NotLoaded); + + $( + { + let request = $generator.next().unwrap(); + + assert_eq!(request.ranges, [ $( (uint!( $range_start ), uint!( $range_end )) ),* ]); + + // Fake a response. + let _ = $generator.handle_response($maximum_number_of_rooms, &vec![], &vec![]); + + // Now, Sliding Sync has started to load rooms. + assert_eq!($generator.is_fully_loaded(), $is_fully_loaded); + assert_eq!($list.state(), SlidingSyncState::$list_state); + } + )* + }; + } + + #[test] + fn test_generator_paging_full_sync() { + let list = SlidingSyncList::builder() + .sync_mode(crate::SlidingSyncMode::PagingFullSync) + .name("testing") + .full_sync_batch_size(10) + .build() + .unwrap(); + let mut generator = list.request_generator(); + + assert_request_and_response! 
{ + list = list, + generator = generator, + maximum_number_of_rooms = 25, + next => { + ranges = [0; 9], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + next => { + ranges = [10; 19], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + // The maximum number of rooms is reached! + next => { + ranges = [20; 24], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + // Now it's fully loaded, so the same request must be produced everytime. + next => { + ranges = [0; 24], // the range starts at 0 now! + is_fully_loaded = true, + list_state = FullyLoaded, + }, + next => { + ranges = [0; 24], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + }; + } + + #[test] + fn test_generator_paging_full_sync_with_a_maximum_number_of_rooms_to_fetch() { + let list = SlidingSyncList::builder() + .sync_mode(crate::SlidingSyncMode::PagingFullSync) + .name("testing") + .full_sync_batch_size(10) + .full_sync_maximum_number_of_rooms_to_fetch(22) + .build() + .unwrap(); + let mut generator = list.request_generator(); + + assert_request_and_response! { + list = list, + generator = generator, + maximum_number_of_rooms = 25, + next => { + ranges = [0; 9], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + next => { + ranges = [10; 19], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + // The maximum number of rooms to fetch is reached! + next => { + ranges = [20; 21], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + // Now it's fully loaded, so the same request must be produced everytime. + next => { + ranges = [0; 21], // the range starts at 0 now! 
+ is_fully_loaded = true, + list_state = FullyLoaded, + }, + next => { + ranges = [0; 21], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + }; + } + + #[test] + fn test_generator_growing_full_sync() { + let list = SlidingSyncList::builder() + .sync_mode(crate::SlidingSyncMode::GrowingFullSync) + .name("testing") + .full_sync_batch_size(10) + .build() + .unwrap(); + let mut generator = list.request_generator(); + + assert_request_and_response! { + list = list, + generator = generator, + maximum_number_of_rooms = 25, + next => { + ranges = [0; 9], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + next => { + ranges = [0; 19], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + // The maximum number of rooms is reached! + next => { + ranges = [0; 24], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + // Now it's fully loaded, so the same request must be produced everytime. + next => { + ranges = [0; 24], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + next => { + ranges = [0; 24], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + }; + } + + #[test] + fn test_generator_growing_full_sync_with_a_maximum_number_of_rooms_to_fetch() { + let list = SlidingSyncList::builder() + .sync_mode(crate::SlidingSyncMode::GrowingFullSync) + .name("testing") + .full_sync_batch_size(10) + .full_sync_maximum_number_of_rooms_to_fetch(22) + .build() + .unwrap(); + let mut generator = list.request_generator(); + + assert_request_and_response! { + list = list, + generator = generator, + maximum_number_of_rooms = 25, + next => { + ranges = [0; 9], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + next => { + ranges = [0; 19], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + // The maximum number of rooms is reached! + next => { + ranges = [0; 21], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + // Now it's fully loaded, so the same request must be produced everytime. 
+ next => { + ranges = [0; 21], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + next => { + ranges = [0; 21], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + }; + } + + #[test] + fn test_generator_selective() { + let list = SlidingSyncList::builder() + .sync_mode(crate::SlidingSyncMode::Selective) + .name("testing") + .ranges(vec![(0u32, 10), (42, 153)]) + .build() + .unwrap(); + let mut generator = list.request_generator(); + + assert_request_and_response! { + list = list, + generator = generator, + maximum_number_of_rooms = 25, + // The maximum number of rooms is reached directly! + next => { + ranges = [0; 10], [42; 153], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + // Now it's fully loaded, so the same request must be produced everytime. + next => { + ranges = [0; 10], [42; 153], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + next => { + ranges = [0; 10], [42; 153], + is_fully_loaded = true, + list_state = FullyLoaded, + } + }; + } +} diff --git a/crates/matrix-sdk/src/sliding_sync/mod.rs b/crates/matrix-sdk/src/sliding_sync/mod.rs index 6de238261..73bbbf04e 100644 --- a/crates/matrix-sdk/src/sliding_sync/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/mod.rs @@ -110,15 +110,15 @@ //! copy can be retrieved by calling `SlidingSync::list()`, providing the name //! of the list. Next to the configuration settings (like name and //! `timeline_limit`), the list provides the stateful -//! [`rooms_count`](SlidingSyncList::rooms_count), +//! [`maximum_number_of_rooms`](SlidingSyncList::maximum_number_of_rooms), //! [`rooms_list`](SlidingSyncList::rooms_list) and //! [`state`](SlidingSyncList::state): //! -//! - `rooms_count` is the number of rooms _total_ there were found matching -//! the filters given. -//! - `rooms_list` is a vector of `rooms_count` [`RoomListEntry`]'s at the -//! current state. `RoomListEntry`'s only hold `the room_id` if given, the -//! 
[Rooms API](#rooms) holds the actual information about each room +//! - `maximum_number_of_rooms` is the number of rooms _total_ there were found +//! matching the filters given. +//! - `rooms_list` is a vector of `maximum_number_of_rooms` [`RoomListEntry`]'s +//! at the current state. `RoomListEntry`'s only hold `the room_id` if given, +//! the [Rooms API](#rooms) holds the actual information about each room //! - `state` is a [`SlidingSyncMode`] signalling meta information about the //! list and its stateful data — whether this is the state loaded from local //! cache, whether the [full sync](#helper-lists) is in progress or whether @@ -171,11 +171,11 @@ //! //! ### Room List Entries //! -//! As the room list of each list is a vec of the `rooms_count` len but a room -//! may only know of a subset of entries for sure at any given time, these -//! entries are wrapped in [`RoomListEntry`][]. This type, in close proximity to -//! the [specification][MSC], can be either `Empty`, `Filled` or `Invalidated`, -//! signaling the state of each entry position. +//! As the room list of each list is a vec of the `maximum_number_of_rooms` len +//! but a room may only know of a subset of entries for sure at any given time, +//! these entries are wrapped in [`RoomListEntry`][]. This type, in close +//! proximity to the [specification][MSC], can be either `Empty`, `Filled` or +//! `Invalidated`, signaling the state of each entry position. //! - `Empty` we don't know what sits here at this position in the list. //! - `Filled`: there is this `room_id` at this position. //! - `Invalidated` in that sense means that we _knew_ what was here before, but @@ -429,8 +429,9 @@ //! ## Caching //! //! All room data, for filled but also _invalidated_ rooms, including the entire -//! timeline events as well as all list `room_lists` and `rooms_count` are held -//! in memory (unless one `pop`s the list out). +//! timeline events as well as all list `room_lists` and +//! 
`maximum_number_of_rooms` are held in memory (unless one `pop`s the list +//! out). //! //! This is a purely in-memory cache layer though. If one wants Sliding Sync to //! persist and load from cold (storage) cache, one needs to set its key with @@ -511,8 +512,8 @@ //! .required_state(vec![ //! (StateEventType::RoomEncryption, "".to_owned()) //! ]) // only want to know if the room is encrypted -//! .batch_size(50) // grow the window by 50 items at a time -//! .limit(500) // only sync up the top 500 rooms +//! .full_sync_batch_size(50) // grow the window by 50 items at a time +//! .full_sync_maximum_number_of_rooms_to_fetch(500) // only sync up the top 500 rooms //! .build()?; //! //! let active_list = SlidingSyncList::builder() @@ -538,7 +539,7 @@ //! //! let active_list = sliding_sync.list(&active_list_name).unwrap(); //! let list_state_stream = active_list.state_stream(); -//! let list_count_stream = active_list.rooms_count_stream(); +//! let list_count_stream = active_list.maximum_number_of_rooms_stream(); //! let list_stream = active_list.rooms_list_stream(); //! //! tokio::spawn(async move { @@ -585,7 +586,6 @@ //! # }); //! ``` //! -//! //! [MSC]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 //! [proxy]: https://github.com/matrix-org/sliding-sync //! 
[ruma-types]: https://docs.rs/ruma/latest/ruma/api/client/sync/sync_events/v4/index.html @@ -930,7 +930,7 @@ impl SlidingSync { } let update_summary = { - let mut rooms = Vec::new(); + let mut updated_rooms = Vec::new(); let mut rooms_map = self.inner.rooms.write().unwrap(); for (room_id, mut room_data) in sliding_sync_response.rooms.into_iter() { @@ -963,22 +963,26 @@ impl SlidingSync { ); } - rooms.push(room_id); + updated_rooms.push(room_id); } let mut updated_lists = Vec::new(); for (name, updates) in sliding_sync_response.lists { - let Some(generator) = list_generators.get_mut(&name) else { + let Some(list_generator) = list_generators.get_mut(&name) else { error!("Response for list `{name}` - unknown to us; skipping"); continue }; - let count: u32 = + let maximum_number_of_rooms: u32 = updates.count.try_into().expect("the list total count convertible into u32"); - if generator.handle_response(count, &updates.ops, &rooms)? { + if list_generator.handle_response( + maximum_number_of_rooms, + &updates.ops, + &updated_rooms, + )? 
{ updated_lists.push(name.clone()); } } @@ -988,7 +992,7 @@ impl SlidingSync { self.update_to_device_since(to_device.next_batch); } - UpdateSummary { lists: updated_lists, rooms } + UpdateSummary { lists: updated_lists, rooms: updated_rooms } }; Ok(update_summary) @@ -1296,7 +1300,7 @@ pub struct UpdateSummary { #[cfg(test)] mod test { use assert_matches::assert_matches; - use ruma::room_id; + use ruma::{room_id, uint}; use serde_json::json; use wiremock::MockServer; @@ -1325,7 +1329,13 @@ mod test { })) .unwrap(); - list.handle_response(10u32, &vec![full_window_update], &vec![(0, 9)], &vec![]).unwrap(); + list.handle_response( + 10u32, + &vec![full_window_update], + &vec![(uint!(0), uint!(9))], + &vec![], + ) + .unwrap(); let a02 = room_id!("!A00002:matrix.example").to_owned(); let a05 = room_id!("!A00005:matrix.example").to_owned(); @@ -1347,7 +1357,13 @@ mod test { })) .unwrap(); - list.handle_response(10u32, &vec![update], &vec![(0, 3), (8, 9)], &vec![]).unwrap(); + list.handle_response( + 10u32, + &vec![update], + &vec![(uint!(0), uint!(3)), (uint!(8), uint!(9))], + &vec![], + ) + .unwrap(); assert_eq!(list.find_room_in_list(room_id!("!A00002:matrix.example")), Some(2)); assert_eq!(list.find_room_in_list(room_id!("!A00005:matrix.example")), None); diff --git a/labs/jack-in/src/client/mod.rs b/labs/jack-in/src/client/mod.rs index d411211ed..79b180140 100644 --- a/labs/jack-in/src/client/mod.rs +++ b/labs/jack-in/src/client/mod.rs @@ -21,11 +21,12 @@ pub async fn run_client( .timeline_limit(10u32) .sync_mode(config.full_sync_mode.into()); if let Some(size) = config.batch_size { - full_sync_view_builder = full_sync_view_builder.batch_size(size); + full_sync_view_builder = full_sync_view_builder.full_sync_batch_size(size); } if let Some(limit) = config.limit { - full_sync_view_builder = full_sync_view_builder.limit(limit); + full_sync_view_builder = + full_sync_view_builder.full_sync_maximum_number_of_rooms_to_fetch(limit); } if let Some(limit) = 
config.timeline_limit { full_sync_view_builder = full_sync_view_builder.timeline_limit(limit); @@ -66,7 +67,7 @@ pub async fn run_client( let state = view.state(); ssync_state.set_view_state(state.clone()); - if state == SlidingSyncState::Live { + if state == SlidingSyncState::FullyLoaded { info!("Reached live sync"); break; } diff --git a/labs/jack-in/src/client/state.rs b/labs/jack-in/src/client/state.rs index 5f60af891..32ad09253 100644 --- a/labs/jack-in/src/client/state.rs +++ b/labs/jack-in/src/client/state.rs @@ -135,7 +135,7 @@ impl SlidingSyncState { } pub fn total_rooms_count(&self) -> Option { - self.view.rooms_count() + self.view.maximum_number_of_rooms() } pub fn set_first_render_now(&mut self) { diff --git a/testing/sliding-sync-integration-test/src/lib.rs b/testing/sliding-sync-integration-test/src/lib.rs index 90bd8b354..c8ec677db 100644 --- a/testing/sliding-sync-integration-test/src/lib.rs +++ b/testing/sliding-sync-integration-test/src/lib.rs @@ -133,7 +133,7 @@ mod tests { // Get the list to all rooms to check the list' state. let list = sync.list("init_list").context("list `init_list` isn't found")?; - assert_eq!(list.state(), SlidingSyncState::Cold); + assert_eq!(list.state(), SlidingSyncState::NotLoaded); // Send the request and wait for a response. let update_summary = stream @@ -142,7 +142,7 @@ mod tests { .context("No room summary found, loop ended unsuccessfully")??; // Check the state has switched to `Live`. - assert_eq!(list.state(), SlidingSyncState::Live); + assert_eq!(list.state(), SlidingSyncState::FullyLoaded); // One room has received an update. 
assert_eq!(update_summary.rooms.len(), 1); @@ -543,7 +543,7 @@ mod tests { let full = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .batch_size(10u32) + .full_sync_batch_size(10u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("full") .build()?; @@ -552,8 +552,8 @@ mod tests { let list = sync_proxy.list("sliding").context("but we just added that list!")?; let full_list = sync_proxy.list("full").context("but we just added that list!")?; - assert_eq!(list.state(), SlidingSyncState::Cold, "list isn't cold"); - assert_eq!(full_list.state(), SlidingSyncState::Cold, "full isn't cold"); + assert_eq!(list.state(), SlidingSyncState::NotLoaded, "list isn't cold"); + assert_eq!(full_list.state(), SlidingSyncState::NotLoaded, "full isn't cold"); let stream = sync_proxy.stream(); pin_mut!(stream); @@ -564,8 +564,8 @@ mod tests { // we only heard about the ones we had asked for assert_eq!(room_summary.rooms.len(), 11); - assert_eq!(list.state(), SlidingSyncState::Live, "list isn't live"); - assert_eq!(full_list.state(), SlidingSyncState::CatchingUp, "full isn't preloading"); + assert_eq!(list.state(), SlidingSyncState::FullyLoaded, "list isn't live"); + assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); // doing another two requests 0-20; 0-21 should bring full live, too let _room_summary = @@ -574,7 +574,7 @@ mod tests { let rooms_list = full_list.rooms_list::(); assert_eq!(rooms_list, repeat(RoomListEntryEasy::Filled).take(21).collect::>()); - assert_eq!(full_list.state(), SlidingSyncState::Live, "full isn't live yet"); + assert_eq!(full_list.state(), SlidingSyncState::FullyLoaded, "full isn't live yet"); Ok(()) } @@ -844,7 +844,7 @@ mod tests { .build()?; let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .limit(100) + .full_sync_maximum_number_of_rooms_to_fetch(100) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") 
.build()?; @@ -867,7 +867,7 @@ mod tests { sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. let stream = sync_proxy.stream(); pin_mut!(stream); - while growing_sync.state() != SlidingSyncState::Live { + while growing_sync.state() != SlidingSyncState::FullyLoaded { // we wait until growing sync is all done, too println!("awaiting"); let _room_summary = stream @@ -902,7 +902,7 @@ mod tests { let (_client, sync_proxy_builder) = random_setup_with_rooms(50).await?; let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .batch_size(10u32) + .full_sync_batch_size(10u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -954,7 +954,7 @@ mod tests { let (_client, sync_proxy_builder) = random_setup_with_rooms(50).await?; let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .batch_size(10u32) + .full_sync_batch_size(10u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -1014,7 +1014,7 @@ mod tests { print!("setup took its time"); let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .limit(100) + .full_sync_maximum_number_of_rooms_to_fetch(100) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -1096,7 +1096,7 @@ mod tests { print!("setup took its time"); let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .limit(100) + .full_sync_maximum_number_of_rooms_to_fetch(100) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -1111,7 +1111,7 @@ mod tests { let list = sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. 
let stream = sync_proxy.stream(); pin_mut!(stream); - while list.state() != SlidingSyncState::Live { + while list.state() != SlidingSyncState::FullyLoaded { // we wait until growing sync is all done, too println!("awaiting"); let _room_summary = stream @@ -1142,7 +1142,7 @@ mod tests { let summary = room_summary?; // we only heard about the ones we had asked for if summary.lists.iter().any(|s| s == "growing") - && list.rooms_count().unwrap_or_default() == 32 + && list.maximum_number_of_rooms().unwrap_or_default() == 32 { if seen { // once we saw 32, we give it another loop to catch up! From 9a9b1426306e7aed3459e38c378425cd6743471e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?K=C3=A9vin=20Commaille?= Date: Wed, 15 Mar 2023 17:08:22 +0100 Subject: [PATCH 04/43] indexeddb: Use u32 to represent version of crypto store MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Like for the state store, the bindings expose the version as an f64 while the web API expects an unsigned integer. Signed-off-by: Kévin Commaille --- .../matrix-sdk-indexeddb/src/crypto_store.rs | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store.rs b/crates/matrix-sdk-indexeddb/src/crypto_store.rs index 17a2b6014..10116fe15 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store.rs @@ -144,11 +144,14 @@ impl IndexeddbCryptoStore { let name = format!("{prefix:0}::matrix-sdk-crypto"); // Open my_db v1 - let mut db_req: OpenDbRequest = IdbDatabase::open_f64(&name, 2.0)?; + let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, 2)?; db_req.set_on_upgrade_needed(Some(|evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { - let old_version = evt.old_version(); + // Even if the web-sys bindings expose the version as a f64, the IndexedDB API + // works with an unsigned integer. 
+ // See + let old_version = evt.old_version() as u32; - if old_version < 1.0 { + if old_version < 1 { // migrating to version 1 let db = evt.db(); @@ -167,21 +170,19 @@ impl IndexeddbCryptoStore { db.create_object_store(keys::SECRET_REQUESTS_BY_INFO)?; db.create_object_store(keys::BACKUP_KEYS)?; - } else if old_version < 1.1 { + } + + if old_version < 2 { + let db = evt.db(); + // We changed how we store inbound group sessions, the key used to // be a trippled of `(room_id, sender_key, session_id)` now it's a // tuple of `(room_id, session_id)` // // Let's just drop the whole object store. - - let db = evt.db(); - db.delete_object_store(keys::INBOUND_GROUP_SESSIONS)?; db.create_object_store(keys::INBOUND_GROUP_SESSIONS)?; - } - if old_version < 2.0 { - let db = evt.db(); db.create_object_store(keys::ROOM_SETTINGS)?; } @@ -238,9 +239,10 @@ impl IndexeddbCryptoStore { pub async fn open_with_passphrase(prefix: &str, passphrase: &str) -> Result { let name = format!("{prefix:0}::matrix-sdk-crypto-meta"); - let mut db_req: OpenDbRequest = IdbDatabase::open_f64(&name, 1.0)?; + let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, 1)?; db_req.set_on_upgrade_needed(Some(|evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { - if evt.old_version() < 1.0 { + let old_version = evt.old_version() as u32; + if old_version < 1 { // migrating to version 1 let db = evt.db(); From c0f84bb8daab3832aed7760cab7b4da09d33db18 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 15 Mar 2023 17:54:39 +0100 Subject: [PATCH 05/43] test(sdk): Fix tests and improve speed. To improve the speed, we simply reduce the numbers of rooms involved in the test. 
--- .../sliding-sync-integration-test/src/lib.rs | 96 +++++++++++-------- 1 file changed, 55 insertions(+), 41 deletions(-) diff --git a/testing/sliding-sync-integration-test/src/lib.rs b/testing/sliding-sync-integration-test/src/lib.rs index c8ec677db..c7a870bed 100644 --- a/testing/sliding-sync-integration-test/src/lib.rs +++ b/testing/sliding-sync-integration-test/src/lib.rs @@ -67,7 +67,7 @@ impl From<&RoomListEntry> for RoomListEntryEasy { #[cfg(test)] mod tests { use std::{ - iter::repeat, + iter::{once, repeat}, time::{Duration, Instant}, }; @@ -558,7 +558,8 @@ mod tests { let stream = sync_proxy.stream(); pin_mut!(stream); - // exactly one poll! + // Exactly one poll! + // Ranges are 0-10 for selective list, and 0-9 for growing list. let room_summary = stream.next().await.context("No room summary found, loop ended unsuccessfully")??; @@ -567,14 +568,30 @@ mod tests { assert_eq!(list.state(), SlidingSyncState::FullyLoaded, "list isn't live"); assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); - // doing another two requests 0-20; 0-21 should bring full live, too + // Another poll! + // Ranges are 0-10 for selective list, and 0-19 for growing list. let _room_summary = stream.next().await.context("No room summary found, loop ended unsuccessfully")??; let rooms_list = full_list.rooms_list::(); + assert_eq!( + rooms_list, + repeat(RoomListEntryEasy::Filled) + .take(20) + .chain(once(RoomListEntryEasy::Empty)) + .collect::>() + ); + assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); + + // One last poll, and we should get all rooms loaded. 
+ let _room_summary = + stream.next().await.context("No room summary found, loop ended unsecessfully")??; + + let rooms_list = full_list.rooms_list::(); + assert_eq!(rooms_list, repeat(RoomListEntryEasy::Filled).take(21).collect::>()); - assert_eq!(full_list.state(), SlidingSyncState::FullyLoaded, "full isn't live yet"); + assert_eq!(full_list.state(), SlidingSyncState::FullyLoaded, "full isn't fully loaded"); Ok(()) } @@ -899,10 +916,10 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn growing_sync_keeps_going() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(50).await?; + let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?; let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .full_sync_batch_size(10u32) + .full_sync_batch_size(5u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -913,9 +930,9 @@ mod tests { let stream = sync_proxy.stream(); pin_mut!(stream); - // we have 50 and catch up in batches of 10. so let's get over to 20. + // we have 20 and catch up in batches of 5. so let's get over to 15. - for _n in 0..2 { + for _ in 0..=2 { let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let _summary = room_summary?; } @@ -925,25 +942,20 @@ mod tests { assert_eq!( collection_simple, repeat(RoomListEntryEasy::Filled) - .take(21) - .chain(repeat(RoomListEntryEasy::Empty).take(29)) + .take(15) + .chain(repeat(RoomListEntryEasy::Empty).take(5)) .collect::>() ); - // we have 50 and catch up in batches of 10. let's go two more, see it grow. - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let _summary = room_summary?; - } + // we have 20 and catch up in batches of 5. let's go one more, see it grows. 
+ let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let _summary = room_summary?; let collection_simple = list.rooms_list::(); assert_eq!( collection_simple, - repeat(RoomListEntryEasy::Filled) - .take(41) - .chain(repeat(RoomListEntryEasy::Empty).take(9)) - .collect::>() + repeat(RoomListEntryEasy::Filled).take(20).collect::>() ); Ok(()) @@ -951,10 +963,10 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn growing_sync_keeps_going_after_restart() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(50).await?; + let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?; let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .full_sync_batch_size(10u32) + .full_sync_batch_size(5u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -965,9 +977,9 @@ mod tests { let stream = sync_proxy.stream(); pin_mut!(stream); - // we have 50 and catch up in batches of 10. so let's get over to 20. + // we have 20 and catch up in batches of 5. so let's get over to 15. - for _n in 0..2 { + for _ in 0..=2 { let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let _summary = room_summary?; } @@ -980,19 +992,17 @@ mod tests { } else { acc }), - 21 + 15 ); - // we have 50 and catch up in batches of 10. Let's make sure the restart - // continues + // we have 20 and catch up in batches of 5. Let's make sure the restart + // continues. 
let stream = sync_proxy.stream(); pin_mut!(stream); - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let _summary = room_summary?; - } + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let _summary = room_summary?; let collection_simple = list.rooms_list::(); @@ -1002,7 +1012,7 @@ mod tests { } else { acc }), - 41 + 20 ); Ok(()) @@ -1010,10 +1020,11 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn continue_on_reset() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(30).await?; + let (_client, sync_proxy_builder) = random_setup_with_rooms(10).await?; print!("setup took its time"); let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) + .full_sync_batch_size(5u32) .full_sync_maximum_number_of_rooms_to_fetch(100) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") @@ -1030,9 +1041,10 @@ mod tests { let stream = sync_proxy.stream(); pin_mut!(stream); - for _n in 0..2 { + for _ in 0..=2 { let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let summary = room_summary?; + if summary.lists.iter().any(|s| s == "growing") { break; } @@ -1046,14 +1058,14 @@ mod tests { } else { acc }), - 21 + 5 ); // force the pos to be invalid and thus this being reset internally sync_proxy.set_pos("100".to_owned()); let mut error_seen = false; - for _n in 0..2 { + for _ in 0..2 { let summary = match stream.next().await { Some(Ok(e)) => e, Some(Err(e)) => { @@ -1068,6 +1080,7 @@ mod tests { } None => anyhow::bail!("Stream ended unexpectedly."), }; + // we only heard about the ones we had asked for if summary.lists.iter().any(|s| s == "growing") { break; @@ -1084,7 +1097,7 @@ mod tests { } else { acc }), - 30 + 10 ); Ok(()) @@ -1092,10 +1105,11 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn 
noticing_new_rooms_in_growing() -> anyhow::Result<()> { - let (client, sync_proxy_builder) = random_setup_with_rooms(30).await?; + let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?; print!("setup took its time"); let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) + .full_sync_batch_size(10u32) .full_sync_maximum_number_of_rooms_to_fetch(100) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") @@ -1128,7 +1142,7 @@ mod tests { } else { acc }), - 30 + 20 ); // all found. let's add two more. @@ -1142,10 +1156,10 @@ mod tests { let summary = room_summary?; // we only heard about the ones we had asked for if summary.lists.iter().any(|s| s == "growing") - && list.maximum_number_of_rooms().unwrap_or_default() == 32 + && list.maximum_number_of_rooms().unwrap_or_default() == 22 { if seen { - // once we saw 32, we give it another loop to catch up! + // once we saw 22, we give it another loop to catch up! break; } else { seen = true; @@ -1161,7 +1175,7 @@ mod tests { } else { acc }), - 32 + 22 ); Ok(()) From 7a2b1e6e1fca3b4b3844ea721e73960a9cb85324 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 15 Mar 2023 17:59:11 +0100 Subject: [PATCH 06/43] doc(sdk): Fix a typo. --- crates/matrix-sdk/src/sliding_sync/list/request_generator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index f3c1cbd3f..b2fa7934a 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -3,7 +3,7 @@ //! Depending on the [`SlidingSyncMode`], the generated requests aren't the //! same. //! -//! In [`SlidingSyncMode::Selective`], it's pretty straighforward: +//! In [`SlidingSyncMode::Selective`], it's pretty straightforward: //! //! * There is set of ranges, //! 
* Each request asks to load the particular ranges. From ca5cabb7e18cbc2d0e205c5f993815d2654075fa Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 15 Mar 2023 18:08:19 +0100 Subject: [PATCH 07/43] doc(sdk): Fix a typo. --- crates/matrix-sdk/src/sliding_sync/list/request_generator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index b2fa7934a..3d4736f2c 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -296,7 +296,7 @@ fn create_range( end = min(end, maximum_number_of_rooms); } - // Finally, because the bounds of the range are inclusive, 1 is substracted. + // Finally, because the bounds of the range are inclusive, 1 is subtracted. end = end.saturating_sub(1); // Make sure `start` is smaller than `end`. It can happen if `start` is greater From 9d5a1fda3c02d301a2afbe92afc7793ebd2f59ab Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 08:11:45 +0100 Subject: [PATCH 08/43] doc(sdk): Fix typos or improve. --- .../src/sliding_sync/list/builder.rs | 2 +- .../matrix-sdk/src/sliding_sync/list/mod.rs | 33 ++++++++++--------- .../sliding_sync/list/request_generator.rs | 7 ++-- 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/builder.rs b/crates/matrix-sdk/src/sliding_sync/list/builder.rs index 42e629e24..f38022b54 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/builder.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/builder.rs @@ -85,7 +85,7 @@ impl SlidingSyncListBuilder { } /// When doing a full-sync, this method defines the total limit of rooms to - /// load (it can be useful for gigantic account). + /// load (it can be useful for gigantic accounts). 
pub fn full_sync_maximum_number_of_rooms_to_fetch( mut self, value: impl Into>, diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index 8bd338f6b..cef5d232f 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -25,7 +25,7 @@ use super::{Error, FrozenSlidingSyncRoom, SlidingSyncRoom}; use crate::Result; /// Holding a specific filtered list within the concept of sliding sync. -/// Main entrypoint to the SlidingSync +/// Main entrypoint to the `SlidingSync`: /// /// ```no_run /// # use futures::executor::block_on; @@ -76,10 +76,12 @@ pub struct SlidingSyncList { state: Arc>>, /// The total number of rooms that is possible to interact with for the - /// given list. It's not the total rooms that have been fetched. The - /// server tells the client that it's possible to fetch this amount of - /// rooms maximum. Since this number can change according to the list - /// filters, it's observable. + /// given list. + /// + /// It's not the total rooms that have been fetched. The server tells the + /// client that it's possible to fetch this amount of rooms maximum. + /// Since this number can change according to the list filters, it's + /// observable. maximum_number_of_rooms: Arc>>>, /// The rooms in order. @@ -620,14 +622,13 @@ fn room_ops( /// The state the [`SlidingSyncList`] is in. /// -/// The lifetime of a `SlidingSync` usually starts at a `Loading`, getting a -/// fast response for the first given number of rooms, then switches into -/// `PartiallyLoaded` during which the list fetches the remaining rooms, usually -/// in order, some times in batches. Once that is ready, it switches into -/// `Live`. +/// The lifetime of a `SlidingSyncList` ususally starts at `NotLoaded` or +/// `Preloaded` (if it is restored from a cache). When loading rooms in a list, +/// depending of the [`SlidingSyncMode`], it moves to `PartiallyLoaded` or +/// `FullyLoaded`. 
The lifetime of a `SlidingSync` usually starts at a /// -/// If the client has been offline for a while, though, the SlidingSync might -/// return back to `PartiallyLoaded` at any point. +/// If the client has been offline for a while, though, the `SlidingSyncList` +/// might return back to `PartiallyLoaded` at any point. #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum SlidingSyncState { /// Sliding Sync has not started to load anything yet. @@ -647,18 +648,18 @@ pub enum SlidingSyncState { FullyLoaded, } -/// The mode by which the the [`SlidingSyncList`] is in fetching the data. +/// How a [`SlidingSyncList`] fetches the data. #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum SlidingSyncMode { /// Fully sync all rooms in the background, page by page of `batch_size`, - /// like `0..20`, `21..40`, 41..60` etc. assuming the `batch_size` is 20. + /// like `0..19`, `20..39`, 40..59` etc. assuming the `batch_size` is 20. #[serde(alias = "FullSync")] PagingFullSync, /// Fully sync all rooms in the background, with a growing window of - /// `batch_size`, like `0..20`, `0..40`, `0..60` etc. assuming the + /// `batch_size`, like `0..19`, `0..39`, `0..59` etc. assuming the /// `batch_size` is 20. GrowingFullSync, - /// Only sync the specific windows defined + /// Only sync the specific defined windows/ranges. #[default] Selective, } diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index 3d4736f2c..abe193894 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -1,11 +1,11 @@ -//! The logic to generate Sliding Sync list request. +//! The logic to generate Sliding Sync list requests. //! //! Depending on the [`SlidingSyncMode`], the generated requests aren't the //! same. //! //! 
In [`SlidingSyncMode::Selective`], it's pretty straightforward: //! -//! * There is set of ranges, +//! * There is a set of ranges, //! * Each request asks to load the particular ranges. //! //! In [`SlidingSyncMode::PagingFullSync`]: @@ -232,7 +232,8 @@ impl SlidingSyncListRequestGenerator { *state = SlidingSyncState::PartiallyLoaded; }); } - // Otherwise the current range has reached its maximum, we switched to `Live` mode. + // Otherwise the current range has reached its maximum, we switched to `FullyLoaded` + // mode. else { // The range is covering the entire list, from 0 to its maximum. self.list.set_range(0, range_maximum); From 51abedda59c0ac254c6b4f8954e3f8e26c7e1cb7 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 08:21:11 +0100 Subject: [PATCH 09/43] feat(ffi): Update `SlidingStateState` variants. --- bindings/matrix-sdk-ffi/src/api.udl | 17 ++++++++++------- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 2 +- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/bindings/matrix-sdk-ffi/src/api.udl b/bindings/matrix-sdk-ffi/src/api.udl index 833040183..e27b7a384 100644 --- a/bindings/matrix-sdk-ffi/src/api.udl +++ b/bindings/matrix-sdk-ffi/src/api.udl @@ -32,14 +32,17 @@ callback interface SlidingSyncObserver { }; enum SlidingSyncState { - /// Hasn't started yet - "Cold", - /// We are quickly preloading a preview of the most important rooms - "Preload", + /// Sliding Sync has not started to load anything yet. + "NotLoaded", + /// Sliding Sync has been preloaded, i.e. restored from a cache for example. + "Preloaded", /// We are trying to load all remaining rooms, might be in batches - "CatchingUp", - /// We are all caught up and now only sync the live responses. - "Live", + /// Updates are received from the loaded rooms, and new rooms are being fetched + /// in background + "PartiallyLoaded", + /// Updates are received for all the loaded rooms, and all rooms have been + /// loaded! 
+ "FullyLoaded", }; enum SlidingSyncMode { diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index cef5d232f..98f8b4d3f 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -635,7 +635,7 @@ pub enum SlidingSyncState { #[default] #[serde(rename = "Cold")] NotLoaded, - /// Sliding Sync has been preloaded, i.e. restored from a catch for example. + /// Sliding Sync has been preloaded, i.e. restored from a cache for example. #[serde(rename = "Preload")] Preloaded, /// Updates are received from the loaded rooms, and new rooms are being From 07c366983de53d34ae5f133c9ff8ede30fb829e8 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 08:24:05 +0100 Subject: [PATCH 10/43] doc(sdk): Fix a typo. --- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index 98f8b4d3f..ca4594063 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -622,7 +622,7 @@ fn room_ops( /// The state the [`SlidingSyncList`] is in. /// -/// The lifetime of a `SlidingSyncList` ususally starts at `NotLoaded` or +/// The lifetime of a `SlidingSyncList` usually starts at `NotLoaded` or /// `Preloaded` (if it is restored from a cache). When loading rooms in a list, /// depending of the [`SlidingSyncMode`], it moves to `PartiallyLoaded` or /// `FullyLoaded`. The lifetime of a `SlidingSync` usually starts at a From c8bad4b86e39a6a8866593fdc962f8505e141b90 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 08:55:25 +0100 Subject: [PATCH 11/43] doc(sdk): Fix a typo. 
--- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index ca4594063..9859fd424 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -622,7 +622,7 @@ fn room_ops( /// The state the [`SlidingSyncList`] is in. /// -/// The lifetime of a `SlidingSyncList` usually starts at `NotLoaded` or +/// The lifetime of a `SlidingSyncList` usuaslly starts at `NotLoaded` or /// `Preloaded` (if it is restored from a cache). When loading rooms in a list, /// depending of the [`SlidingSyncMode`], it moves to `PartiallyLoaded` or /// `FullyLoaded`. The lifetime of a `SlidingSync` usually starts at a From 5e23bd09bc3bda0144db66cc34fc905386d4a4f9 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 09:01:14 +0100 Subject: [PATCH 12/43] feat(ci): `aarch64-apple-ios` should work on rustc stable. 
--- .github/workflows/bindings_ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bindings_ci.yml b/.github/workflows/bindings_ci.yml index 1c6e22fce..f4969ec59 100644 --- a/.github/workflows/bindings_ci.yml +++ b/.github/workflows/bindings_ci.yml @@ -209,7 +209,7 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Rust - uses: dtolnay/rust-toolchain@nightly + uses: dtolnay/rust-toolchain@stable - name: Install aarch64-apple-ios target run: rustup target install aarch64-apple-ios From 3aa1c30f5cd541eaeb7e40670ceeb50c050ba63d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Damir=20Jeli=C4=87?= Date: Wed, 15 Mar 2023 14:47:40 +0100 Subject: [PATCH 13/43] Re-expose the vodozemac and matrix-sdk-crypto versions in the bindings --- bindings/matrix-sdk-crypto-ffi/src/lib.rs | 3 +++ bindings/matrix-sdk-crypto-js/src/lib.rs | 21 +++++++++++++++++ .../tests/machine.test.js | 17 ++++++++++++-- bindings/matrix-sdk-crypto-nodejs/src/lib.rs | 23 +++++++++++++++++++ .../tests/machine.test.js | 15 +++++++++++- 5 files changed, 76 insertions(+), 3 deletions(-) diff --git a/bindings/matrix-sdk-crypto-ffi/src/lib.rs b/bindings/matrix-sdk-crypto-ffi/src/lib.rs index aad28d805..4b1c0d55c 100644 --- a/bindings/matrix-sdk-crypto-ffi/src/lib.rs +++ b/bindings/matrix-sdk-crypto-ffi/src/lib.rs @@ -840,6 +840,9 @@ fn parse_user_id(user_id: &str) -> Result { } mod uniffi_types { + pub use matrix_sdk_crypto::VERSION; + pub use vodozemac::VERSION as VODOZEMAC_VERSION; + pub use crate::{ backup_recovery_key::{ BackupRecoveryKey, DecodeError, MegolmV1BackupKey, PassphraseInfo, PkDecryptionError, diff --git a/bindings/matrix-sdk-crypto-js/src/lib.rs b/bindings/matrix-sdk-crypto-js/src/lib.rs index afbacff00..4c2067a2b 100644 --- a/bindings/matrix-sdk-crypto-js/src/lib.rs +++ b/bindings/matrix-sdk-crypto-js/src/lib.rs @@ -40,6 +40,27 @@ pub mod vodozemac; use wasm_bindgen::prelude::*; +/// Object containing the versions of the Rust libraries we are 
using. +#[wasm_bindgen(getter_with_clone)] +#[derive(Debug)] +pub struct Versions { + /// The version of the vodozemac crate. + #[wasm_bindgen(readonly)] + pub vodozemac: &'static str, + /// The version of the matrix-sdk-crypto crate. + #[wasm_bindgen(readonly)] + pub matrix_sdk_crypto: &'static str, +} + +/// Get the versions of the Rust libraries we are using. +#[wasm_bindgen(js_name = "getVersions")] +pub fn get_versions() -> Versions { + Versions { + vodozemac: matrix_sdk_crypto::vodozemac::VERSION, + matrix_sdk_crypto: matrix_sdk_crypto::VERSION, + } +} + /// Run some stuff when the Wasm module is instantiated. /// /// Right now, it does the following: diff --git a/bindings/matrix-sdk-crypto-js/tests/machine.test.js b/bindings/matrix-sdk-crypto-js/tests/machine.test.js index 32a0ffa0d..871758e2a 100644 --- a/bindings/matrix-sdk-crypto-js/tests/machine.test.js +++ b/bindings/matrix-sdk-crypto-js/tests/machine.test.js @@ -7,7 +7,6 @@ const { EncryptionSettings, EventId, InboundGroupSession, - KeysClaimRequest, KeysQueryRequest, KeysUploadRequest, MaybeSignature, @@ -16,16 +15,30 @@ const { RequestType, RoomId, RoomMessageRequest, + ShieldColor, SignatureUploadRequest, ToDeviceRequest, UserId, UserIdentity, VerificationRequest, - ShieldColor, + VerificationState, + Versions, + getVersions, } = require("../pkg/matrix_sdk_crypto_js"); const { addMachineToMachine } = require("./helper"); require("fake-indexeddb/auto"); +describe("Versions", () => { + test("can find out the crate versions", async () => { + const versions = getVersions(); + + expect(versions).toBeInstanceOf(Versions) + expect(versions.vodozemac).toBeDefined() + expect(versions.matrix_sdk_crypto).toBeDefined() + }); + +}); + describe(OlmMachine.name, () => { test("can be instantiated with the async initializer", async () => { expect(await OlmMachine.initialize(new UserId("@foo:bar.org"), new DeviceId("baz"))).toBeInstanceOf(OlmMachine); diff --git a/bindings/matrix-sdk-crypto-nodejs/src/lib.rs 
b/bindings/matrix-sdk-crypto-nodejs/src/lib.rs index 26e165fd4..98a90bcea 100644 --- a/bindings/matrix-sdk-crypto-nodejs/src/lib.rs +++ b/bindings/matrix-sdk-crypto-nodejs/src/lib.rs @@ -16,6 +16,8 @@ #![cfg_attr(docsrs, feature(doc_auto_cfg))] //#![warn(missing_docs, missing_debug_implementations)] +use napi_derive::napi; + pub mod attachment; pub mod encryption; mod errors; @@ -31,4 +33,25 @@ pub mod tracing; pub mod types; pub mod vodozemac; +/// Object containing the versions of the Rust libraries we are using. +#[napi(object)] +pub struct Versions { + /// The version of the vodozemac crate. + #[napi(getter)] + pub vodozemac: &'static str, + + /// The version of the matrix-sdk-crypto crate. + #[napi(getter)] + pub matrix_sdk_crypto: &'static str, +} + +/// Get the versions of the Rust libraries we are using. +#[napi(js_name = "getVersions")] +pub fn get_versions() -> Versions { + Versions { + vodozemac: matrix_sdk_crypto::vodozemac::VERSION, + matrix_sdk_crypto: matrix_sdk_crypto::VERSION, + } +} + use crate::errors::into_err; diff --git a/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js b/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js index 90ed77f19..e1223f4aa 100644 --- a/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js +++ b/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js @@ -14,8 +14,10 @@ const { VerificationState, CrossSigningStatus, MaybeSignature, - StoreType, ShieldColor, + StoreType, + Versions, + getVersions, } = require("../"); const path = require("path"); const os = require("os"); @@ -28,6 +30,17 @@ describe("StoreType", () => { }); }); +describe("Versions", () => { + test("can find out the crate versions", async () => { + const versions = getVersions(); + + expect(versions).toBeInstanceOf(Versions) + expect(versions.vodozemac).toBeDefined() + expect(versions.matrix_sdk_crypto).toBeDefined() + }); + +}); + describe(OlmMachine.name, () => { test("cannot be instantiated with the constructor", () => { expect(() => { 
From 26789f22b068057318f079b89351a74b117fdd9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Damir=20Jeli=C4=87?= Date: Wed, 15 Mar 2023 14:51:59 +0100 Subject: [PATCH 14/43] fixup! Re-expose the vodozemac and matrix-sdk-crypto versions in the bindings --- bindings/matrix-sdk-crypto-js/tests/machine.test.js | 7 +++---- bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js | 7 +++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/bindings/matrix-sdk-crypto-js/tests/machine.test.js b/bindings/matrix-sdk-crypto-js/tests/machine.test.js index 871758e2a..37ec94791 100644 --- a/bindings/matrix-sdk-crypto-js/tests/machine.test.js +++ b/bindings/matrix-sdk-crypto-js/tests/machine.test.js @@ -32,11 +32,10 @@ describe("Versions", () => { test("can find out the crate versions", async () => { const versions = getVersions(); - expect(versions).toBeInstanceOf(Versions) - expect(versions.vodozemac).toBeDefined() - expect(versions.matrix_sdk_crypto).toBeDefined() + expect(versions).toBeInstanceOf(Versions); + expect(versions.vodozemac).toBeDefined(); + expect(versions.matrix_sdk_crypto).toBeDefined(); }); - }); describe(OlmMachine.name, () => { diff --git a/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js b/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js index e1223f4aa..50a4a83b9 100644 --- a/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js +++ b/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js @@ -34,11 +34,10 @@ describe("Versions", () => { test("can find out the crate versions", async () => { const versions = getVersions(); - expect(versions).toBeInstanceOf(Versions) - expect(versions.vodozemac).toBeDefined() - expect(versions.matrix_sdk_crypto).toBeDefined() + expect(versions).toBeInstanceOf(Versions); + expect(versions.vodozemac).toBeDefined(); + expect(versions.matrix_sdk_crypto).toBeDefined(); }); - }); describe(OlmMachine.name, () => { From 2f377d536a22ee0362c7e23cc6ce0f44a4a62cc4 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Damir=20Jeli=C4=87?= Date: Wed, 15 Mar 2023 15:35:28 +0100 Subject: [PATCH 15/43] fixup! Re-expose the vodozemac and matrix-sdk-crypto versions in the bindings --- bindings/matrix-sdk-crypto-js/src/lib.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/bindings/matrix-sdk-crypto-js/src/lib.rs b/bindings/matrix-sdk-crypto-js/src/lib.rs index 4c2067a2b..4038ecaa5 100644 --- a/bindings/matrix-sdk-crypto-js/src/lib.rs +++ b/bindings/matrix-sdk-crypto-js/src/lib.rs @@ -38,6 +38,7 @@ pub mod types; pub mod verification; pub mod vodozemac; +use js_sys::JsString; use wasm_bindgen::prelude::*; /// Object containing the versions of the Rust libraries we are using. @@ -46,18 +47,18 @@ use wasm_bindgen::prelude::*; pub struct Versions { /// The version of the vodozemac crate. #[wasm_bindgen(readonly)] - pub vodozemac: &'static str, + pub vodozemac: JsString, /// The version of the matrix-sdk-crypto crate. #[wasm_bindgen(readonly)] - pub matrix_sdk_crypto: &'static str, + pub matrix_sdk_crypto: JsString, } /// Get the versions of the Rust libraries we are using. #[wasm_bindgen(js_name = "getVersions")] pub fn get_versions() -> Versions { Versions { - vodozemac: matrix_sdk_crypto::vodozemac::VERSION, - matrix_sdk_crypto: matrix_sdk_crypto::VERSION, + vodozemac: matrix_sdk_crypto::vodozemac::VERSION.into(), + matrix_sdk_crypto: matrix_sdk_crypto::VERSION.into(), } } From e1f6fd8a1e364b525b3ca6998dffa90890830212 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 09:51:06 +0100 Subject: [PATCH 16/43] feat(ffi): Create the `version` and `vodozemac_version` functions. 
--- bindings/matrix-sdk-crypto-ffi/src/lib.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/bindings/matrix-sdk-crypto-ffi/src/lib.rs b/bindings/matrix-sdk-crypto-ffi/src/lib.rs index 4b1c0d55c..23ff01253 100644 --- a/bindings/matrix-sdk-crypto-ffi/src/lib.rs +++ b/bindings/matrix-sdk-crypto-ffi/src/lib.rs @@ -839,10 +839,17 @@ fn parse_user_id(user_id: &str) -> Result { ruma::UserId::parse(user_id).map_err(|e| CryptoStoreError::InvalidUserId(user_id.to_owned(), e)) } -mod uniffi_types { - pub use matrix_sdk_crypto::VERSION; - pub use vodozemac::VERSION as VODOZEMAC_VERSION; +#[uniffi::export] +fn version() -> String { + matrix_sdk_crypto::VERSION.to_owned() +} +#[uniffi::export] +fn vodozemac_version() -> String { + vodozemac::VERSION.to_owned() +} + +mod uniffi_types { pub use crate::{ backup_recovery_key::{ BackupRecoveryKey, DecodeError, MegolmV1BackupKey, PassphraseInfo, PkDecryptionError, From c2d3afffffe9f3385bb768b97599057aa48b0253 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 10:33:41 +0100 Subject: [PATCH 17/43] feat(crypto-nodejs): Make `Versions` a class, not a JS object. --- bindings/matrix-sdk-crypto-nodejs/src/lib.rs | 10 +++++----- .../matrix-sdk-crypto-nodejs/tests/machine.test.js | 3 ++- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/bindings/matrix-sdk-crypto-nodejs/src/lib.rs b/bindings/matrix-sdk-crypto-nodejs/src/lib.rs index 98a90bcea..540204be7 100644 --- a/bindings/matrix-sdk-crypto-nodejs/src/lib.rs +++ b/bindings/matrix-sdk-crypto-nodejs/src/lib.rs @@ -34,23 +34,23 @@ pub mod types; pub mod vodozemac; /// Object containing the versions of the Rust libraries we are using. -#[napi(object)] +#[napi] pub struct Versions { /// The version of the vodozemac crate. #[napi(getter)] - pub vodozemac: &'static str, + pub vodozemac: String, /// The version of the matrix-sdk-crypto crate. 
#[napi(getter)] - pub matrix_sdk_crypto: &'static str, + pub matrix_sdk_crypto: String, } /// Get the versions of the Rust libraries we are using. #[napi(js_name = "getVersions")] pub fn get_versions() -> Versions { Versions { - vodozemac: matrix_sdk_crypto::vodozemac::VERSION, - matrix_sdk_crypto: matrix_sdk_crypto::VERSION, + vodozemac: matrix_sdk_crypto::vodozemac::VERSION.to_owned(), + matrix_sdk_crypto: matrix_sdk_crypto::VERSION.to_owned(), } } diff --git a/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js b/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js index 50a4a83b9..86b93533b 100644 --- a/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js +++ b/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js @@ -36,8 +36,9 @@ describe("Versions", () => { expect(versions).toBeInstanceOf(Versions); expect(versions.vodozemac).toBeDefined(); - expect(versions.matrix_sdk_crypto).toBeDefined(); + expect(versions.matrixSdkCrypto).toBeDefined(); }); + }); describe(OlmMachine.name, () => { From ccfb66c57622dc33eb27f6a5d0f5955287200942 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 10:39:43 +0100 Subject: [PATCH 18/43] chore(crypto-nodejs): Fix a lint. --- bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js | 1 - 1 file changed, 1 deletion(-) diff --git a/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js b/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js index 86b93533b..0e4ed8929 100644 --- a/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js +++ b/bindings/matrix-sdk-crypto-nodejs/tests/machine.test.js @@ -38,7 +38,6 @@ describe("Versions", () => { expect(versions.vodozemac).toBeDefined(); expect(versions.matrixSdkCrypto).toBeDefined(); }); - }); describe(OlmMachine.name, () => { From 1933fe7a8f0f3e6ea6e253f1d7de6bcefac7de36 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 11:04:18 +0100 Subject: [PATCH 19/43] doc(sdk): Fix a typo. 
--- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index 9859fd424..ca4594063 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -622,7 +622,7 @@ fn room_ops( /// The state the [`SlidingSyncList`] is in. /// -/// The lifetime of a `SlidingSyncList` usuaslly starts at `NotLoaded` or +/// The lifetime of a `SlidingSyncList` usually starts at `NotLoaded` or /// `Preloaded` (if it is restored from a cache). When loading rooms in a list, /// depending of the [`SlidingSyncMode`], it moves to `PartiallyLoaded` or /// `FullyLoaded`. The lifetime of a `SlidingSync` usually starts at a From e23be443457fcf48056a705ef07c54063c604fba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?K=C3=A9vin=20Commaille?= Date: Thu, 16 Mar 2023 10:21:40 +0100 Subject: [PATCH 20/43] sdk: Store OIDC issuer as a String rather than a Url MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The url crate normalizes the string, but during OIDC verification steps, the issuer verification must be made against the exact string that was provided. Signed-off-by: Kévin Commaille --- bindings/matrix-sdk-ffi/src/client.rs | 2 +- crates/matrix-sdk/src/client/builder.rs | 7 +++---- crates/matrix-sdk/src/client/mod.rs | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/bindings/matrix-sdk-ffi/src/client.rs b/bindings/matrix-sdk-ffi/src/client.rs index ef28c730c..96c33168f 100644 --- a/bindings/matrix-sdk-ffi/src/client.rs +++ b/bindings/matrix-sdk-ffi/src/client.rs @@ -251,7 +251,7 @@ impl Client { /// The OIDC Provider that is trusted by the homeserver. `None` when /// not configured. 
pub async fn authentication_issuer(&self) -> Option { - self.client.authentication_issuer().await.map(|server| server.to_string()) + self.client.authentication_issuer().await } /// The sliding sync proxy that is trusted by the homeserver. `None` when diff --git a/crates/matrix-sdk/src/client/builder.rs b/crates/matrix-sdk/src/client/builder.rs index a4ec7370a..d9112b04c 100644 --- a/crates/matrix-sdk/src/client/builder.rs +++ b/crates/matrix-sdk/src/client/builder.rs @@ -378,7 +378,7 @@ impl ClientBuilder { let base_client = BaseClient::with_store_config(store_config); let http_client = HttpClient::new(inner_http_client.clone(), self.request_config); - let mut authentication_issuer: Option = None; + let mut authentication_issuer = None; #[cfg(feature = "experimental-sliding-sync")] let mut sliding_sync_proxy: Option = None; let homeserver = match homeserver_cfg { @@ -402,9 +402,8 @@ impl ClientBuilder { err => ClientBuildError::Http(err), })?; - if let Some(issuer) = well_known.authentication.map(|auth| auth.issuer) { - authentication_issuer = Url::parse(&issuer).ok(); - } + authentication_issuer = well_known.authentication.map(|auth| auth.issuer); + #[cfg(feature = "experimental-sliding-sync")] if let Some(proxy) = well_known.sliding_sync_proxy.map(|p| p.url) { sliding_sync_proxy = Url::parse(&proxy).ok(); diff --git a/crates/matrix-sdk/src/client/mod.rs b/crates/matrix-sdk/src/client/mod.rs index 5d44f88a2..fe4bf926a 100644 --- a/crates/matrix-sdk/src/client/mod.rs +++ b/crates/matrix-sdk/src/client/mod.rs @@ -145,7 +145,7 @@ pub(crate) struct ClientInner { /// The URL of the homeserver to connect to. homeserver: RwLock, /// The OIDC Provider that is trusted by the homeserver. - authentication_issuer: Option>, + authentication_issuer: Option>, /// The sliding sync proxy that is trusted by the homeserver. 
#[cfg(feature = "experimental-sliding-sync")] sliding_sync_proxy: Option>, @@ -330,7 +330,7 @@ impl Client { } /// The OIDC Provider that is trusted by the homeserver. - pub async fn authentication_issuer(&self) -> Option { + pub async fn authentication_issuer(&self) -> Option { let server = self.inner.authentication_issuer.as_ref()?; Some(server.read().await.clone()) } From 46d8d26b71837d1fc13f9be1ee1f6c17cdcf1d29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?K=C3=A9vin=20Commaille?= Date: Thu, 16 Mar 2023 10:22:27 +0100 Subject: [PATCH 21/43] sdk: Fix a typo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Kévin Commaille --- crates/matrix-sdk/src/client/builder.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/client/builder.rs b/crates/matrix-sdk/src/client/builder.rs index d9112b04c..3cdd0394b 100644 --- a/crates/matrix-sdk/src/client/builder.rs +++ b/crates/matrix-sdk/src/client/builder.rs @@ -408,7 +408,10 @@ impl ClientBuilder { if let Some(proxy) = well_known.sliding_sync_proxy.map(|p| p.url) { sliding_sync_proxy = Url::parse(&proxy).ok(); } - debug!(homserver_url = well_known.homeserver.base_url, "Discovered the homeserver"); + debug!( + homeserver_url = well_known.homeserver.base_url, + "Discovered the homeserver" + ); well_known.homeserver.base_url } From 5f06dc8229edc6cdcc2ccc74d852f097a6fd91ed Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 11:53:01 +0100 Subject: [PATCH 22/43] doc(sdk): Fix a typo. 
--- crates/matrix-sdk/src/sliding_sync/list/request_generator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index abe193894..7fa7b285b 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -441,6 +441,7 @@ mod tests { $( { + // Generate a new request. let request = $generator.next().unwrap(); assert_eq!(request.ranges, [ $( (uint!( $range_start ), uint!( $range_end )) ),* ]); @@ -448,7 +449,6 @@ mod tests { // Fake a response. let _ = $generator.handle_response($maximum_number_of_rooms, &vec![], &vec![]); - // Now, Sliding Sync has started to load rooms. assert_eq!($generator.is_fully_loaded(), $is_fully_loaded); assert_eq!($list.state(), SlidingSyncState::$list_state); } From e6bf74b7db381013134f2c628f5e18218f660787 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 16 Mar 2023 13:47:55 +0000 Subject: [PATCH 23/43] matrix-sdk-crypto-js v0.1.0-alpha.5 --- bindings/matrix-sdk-crypto-js/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/matrix-sdk-crypto-js/package.json b/bindings/matrix-sdk-crypto-js/package.json index 47c5bb57c..12b5f1a75 100644 --- a/bindings/matrix-sdk-crypto-js/package.json +++ b/bindings/matrix-sdk-crypto-js/package.json @@ -1,6 +1,6 @@ { "name": "@matrix-org/matrix-sdk-crypto-js", - "version": "0.1.0-alpha.4", + "version": "0.1.0-alpha.5", "homepage": "https://github.com/matrix-org/matrix-rust-sdk", "description": "Matrix encryption library, for JavaScript", "license": "Apache-2.0", From b39a224be82efdd3b4b3433ef5554319f93d4855 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 17:03:25 +0100 Subject: [PATCH 24/43] doc(sdk): Use the Rust range notation to avoid ambiguity. 
--- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 4 ++-- testing/sliding-sync-integration-test/src/lib.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index ca4594063..5f7079520 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -652,11 +652,11 @@ pub enum SlidingSyncState { #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum SlidingSyncMode { /// Fully sync all rooms in the background, page by page of `batch_size`, - /// like `0..19`, `20..39`, 40..59` etc. assuming the `batch_size` is 20. + /// like `0..=19`, `20..=39`, 40..=59` etc. assuming the `batch_size` is 20. #[serde(alias = "FullSync")] PagingFullSync, /// Fully sync all rooms in the background, with a growing window of - /// `batch_size`, like `0..19`, `0..39`, `0..59` etc. assuming the + /// `batch_size`, like `0..=19`, `0..=39`, `0..=59` etc. assuming the /// `batch_size` is 20. GrowingFullSync, /// Only sync the specific defined windows/ranges. diff --git a/testing/sliding-sync-integration-test/src/lib.rs b/testing/sliding-sync-integration-test/src/lib.rs index c7a870bed..d8d10b783 100644 --- a/testing/sliding-sync-integration-test/src/lib.rs +++ b/testing/sliding-sync-integration-test/src/lib.rs @@ -559,7 +559,7 @@ mod tests { pin_mut!(stream); // Exactly one poll! - // Ranges are 0-10 for selective list, and 0-9 for growing list. + // Ranges are 0..=9 for selective list, and 0..=9 for growing list. let room_summary = stream.next().await.context("No room summary found, loop ended unsuccessfully")??; @@ -569,7 +569,7 @@ mod tests { assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); // Another poll! - // Ranges are 0-10 for selective list, and 0-19 for growing list. + // Ranges are 0..=10 for selective list, and 0..=19 for growing list. 
let _room_summary = stream.next().await.context("No room summary found, loop ended unsuccessfully")??; From 42dce635de18aca5965ed172d4122fd73a950056 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 16 Mar 2023 17:49:52 +0000 Subject: [PATCH 25/43] matrix-sdk-crypto-js: drop `v` from release tags Currently, the tags used for releases of this binding have the format: "matrix-sdk-crypto-js-v0.1.0-alpha.5". This is inconsistent with tags used for other parts of the project, which omit the `v` prefix. --- bindings/matrix-sdk-crypto-js/.yarnrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/matrix-sdk-crypto-js/.yarnrc b/bindings/matrix-sdk-crypto-js/.yarnrc index 212068c24..03fd995e9 100644 --- a/bindings/matrix-sdk-crypto-js/.yarnrc +++ b/bindings/matrix-sdk-crypto-js/.yarnrc @@ -1,2 +1,2 @@ -version-tag-prefix "matrix-sdk-crypto-js-v" +version-tag-prefix "matrix-sdk-crypto-js-" version-git-message "matrix-sdk-crypto-js v%s" From a7ed8e0b45035277e8ebb65989a43e2e962596af Mon Sep 17 00:00:00 2001 From: Alfonso Grillo Date: Mon, 20 Mar 2023 12:08:21 +0100 Subject: [PATCH 26/43] Update account_data when creating a new dm room --- .../src/identities/device.rs | 1 + crates/matrix-sdk/src/account.rs | 41 ++++++++++++++++++- crates/matrix-sdk/src/client/mod.rs | 22 +++++++--- crates/matrix-sdk/src/encryption/mod.rs | 22 +--------- 4 files changed, 60 insertions(+), 26 deletions(-) diff --git a/crates/matrix-sdk-crypto/src/identities/device.rs b/crates/matrix-sdk-crypto/src/identities/device.rs index 7d2ea5c12..d33d9159a 100644 --- a/crates/matrix-sdk-crypto/src/identities/device.rs +++ b/crates/matrix-sdk-crypto/src/identities/device.rs @@ -928,6 +928,7 @@ pub(crate) mod tests { } #[test] + #[allow(clippy::redundant_clone)] fn delete_a_device() { let device = get_device(); assert!(!device.is_deleted()); diff --git a/crates/matrix-sdk/src/account.rs b/crates/matrix-sdk/src/account.rs index 9579106f1..3550bc0bc 100644 --- 
a/crates/matrix-sdk/src/account.rs +++ b/crates/matrix-sdk/src/account.rs @@ -39,7 +39,7 @@ use ruma::{ }, serde::Raw, thirdparty::Medium, - ClientSecret, MxcUri, OwnedMxcUri, SessionId, UInt, + ClientSecret, MxcUri, OwnedMxcUri, OwnedUserId, RoomId, SessionId, UInt, }; use serde::Deserialize; @@ -747,6 +747,45 @@ impl Account { Ok(self.client.send(request, None).await?) } + + /// Marks the given room with `room_id` as "direct chat" with with any + /// user in `user_ids`. + /// + /// This is done adding new the `room_id` to the list of DM + /// chats for any user id in `user_ids`. + /// + /// # Arguments + /// + /// * `room_id` - The room id of the DM room. + /// * `user_ids` - The user ids of the invitees for the DM room. + pub(crate) async fn mark_as_dm( + &self, + room_id: &RoomId, + user_ids: &[OwnedUserId], + ) -> Result<()> { + use ruma::events::direct::DirectEventContent; + + // Now we need to mark the room as a DM for ourselves, we fetch the + // existing `m.direct` event and append the room to the list of DMs we + // have with this user. + let mut content = self + .account_data::() + .await? + .map(|c| c.deserialize()) + .transpose()? + .unwrap_or_default(); + + for user_id in user_ids { + content.entry(user_id.to_owned()).or_default().push(room_id.to_owned()); + } + + // TODO We should probably save the fact that we need to send this out + // because otherwise we might end up in a state where we have a DM that + // isn't marked as one. 
+ self.set_account_data(content).await?; + + Ok(()) + } } fn get_raw_content(raw: Option>) -> Result>> { diff --git a/crates/matrix-sdk/src/client/mod.rs b/crates/matrix-sdk/src/client/mod.rs index fe4bf926a..19ec63b65 100644 --- a/crates/matrix-sdk/src/client/mod.rs +++ b/crates/matrix-sdk/src/client/mod.rs @@ -71,9 +71,7 @@ use serde::de::DeserializeOwned; use tokio::sync::broadcast; #[cfg(not(target_arch = "wasm32"))] use tokio::sync::OnceCell; -#[cfg(feature = "e2e-encryption")] -use tracing::error; -use tracing::{debug, field::display, info, instrument, trace, Instrument, Span}; +use tracing::{debug, error, field::display, info, instrument, trace, Instrument, Span}; use url::Url; #[cfg(feature = "e2e-encryption")] @@ -1691,12 +1689,26 @@ impl Client { /// assert!(client.create_room(request).await.is_ok()); /// # }); /// ``` - pub async fn create_room(&self, request: create_room::v3::Request) -> HttpResult { + pub async fn create_room(&self, request: create_room::v3::Request) -> Result { + let invite = request.invite.clone(); + let is_direct_room = request.is_direct; let response = self.send(request, None).await?; let base_room = self.base_client().get_or_create_room(&response.room_id, RoomState::Joined).await; - Ok(room::Joined::new(self, base_room).unwrap()) + + let joined_room = room::Joined::new(self, base_room).unwrap(); + + if is_direct_room && !invite.is_empty() { + if let Err(error) = + self.account().mark_as_dm(joined_room.room_id(), invite.as_slice()).await + { + // FIXME: Retry in the background + error!("Failed to mark room as DM: {error}"); + } + } + + Ok(joined_room) } /// Search the homeserver's directory for public rooms with a filter. 
diff --git a/crates/matrix-sdk/src/encryption/mod.rs b/crates/matrix-sdk/src/encryption/mod.rs index dea2e59aa..a7a7a7fb0 100644 --- a/crates/matrix-sdk/src/encryption/mod.rs +++ b/crates/matrix-sdk/src/encryption/mod.rs @@ -254,9 +254,7 @@ impl Client { #[cfg(feature = "e2e-encryption")] pub(crate) async fn create_dm_room(&self, user_id: OwnedUserId) -> Result { - use ruma::{ - api::client::room::create_room::v3::RoomPreset, events::direct::DirectEventContent, - }; + use ruma::api::client::room::create_room::v3::RoomPreset; // First we create the DM room, where we invite the user and tell the // invitee that the room should be a DM. @@ -270,23 +268,7 @@ impl Client { let room = self.create_room(request).await?; - // Now we need to mark the room as a DM for ourselves, we fetch the - // existing `m.direct` event and append the room to the list of DMs we - // have with this user. - let mut content = self - .account() - .account_data::() - .await? - .map(|c| c.deserialize()) - .transpose()? - .unwrap_or_default(); - - content.entry(user_id.to_owned()).or_default().push(room.room_id().to_owned()); - - // TODO We should probably save the fact that we need to send this out - // because otherwise we might end up in a state where we have a DM that - // isn't marked as one. 
- self.account().set_account_data(content).await?; + self.account().mark_as_dm(room.room_id(), &[user_id]).await?; Ok(room) } From 39a4dc911f4145b500fb082fb7cbeb2285ea525f Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 20 Mar 2023 13:03:53 +0100 Subject: [PATCH 27/43] Remove event contents from (Sync)TimelineEvent Debug impls --- .../src/deserialized_responses.rs | 86 +++++++++++++++++-- 1 file changed, 77 insertions(+), 9 deletions(-) diff --git a/crates/matrix-sdk-common/src/deserialized_responses.rs b/crates/matrix-sdk-common/src/deserialized_responses.rs index e7a91c864..4c86d993e 100644 --- a/crates/matrix-sdk-common/src/deserialized_responses.rs +++ b/crates/matrix-sdk-common/src/deserialized_responses.rs @@ -1,4 +1,4 @@ -use std::collections::BTreeMap; +use std::{collections::BTreeMap, fmt}; use ruma::{ events::{AnySyncTimelineEvent, AnyTimelineEvent}, @@ -180,7 +180,7 @@ pub struct EncryptionInfo { /// A customized version of a room event coming from a sync that holds optional /// encryption info. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Deserialize, Serialize)] pub struct SyncTimelineEvent { /// The actual event. pub event: Raw, @@ -193,6 +193,14 @@ pub struct SyncTimelineEvent { } impl SyncTimelineEvent { + /// Create a new `SyncTimelineEvent` from the given raw event. + /// + /// This is a convenience constructor for when you don't need to set + /// `encryption_info` or `push_action`, for example inside a test. + pub fn new(event: Raw) -> Self { + Self { event, encryption_info: None, push_actions: vec![] } + } + /// Get the event id of this `SyncTimelineEvent` if the event has any valid /// id. 
pub fn event_id(&self) -> Option { @@ -200,6 +208,18 @@ impl SyncTimelineEvent { } } +#[cfg(not(tarpaulin_include))] +impl fmt::Debug for SyncTimelineEvent { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let SyncTimelineEvent { event, encryption_info, push_actions } = self; + f.debug_struct("SyncTimelineEvent") + .field("event", &DebugRawEvent(event)) + .field("encryption_info", encryption_info) + .field("push_actions", push_actions) + .finish() + } +} + impl From> for SyncTimelineEvent { fn from(inner: Raw) -> Self { Self { encryption_info: None, event: inner, push_actions: Vec::default() } @@ -220,7 +240,7 @@ impl From for SyncTimelineEvent { } } -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct TimelineEvent { /// The actual event. pub event: Raw, @@ -241,6 +261,42 @@ impl TimelineEvent { } } +#[cfg(not(tarpaulin_include))] +impl fmt::Debug for TimelineEvent { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let TimelineEvent { event, encryption_info, push_actions } = self; + f.debug_struct("TimelineEvent") + .field("event", &DebugRawEvent(event)) + .field("encryption_info", encryption_info) + .field("push_actions", push_actions) + .finish() + } +} + +struct DebugRawEvent<'a, T>(&'a Raw); + +#[cfg(not(tarpaulin_include))] +impl fmt::Debug for DebugRawEvent<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawEvent") + .field("event_id", &DebugEventId(self.0.get_field("event_id"))) + .finish_non_exhaustive() + } +} + +struct DebugEventId(serde_json::Result>); + +#[cfg(not(tarpaulin_include))] +impl fmt::Debug for DebugEventId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + Ok(Some(id)) => id.fmt(f), + Ok(None) => f.write_str("Missing"), + Err(e) => f.debug_tuple("Invalid").field(&e).finish(), + } + } +} + #[cfg(test)] mod tests { use ruma::{ @@ -251,18 +307,30 @@ mod tests { use super::{SyncTimelineEvent, TimelineEvent}; - #[test] - fn 
room_event_to_sync_room_event() { - let event = json!({ - "content": RoomMessageEventContent::text_plain("foobar"), + fn example_event() -> serde_json::Value { + json!({ + "content": RoomMessageEventContent::text_plain("secret"), "type": "m.room.message", "event_id": "$xxxxx:example.org", "room_id": "!someroom:example.com", "origin_server_ts": 2189, "sender": "@carl:example.com", - }); + }) + } - let room_event = TimelineEvent::new(Raw::new(&event).unwrap().cast()); + #[test] + fn sync_timeline_debug_content() { + let room_event = SyncTimelineEvent::new(Raw::new(&example_event()).unwrap().cast()); + let debug_s = format!("{room_event:?}"); + assert!( + !debug_s.contains("secret"), + "Debug representation contains event content!\n{debug_s}" + ); + } + + #[test] + fn room_event_to_sync_room_event() { + let room_event = TimelineEvent::new(Raw::new(&example_event()).unwrap().cast()); let converted_room_event: SyncTimelineEvent = room_event.into(); let converted_event: AnySyncTimelineEvent = From 82d1d64f856246ce2610b23e29e579c792ae9b14 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 20 Mar 2023 12:17:51 +0100 Subject: [PATCH 28/43] sdk: Simplify create_dm_room It doesn't need to call mark_as_dm itself as create_room will do that. --- crates/matrix-sdk/src/encryption/mod.rs | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/crates/matrix-sdk/src/encryption/mod.rs b/crates/matrix-sdk/src/encryption/mod.rs index a7a7a7fb0..20d788aa9 100644 --- a/crates/matrix-sdk/src/encryption/mod.rs +++ b/crates/matrix-sdk/src/encryption/mod.rs @@ -254,23 +254,14 @@ impl Client { #[cfg(feature = "e2e-encryption")] pub(crate) async fn create_dm_room(&self, user_id: OwnedUserId) -> Result { - use ruma::api::client::room::create_room::v3::RoomPreset; + use ruma::api::client::room::create_room; - // First we create the DM room, where we invite the user and tell the - // invitee that the room should be a DM. 
- let invite = vec![user_id.clone()]; - - let request = assign!(ruma::api::client::room::create_room::v3::Request::new(), { - invite, + self.create_room(assign!(create_room::v3::Request::new(), { + invite: vec![user_id.clone()], is_direct: true, - preset: Some(RoomPreset::TrustedPrivateChat), - }); - - let room = self.create_room(request).await?; - - self.account().mark_as_dm(room.room_id(), &[user_id]).await?; - - Ok(room) + preset: Some(create_room::v3::RoomPreset::TrustedPrivateChat), + })) + .await } /// Claim one-time keys creating new Olm sessions. From 9d6e192b9f7fc777ac48147a35a9c1c89396b636 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 20 Mar 2023 12:20:07 +0100 Subject: [PATCH 29/43] sdk: Move create_dm_room out of encryption module Creating DM rooms makes sense for non-encryption-capable client as well. --- crates/matrix-sdk/src/client/mod.rs | 9 +++++++++ crates/matrix-sdk/src/encryption/identities/users.rs | 2 +- crates/matrix-sdk/src/encryption/mod.rs | 12 ------------ 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/crates/matrix-sdk/src/client/mod.rs b/crates/matrix-sdk/src/client/mod.rs index 19ec63b65..dfc8ba118 100644 --- a/crates/matrix-sdk/src/client/mod.rs +++ b/crates/matrix-sdk/src/client/mod.rs @@ -1711,6 +1711,15 @@ impl Client { Ok(joined_room) } + pub(crate) async fn create_dm_room(&self, user_id: &UserId) -> Result { + self.create_room(assign!(create_room::v3::Request::new(), { + invite: vec![user_id.to_owned()], + is_direct: true, + preset: Some(create_room::v3::RoomPreset::TrustedPrivateChat), + })) + .await + } + /// Search the homeserver's directory for public rooms with a filter. 
/// /// # Arguments diff --git a/crates/matrix-sdk/src/encryption/identities/users.rs b/crates/matrix-sdk/src/encryption/identities/users.rs index d4f02c67d..a4179eddc 100644 --- a/crates/matrix-sdk/src/encryption/identities/users.rs +++ b/crates/matrix-sdk/src/encryption/identities/users.rs @@ -477,7 +477,7 @@ impl OtherUserIdentity { } room.clone() } else { - self.client.create_dm_room(self.inner.user_id().to_owned()).await? + self.client.create_dm_room(self.inner.user_id()).await? }; let response = room diff --git a/crates/matrix-sdk/src/encryption/mod.rs b/crates/matrix-sdk/src/encryption/mod.rs index 20d788aa9..1222a65ce 100644 --- a/crates/matrix-sdk/src/encryption/mod.rs +++ b/crates/matrix-sdk/src/encryption/mod.rs @@ -252,18 +252,6 @@ impl Client { }) } - #[cfg(feature = "e2e-encryption")] - pub(crate) async fn create_dm_room(&self, user_id: OwnedUserId) -> Result { - use ruma::api::client::room::create_room; - - self.create_room(assign!(create_room::v3::Request::new(), { - invite: vec![user_id.clone()], - is_direct: true, - preset: Some(create_room::v3::RoomPreset::TrustedPrivateChat), - })) - .await - } - /// Claim one-time keys creating new Olm sessions. /// /// # Arguments From 130dc58a5d8f378ab6e9681d1dbbbc2bad1076ca Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 20 Mar 2023 12:24:14 +0100 Subject: [PATCH 30/43] Remove redundant cfg attributes The whole encryption module is only enabled with e2e-encryption. 
--- crates/matrix-sdk/src/encryption/mod.rs | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/crates/matrix-sdk/src/encryption/mod.rs b/crates/matrix-sdk/src/encryption/mod.rs index 1222a65ce..9db51849d 100644 --- a/crates/matrix-sdk/src/encryption/mod.rs +++ b/crates/matrix-sdk/src/encryption/mod.rs @@ -38,8 +38,6 @@ pub use matrix_sdk_base::crypto::{ use matrix_sdk_base::crypto::{ CrossSigningStatus, OutgoingRequest, RoomMessageRequest, ToDeviceRequest, }; -#[cfg(feature = "e2e-encryption")] -use ruma::OwnedDeviceId; use ruma::{ api::client::{ backup::add_backup_keys::v3::Response as KeysBackupResponse, @@ -52,7 +50,7 @@ use ruma::{ }, uiaa::AuthData, }, - assign, DeviceId, OwnedUserId, TransactionId, UserId, + assign, DeviceId, OwnedDeviceId, OwnedUserId, TransactionId, UserId, }; use tracing::{debug, instrument, trace, warn}; @@ -68,12 +66,10 @@ use crate::{ }; impl Client { - #[cfg(feature = "e2e-encryption")] pub(crate) fn olm_machine(&self) -> Option<&matrix_sdk_base::crypto::OlmMachine> { self.base_client().olm_machine() } - #[cfg(feature = "e2e-encryption")] pub(crate) async fn mark_request_as_sent( &self, request_id: &TransactionId, @@ -93,7 +89,6 @@ impl Client { /// # Panics /// /// Panics if no key query needs to be done. - #[cfg(feature = "e2e-encryption")] #[instrument(skip(self))] pub(crate) async fn keys_query( &self, @@ -140,7 +135,6 @@ impl Client { /// room.send(CustomEventContent { encrypted_file }, None).await?; /// # anyhow::Ok(()) }); /// ``` - #[cfg(feature = "e2e-encryption")] pub async fn prepare_encrypted_file<'a, R: Read + ?Sized + 'a>( &self, content_type: &mime::Mime, @@ -170,7 +164,6 @@ impl Client { /// Encrypt and upload the file to be read from `reader` and construct an /// attachment message with `body`, `content_type`, `info` and `thumbnail`. 
- #[cfg(feature = "e2e-encryption")] pub(crate) async fn prepare_encrypted_attachment_message( &self, body: &str, @@ -257,7 +250,6 @@ impl Client { /// # Arguments /// /// * `users` - The list of user/device pairs that we should claim keys for. - #[cfg(feature = "e2e-encryption")] pub(crate) async fn claim_one_time_keys( &self, users: impl Iterator, @@ -286,7 +278,6 @@ impl Client { /// /// Panics if the client isn't logged in, or if no encryption keys need to /// be uploaded. - #[cfg(feature = "e2e-encryption")] #[instrument(skip(self, request))] pub(crate) async fn keys_upload( &self, @@ -305,7 +296,6 @@ impl Client { Ok(response) } - #[cfg(feature = "e2e-encryption")] pub(crate) async fn room_send_helper( &self, request: &RoomMessageRequest, @@ -320,7 +310,6 @@ impl Client { .await } - #[cfg(feature = "e2e-encryption")] pub(crate) async fn send_to_device( &self, request: &ToDeviceRequest, @@ -334,7 +323,6 @@ impl Client { self.send(request, None).await } - #[cfg(feature = "e2e-encryption")] pub(crate) async fn send_verification_request( &self, request: matrix_sdk_base::crypto::OutgoingVerificationRequest, @@ -351,7 +339,6 @@ impl Client { Ok(()) } - #[cfg(feature = "e2e-encryption")] fn get_dm_room(&self, user_id: &UserId) -> Option { let rooms = self.joined_rooms(); @@ -444,14 +431,12 @@ impl Client { /// A high-level API to manage the client's encryption. /// /// To get this, use [`Client::encryption()`]. -#[cfg(feature = "e2e-encryption")] #[derive(Debug, Clone)] pub struct Encryption { /// The underlying client. 
client: Client, } -#[cfg(feature = "e2e-encryption")] impl Encryption { pub(crate) fn new(client: Client) -> Self { Self { client } From ea41076c8261f5500c02b3161ea096adc82acc6e Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 20 Mar 2023 13:13:00 +0100 Subject: [PATCH 31/43] sdk: Rename create_dm_room to create_dm and make it public --- crates/matrix-sdk/src/client/mod.rs | 7 ++++++- crates/matrix-sdk/src/encryption/identities/users.rs | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/matrix-sdk/src/client/mod.rs b/crates/matrix-sdk/src/client/mod.rs index dfc8ba118..743aec664 100644 --- a/crates/matrix-sdk/src/client/mod.rs +++ b/crates/matrix-sdk/src/client/mod.rs @@ -1711,7 +1711,12 @@ impl Client { Ok(joined_room) } - pub(crate) async fn create_dm_room(&self, user_id: &UserId) -> Result { + /// Create a DM room. + /// + /// Convenience shorthand for [`create_room`][Self::create_room] with the + /// given user being invited, the room marked `is_direct` and both the + /// creator and invitee getting the default maximum power level. + pub async fn create_dm(&self, user_id: &UserId) -> Result { self.create_room(assign!(create_room::v3::Request::new(), { invite: vec![user_id.to_owned()], is_direct: true, diff --git a/crates/matrix-sdk/src/encryption/identities/users.rs b/crates/matrix-sdk/src/encryption/identities/users.rs index a4179eddc..619fbdab2 100644 --- a/crates/matrix-sdk/src/encryption/identities/users.rs +++ b/crates/matrix-sdk/src/encryption/identities/users.rs @@ -477,7 +477,7 @@ impl OtherUserIdentity { } room.clone() } else { - self.client.create_dm_room(self.inner.user_id()).await? + self.client.create_dm(self.inner.user_id()).await? 
}; let response = room From 16687f24f9bd7884f00ba357a1f18854aadf6659 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 20 Mar 2023 13:14:24 +0100 Subject: [PATCH 32/43] sdk: Fix documentation of create_room --- crates/matrix-sdk/src/client/mod.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/matrix-sdk/src/client/mod.rs b/crates/matrix-sdk/src/client/mod.rs index 743aec664..220cf6018 100644 --- a/crates/matrix-sdk/src/client/mod.rs +++ b/crates/matrix-sdk/src/client/mod.rs @@ -1662,17 +1662,17 @@ impl Client { self.send(request, None).await } - /// Create a room using the `RoomBuilder` and send the request. + /// Create a room with the given parameters. /// - /// Sends a request to `/_matrix/client/r0/createRoom`, returns a - /// `create_room::Response`, this is an empty response. + /// Sends a request to `/_matrix/client/r0/createRoom`, returns the created + /// room as a [`room::Joined`] object. /// - /// # Arguments - /// - /// * `room` - The easiest way to create this request is using the - /// `create_room::Request` itself. + /// If you want to create a direct message with one specific user, you can + /// use [`create_dm`][Self::create_dm], which is more convenient than + /// assembling the [`create_room::v3::Request`] yourself. 
/// /// # Examples + /// /// ```no_run /// use matrix_sdk::Client; /// # use matrix_sdk::ruma::api::client::room::{ From 1a1fe97d00ccf6d686bf2192cc5418dbb466f204 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 20 Mar 2023 15:05:50 +0100 Subject: [PATCH 33/43] Use Rust conventions for variable names in UDL --- bindings/matrix-sdk-crypto-ffi/src/olm.udl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/matrix-sdk-crypto-ffi/src/olm.udl b/bindings/matrix-sdk-crypto-ffi/src/olm.udl index a175051e4..f8cbb0675 100644 --- a/bindings/matrix-sdk-crypto-ffi/src/olm.udl +++ b/bindings/matrix-sdk-crypto-ffi/src/olm.udl @@ -28,7 +28,7 @@ interface MigrationError { }; callback interface Logger { - void log(string logLine); + void log(string log_line); }; callback interface ProgressListener { From 816e722807e551e22acb984348148a6b20733590 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 20 Mar 2023 15:07:00 +0100 Subject: [PATCH 34/43] ffi: Inline uniffi_api modules include_scaffolding! is expected to be used at the crate root now, and clippy seems happy with the generated code right now. 
--- bindings/matrix-sdk-crypto-ffi/src/lib.rs | 4 ++-- bindings/matrix-sdk-crypto-ffi/src/uniffi_api.rs | 5 ----- bindings/matrix-sdk-ffi/src/lib.rs | 4 ++-- bindings/matrix-sdk-ffi/src/uniffi_api.rs | 5 ----- 4 files changed, 4 insertions(+), 14 deletions(-) delete mode 100644 bindings/matrix-sdk-crypto-ffi/src/uniffi_api.rs delete mode 100644 bindings/matrix-sdk-ffi/src/uniffi_api.rs diff --git a/bindings/matrix-sdk-crypto-ffi/src/lib.rs b/bindings/matrix-sdk-crypto-ffi/src/lib.rs index 23ff01253..c5f7c4667 100644 --- a/bindings/matrix-sdk-crypto-ffi/src/lib.rs +++ b/bindings/matrix-sdk-crypto-ffi/src/lib.rs @@ -12,7 +12,6 @@ mod error; mod logger; mod machine; mod responses; -mod uniffi_api; mod users; mod verification; @@ -47,7 +46,6 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use tokio::runtime::Runtime; -use uniffi_api::*; pub use users::UserIdentity; pub use verification::{ CancelInfo, ConfirmVerificationResult, QrCode, QrCodeListener, QrCodeState, @@ -849,6 +847,8 @@ fn vodozemac_version() -> String { vodozemac::VERSION.to_owned() } +uniffi::include_scaffolding!("olm"); + mod uniffi_types { pub use crate::{ backup_recovery_key::{ diff --git a/bindings/matrix-sdk-crypto-ffi/src/uniffi_api.rs b/bindings/matrix-sdk-crypto-ffi/src/uniffi_api.rs deleted file mode 100644 index df681245f..000000000 --- a/bindings/matrix-sdk-crypto-ffi/src/uniffi_api.rs +++ /dev/null @@ -1,5 +0,0 @@ -#![allow(clippy::all, warnings)] - -use crate::*; - -uniffi::include_scaffolding!("olm"); diff --git a/bindings/matrix-sdk-ffi/src/lib.rs b/bindings/matrix-sdk-ffi/src/lib.rs index 3e73017ec..4c388aa59 100644 --- a/bindings/matrix-sdk-ffi/src/lib.rs +++ b/bindings/matrix-sdk-ffi/src/lib.rs @@ -31,14 +31,12 @@ pub mod room; pub mod session_verification; pub mod sliding_sync; pub mod timeline; -mod uniffi_api; use client::Client; use client_builder::ClientBuilder; use matrix_sdk::{encryption::CryptoStoreError, HttpError, IdParseError}; use once_cell::sync::Lazy; use 
tokio::runtime::Runtime; -pub use uniffi_api::*; pub static RUNTIME: Lazy = Lazy::new(|| Runtime::new().expect("Can't start Tokio runtime")); @@ -97,6 +95,8 @@ impl From for ClientError { pub use platform::*; +uniffi::include_scaffolding!("api"); + mod uniffi_types { pub use matrix_sdk::ruma::events::room::{message::RoomMessageEventContent, MediaSource}; diff --git a/bindings/matrix-sdk-ffi/src/uniffi_api.rs b/bindings/matrix-sdk-ffi/src/uniffi_api.rs deleted file mode 100644 index 3d6676f8f..000000000 --- a/bindings/matrix-sdk-ffi/src/uniffi_api.rs +++ /dev/null @@ -1,5 +0,0 @@ -#![allow(clippy::all)] - -use crate::*; - -uniffi::include_scaffolding!("api"); From 72ae9dd8857587932e1dc97a8e44f9b891212117 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 20 Mar 2023 16:26:26 +0100 Subject: [PATCH 35/43] Upgrade eyeball-im --- Cargo.lock | 63 +++++++++---------- Cargo.toml | 2 +- crates/matrix-sdk/Cargo.toml | 2 +- .../matrix-sdk/src/room/timeline/builder.rs | 2 +- crates/matrix-sdk/src/room/timeline/inner.rs | 2 +- crates/matrix-sdk/src/room/timeline/mod.rs | 2 +- .../src/room/timeline/tests/basic.rs | 2 +- .../src/sliding_sync/list/builder.rs | 2 +- .../matrix-sdk/src/sliding_sync/list/mod.rs | 2 +- crates/matrix-sdk/src/sliding_sync/room.rs | 4 +- 10 files changed, 39 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a067eab55..48ff993ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -540,12 +540,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitmaps" -version = "2.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2" -dependencies = [ - "typenum", -] +checksum = "703642b98a00b3b90513279a8ede3fcfa479c126c5fb46e78f3051522f021403" [[package]] name = "blake3" @@ -1614,12 +1611,12 @@ dependencies = [ [[package]] name = "eyeball-im" -version = "0.1.0" +version = "0.2.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bb8a6cfd1f5947d0426dcb753723318d5922c738e905be7af167547565f81d9" +checksum = "29e6dff0ac9894dcc183064377dfeb4137bcffa9f9ec3dbc10f8e7fba34c0ac7" dependencies = [ "futures-core", - "im", + "imbl", "tokio", "tokio-stream", ] @@ -2220,21 +2217,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "im" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0acd33ff0285af998aaf9b57342af478078f53492322fafc47450e09397e0e9" -dependencies = [ - "bitmaps", - "rand_core 0.6.4", - "rand_xoshiro", - "serde", - "sized-chunks", - "typenum", - "version_check", -] - [[package]] name = "image" version = "0.23.14" @@ -2272,6 +2254,29 @@ dependencies = [ "tiff 0.8.1", ] +[[package]] +name = "imbl" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2806b69cd9f4664844027b64465eacb444c67c1db9c778e341adff0c25cdb0d" +dependencies = [ + "bitmaps", + "imbl-sized-chunks", + "rand_core 0.6.4", + "rand_xoshiro", + "serde", + "version_check", +] + +[[package]] +name = "imbl-sized-chunks" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6957ea0b2541c5ca561d3ef4538044af79f8a05a1eb3a3b148936aaceaa1076" +dependencies = [ + "bitmaps", +] + [[package]] name = "indenter" version = "0.3.3" @@ -2746,8 +2751,8 @@ dependencies = [ "gloo-timers", "http", "hyper", - "im", "image 0.24.5", + "imbl", "indexmap", "matrix-sdk-base", "matrix-sdk-common", @@ -4925,16 +4930,6 @@ version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" -[[package]] -name = "sized-chunks" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e" -dependencies = [ - "bitmaps", - "typenum", -] - 
[[package]] name = "slab" version = "0.4.7" diff --git a/Cargo.toml b/Cargo.toml index f64995365..0cc37470a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ byteorder = "1.4.3" ctor = "0.1.26" dashmap = "5.2.0" eyeball = "0.4.0" -eyeball-im = "0.1.0" +eyeball-im = "0.2.0" futures-util = { version = "0.3.26", default-features = false, features = ["alloc"] } http = "0.2.6" ruma = { git = "https://github.com/ruma/ruma", rev = "8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5", features = ["client-api-c"] } diff --git a/crates/matrix-sdk/Cargo.toml b/crates/matrix-sdk/Cargo.toml index 251909546..3281f1a9e 100644 --- a/crates/matrix-sdk/Cargo.toml +++ b/crates/matrix-sdk/Cargo.toml @@ -79,7 +79,7 @@ eyre = { version = "0.6.8", optional = true } futures-core = "0.3.21" futures-util = { workspace = true } http = { workspace = true } -im = { version = "15.1.0", features = ["serde"] } +imbl = { version = "2.0.0", features = ["serde"] } indexmap = "1.9.1" hyper = { version = "0.14.20", features = ["http1", "http2", "server"], optional = true } matrix-sdk-base = { version = "0.6.0", path = "../matrix-sdk-base", default_features = false } diff --git a/crates/matrix-sdk/src/room/timeline/builder.rs b/crates/matrix-sdk/src/room/timeline/builder.rs index 6e57190ee..de2c99c8c 100644 --- a/crates/matrix-sdk/src/room/timeline/builder.rs +++ b/crates/matrix-sdk/src/room/timeline/builder.rs @@ -14,7 +14,7 @@ use std::sync::Arc; -use im::Vector; +use imbl::Vector; use matrix_sdk_base::{ deserialized_responses::{EncryptionInfo, SyncTimelineEvent}, locks::Mutex, diff --git a/crates/matrix-sdk/src/room/timeline/inner.rs b/crates/matrix-sdk/src/room/timeline/inner.rs index efb5e4fd8..226582148 100644 --- a/crates/matrix-sdk/src/room/timeline/inner.rs +++ b/crates/matrix-sdk/src/room/timeline/inner.rs @@ -18,7 +18,7 @@ use std::{collections::HashMap, sync::Arc}; use async_trait::async_trait; use eyeball_im::{ObservableVector, VectorSubscriber}; -use im::Vector; +use imbl::Vector; use 
indexmap::{IndexMap, IndexSet}; #[cfg(feature = "e2e-encryption")] use matrix_sdk_base::crypto::OlmMachine; diff --git a/crates/matrix-sdk/src/room/timeline/mod.rs b/crates/matrix-sdk/src/room/timeline/mod.rs index a0cdb9817..8bbdbce81 100644 --- a/crates/matrix-sdk/src/room/timeline/mod.rs +++ b/crates/matrix-sdk/src/room/timeline/mod.rs @@ -20,7 +20,7 @@ use std::{pin::Pin, sync::Arc, task::Poll}; use eyeball_im::{VectorDiff, VectorSubscriber}; use futures_core::Stream; -use im::Vector; +use imbl::Vector; use matrix_sdk_base::locks::Mutex; use pin_project_lite::pin_project; use ruma::{ diff --git a/crates/matrix-sdk/src/room/timeline/tests/basic.rs b/crates/matrix-sdk/src/room/timeline/tests/basic.rs index e8ead03fe..2203cf087 100644 --- a/crates/matrix-sdk/src/room/timeline/tests/basic.rs +++ b/crates/matrix-sdk/src/room/timeline/tests/basic.rs @@ -1,7 +1,7 @@ use assert_matches::assert_matches; use eyeball_im::VectorDiff; use futures_util::StreamExt; -use im::vector; +use imbl::vector; use matrix_sdk_base::deserialized_responses::SyncTimelineEvent; use matrix_sdk_test::async_test; use ruma::{ diff --git a/crates/matrix-sdk/src/sliding_sync/list/builder.rs b/crates/matrix-sdk/src/sliding_sync/list/builder.rs index f38022b54..d5b8a7322 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/builder.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/builder.rs @@ -7,7 +7,7 @@ use std::{ use eyeball::unique::Observable; use eyeball_im::ObservableVector; -use im::Vector; +use imbl::Vector; use ruma::{api::client::sync::sync_events::v4, events::StateEventType, UInt}; use super::{Error, RoomListEntry, SlidingSyncList, SlidingSyncMode, SlidingSyncState}; diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index 5f7079520..a806d0711 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -15,7 +15,7 @@ pub use builder::*; use eyeball::unique::Observable; use 
eyeball_im::{ObservableVector, VectorDiff}; use futures_core::Stream; -use im::Vector; +use imbl::Vector; pub(super) use request_generator::*; use ruma::{api::client::sync::sync_events::v4, events::StateEventType, OwnedRoomId, RoomId, UInt}; use serde::{Deserialize, Serialize}; diff --git a/crates/matrix-sdk/src/sliding_sync/room.rs b/crates/matrix-sdk/src/sliding_sync/room.rs index 48d270ba2..56801ec5c 100644 --- a/crates/matrix-sdk/src/sliding_sync/room.rs +++ b/crates/matrix-sdk/src/sliding_sync/room.rs @@ -9,7 +9,7 @@ use std::{ use eyeball::unique::Observable; use eyeball_im::ObservableVector; -use im::Vector; +use imbl::Vector; use matrix_sdk_base::deserialized_responses::SyncTimelineEvent; use ruma::{ api::client::sync::sync_events::{v4, UnreadNotificationsCount}, @@ -355,7 +355,7 @@ impl From<&SlidingSyncRoom> for FrozenSlidingSyncRoom { #[cfg(test)] mod tests { - use im::vector; + use imbl::vector; use matrix_sdk_base::deserialized_responses::TimelineEvent; use ruma::{events::room::message::RoomMessageEventContent, RoomId}; use serde_json::json; From 76763a80fecac65f87fd5aaa34f93e36734d37cd Mon Sep 17 00:00:00 2001 From: Florian Renaud Date: Mon, 20 Mar 2023 17:30:25 +0100 Subject: [PATCH 36/43] ffi: Add binding for get_dm_room --- bindings/matrix-sdk-ffi/src/client.rs | 8 ++++++++ crates/matrix-sdk/src/encryption/mod.rs | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/bindings/matrix-sdk-ffi/src/client.rs b/bindings/matrix-sdk-ffi/src/client.rs index 96c33168f..f9698a983 100644 --- a/bindings/matrix-sdk-ffi/src/client.rs +++ b/bindings/matrix-sdk-ffi/src/client.rs @@ -3,6 +3,7 @@ use std::sync::{Arc, RwLock}; use anyhow::{anyhow, Context}; use matrix_sdk::{ media::{MediaFileHandle as SdkMediaFileHandle, MediaFormat, MediaRequest, MediaThumbnailSize}, + room::Room as SdkRoom, ruma::{ api::client::{ account::whoami, @@ -497,6 +498,13 @@ impl Client { pub fn rooms(&self) -> Vec> { self.client.rooms().into_iter().map(|room| 
Arc::new(Room::new(room))).collect() } + + pub fn get_dm_room(&self, user_id: String) -> Result>, ClientError> { + let user_id = UserId::parse(user_id)?; + let sdk_room = self.client.get_dm_room(&user_id).map(SdkRoom::Joined); + let dm = sdk_room.map(|room| Arc::new(Room::new(room))); + Ok(dm) + } } impl Client { diff --git a/crates/matrix-sdk/src/encryption/mod.rs b/crates/matrix-sdk/src/encryption/mod.rs index 9db51849d..2978591ca 100644 --- a/crates/matrix-sdk/src/encryption/mod.rs +++ b/crates/matrix-sdk/src/encryption/mod.rs @@ -339,7 +339,8 @@ impl Client { Ok(()) } - fn get_dm_room(&self, user_id: &UserId) -> Option { + /// Get the existing DM room with the given user, if any. + pub fn get_dm_room(&self, user_id: &UserId) -> Option { let rooms = self.joined_rooms(); // Find the room we share with the `user_id` and only with `user_id` From 7263914f672d970f48ee500cbd956fff2ee4d537 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Damir=20Jeli=C4=87?= Date: Tue, 21 Mar 2023 11:45:22 +0100 Subject: [PATCH 37/43] Remove the Apple specific auth service tests These tests are doing real network requests towards hosts that are not under our control. --- .../AuthenticationServiceTests.swift | 64 ------------------- 1 file changed, 64 deletions(-) delete mode 100644 bindings/apple/Tests/MatrixRustSDKTests/AuthenticationServiceTests.swift diff --git a/bindings/apple/Tests/MatrixRustSDKTests/AuthenticationServiceTests.swift b/bindings/apple/Tests/MatrixRustSDKTests/AuthenticationServiceTests.swift deleted file mode 100644 index df92b6a32..000000000 --- a/bindings/apple/Tests/MatrixRustSDKTests/AuthenticationServiceTests.swift +++ /dev/null @@ -1,64 +0,0 @@ -@testable import MatrixRustSDK -import XCTest - -class AuthenticationServiceTests: XCTestCase { - var service: AuthenticationService! 
- - override func setUp() { - service = AuthenticationService(basePath: FileManager.default.temporaryDirectory.path, - passphrase: nil, - customSlidingSyncProxy: nil) - } - - func testValidServers() { - XCTAssertNoThrow(try service.configureHomeserver(serverNameOrHomeserverUrl: "matrix.org")) - XCTAssertNoThrow(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://matrix.org")) - XCTAssertNoThrow(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://matrix.org/")) - } - - func testInvalidCharacters() { - XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "hello!@$£%^world"), - "A server name with invalid characters should not succeed to build.") { error in - guard case AuthenticationError.InvalidServerName = error else { XCTFail("Expected invalid name error."); return } - } - } - - func textNonExistentDomain() { - XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "somesillylinkthatdoesntexist.com"), - "A server name that doesn't exist should not succeed.") { error in - guard case AuthenticationError.Generic = error else { XCTFail("Expected generic error."); return } - } - XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://somesillylinkthatdoesntexist.com"), - "A server URL that doesn't exist should not succeed.") { error in - guard case AuthenticationError.Generic = error else { XCTFail("Expected generic error."); return } - } - } - - func testValidDomainWithoutServer() { - XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://google.com"), - "Google should not succeed as it doesn't host a homeserver.") { error in - guard case AuthenticationError.Generic = error else { XCTFail("Expected generic error."); return } - } - } - - func testServerWithoutSlidingSync() { - XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "envs.net"), - "Envs should not succeed as it doesn't advertise a 
sliding sync proxy.") { error in - guard case AuthenticationError.SlidingSyncNotAvailable = error else { XCTFail("Expected sliding sync error."); return } - } - } - - func testHomeserverURL() { - XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://matrix-client.matrix.org"), - "Directly using a homeserver should not succeed as a sliding sync proxy won't be found.") { error in - guard case AuthenticationError.SlidingSyncNotAvailable = error else { XCTFail("Expected sliding sync error."); return } - } - } - - func testHomeserverURLWithProxyOverride() { - service = AuthenticationService(basePath: FileManager.default.temporaryDirectory.path, - passphrase: nil, customSlidingSyncProxy: "https://slidingsync.proxy") - XCTAssertNoThrow(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://matrix-client.matrix.org"), - "Directly using a homeserver should succeed what a custom sliding sync proxy has been set.") - } -} From 32e2ea0288cb147071f51084d639e79876150d32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Damir=20Jeli=C4=87?= Date: Tue, 21 Mar 2023 09:43:09 +0100 Subject: [PATCH 38/43] Allow the old VerificationState enum to be deserialized into the new one --- .../src/deserialized_responses.rs | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/crates/matrix-sdk-common/src/deserialized_responses.rs b/crates/matrix-sdk-common/src/deserialized_responses.rs index 4c86d993e..90a950ba2 100644 --- a/crates/matrix-sdk-common/src/deserialized_responses.rs +++ b/crates/matrix-sdk-common/src/deserialized_responses.rs @@ -17,6 +17,7 @@ const UNKNOWN_DEVICE: &str = "Encrypted by an unknown or deleted device."; /// Represents the state of verification for a decrypted message sent by a /// device. 
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] +#[serde(from = "OldVerificationStateHelper")] pub enum VerificationState { /// This message is guaranteed to be authentic as it is coming from a device /// belonging to a user that we have verified. @@ -31,6 +32,34 @@ pub enum VerificationState { Unverified(VerificationLevel), } +// TODO: Remove this once we're confident that everybody that serialized these +// states uses the new enum. +#[derive(Clone, Debug, Deserialize)] +enum OldVerificationStateHelper { + Untrusted, + UnknownDevice, + #[serde(alias = "Trusted")] + Verified, + Unverified(VerificationLevel), +} + +impl From for VerificationState { + fn from(value: OldVerificationStateHelper) -> Self { + match value { + // This mapping isn't strictly correct but we don't know which part in the old + // `VerificationState` enum was unverified. + OldVerificationStateHelper::Untrusted => { + VerificationState::Unverified(VerificationLevel::UnsignedDevice) + } + OldVerificationStateHelper::UnknownDevice => { + Self::Unverified(VerificationLevel::None(DeviceLinkProblem::MissingDevice)) + } + OldVerificationStateHelper::Verified => Self::Verified, + OldVerificationStateHelper::Unverified(l) => Self::Unverified(l), + } + } +} + impl VerificationState { /// Convert the `VerificationState` into a `ShieldState` which can be /// directly used to decorate messages in the recommended way. 
From 8652cdf752be1cd43b794fb22175470e7ca2528d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Damir=20Jeli=C4=87?= Date: Tue, 21 Mar 2023 09:52:11 +0100 Subject: [PATCH 39/43] Test the VerificationState migration --- .../src/deserialized_responses.rs | 44 ++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/crates/matrix-sdk-common/src/deserialized_responses.rs b/crates/matrix-sdk-common/src/deserialized_responses.rs index 90a950ba2..0bda30b75 100644 --- a/crates/matrix-sdk-common/src/deserialized_responses.rs +++ b/crates/matrix-sdk-common/src/deserialized_responses.rs @@ -332,9 +332,11 @@ mod tests { events::{room::message::RoomMessageEventContent, AnySyncTimelineEvent}, serde::Raw, }; + use serde::Deserialize; use serde_json::json; - use super::{SyncTimelineEvent, TimelineEvent}; + use super::{SyncTimelineEvent, TimelineEvent, VerificationState}; + use crate::deserialized_responses::{DeviceLinkProblem, VerificationLevel}; fn example_event() -> serde_json::Value { json!({ @@ -368,4 +370,44 @@ mod tests { assert_eq!(converted_event.event_id(), "$xxxxx:example.org"); assert_eq!(converted_event.sender(), "@carl:example.com"); } + + #[test] + fn old_verification_state_to_new_migration() { + #[derive(Deserialize)] + struct State { + state: VerificationState, + } + + let state = json!({ + "state": "Trusted", + }); + let deserialized: State = + serde_json::from_value(state).expect("We can deserialize the old trusted value"); + assert_eq!(deserialized.state, VerificationState::Verified); + + let state = json!({ + "state": "UnknownDevice", + }); + + let deserialized: State = + serde_json::from_value(state).expect("We can deserialize the old unknown device value"); + + assert_eq!( + deserialized.state, + VerificationState::Unverified(VerificationLevel::None( + DeviceLinkProblem::MissingDevice + )) + ); + + let state = json!({ + "state": "Untrusted", + }); + let deserialized: State = + serde_json::from_value(state).expect("We can deserialize the old 
trusted value"); + + assert_eq!( + deserialized.state, + VerificationState::Unverified(VerificationLevel::UnsignedDevice) + ); + } } From 1ce1c5636e0998cdda0eb0adc18175b1f9f54c1b Mon Sep 17 00:00:00 2001 From: Mauro <34335419+Velin92@users.noreply.github.com> Date: Tue, 21 Mar 2023 12:23:17 +0100 Subject: [PATCH 40/43] Add support for (un)ignoring users --- bindings/matrix-sdk-ffi/src/api.udl | 10 +- bindings/matrix-sdk-ffi/src/client.rs | 16 +++ bindings/matrix-sdk-ffi/src/lib.rs | 6 +- bindings/matrix-sdk-ffi/src/room.rs | 64 +----------- bindings/matrix-sdk-ffi/src/room_member.rs | 110 ++++++++++++++++++++ crates/matrix-sdk-base/src/rooms/members.rs | 6 ++ crates/matrix-sdk-base/src/rooms/normal.rs | 12 +++ crates/matrix-sdk/src/account.rs | 40 ++++++- crates/matrix-sdk/src/room/member.rs | 20 ++++ 9 files changed, 210 insertions(+), 74 deletions(-) create mode 100644 bindings/matrix-sdk-ffi/src/room_member.rs diff --git a/bindings/matrix-sdk-ffi/src/api.udl b/bindings/matrix-sdk-ffi/src/api.udl index 8194433a8..a69816f35 100644 --- a/bindings/matrix-sdk-ffi/src/api.udl +++ b/bindings/matrix-sdk-ffi/src/api.udl @@ -212,15 +212,7 @@ enum MembershipState { "Leave", }; -dictionary RoomMember { - string user_id; - string? display_name; - string? 
avatar_url; - MembershipState membership; - boolean is_name_ambiguous; - i64 power_level; - i64 normalized_power_level; -}; +interface RoomMember { }; interface Room { [Throws=ClientError] diff --git a/bindings/matrix-sdk-ffi/src/client.rs b/bindings/matrix-sdk-ffi/src/client.rs index f9698a983..ab069a59a 100644 --- a/bindings/matrix-sdk-ffi/src/client.rs +++ b/bindings/matrix-sdk-ffi/src/client.rs @@ -505,6 +505,22 @@ impl Client { let dm = sdk_room.map(|room| Arc::new(Room::new(room))); Ok(dm) } + + pub fn ignore_user(&self, user_id: String) -> Result<(), ClientError> { + RUNTIME.block_on(async move { + let user_id = UserId::parse(user_id)?; + self.client.account().ignore_user(&user_id).await?; + Ok(()) + }) + } + + pub fn unignore_user(&self, user_id: String) -> Result<(), ClientError> { + RUNTIME.block_on(async move { + let user_id = UserId::parse(user_id)?; + self.client.account().unignore_user(&user_id).await?; + Ok(()) + }) + } } impl Client { diff --git a/bindings/matrix-sdk-ffi/src/lib.rs b/bindings/matrix-sdk-ffi/src/lib.rs index 4c388aa59..f48cf414b 100644 --- a/bindings/matrix-sdk-ffi/src/lib.rs +++ b/bindings/matrix-sdk-ffi/src/lib.rs @@ -28,6 +28,7 @@ pub mod client_builder; mod helpers; pub mod notification_service; pub mod room; +pub mod room_member; pub mod session_verification; pub mod sliding_sync; pub mod timeline; @@ -47,7 +48,7 @@ pub use matrix_sdk::{ }; pub use self::{ - authentication_service::*, client::*, notification_service::*, room::*, + authentication_service::*, client::*, notification_service::*, room::*, room_member::*, session_verification::*, sliding_sync::*, timeline::*, }; @@ -109,7 +110,8 @@ mod uniffi_types { PusherKind, Session, }, client_builder::ClientBuilder, - room::{Membership, MembershipState, Room, RoomMember}, + room::{Membership, Room}, + room_member::{MembershipState, RoomMember}, session_verification::{SessionVerificationController, SessionVerificationEmoji}, sliding_sync::{ RequiredState, RoomListEntry, 
SlidingSync, SlidingSyncBuilder, SlidingSyncList, diff --git a/bindings/matrix-sdk-ffi/src/room.rs b/bindings/matrix-sdk-ffi/src/room.rs index ff0e39674..e0879e46d 100644 --- a/bindings/matrix-sdk-ffi/src/room.rs +++ b/bindings/matrix-sdk-ffi/src/room.rs @@ -24,7 +24,7 @@ use mime::Mime; use tracing::error; use super::RUNTIME; -use crate::{TimelineDiff, TimelineItem, TimelineListener}; +use crate::{RoomMember, TimelineDiff, TimelineItem, TimelineListener}; #[derive(uniffi::Enum)] pub enum Membership { @@ -40,56 +40,6 @@ pub struct Room { timeline: TimelineLock, } -#[derive(Clone)] -pub enum MembershipState { - /// The user is banned. - Ban, - - /// The user has been invited. - Invite, - - /// The user has joined. - Join, - - /// The user has requested to join. - Knock, - - /// The user has left. - Leave, -} - -#[derive(uniffi::Object)] -pub struct RoomMember { - pub user_id: String, - pub display_name: Option, - pub avatar_url: Option, - pub membership: MembershipState, - pub is_name_ambiguous: bool, - pub power_level: i64, - pub normalized_power_level: i64, -} - -impl From for MembershipState { - fn from(m: matrix_sdk::ruma::events::room::member::MembershipState) -> Self { - match m { - matrix_sdk::ruma::events::room::member::MembershipState::Ban => MembershipState::Ban, - matrix_sdk::ruma::events::room::member::MembershipState::Invite => { - MembershipState::Invite - } - matrix_sdk::ruma::events::room::member::MembershipState::Join => MembershipState::Join, - matrix_sdk::ruma::events::room::member::MembershipState::Knock => { - MembershipState::Knock - } - matrix_sdk::ruma::events::room::member::MembershipState::Leave => { - MembershipState::Leave - } - _ => todo!( - "Handle Custom case: https://github.com/matrix-org/matrix-rust-sdk/issues/1254" - ), - } - } -} - #[uniffi::export] impl Room { pub fn id(&self) -> String { @@ -198,22 +148,14 @@ impl Room { }) } - pub fn members(&self) -> Result> { + pub fn members(&self) -> Result>> { let room = self.room.clone(); 
RUNTIME.block_on(async move { let members = room .members() .await? .iter() - .map(|m| RoomMember { - user_id: m.user_id().to_string(), - display_name: m.display_name().map(|d| d.to_owned()), - avatar_url: m.avatar_url().map(|a| a.to_string()), - membership: m.membership().to_owned().into(), - is_name_ambiguous: m.name_ambiguous(), - power_level: m.power_level(), - normalized_power_level: m.normalized_power_level(), - }) + .map(|m| Arc::new(RoomMember::new(m.clone()))) .collect(); Ok(members) }) diff --git a/bindings/matrix-sdk-ffi/src/room_member.rs b/bindings/matrix-sdk-ffi/src/room_member.rs new file mode 100644 index 000000000..4b4a1dfd4 --- /dev/null +++ b/bindings/matrix-sdk-ffi/src/room_member.rs @@ -0,0 +1,110 @@ +use matrix_sdk::room::RoomMember as SdkRoomMember; + +use super::RUNTIME; +use crate::ClientError; + +#[derive(Clone)] +pub enum MembershipState { + /// The user is banned. + Ban, + + /// The user has been invited. + Invite, + + /// The user has joined. + Join, + + /// The user has requested to join. + Knock, + + /// The user has left. 
+ Leave, +} + +impl From for MembershipState { + fn from(m: matrix_sdk::ruma::events::room::member::MembershipState) -> Self { + match m { + matrix_sdk::ruma::events::room::member::MembershipState::Ban => MembershipState::Ban, + matrix_sdk::ruma::events::room::member::MembershipState::Invite => { + MembershipState::Invite + } + matrix_sdk::ruma::events::room::member::MembershipState::Join => MembershipState::Join, + matrix_sdk::ruma::events::room::member::MembershipState::Knock => { + MembershipState::Knock + } + matrix_sdk::ruma::events::room::member::MembershipState::Leave => { + MembershipState::Leave + } + _ => todo!( + "Handle Custom case: https://github.com/matrix-org/matrix-rust-sdk/issues/1254" + ), + } + } +} + +pub struct RoomMember { + inner: SdkRoomMember, +} + +#[uniffi::export] +impl RoomMember { + pub fn user_id(&self) -> String { + self.inner.user_id().to_string() + } + + pub fn display_name(&self) -> Option { + self.inner.display_name().map(|d| d.to_owned()) + } + + pub fn avatar_url(&self) -> Option { + self.inner.avatar_url().map(ToString::to_string) + } + + pub fn membership(&self) -> MembershipState { + self.inner.membership().to_owned().into() + } + + pub fn is_name_ambiguous(&self) -> bool { + self.inner.name_ambiguous() + } + + pub fn power_level(&self) -> i64 { + self.inner.power_level() + } + + pub fn normalized_power_level(&self) -> i64 { + self.inner.normalized_power_level() + } + + pub fn is_ignored(&self) -> bool { + self.inner.is_ignored() + } + + pub fn is_account_user(&self) -> bool { + self.inner.is_account_user() + } + + /// Adds the room member to the current account data's ignore list + /// which will ignore the user across all rooms. + pub fn ignore(&self) -> Result<(), ClientError> { + RUNTIME.block_on(async move { + self.inner.ignore().await?; + Ok(()) + }) + } + + /// Removes the room member from the current account data's ignore list + /// which will unignore the user across all rooms. 
+ pub fn unignore(&self) -> Result<(), ClientError> { + RUNTIME.block_on(async move { + self.inner.unignore().await?; + Ok(()) + }) + } +} + +impl RoomMember { + pub fn new(room_member: SdkRoomMember) -> Self { + RoomMember { inner: room_member } + } +} diff --git a/crates/matrix-sdk-base/src/rooms/members.rs b/crates/matrix-sdk-base/src/rooms/members.rs index 86718a731..e3dea619f 100644 --- a/crates/matrix-sdk-base/src/rooms/members.rs +++ b/crates/matrix-sdk-base/src/rooms/members.rs @@ -41,6 +41,7 @@ pub struct RoomMember { pub(crate) max_power_level: i64, pub(crate) is_room_creator: bool, pub(crate) display_name_ambiguous: bool, + pub(crate) is_ignored: bool, } impl RoomMember { @@ -125,4 +126,9 @@ impl RoomMember { pub fn membership(&self) -> &MembershipState { self.event.membership() } + + /// Is the room member ignored by the current account user + pub fn is_ignored(&self) -> bool { + self.is_ignored + } } diff --git a/crates/matrix-sdk-base/src/rooms/normal.rs b/crates/matrix-sdk-base/src/rooms/normal.rs index 3b2fb92ad..bbb545767 100644 --- a/crates/matrix-sdk-base/src/rooms/normal.rs +++ b/crates/matrix-sdk-base/src/rooms/normal.rs @@ -21,6 +21,7 @@ use futures_util::stream::{self, StreamExt}; use ruma::{ api::client::sync::sync_events::v3::RoomSummary as RumaSummary, events::{ + ignored_user_list::IgnoredUserListEventContent, receipt::{Receipt, ReceiptThread, ReceiptType}, room::{ create::RoomCreateEventContent, encryption::RoomEncryptionEventContent, @@ -457,6 +458,16 @@ impl Room { .len() > 1; + let is_ignored = self + .store + .get_account_data_event_static::() + .await? + .map(|c| c.deserialize()) + .transpose()? 
+ .map(|e| e.content) + .map(|l| l.ignored_users.contains_key(member_event.user_id())) + .unwrap_or(false); + Ok(Some(RoomMember { event: Arc::new(member_event), profile: profile.into(), @@ -465,6 +476,7 @@ impl Room { max_power_level, is_room_creator, display_name_ambiguous: ambiguous, + is_ignored, })) } diff --git a/crates/matrix-sdk/src/account.rs b/crates/matrix-sdk/src/account.rs index 3550bc0bc..038ea9c88 100644 --- a/crates/matrix-sdk/src/account.rs +++ b/crates/matrix-sdk/src/account.rs @@ -34,12 +34,14 @@ use ruma::{ }, assign, events::{ - room::MediaSource, AnyGlobalAccountDataEventContent, GlobalAccountDataEventContent, + ignored_user_list::{IgnoredUser, IgnoredUserListEventContent}, + room::MediaSource, + AnyGlobalAccountDataEventContent, GlobalAccountDataEventContent, GlobalAccountDataEventType, StaticEventContent, }, serde::Raw, thirdparty::Medium, - ClientSecret, MxcUri, OwnedMxcUri, OwnedUserId, RoomId, SessionId, UInt, + ClientSecret, MxcUri, OwnedMxcUri, OwnedUserId, RoomId, SessionId, UInt, UserId, }; use serde::Deserialize; @@ -786,6 +788,40 @@ impl Account { Ok(()) } + + /// Adds the given user ID to the account's ignore list. + pub async fn ignore_user(&self, user_id: &UserId) -> Result<()> { + let mut ignored_user_list = self.get_ignored_user_list_event_content().await?; + ignored_user_list.ignored_users.insert(user_id.to_owned(), IgnoredUser::new()); + + // Updating the account data + self.set_account_data(ignored_user_list).await?; + // TODO: I think I should reset all the storage and perform a new local sync + // here but I don't know how + Ok(()) + } + + /// Removes the given user ID from the account's ignore list. 
+ pub async fn unignore_user(&self, user_id: &UserId) -> Result<()> { + let mut ignored_user_list = self.get_ignored_user_list_event_content().await?; + ignored_user_list.ignored_users.remove(user_id); + + // Updating the account data + self.set_account_data(ignored_user_list).await?; + // TODO: I think I should reset all the storage and perform a new local sync + // here but I don't know how + Ok(()) + } + + async fn get_ignored_user_list_event_content(&self) -> Result { + let ignored_user_list = self + .account_data::() + .await? + .map(|c| c.deserialize()) + .transpose()? + .unwrap_or_default(); + Ok(ignored_user_list) + } } fn get_raw_content(raw: Option>) -> Result>> { diff --git a/crates/matrix-sdk/src/room/member.rs b/crates/matrix-sdk/src/room/member.rs index 3585e51dd..69d04499c 100644 --- a/crates/matrix-sdk/src/room/member.rs +++ b/crates/matrix-sdk/src/room/member.rs @@ -64,4 +64,24 @@ impl RoomMember { let request = MediaRequest { source: MediaSource::Plain(url.to_owned()), format }; Ok(Some(self.client.media().get_media_content(&request, true).await?)) } + + /// Adds the room member to the current account data's ignore list + /// which will ignore the user across all rooms. + pub async fn ignore(&self) -> Result<()> { + self.client.account().ignore_user(self.inner.user_id()).await + } + + /// Removes the room member from the current account data's ignore list + /// which will unignore the user across all rooms. 
+ pub async fn unignore(&self) -> Result<()> { + self.client.account().unignore_user(self.inner.user_id()).await + } + + /// Returns true if the member of the room is the user of the account + pub fn is_account_user(&self) -> bool { + match self.client.user_id() { + Some(id) => id == self.inner.user_id(), + None => false, + } + } } From 9ee2d04a41fe17e38d93b6a61d9644acc89570d7 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 21 Mar 2023 12:10:25 +0100 Subject: [PATCH 41/43] Feature-gate everything in sliding-sync-integration-test --- .../sliding-sync-integration-test/src/lib.rs | 2391 ++++++++--------- 1 file changed, 1177 insertions(+), 1214 deletions(-) diff --git a/testing/sliding-sync-integration-test/src/lib.rs b/testing/sliding-sync-integration-test/src/lib.rs index d8d10b783..7adbb690a 100644 --- a/testing/sliding-sync-integration-test/src/lib.rs +++ b/testing/sliding-sync-integration-test/src/lib.rs @@ -1,10 +1,34 @@ +#![cfg(test)] + +use std::{ + iter::{once, repeat}, + time::{Duration, Instant}, +}; + +use anyhow::{bail, Context}; +use assert_matches::assert_matches; +use eyeball::unique::Observable; +use eyeball_im::VectorDiff; +use futures::{pin_mut, stream::StreamExt}; use matrix_sdk::{ - ruma::api::client::room::create_room::v3::Request as CreateRoomRequest, Client, RoomListEntry, - SlidingSyncBuilder, + room::timeline::EventTimelineItem, + ruma::{ + api::client::{ + error::ErrorKind as RumaError, + receipt::create_receipt::v3::ReceiptType as CreateReceiptType, + room::create_room::v3::Request as CreateRoomRequest, + sync::sync_events::v4::ReceiptsConfig, + }, + events::{ + receipt::{ReceiptThread, ReceiptType}, + room::message::RoomMessageEventContent, + }, + uint, + }, + Client, RoomListEntry, SlidingSyncBuilder, SlidingSyncList, SlidingSyncMode, SlidingSyncState, }; use matrix_sdk_integration_testing::helpers::get_client_for_user; -#[allow(dead_code)] async fn setup(name: String, use_sled_store: bool) -> anyhow::Result<(Client, 
SlidingSyncBuilder)> { let sliding_sync_proxy_url = option_env!("SLIDING_SYNC_PROXY_URL").unwrap_or("http://localhost:8338").to_owned(); @@ -17,14 +41,12 @@ async fn setup(name: String, use_sled_store: bool) -> anyhow::Result<(Client, Sl Ok((client, sliding_sync_builder)) } -#[allow(dead_code)] async fn random_setup_with_rooms( number_of_rooms: usize, ) -> anyhow::Result<(Client, SlidingSyncBuilder)> { random_setup_with_rooms_opt_store(number_of_rooms, false).await } -#[allow(dead_code)] async fn random_setup_with_rooms_opt_store( number_of_rooms: usize, use_sled_store: bool, @@ -39,7 +61,6 @@ async fn random_setup_with_rooms_opt_store( Ok((client, sliding_sync_builder)) } -#[allow(dead_code)] async fn make_room(client: &Client, room_name: String) -> anyhow::Result<()> { let mut request = CreateRoomRequest::new(); request.name = Some(room_name); @@ -64,125 +85,33 @@ impl From<&RoomListEntry> for RoomListEntryEasy { } } -#[cfg(test)] -mod tests { - use std::{ - iter::{once, repeat}, - time::{Duration, Instant}, - }; +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn it_works_smoke_test() -> anyhow::Result<()> { + let (_client, sync_proxy_builder) = setup("odo".to_owned(), false).await?; + let sync_proxy = sync_proxy_builder.add_fullsync_list().build().await?; + let stream = sync_proxy.stream(); + pin_mut!(stream); + let room_summary = + stream.next().await.context("No room summary found, loop ended unsuccessfully")?; + let summary = room_summary?; + assert_eq!(summary.rooms.len(), 0); + Ok(()) +} - use anyhow::{bail, Context}; - use assert_matches::assert_matches; - use eyeball::unique::Observable; - use eyeball_im::VectorDiff; - use futures::{pin_mut, stream::StreamExt}; - use matrix_sdk::{ - room::timeline::EventTimelineItem, - ruma::{ - api::client::{ - error::ErrorKind as RumaError, - receipt::create_receipt::v3::ReceiptType as CreateReceiptType, - sync::sync_events::v4::ReceiptsConfig, - }, - events::{ - receipt::{ReceiptThread, 
ReceiptType}, - room::message::RoomMessageEventContent, - }, - uint, - }, - SlidingSyncList, SlidingSyncMode, SlidingSyncState, - }; - - use super::*; - - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn it_works_smoke_test() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = setup("odo".to_owned(), false).await?; - let sync_proxy = sync_proxy_builder.add_fullsync_list().build().await?; - let stream = sync_proxy.stream(); - pin_mut!(stream); - let room_summary = - stream.next().await.context("No room summary found, loop ended unsuccessfully")?; - let summary = room_summary?; - assert_eq!(summary.rooms.len(), 0); - Ok(()) - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn modifying_timeline_limit() -> anyhow::Result<()> { - let (client, sync_builder) = random_setup_with_rooms(1).await?; - - // List one room. - let room_id = { - let sync = sync_builder - .clone() - .add_list( - SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::Selective) - .add_range(0u32, 1) - .timeline_limit(0u32) - .name("init_list") - .build()?, - ) - .build() - .await?; - - // Get the sync stream. - let stream = sync.stream(); - pin_mut!(stream); - - // Get the list to all rooms to check the list' state. - let list = sync.list("init_list").context("list `init_list` isn't found")?; - assert_eq!(list.state(), SlidingSyncState::NotLoaded); - - // Send the request and wait for a response. - let update_summary = stream - .next() - .await - .context("No room summary found, loop ended unsuccessfully")??; - - // Check the state has switched to `Live`. - assert_eq!(list.state(), SlidingSyncState::FullyLoaded); - - // One room has received an update. - assert_eq!(update_summary.rooms.len(), 1); - - // Let's fetch the room ID then. - let room_id = update_summary.rooms[0].clone(); - - // Let's fetch the room ID from the list too. 
- assert_matches!(list.rooms_list().get(0), Some(RoomListEntry::Filled(same_room_id)) => { - assert_eq!(same_room_id, &room_id); - }); - - room_id - }; - - // Join a room and send 20 messages. - { - // Join the room. - let room = - client.get_joined_room(&room_id).context("Failed to join room `{room_id}`")?; - - // In this room, let's send 20 messages! - for nth in 0..20 { - let message = RoomMessageEventContent::text_plain(format!("Message #{nth}")); - - room.send(message, None).await?; - } - - // Wait on the server to receive all the messages. - tokio::time::sleep(Duration::from_secs(1)).await; - } +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn modifying_timeline_limit() -> anyhow::Result<()> { + let (client, sync_builder) = random_setup_with_rooms(1).await?; + // List one room. + let room_id = { let sync = sync_builder .clone() .add_list( SlidingSyncList::builder() .sync_mode(SlidingSyncMode::Selective) - .name("visible_rooms_list") .add_range(0u32, 1) - .timeline_limit(1u32) + .timeline_limit(0u32) + .name("init_list") .build()?, ) .build() @@ -192,940 +121,750 @@ mod tests { let stream = sync.stream(); pin_mut!(stream); - // Get the list. - let list = - sync.list("visible_rooms_list").context("list `visible_rooms_list` isn't found")?; + // Get the list to all rooms to check the list' state. + let list = sync.list("init_list").context("list `init_list` isn't found")?; + assert_eq!(list.state(), SlidingSyncState::NotLoaded); - let mut all_event_ids = Vec::new(); + // Send the request and wait for a response. + let update_summary = + stream.next().await.context("No room summary found, loop ended unsuccessfully")??; - // Sync to receive a message with a `timeline_limit` set to 1. - let (room, _timeline, mut timeline_stream) = { - let mut update_summary; + // Check the state has switched to `Live`. + assert_eq!(list.state(), SlidingSyncState::FullyLoaded); - loop { - // Wait for a response. 
- update_summary = stream - .next() - .await - .context("No update summary found, loop ended unsuccessfully")??; + // One room has received an update. + assert_eq!(update_summary.rooms.len(), 1); - if !update_summary.rooms.is_empty() { - break; - } + // Let's fetch the room ID then. + let room_id = update_summary.rooms[0].clone(); + + // Let's fetch the room ID from the list too. + assert_matches!(list.rooms_list().get(0), Some(RoomListEntry::Filled(same_room_id)) => { + assert_eq!(same_room_id, &room_id); + }); + + room_id + }; + + // Join a room and send 20 messages. + { + // Join the room. + let room = client.get_joined_room(&room_id).context("Failed to join room `{room_id}`")?; + + // In this room, let's send 20 messages! + for nth in 0..20 { + let message = RoomMessageEventContent::text_plain(format!("Message #{nth}")); + + room.send(message, None).await?; + } + + // Wait on the server to receive all the messages. + tokio::time::sleep(Duration::from_secs(1)).await; + } + + let sync = sync_builder + .clone() + .add_list( + SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::Selective) + .name("visible_rooms_list") + .add_range(0u32, 1) + .timeline_limit(1u32) + .build()?, + ) + .build() + .await?; + + // Get the sync stream. + let stream = sync.stream(); + pin_mut!(stream); + + // Get the list. + let list = sync.list("visible_rooms_list").context("list `visible_rooms_list` isn't found")?; + + let mut all_event_ids = Vec::new(); + + // Sync to receive a message with a `timeline_limit` set to 1. + let (room, _timeline, mut timeline_stream) = { + let mut update_summary; + + loop { + // Wait for a response. + update_summary = stream + .next() + .await + .context("No update summary found, loop ended unsuccessfully")??; + + if !update_summary.rooms.is_empty() { + break; } + } - // We see that one room has received an update, and it's our room! 
- assert_eq!(update_summary.rooms.len(), 1); - assert_eq!(room_id, update_summary.rooms[0]); + // We see that one room has received an update, and it's our room! + assert_eq!(update_summary.rooms.len(), 1); + assert_eq!(room_id, update_summary.rooms[0]); - // OK, now let's read the timeline! - let room = sync.get_room(&room_id).expect("Failed to get the room"); + // OK, now let's read the timeline! + let room = sync.get_room(&room_id).expect("Failed to get the room"); - // Test the `Timeline`. - let timeline = room.timeline().await.unwrap(); - let (timeline_items, timeline_stream) = timeline.subscribe().await; + // Test the `Timeline`. + let timeline = room.timeline().await.unwrap(); + let (timeline_items, timeline_stream) = timeline.subscribe().await; - // First timeline item. - assert_matches!(timeline_items[0].as_virtual(), Some(_)); + // First timeline item. + assert_matches!(timeline_items[0].as_virtual(), Some(_)); - // Second timeline item. - let latest_remote_event = assert_matches!( - timeline_items[1].as_event(), - Some(EventTimelineItem::Remote(remote_event)) => remote_event - ); - all_event_ids.push(latest_remote_event.event_id().to_owned()); + // Second timeline item. + let latest_remote_event = assert_matches!( + timeline_items[1].as_event(), + Some(EventTimelineItem::Remote(remote_event)) => remote_event + ); + all_event_ids.push(latest_remote_event.event_id().to_owned()); - // Test the room to see the last event. - assert_matches!(room.latest_event().await, Some(EventTimelineItem::Remote(remote_event)) => { - assert_eq!(remote_event.event_id(), latest_remote_event.event_id(), "Unexpected latest event"); - assert_eq!(remote_event.content().as_message().unwrap().body(), "Message #19"); - }); + // Test the room to see the last event. 
+ assert_matches!(room.latest_event().await, Some(EventTimelineItem::Remote(remote_event)) => { + assert_eq!(remote_event.event_id(), latest_remote_event.event_id(), "Unexpected latest event"); + assert_eq!(remote_event.content().as_message().unwrap().body(), "Message #19"); + }); - (room, timeline, timeline_stream) - }; + (room, timeline, timeline_stream) + }; - // Sync to receive messages with a `timeline_limit` set to 20. - { - Observable::set(&mut list.timeline_limit.write().unwrap(), Some(uint!(20))); + // Sync to receive messages with a `timeline_limit` set to 20. + { + Observable::set(&mut list.timeline_limit.write().unwrap(), Some(uint!(20))); - let mut update_summary; + let mut update_summary; - loop { - // Wait for a response. - update_summary = stream - .next() - .await - .context("No update summary found, loop ended unsuccessfully")??; + loop { + // Wait for a response. + update_summary = stream + .next() + .await + .context("No update summary found, loop ended unsuccessfully")??; - if !update_summary.rooms.is_empty() { - break; - } + if !update_summary.rooms.is_empty() { + break; } + } - // We see that one room has received an update, and it's our room! - assert_eq!(update_summary.rooms.len(), 1); - assert_eq!(room_id, update_summary.rooms[0]); + // We see that one room has received an update, and it's our room! + assert_eq!(update_summary.rooms.len(), 1); + assert_eq!(room_id, update_summary.rooms[0]); - // Let's fetch the room ID from the list too. - assert_matches!(list.rooms_list().get(0), Some(RoomListEntry::Filled(same_room_id)) => { - assert_eq!(same_room_id, &room_id); - }); + // Let's fetch the room ID from the list too. + assert_matches!(list.rooms_list().get(0), Some(RoomListEntry::Filled(same_room_id)) => { + assert_eq!(same_room_id, &room_id); + }); - // Test the `Timeline`. + // Test the `Timeline`. - // The first 19th items are `VectorDiff::PushBack`. 
- for nth in 0..19 { - assert_matches!(timeline_stream.next().await, Some(VectorDiff::PushBack { value }) => { - let remote_event = assert_matches!( - value.as_event(), - Some(EventTimelineItem::Remote(remote_event)) => remote_event - ); - - // Check messages arrived in the correct order. - assert_eq!( - remote_event.content().as_message().expect("Received event is not a message").body(), - format!("Message #{nth}"), - ); - - all_event_ids.push(remote_event.event_id().to_owned()); - }); - } - - // The 20th item is a `VectorDiff::Remove`, i.e. the first message is removed. - assert_matches!(timeline_stream.next().await, Some(VectorDiff::Remove { index }) => { - // Index 0 is for day divider. So our first event is at index 1. - assert_eq!(index, 1); - }); - - // And now, the initial message is pushed at the bottom, so the 21th item is a - // `VectorDiff::PushBack`. - let latest_remote_event = assert_matches!(timeline_stream.next().await, Some(VectorDiff::PushBack { value }) => { + // The first 19th items are `VectorDiff::PushBack`. + for nth in 0..19 { + assert_matches!(timeline_stream.next().await, Some(VectorDiff::PushBack { value }) => { let remote_event = assert_matches!( value.as_event(), Some(EventTimelineItem::Remote(remote_event)) => remote_event ); - assert_eq!(remote_event.content().as_message().unwrap().body(), "Message #19"); - assert_eq!(remote_event.event_id(), all_event_ids[0]); - remote_event.clone() + // Check messages arrived in the correct order. + assert_eq!( + remote_event.content().as_message().expect("Received event is not a message").body(), + format!("Message #{nth}"), + ); + + all_event_ids.push(remote_event.event_id().to_owned()); }); - - // Test the room to see the last event. 
- assert_matches!(room.latest_event().await, Some(EventTimelineItem::Remote(remote_event)) => { - assert_eq!(remote_event.content().as_message().unwrap().body(), "Message #19"); - assert_eq!(remote_event.event_id(), latest_remote_event.event_id(), "Unexpected latest event"); - }); - - // Ensure there is no event ID duplication. - { - let mut dedup_event_ids = all_event_ids.clone(); - dedup_event_ids.sort(); - dedup_event_ids.dedup(); - - assert_eq!(dedup_event_ids.len(), all_event_ids.len(), "Found duplicated event ID"); - } } - Ok(()) + // The 20th item is a `VectorDiff::Remove`, i.e. the first message is removed. + assert_matches!(timeline_stream.next().await, Some(VectorDiff::Remove { index }) => { + // Index 0 is for day divider. So our first event is at index 1. + assert_eq!(index, 1); + }); + + // And now, the initial message is pushed at the bottom, so the 21th item is a + // `VectorDiff::PushBack`. + let latest_remote_event = assert_matches!(timeline_stream.next().await, Some(VectorDiff::PushBack { value }) => { + let remote_event = assert_matches!( + value.as_event(), + Some(EventTimelineItem::Remote(remote_event)) => remote_event + ); + assert_eq!(remote_event.content().as_message().unwrap().body(), "Message #19"); + assert_eq!(remote_event.event_id(), all_event_ids[0]); + + remote_event.clone() + }); + + // Test the room to see the last event. + assert_matches!(room.latest_event().await, Some(EventTimelineItem::Remote(remote_event)) => { + assert_eq!(remote_event.content().as_message().unwrap().body(), "Message #19"); + assert_eq!(remote_event.event_id(), latest_remote_event.event_id(), "Unexpected latest event"); + }); + + // Ensure there is no event ID duplication. 
+ { + let mut dedup_event_ids = all_event_ids.clone(); + dedup_event_ids.sort(); + dedup_event_ids.dedup(); + + assert_eq!(dedup_event_ids.len(), all_event_ids.len(), "Found duplicated event ID"); + } } - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn adding_list_later() -> anyhow::Result<()> { - let list_name_1 = "sliding1"; - let list_name_2 = "sliding2"; - let list_name_3 = "sliding3"; + Ok(()) +} - let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?; - let build_list = |name| { - SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::Selective) - .set_range(0u32, 10u32) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name(name) - .build() - }; - let sync_proxy = sync_proxy_builder - .add_list(build_list(list_name_1)?) - .add_list(build_list(list_name_2)?) +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn adding_list_later() -> anyhow::Result<()> { + let list_name_1 = "sliding1"; + let list_name_2 = "sliding2"; + let list_name_3 = "sliding3"; + + let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?; + let build_list = |name| { + SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::Selective) + .set_range(0u32, 10u32) + .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) + .name(name) .build() - .await?; - let list1 = sync_proxy.list(list_name_1).context("but we just added that list!")?; - let _list2 = sync_proxy.list(list_name_2).context("but we just added that list!")?; + }; + let sync_proxy = sync_proxy_builder + .add_list(build_list(list_name_1)?) + .add_list(build_list(list_name_2)?) 
+ .build() + .await?; + let list1 = sync_proxy.list(list_name_1).context("but we just added that list!")?; + let _list2 = sync_proxy.list(list_name_2).context("but we just added that list!")?; - assert!(sync_proxy.list(list_name_3).is_none()); + assert!(sync_proxy.list(list_name_3).is_none()); - let stream = sync_proxy.stream(); - pin_mut!(stream); - let room_summary = - stream.next().await.context("No room summary found, loop ended unsuccessfully")?; + let stream = sync_proxy.stream(); + pin_mut!(stream); + let room_summary = + stream.next().await.context("No room summary found, loop ended unsuccessfully")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + assert_eq!(summary.lists, [list_name_1, list_name_2]); + + assert!(sync_proxy.add_list(build_list(list_name_3)?).is_none()); + + // we need to restart the stream after every list listing update + let stream = sync_proxy.stream(); + pin_mut!(stream); + + let mut saw_update = false; + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let summary = room_summary?; // we only heard about the ones we had asked for - assert_eq!(summary.lists, [list_name_1, list_name_2]); - - assert!(sync_proxy.add_list(build_list(list_name_3)?).is_none()); - - // we need to restart the stream after every list listing update - let stream = sync_proxy.stream(); - pin_mut!(stream); - - let mut saw_update = false; - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - if !summary.lists.is_empty() { - // only if we saw an update come through - assert_eq!(summary.lists, [list_name_3]); - // we didn't update the other lists, so only no 2 should se an update - saw_update = true; - break; - } + if !summary.lists.is_empty() { + // only if we saw an update come through + assert_eq!(summary.lists, [list_name_3]); + // we didn't update 
the other lists, so only no 2 should se an update + saw_update = true; + break; } - - assert!(saw_update, "We didn't see the update come through the pipe"); - - // and let's update the order of all lists again - let room_id = assert_matches!(list1.rooms_list().get(4), Some(RoomListEntry::Filled(room_id)) => room_id.clone()); - - let room = client.get_joined_room(&room_id).context("No joined room {room_id}")?; - - let content = RoomMessageEventContent::text_plain("Hello world"); - - room.send(content, None).await?; // this should put our room up to the most recent - - let mut saw_update = false; - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - if !summary.lists.is_empty() { - // only if we saw an update come through - assert_eq!(summary.lists, [list_name_1, list_name_2, list_name_3,]); - // notice that our list 2 is now the last list, but all have seen updates - saw_update = true; - break; - } - } - - assert!(saw_update, "We didn't see the update come through the pipe"); - - Ok(()) } - // index-based lists don't support removing lists. Leaving this test for an API - // update later. - // - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn live_lists() -> anyhow::Result<()> { - let list_name_1 = "sliding1"; - let list_name_2 = "sliding2"; - let list_name_3 = "sliding3"; + assert!(saw_update, "We didn't see the update come through the pipe"); - let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?; - let build_list = |name| { - SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::Selective) - .set_range(0u32, 10u32) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name(name) - .build() - }; - let sync_proxy = sync_proxy_builder - .add_list(build_list(list_name_1)?) - .add_list(build_list(list_name_2)?) - .add_list(build_list(list_name_3)?) 
+ // and let's update the order of all lists again + let room_id = assert_matches!(list1.rooms_list().get(4), Some(RoomListEntry::Filled(room_id)) => room_id.clone()); + + let room = client.get_joined_room(&room_id).context("No joined room {room_id}")?; + + let content = RoomMessageEventContent::text_plain("Hello world"); + + room.send(content, None).await?; // this should put our room up to the most recent + + let mut saw_update = false; + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + if !summary.lists.is_empty() { + // only if we saw an update come through + assert_eq!(summary.lists, [list_name_1, list_name_2, list_name_3,]); + // notice that our list 2 is now the last list, but all have seen updates + saw_update = true; + break; + } + } + + assert!(saw_update, "We didn't see the update come through the pipe"); + + Ok(()) +} + +// index-based lists don't support removing lists. Leaving this test for an API +// update later. +// +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn live_lists() -> anyhow::Result<()> { + let list_name_1 = "sliding1"; + let list_name_2 = "sliding2"; + let list_name_3 = "sliding3"; + + let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?; + let build_list = |name| { + SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::Selective) + .set_range(0u32, 10u32) + .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) + .name(name) .build() - .await?; - let Some(list1 )= sync_proxy.list(list_name_1) else { + }; + let sync_proxy = sync_proxy_builder + .add_list(build_list(list_name_1)?) + .add_list(build_list(list_name_2)?) + .add_list(build_list(list_name_3)?) 
+ .build() + .await?; + let Some(list1 )= sync_proxy.list(list_name_1) else { bail!("but we just added that list!"); }; - let Some(_list2 )= sync_proxy.list(list_name_2) else { + let Some(_list2 )= sync_proxy.list(list_name_2) else { bail!("but we just added that list!"); }; - let Some(_list3 )= sync_proxy.list(list_name_3) else { + let Some(_list3 )= sync_proxy.list(list_name_3) else { bail!("but we just added that list!"); }; - let stream = sync_proxy.stream(); - pin_mut!(stream); - let Some(room_summary ) = stream.next().await else { + let stream = sync_proxy.stream(); + pin_mut!(stream); + let Some(room_summary ) = stream.next().await else { bail!("No room summary found, loop ended unsuccessfully"); }; - let summary = room_summary?; - // we only heard about the ones we had asked for - assert_eq!(summary.lists, [list_name_1, list_name_2, list_name_3]); + let summary = room_summary?; + // we only heard about the ones we had asked for + assert_eq!(summary.lists, [list_name_1, list_name_2, list_name_3]); - let Some(list_2) = sync_proxy.pop_list(&list_name_2.to_owned()) else { + let Some(list_2) = sync_proxy.pop_list(&list_name_2.to_owned()) else { bail!("Room exists"); }; - // we need to restart the stream after every list listing update - let stream = sync_proxy.stream(); - pin_mut!(stream); + // we need to restart the stream after every list listing update + let stream = sync_proxy.stream(); + pin_mut!(stream); - // Let's trigger an update by sending a message to room pos=3, making it move to - // pos 0 + // Let's trigger an update by sending a message to room pos=3, making it move to + // pos 0 - let room_id = assert_matches!(list1.rooms_list().get(3), Some(RoomListEntry::Filled(room_id)) => room_id.clone()); + let room_id = assert_matches!(list1.rooms_list().get(3), Some(RoomListEntry::Filled(room_id)) => room_id.clone()); - let Some(room) = client.get_joined_room(&room_id) else { + let Some(room) = client.get_joined_room(&room_id) else { bail!("No joined room 
{room_id}"); }; - let content = RoomMessageEventContent::text_plain("Hello world"); + let content = RoomMessageEventContent::text_plain("Hello world"); - room.send(content, None).await?; // this should put our room up to the most recent + room.send(content, None).await?; // this should put our room up to the most recent - let mut saw_update = false; - for _n in 0..2 { - let Some(room_summary ) = stream.next().await else { + let mut saw_update = false; + for _n in 0..2 { + let Some(room_summary ) = stream.next().await else { bail!("sync has closed unexpectedly"); }; - let summary = room_summary?; - // we only heard about the ones we had asked for - if !summary.lists.is_empty() { - // only if we saw an update come through - assert_eq!(summary.lists, [list_name_1, list_name_3]); - saw_update = true; - break; - } - } - - assert!(saw_update, "We didn't see the update come through the pipe"); - - assert!(sync_proxy.add_list(list_2).is_none()); - - // we need to restart the stream after every list listing update - let stream = sync_proxy.stream(); - pin_mut!(stream); - - // and let's update the order of all lists again - let room_id = assert_matches!(list1.rooms_list().get(4), Some(RoomListEntry::Filled(room_id)) => room_id.clone()); - - let Some(room) = client.get_joined_room(&room_id) else { - bail!("No joined room {room_id}"); - }; - - let content = RoomMessageEventContent::text_plain("Hello world"); - - room.send(content, None).await?; // this should put our room up to the most recent - - let mut saw_update = false; - for _n in 0..2 { - let Some(room_summary ) = stream.next().await else { - bail!("sync has closed unexpectedly"); - }; - let summary = room_summary?; - // we only heard about the ones we had asked for - if !summary.lists.is_empty() { - // only if we saw an update come through - assert_eq!(summary.lists, [list_name_1, list_name_2, list_name_3]); // all lists are visible again - saw_update = true; - break; - } - } - - assert!(saw_update, "We didn't see the 
update come through the pipe"); - - Ok(()) - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn list_goes_live() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(21).await?; - let sliding_window_list = SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::Selective) - .set_range(0u32, 10u32) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name("sliding") - .build()?; - - let full = SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::GrowingFullSync) - .full_sync_batch_size(10u32) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name("full") - .build()?; - let sync_proxy = - sync_proxy_builder.add_list(sliding_window_list).add_list(full).build().await?; - - let list = sync_proxy.list("sliding").context("but we just added that list!")?; - let full_list = sync_proxy.list("full").context("but we just added that list!")?; - assert_eq!(list.state(), SlidingSyncState::NotLoaded, "list isn't cold"); - assert_eq!(full_list.state(), SlidingSyncState::NotLoaded, "full isn't cold"); - - let stream = sync_proxy.stream(); - pin_mut!(stream); - - // Exactly one poll! - // Ranges are 0..=9 for selective list, and 0..=9 for growing list. - let room_summary = - stream.next().await.context("No room summary found, loop ended unsuccessfully")??; - - // we only heard about the ones we had asked for - assert_eq!(room_summary.rooms.len(), 11); - assert_eq!(list.state(), SlidingSyncState::FullyLoaded, "list isn't live"); - assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); - - // Another poll! - // Ranges are 0..=10 for selective list, and 0..=19 for growing list. 
- let _room_summary = - stream.next().await.context("No room summary found, loop ended unsuccessfully")??; - - let rooms_list = full_list.rooms_list::(); - - assert_eq!( - rooms_list, - repeat(RoomListEntryEasy::Filled) - .take(20) - .chain(once(RoomListEntryEasy::Empty)) - .collect::>() - ); - assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); - - // One last poll, and we should get all rooms loaded. - let _room_summary = - stream.next().await.context("No room summary found, loop ended unsecessfully")??; - - let rooms_list = full_list.rooms_list::(); - - assert_eq!(rooms_list, repeat(RoomListEntryEasy::Filled).take(21).collect::>()); - assert_eq!(full_list.state(), SlidingSyncState::FullyLoaded, "full isn't fully loaded"); - - Ok(()) - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn resizing_sliding_window() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?; - let sliding_window_list = SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::Selective) - .set_range(0u32, 10u32) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name("sliding") - .build()?; - let sync_proxy = sync_proxy_builder.add_list(sliding_window_list).build().await?; - let list = sync_proxy.list("sliding").context("but we just added that list!")?; - let stream = sync_proxy.stream(); - pin_mut!(stream); - let room_summary = - stream.next().await.context("No room summary found, loop ended unsuccessfully")?; let summary = room_summary?; // we only heard about the ones we had asked for - assert_eq!(summary.rooms.len(), 11); - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Filled) - .take(11) - .chain(repeat(RoomListEntryEasy::Empty).take(9)) - .collect::>() - ); - - let _signal = list.rooms_list_stream(); - - // let's move the window - - list.set_range(1, 10); - // Ensure 0-0 invalidation ranges work. 
- - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - if summary.lists.iter().any(|s| s == "sliding") { - break; - } + if !summary.lists.is_empty() { + // only if we saw an update come through + assert_eq!(summary.lists, [list_name_1, list_name_3]); + saw_update = true; + break; } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Invalid) - .take(1) - .chain(repeat(RoomListEntryEasy::Filled).take(10)) - .chain(repeat(RoomListEntryEasy::Empty).take(9)) - .collect::>() - ); - - list.set_range(5, 10); - - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - if summary.lists.iter().any(|s| s == "sliding") { - break; - } - } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Invalid) - .take(5) - .chain(repeat(RoomListEntryEasy::Filled).take(6)) - .chain(repeat(RoomListEntryEasy::Empty).take(9)) - .collect::>() - ); - - // let's move the window - - list.set_range(5, 15); - - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - if summary.lists.iter().any(|s| s == "sliding") { - break; - } - } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Invalid) - .take(5) - .chain(repeat(RoomListEntryEasy::Filled).take(11)) - .chain(repeat(RoomListEntryEasy::Empty).take(4)) - .collect::>() - ); - Ok(()) } - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn moving_out_of_sliding_window() -> anyhow::Result<()> { - let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?; + 
assert!(saw_update, "We didn't see the update come through the pipe"); + + assert!(sync_proxy.add_list(list_2).is_none()); + + // we need to restart the stream after every list listing update + let stream = sync_proxy.stream(); + pin_mut!(stream); + + // and let's update the order of all lists again + let room_id = assert_matches!(list1.rooms_list().get(4), Some(RoomListEntry::Filled(room_id)) => room_id.clone()); + + let Some(room) = client.get_joined_room(&room_id) else { + bail!("No joined room {room_id}"); + }; + + let content = RoomMessageEventContent::text_plain("Hello world"); + + room.send(content, None).await?; // this should put our room up to the most recent + + let mut saw_update = false; + for _n in 0..2 { + let Some(room_summary ) = stream.next().await else { + bail!("sync has closed unexpectedly"); + }; + let summary = room_summary?; + // we only heard about the ones we had asked for + if !summary.lists.is_empty() { + // only if we saw an update come through + assert_eq!(summary.lists, [list_name_1, list_name_2, list_name_3]); // all lists are visible again + saw_update = true; + break; + } + } + + assert!(saw_update, "We didn't see the update come through the pipe"); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn list_goes_live() -> anyhow::Result<()> { + let (_client, sync_proxy_builder) = random_setup_with_rooms(21).await?; + let sliding_window_list = SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::Selective) + .set_range(0u32, 10u32) + .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) + .name("sliding") + .build()?; + + let full = SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::GrowingFullSync) + .full_sync_batch_size(10u32) + .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) + .name("full") + .build()?; + let sync_proxy = + sync_proxy_builder.add_list(sliding_window_list).add_list(full).build().await?; + + let list = sync_proxy.list("sliding").context("but we just added 
that list!")?; + let full_list = sync_proxy.list("full").context("but we just added that list!")?; + assert_eq!(list.state(), SlidingSyncState::NotLoaded, "list isn't cold"); + assert_eq!(full_list.state(), SlidingSyncState::NotLoaded, "full isn't cold"); + + let stream = sync_proxy.stream(); + pin_mut!(stream); + + // Exactly one poll! + // Ranges are 0..=9 for selective list, and 0..=9 for growing list. + let room_summary = + stream.next().await.context("No room summary found, loop ended unsuccessfully")??; + + // we only heard about the ones we had asked for + assert_eq!(room_summary.rooms.len(), 11); + assert_eq!(list.state(), SlidingSyncState::FullyLoaded, "list isn't live"); + assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); + + // Another poll! + // Ranges are 0..=10 for selective list, and 0..=19 for growing list. + let _room_summary = + stream.next().await.context("No room summary found, loop ended unsuccessfully")??; + + let rooms_list = full_list.rooms_list::(); + + assert_eq!( + rooms_list, + repeat(RoomListEntryEasy::Filled) + .take(20) + .chain(once(RoomListEntryEasy::Empty)) + .collect::>() + ); + assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); + + // One last poll, and we should get all rooms loaded. 
+ let _room_summary =
+ stream.next().await.context("No room summary found, loop ended unsuccessfully")??;
+
+ let rooms_list = full_list.rooms_list::();
+
+ assert_eq!(rooms_list, repeat(RoomListEntryEasy::Filled).take(21).collect::>());
+ assert_eq!(full_list.state(), SlidingSyncState::FullyLoaded, "full isn't fully loaded");
+
+ Ok(())
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+async fn resizing_sliding_window() -> anyhow::Result<()> {
+ let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?;
+ let sliding_window_list = SlidingSyncList::builder()
+ .sync_mode(SlidingSyncMode::Selective)
+ .set_range(0u32, 10u32)
+ .sort(vec!["by_recency".to_owned(), "by_name".to_owned()])
+ .name("sliding")
+ .build()?;
+ let sync_proxy = sync_proxy_builder.add_list(sliding_window_list).build().await?;
+ let list = sync_proxy.list("sliding").context("but we just added that list!")?;
+ let stream = sync_proxy.stream();
+ pin_mut!(stream);
+ let room_summary =
+ stream.next().await.context("No room summary found, loop ended unsuccessfully")?;
+ let summary = room_summary?;
+ // we only heard about the ones we had asked for
+ assert_eq!(summary.rooms.len(), 11);
+
+ let collection_simple = list.rooms_list::();
+
+ assert_eq!(
+ collection_simple,
+ repeat(RoomListEntryEasy::Filled)
+ .take(11)
+ .chain(repeat(RoomListEntryEasy::Empty).take(9))
+ .collect::>()
+ );
+
+ let _signal = list.rooms_list_stream();
+
+ // let's move the window
+
+ list.set_range(1, 10);
+ // Ensure 0-0 invalidation ranges work.
+ + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + if summary.lists.iter().any(|s| s == "sliding") { + break; + } + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Invalid) + .take(1) + .chain(repeat(RoomListEntryEasy::Filled).take(10)) + .chain(repeat(RoomListEntryEasy::Empty).take(9)) + .collect::>() + ); + + list.set_range(5, 10); + + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + if summary.lists.iter().any(|s| s == "sliding") { + break; + } + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Invalid) + .take(5) + .chain(repeat(RoomListEntryEasy::Filled).take(6)) + .chain(repeat(RoomListEntryEasy::Empty).take(9)) + .collect::>() + ); + + // let's move the window + + list.set_range(5, 15); + + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + if summary.lists.iter().any(|s| s == "sliding") { + break; + } + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Invalid) + .take(5) + .chain(repeat(RoomListEntryEasy::Filled).take(11)) + .chain(repeat(RoomListEntryEasy::Empty).take(4)) + .collect::>() + ); + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn moving_out_of_sliding_window() -> anyhow::Result<()> { + let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?; + let sliding_window_list = SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::Selective) + .set_range(1u32, 10u32) + .sort(vec!["by_recency".to_owned(), 
"by_name".to_owned()]) + .name("sliding") + .build()?; + let sync_proxy = sync_proxy_builder.add_list(sliding_window_list).build().await?; + let list = sync_proxy.list("sliding").context("but we just added that list!")?; + let stream = sync_proxy.stream(); + pin_mut!(stream); + let room_summary = + stream.next().await.context("No room summary found, loop ended unsuccessfully")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + assert_eq!(summary.rooms.len(), 10); + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Empty) + .take(1) + .chain(repeat(RoomListEntryEasy::Filled).take(10)) + .chain(repeat(RoomListEntryEasy::Empty).take(9)) + .collect::>() + ); + + let _signal = list.rooms_list_stream(); + + // let's move the window + + list.set_range(0, 10); + + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + if summary.lists.iter().any(|s| s == "sliding") { + break; + } + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Filled) + .take(11) + .chain(repeat(RoomListEntryEasy::Empty).take(9)) + .collect::>() + ); + + // let's move the window again + + list.set_range(2, 12); + + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + if summary.lists.iter().any(|s| s == "sliding") { + break; + } + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Invalid) + .take(2) + .chain(repeat(RoomListEntryEasy::Filled).take(11)) + .chain(repeat(RoomListEntryEasy::Empty).take(7)) + .collect::>() + ); + + // now we "move" the room of pos 3 to pos 0; + // this is a bordering case + + let room_id = 
assert_matches!(list.rooms_list().get(3), Some(RoomListEntry::Filled(room_id)) => room_id.clone()); + + let room = client.get_joined_room(&room_id).context("No joined room {room_id}")?; + + let content = RoomMessageEventContent::text_plain("Hello world"); + + room.send(content, None).await?; // this should put our room up to the most recent + + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + if summary.lists.iter().any(|s| s == "sliding") { + break; + } + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Invalid) + .take(2) + .chain(repeat(RoomListEntryEasy::Filled).take(11)) + .chain(repeat(RoomListEntryEasy::Empty).take(7)) + .collect::>() + ); + + // items has moved, thus we shouldn't find it where it was + assert!(list.rooms_list::().get(3).unwrap().as_room_id().unwrap() != room_id); + + // let's move the window again + + list.set_range(0, 10); + + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + if summary.lists.iter().any(|s| s == "sliding") { + break; + } + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Filled) + .take(11) + .chain(repeat(RoomListEntryEasy::Invalid).take(2)) + .chain(repeat(RoomListEntryEasy::Empty).take(7)) + .collect::>() + ); + + // and check that our room move has been accepted properly, too. 
+ assert_eq!(list.rooms_list::().get(0).unwrap().as_room_id().unwrap(), &room_id); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore = "this is a slow test about cold cache recovery"] +async fn fast_unfreeze() -> anyhow::Result<()> { + let (_client, sync_proxy_builder) = random_setup_with_rooms(500).await?; + print!("setup took its time"); + let build_lists = || { let sliding_window_list = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::Selective) .set_range(1u32, 10u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("sliding") .build()?; - let sync_proxy = sync_proxy_builder.add_list(sliding_window_list).build().await?; - let list = sync_proxy.list("sliding").context("but we just added that list!")?; - let stream = sync_proxy.stream(); - pin_mut!(stream); - let room_summary = - stream.next().await.context("No room summary found, loop ended unsuccessfully")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - assert_eq!(summary.rooms.len(), 10); - let collection_simple = list.rooms_list::(); + let growing_sync = SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::GrowingFullSync) + .full_sync_maximum_number_of_rooms_to_fetch(100) + .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) + .name("growing") + .build()?; + anyhow::Ok((sliding_window_list, growing_sync)) + }; - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Empty) - .take(1) - .chain(repeat(RoomListEntryEasy::Filled).take(10)) - .chain(repeat(RoomListEntryEasy::Empty).take(9)) - .collect::>() - ); + println!("starting the sliding sync setup"); - let _signal = list.rooms_list_stream(); - - // let's move the window - - list.set_range(0, 10); - - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - if summary.lists.iter().any(|s| s == "sliding") { - break; - } - 
} - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Filled) - .take(11) - .chain(repeat(RoomListEntryEasy::Empty).take(9)) - .collect::>() - ); - - // let's move the window again - - list.set_range(2, 12); - - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - if summary.lists.iter().any(|s| s == "sliding") { - break; - } - } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Invalid) - .take(2) - .chain(repeat(RoomListEntryEasy::Filled).take(11)) - .chain(repeat(RoomListEntryEasy::Empty).take(7)) - .collect::>() - ); - - // now we "move" the room of pos 3 to pos 0; - // this is a bordering case - - let room_id = assert_matches!(list.rooms_list().get(3), Some(RoomListEntry::Filled(room_id)) => room_id.clone()); - - let room = client.get_joined_room(&room_id).context("No joined room {room_id}")?; - - let content = RoomMessageEventContent::text_plain("Hello world"); - - room.send(content, None).await?; // this should put our room up to the most recent - - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - if summary.lists.iter().any(|s| s == "sliding") { - break; - } - } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Invalid) - .take(2) - .chain(repeat(RoomListEntryEasy::Filled).take(11)) - .chain(repeat(RoomListEntryEasy::Empty).take(7)) - .collect::>() - ); - - // items has moved, thus we shouldn't find it where it was - assert!( - list.rooms_list::().get(3).unwrap().as_room_id().unwrap() != room_id - ); - - // let's move the window again - - list.set_range(0, 10); - - for _n in 0..2 { - let room_summary = 
stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - if summary.lists.iter().any(|s| s == "sliding") { - break; - } - } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Filled) - .take(11) - .chain(repeat(RoomListEntryEasy::Invalid).take(2)) - .chain(repeat(RoomListEntryEasy::Empty).take(7)) - .collect::>() - ); - - // and check that our room move has been accepted properly, too. - assert_eq!( - list.rooms_list::().get(0).unwrap().as_room_id().unwrap(), - &room_id - ); - - Ok(()) - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - #[ignore = "this is a slow test about cold cache recovery"] - async fn fast_unfreeze() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(500).await?; - print!("setup took its time"); - let build_lists = || { - let sliding_window_list = SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::Selective) - .set_range(1u32, 10u32) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name("sliding") - .build()?; - let growing_sync = SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::GrowingFullSync) - .full_sync_maximum_number_of_rooms_to_fetch(100) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name("growing") - .build()?; - anyhow::Ok((sliding_window_list, growing_sync)) - }; - - println!("starting the sliding sync setup"); - - { - // SETUP - let (sliding_window_list, growing_sync) = build_lists()?; - let sync_proxy = sync_proxy_builder - .clone() - .cold_cache("sliding_sync") - .add_list(sliding_window_list) - .add_list(growing_sync) - .build() - .await?; - let growing_sync = - sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. 
- let stream = sync_proxy.stream(); - pin_mut!(stream); - while growing_sync.state() != SlidingSyncState::FullyLoaded { - // we wait until growing sync is all done, too - println!("awaiting"); - let _room_summary = stream - .next() - .await - .context("No room summary found, loop ended unsuccessfully")??; - } - } - - println!("starting from cold"); - // recover from frozen state. + { + // SETUP let (sliding_window_list, growing_sync) = build_lists()?; - // we recover only the window. this should be quick! - - let start = Instant::now(); - let _sync_proxy = sync_proxy_builder + let sync_proxy = sync_proxy_builder .clone() .cold_cache("sliding_sync") .add_list(sliding_window_list) .add_list(growing_sync) .build() .await?; - let duration = start.elapsed(); - - assert!(duration < Duration::from_micros(10), "cold recovery was too slow: {duration:?}"); - - Ok(()) - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn growing_sync_keeps_going() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?; - let growing_sync = SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::GrowingFullSync) - .full_sync_batch_size(5u32) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name("growing") - .build()?; - - let sync_proxy = sync_proxy_builder.clone().add_list(growing_sync).build().await?; - let list = sync_proxy.list("growing").context("but we just added that list!")?; - + let growing_sync = sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. let stream = sync_proxy.stream(); pin_mut!(stream); - - // we have 20 and catch up in batches of 5. so let's get over to 15. 
- - for _ in 0..=2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let _summary = room_summary?; - } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Filled) - .take(15) - .chain(repeat(RoomListEntryEasy::Empty).take(5)) - .collect::>() - ); - - // we have 20 and catch up in batches of 5. let's go one more, see it grows. - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let _summary = room_summary?; - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Filled).take(20).collect::>() - ); - - Ok(()) - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn growing_sync_keeps_going_after_restart() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?; - let growing_sync = SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::GrowingFullSync) - .full_sync_batch_size(5u32) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name("growing") - .build()?; - - let sync_proxy = sync_proxy_builder.clone().add_list(growing_sync).build().await?; - let list = sync_proxy.list("growing").context("but we just added that list!")?; - - let stream = sync_proxy.stream(); - pin_mut!(stream); - - // we have 20 and catch up in batches of 5. so let's get over to 15. - - for _ in 0..=2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let _summary = room_summary?; - } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { - acc + 1 - } else { - acc - }), - 15 - ); - - // we have 20 and catch up in batches of 5. Let's make sure the restart - // continues. 
- - let stream = sync_proxy.stream(); - pin_mut!(stream); - - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let _summary = room_summary?; - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { - acc + 1 - } else { - acc - }), - 20 - ); - - Ok(()) - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn continue_on_reset() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(10).await?; - print!("setup took its time"); - let growing_sync = SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::GrowingFullSync) - .full_sync_batch_size(5u32) - .full_sync_maximum_number_of_rooms_to_fetch(100) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name("growing") - .build()?; - - println!("starting the sliding sync setup"); - let sync_proxy = sync_proxy_builder - .clone() - .cold_cache("sliding_sync") - .add_list(growing_sync) - .build() - .await?; - let list = sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. - let stream = sync_proxy.stream(); - pin_mut!(stream); - - for _ in 0..=2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - - if summary.lists.iter().any(|s| s == "growing") { - break; - } - } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { - acc + 1 - } else { - acc - }), - 5 - ); - - // force the pos to be invalid and thus this being reset internally - sync_proxy.set_pos("100".to_owned()); - let mut error_seen = false; - - for _ in 0..2 { - let summary = match stream.next().await { - Some(Ok(e)) => e, - Some(Err(e)) => { - match e.client_api_error_kind() { - Some(RumaError::UnknownPos) => { - // we expect this to come through. 
- error_seen = true; - continue; - } - _ => Err(e)?, - } - } - None => anyhow::bail!("Stream ended unexpectedly."), - }; - - // we only heard about the ones we had asked for - if summary.lists.iter().any(|s| s == "growing") { - break; - } - } - - assert!(error_seen, "We have not seen the UnknownPos error"); - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { - acc + 1 - } else { - acc - }), - 10 - ); - - Ok(()) - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn noticing_new_rooms_in_growing() -> anyhow::Result<()> { - let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?; - print!("setup took its time"); - let growing_sync = SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::GrowingFullSync) - .full_sync_batch_size(10u32) - .full_sync_maximum_number_of_rooms_to_fetch(100) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name("growing") - .build()?; - - println!("starting the sliding sync setup"); - let sync_proxy = sync_proxy_builder - .clone() - .cold_cache("sliding_sync") - .add_list(growing_sync) - .build() - .await?; - let list = sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. - let stream = sync_proxy.stream(); - pin_mut!(stream); - while list.state() != SlidingSyncState::FullyLoaded { + while growing_sync.state() != SlidingSyncState::FullyLoaded { // we wait until growing sync is all done, too println!("awaiting"); let _room_summary = stream @@ -1133,279 +872,503 @@ mod tests { .await .context("No room summary found, loop ended unsuccessfully")??; } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { - acc + 1 - } else { - acc - }), - 20 - ); - // all found. let's add two more. 
- - make_room(&client, "one-more".to_owned()).await?; - make_room(&client, "two-more".to_owned()).await?; - - let mut seen = false; - - for _n in 0..4 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let summary = room_summary?; - // we only heard about the ones we had asked for - if summary.lists.iter().any(|s| s == "growing") - && list.maximum_number_of_rooms().unwrap_or_default() == 22 - { - if seen { - // once we saw 22, we give it another loop to catch up! - break; - } else { - seen = true; - } - } - } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { - acc + 1 - } else { - acc - }), - 22 - ); - - Ok(()) } - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn restart_room_resubscription() -> anyhow::Result<()> { - let (client, sync_proxy_builder) = random_setup_with_rooms(3).await?; + println!("starting from cold"); + // recover from frozen state. + let (sliding_window_list, growing_sync) = build_lists()?; + // we recover only the window. this should be quick! 
- let sync_proxy = sync_proxy_builder - .add_list( - SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::Selective) - .set_range(0u32, 2u32) - .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) - .name("sliding_list") - .build()?, - ) - .build() - .await?; + let start = Instant::now(); + let _sync_proxy = sync_proxy_builder + .clone() + .cold_cache("sliding_sync") + .add_list(sliding_window_list) + .add_list(growing_sync) + .build() + .await?; + let duration = start.elapsed(); - let list = sync_proxy.list("sliding_list").context("list `sliding_list` isn't found")?; + assert!(duration < Duration::from_micros(10), "cold recovery was too slow: {duration:?}"); - let stream = sync_proxy.stream(); - pin_mut!(stream); + Ok(()) +} - let room_summary = - stream.next().await.context("No room summary found, loop ended unsuccessfully")??; +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn growing_sync_keeps_going() -> anyhow::Result<()> { + let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?; + let growing_sync = SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::GrowingFullSync) + .full_sync_batch_size(5u32) + .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) + .name("growing") + .build()?; + + let sync_proxy = sync_proxy_builder.clone().add_list(growing_sync).build().await?; + let list = sync_proxy.list("growing").context("but we just added that list!")?; + + let stream = sync_proxy.stream(); + pin_mut!(stream); + + // we have 20 and catch up in batches of 5. so let's get over to 15. + + for _ in 0..=2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let _summary = room_summary?; + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Filled) + .take(15) + .chain(repeat(RoomListEntryEasy::Empty).take(5)) + .collect::>() + ); + + // we have 20 and catch up in batches of 5. let's go one more, see it grows. 
+ let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let _summary = room_summary?; + + let collection_simple = list.rooms_list::(); + + assert_eq!(collection_simple, repeat(RoomListEntryEasy::Filled).take(20).collect::>()); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn growing_sync_keeps_going_after_restart() -> anyhow::Result<()> { + let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?; + let growing_sync = SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::GrowingFullSync) + .full_sync_batch_size(5u32) + .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) + .name("growing") + .build()?; + + let sync_proxy = sync_proxy_builder.clone().add_list(growing_sync).build().await?; + let list = sync_proxy.list("growing").context("but we just added that list!")?; + + let stream = sync_proxy.stream(); + pin_mut!(stream); + + // we have 20 and catch up in batches of 5. so let's get over to 15. + + for _ in 0..=2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let _summary = room_summary?; + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { + acc + 1 + } else { + acc + }), + 15 + ); + + // we have 20 and catch up in batches of 5. Let's make sure the restart + // continues. 
+ + let stream = sync_proxy.stream(); + pin_mut!(stream); + + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let _summary = room_summary?; + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { + acc + 1 + } else { + acc + }), + 20 + ); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn continue_on_reset() -> anyhow::Result<()> { + let (_client, sync_proxy_builder) = random_setup_with_rooms(10).await?; + print!("setup took its time"); + let growing_sync = SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::GrowingFullSync) + .full_sync_batch_size(5u32) + .full_sync_maximum_number_of_rooms_to_fetch(100) + .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) + .name("growing") + .build()?; + + println!("starting the sliding sync setup"); + let sync_proxy = sync_proxy_builder + .clone() + .cold_cache("sliding_sync") + .add_list(growing_sync) + .build() + .await?; + let list = sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. + let stream = sync_proxy.stream(); + pin_mut!(stream); + + for _ in 0..=2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let summary = room_summary?; + + if summary.lists.iter().any(|s| s == "growing") { + break; + } + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { + acc + 1 + } else { + acc + }), + 5 + ); + + // force the pos to be invalid and thus this being reset internally + sync_proxy.set_pos("100".to_owned()); + let mut error_seen = false; + + for _ in 0..2 { + let summary = match stream.next().await { + Some(Ok(e)) => e, + Some(Err(e)) => { + match e.client_api_error_kind() { + Some(RumaError::UnknownPos) => { + // we expect this to come through. 
+ error_seen = true; + continue; + } + _ => Err(e)?, + } + } + None => anyhow::bail!("Stream ended unexpectedly."), + }; // we only heard about the ones we had asked for - assert_eq!(room_summary.rooms.len(), 3); - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Filled).take(3).collect::>() - ); - - let _signal = list.rooms_list_stream(); - - // let's move the window - - list.set_range(1, 2); - - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")??; - - // we only heard about the ones we had asked for - if room_summary.lists.iter().any(|s| s == "sliding_list") { - break; - } + if summary.lists.iter().any(|s| s == "growing") { + break; } - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Invalid) - .take(1) - .chain(repeat(RoomListEntryEasy::Filled).take(2)) - .collect::>() - ); - - // let's get that first entry - - let room_id = assert_matches!(list.rooms_list().get(0), Some(RoomListEntry::Invalidated(room_id)) => room_id.clone()); - - // send a message - - let room = client.get_joined_room(&room_id).context("No joined room {room_id}")?; - - let content = RoomMessageEventContent::text_plain("Hello world"); - - room.send(content, None).await?; // this should put our room up to the most recent - - // let's subscribe - - sync_proxy.subscribe(room_id.clone(), Default::default()); - - let mut room_updated = false; - - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")??; - - // we only heard about the ones we had asked for - if room_summary.rooms.iter().any(|s| s == &room_id) { - room_updated = true; - break; - } - } - - assert!(room_updated, "Room update has not been seen"); - - // force the pos to be invalid and thus this being reset internally - sync_proxy.set_pos("100".to_owned()); - - let mut error_seen = false; - let mut room_updated = false; - - 
for _n in 0..2 { - let summary = match stream.next().await { - Some(Ok(e)) => e, - Some(Err(e)) => { - match e.client_api_error_kind() { - Some(RumaError::UnknownPos) => { - // we expect this to come through. - error_seen = true; - continue; - } - _ => Err(e)?, - } - } - None => anyhow::bail!("Stream ended unexpectedly."), - }; - - // we only heard about the ones we had asked for - if summary.rooms.iter().any(|s| s == &room_id) { - room_updated = true; - break; - } - } - - assert!(error_seen, "We have not seen the UnknownPos error"); - assert!(room_updated, "Room update has not been seen"); - - // send another message - - let room = client.get_joined_room(&room_id).context("No joined room {room_id}")?; - - let content = RoomMessageEventContent::text_plain("Hello world"); - - let event_id = room.send(content, None).await?.event_id; // this should put our room up to the most recent - - // let's see for it to come down the pipe - let mut room_updated = false; - - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")??; - - // we only heard about the ones we had asked for - if room_summary.rooms.iter().any(|s| s == &room_id) { - room_updated = true; - break; - } - } - assert!(room_updated, "Room update has not been seen"); - - let sliding_sync_room = sync_proxy.get_room(&room_id).expect("Slidin Sync room not found"); - let event = sliding_sync_room.latest_event().await.expect("No even found"); - - let collection_simple = list.rooms_list::(); - - assert_eq!( - collection_simple, - repeat(RoomListEntryEasy::Invalid) - .take(1) - .chain(repeat(RoomListEntryEasy::Filled).take(2)) - .collect::>() - ); - - assert_eq!( - event.event_id().unwrap(), - event_id, - "Latest event is different than what we've sent" - ); - - Ok(()) } - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn receipts_extension_works() -> anyhow::Result<()> { - let (client, sync_proxy_builder) = random_setup_with_rooms(1).await?; - let list = 
SlidingSyncList::builder() - .sync_mode(SlidingSyncMode::Selective) - .ranges(vec![(0u32, 1u32)]) - .sort(vec!["by_recency".to_owned()]) - .name("a") - .build()?; + assert!(error_seen, "We have not seen the UnknownPos error"); - let mut config = ReceiptsConfig::default(); - config.enabled = Some(true); + let collection_simple = list.rooms_list::(); - let sync_proxy = sync_proxy_builder - .clone() - .add_list(list) - .with_receipt_extension(config) - .build() - .await?; - let list = sync_proxy.list("a").context("but we just added that list!")?; + assert_eq!( + collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { + acc + 1 + } else { + acc + }), + 10 + ); - let stream = sync_proxy.stream(); - pin_mut!(stream); + Ok(()) +} - stream.next().await.context("sync has closed unexpectedly")??; +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn noticing_new_rooms_in_growing() -> anyhow::Result<()> { + let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?; + print!("setup took its time"); + let growing_sync = SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::GrowingFullSync) + .full_sync_batch_size(10u32) + .full_sync_maximum_number_of_rooms_to_fetch(100) + .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) + .name("growing") + .build()?; - // find the room and send an event which we will send a receipt for - let room_id = list.get_room_id(0).unwrap(); - let room = client.get_joined_room(&room_id).context("No joined room {room_id}")?; - let event_id = - room.send(RoomMessageEventContent::text_plain("Hello world"), None).await?.event_id; + println!("starting the sliding sync setup"); + let sync_proxy = sync_proxy_builder + .clone() + .cold_cache("sliding_sync") + .add_list(growing_sync) + .build() + .await?; + let list = sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. 
+ let stream = sync_proxy.stream(); + pin_mut!(stream); + while list.state() != SlidingSyncState::FullyLoaded { + // we wait until growing sync is all done, too + println!("awaiting"); + let _room_summary = + stream.next().await.context("No room summary found, loop ended unsuccessfully")??; + } - // now send a receipt - room.send_single_receipt( - CreateReceiptType::Read, - ReceiptThread::Unthreaded, - event_id.clone(), + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { + acc + 1 + } else { + acc + }), + 20 + ); + // all found. let's add two more. + + make_room(&client, "one-more".to_owned()).await?; + make_room(&client, "two-more".to_owned()).await?; + + let mut seen = false; + + for _n in 0..4 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let summary = room_summary?; + // we only heard about the ones we had asked for + if summary.lists.iter().any(|s| s == "growing") + && list.maximum_number_of_rooms().unwrap_or_default() == 22 + { + if seen { + // once we saw 22, we give it another loop to catch up! + break; + } else { + seen = true; + } + } + } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple.iter().fold(0, |acc, i| if *i == RoomListEntryEasy::Filled { + acc + 1 + } else { + acc + }), + 22 + ); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn restart_room_resubscription() -> anyhow::Result<()> { + let (client, sync_proxy_builder) = random_setup_with_rooms(3).await?; + + let sync_proxy = sync_proxy_builder + .add_list( + SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::Selective) + .set_range(0u32, 2u32) + .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) + .name("sliding_list") + .build()?, ) + .build() .await?; - // we expect to see it because we have enabled the receipt extension. 
We don't - // know when we'll see it though - let mut found_receipt = false; - for _n in 0..3 { - stream.next().await.context("sync has closed unexpectedly")??; + let list = sync_proxy.list("sliding_list").context("list `sliding_list` isn't found")?; - // try to find it - let room = client.get_room(&room_id).context("No joined room {room_id}")?; - let receipts = room - .event_receipts(ReceiptType::Read, ReceiptThread::Unthreaded, &event_id) - .await - .unwrap(); + let stream = sync_proxy.stream(); + pin_mut!(stream); - let expected_user_id = client.user_id().unwrap(); - found_receipt = receipts.iter().any(|(user_id, _)| user_id == expected_user_id); - if found_receipt { - break; - } + let room_summary = + stream.next().await.context("No room summary found, loop ended unsuccessfully")??; + + // we only heard about the ones we had asked for + assert_eq!(room_summary.rooms.len(), 3); + + let collection_simple = list.rooms_list::(); + + assert_eq!(collection_simple, repeat(RoomListEntryEasy::Filled).take(3).collect::>()); + + let _signal = list.rooms_list_stream(); + + // let's move the window + + list.set_range(1, 2); + + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")??; + + // we only heard about the ones we had asked for + if room_summary.lists.iter().any(|s| s == "sliding_list") { + break; } - assert!(found_receipt); - Ok(()) } + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Invalid) + .take(1) + .chain(repeat(RoomListEntryEasy::Filled).take(2)) + .collect::>() + ); + + // let's get that first entry + + let room_id = assert_matches!(list.rooms_list().get(0), Some(RoomListEntry::Invalidated(room_id)) => room_id.clone()); + + // send a message + + let room = client.get_joined_room(&room_id).context("No joined room {room_id}")?; + + let content = RoomMessageEventContent::text_plain("Hello world"); + + room.send(content, None).await?; // this should put 
our room up to the most recent + + // let's subscribe + + sync_proxy.subscribe(room_id.clone(), Default::default()); + + let mut room_updated = false; + + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")??; + + // we only heard about the ones we had asked for + if room_summary.rooms.iter().any(|s| s == &room_id) { + room_updated = true; + break; + } + } + + assert!(room_updated, "Room update has not been seen"); + + // force the pos to be invalid and thus this being reset internally + sync_proxy.set_pos("100".to_owned()); + + let mut error_seen = false; + let mut room_updated = false; + + for _n in 0..2 { + let summary = match stream.next().await { + Some(Ok(e)) => e, + Some(Err(e)) => { + match e.client_api_error_kind() { + Some(RumaError::UnknownPos) => { + // we expect this to come through. + error_seen = true; + continue; + } + _ => Err(e)?, + } + } + None => anyhow::bail!("Stream ended unexpectedly."), + }; + + // we only heard about the ones we had asked for + if summary.rooms.iter().any(|s| s == &room_id) { + room_updated = true; + break; + } + } + + assert!(error_seen, "We have not seen the UnknownPos error"); + assert!(room_updated, "Room update has not been seen"); + + // send another message + + let room = client.get_joined_room(&room_id).context("No joined room {room_id}")?; + + let content = RoomMessageEventContent::text_plain("Hello world"); + + let event_id = room.send(content, None).await?.event_id; // this should put our room up to the most recent + + // let's see for it to come down the pipe + let mut room_updated = false; + + for _n in 0..2 { + let room_summary = stream.next().await.context("sync has closed unexpectedly")??; + + // we only heard about the ones we had asked for + if room_summary.rooms.iter().any(|s| s == &room_id) { + room_updated = true; + break; + } + } + assert!(room_updated, "Room update has not been seen"); + + let sliding_sync_room = 
sync_proxy.get_room(&room_id).expect("Sliding Sync room not found"); + let event = sliding_sync_room.latest_event().await.expect("No event found"); + + let collection_simple = list.rooms_list::(); + + assert_eq!( + collection_simple, + repeat(RoomListEntryEasy::Invalid) + .take(1) + .chain(repeat(RoomListEntryEasy::Filled).take(2)) + .collect::>() + ); + + assert_eq!( + event.event_id().unwrap(), + event_id, + "Latest event is different than what we've sent" + ); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn receipts_extension_works() -> anyhow::Result<()> { + let (client, sync_proxy_builder) = random_setup_with_rooms(1).await?; + let list = SlidingSyncList::builder() + .sync_mode(SlidingSyncMode::Selective) + .ranges(vec![(0u32, 1u32)]) + .sort(vec!["by_recency".to_owned()]) + .name("a") + .build()?; + + let mut config = ReceiptsConfig::default(); + config.enabled = Some(true); + + let sync_proxy = + sync_proxy_builder.clone().add_list(list).with_receipt_extension(config).build().await?; + let list = sync_proxy.list("a").context("but we just added that list!")?; + + let stream = sync_proxy.stream(); + pin_mut!(stream); + + stream.next().await.context("sync has closed unexpectedly")??; + + // find the room and send an event which we will send a receipt for + let room_id = list.get_room_id(0).unwrap(); + let room = client.get_joined_room(&room_id).context("No joined room {room_id}")?; + let event_id = + room.send(RoomMessageEventContent::text_plain("Hello world"), None).await?.event_id; + + // now send a receipt + room.send_single_receipt(CreateReceiptType::Read, ReceiptThread::Unthreaded, event_id.clone()) + .await?; + + // we expect to see it because we have enabled the receipt extension. 
We don't + // know when we'll see it though + let mut found_receipt = false; + for _n in 0..3 { + stream.next().await.context("sync has closed unexpectedly")??; + + // try to find it + let room = client.get_room(&room_id).context("No joined room {room_id}")?; + let receipts = room + .event_receipts(ReceiptType::Read, ReceiptThread::Unthreaded, &event_id) + .await + .unwrap(); + + let expected_user_id = client.user_id().unwrap(); + found_receipt = receipts.iter().any(|(user_id, _)| user_id == expected_user_id); + if found_receipt { + break; + } + } + assert!(found_receipt); + Ok(()) } From a32fce6cdf5747614e2deb78c6923fa78941f51b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 21 Mar 2023 13:05:32 +0100 Subject: [PATCH 42/43] Make all sliding-sync-integration-test dependencies dev-dependencies --- testing/sliding-sync-integration-test/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/sliding-sync-integration-test/Cargo.toml b/testing/sliding-sync-integration-test/Cargo.toml index 49870627c..1d5486a38 100644 --- a/testing/sliding-sync-integration-test/Cargo.toml +++ b/testing/sliding-sync-integration-test/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" publish = false -[dependencies] +[dev-dependencies] anyhow = { workspace = true } eyeball = { workspace = true } eyeball-im = { workspace = true } From a134cc378d414271c42f223717d48dd7095d0d49 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 21 Mar 2023 13:05:54 +0100 Subject: [PATCH 43/43] Sort sliding-sync-integration-test dependencies --- testing/sliding-sync-integration-test/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/sliding-sync-integration-test/Cargo.toml b/testing/sliding-sync-integration-test/Cargo.toml index 1d5486a38..cecf2993d 100644 --- a/testing/sliding-sync-integration-test/Cargo.toml +++ b/testing/sliding-sync-integration-test/Cargo.toml @@ -6,11 +6,11 @@ publish = false [dev-dependencies] anyhow = 
{ workspace = true } +assert_matches = "1.5.0" eyeball = { workspace = true } eyeball-im = { workspace = true } +futures = { version = "0.3.25" } matrix-sdk-integration-testing = { path = "../matrix-sdk-integration-testing", features = ["helpers"] } matrix-sdk = { path = "../../crates/matrix-sdk", features = ["experimental-sliding-sync", "testing"] } tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] } -futures = { version = "0.3.25" } uuid = { version = "1.2.2" } -assert_matches = "1.5.0"