Merge branch 'main' into media-retention-policy

Signed-off-by: Kévin Commaille <zecakeh@tedomum.fr>
This commit is contained in:
Kévin Commaille
2025-01-28 15:46:55 +01:00
142 changed files with 2713 additions and 1823 deletions

View File

@@ -286,24 +286,6 @@ jobs:
run: |
target/debug/xtask ci wasm-pack ${{ matrix.cmd }}
formatting:
name: Check Formatting
runs-on: ubuntu-latest
steps:
- name: Checkout the repo
uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2024-11-26
components: rustfmt
- name: Cargo fmt
run: |
cargo fmt -- --check
typos:
name: Spell Check with Typos
runs-on: ubuntu-latest
@@ -315,8 +297,8 @@ jobs:
- name: Check the spelling of the files in our repo
uses: crate-ci/typos@v1.29.4
clippy:
name: Run clippy
lint:
name: Lint
needs: xtask
runs-on: ubuntu-latest
@@ -333,7 +315,7 @@ jobs:
uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2024-11-26
components: clippy
components: clippy, rustfmt
- name: Load cache
uses: Swatinem/rust-cache@v2
@@ -347,6 +329,10 @@ jobs:
key: "${{ needs.xtask.outputs.cachekey-linux }}"
fail-on-cache-miss: true
- name: Check Formatting
run: |
target/debug/xtask ci style
- name: Clippy
run: |
target/debug/xtask ci clippy

34
Cargo.lock generated
View File

@@ -1410,6 +1410,15 @@ dependencies = [
"zeroize",
]
[[package]]
name = "emojis"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99e1f1df1f181f2539bac8bf027d31ca5ffbf9e559e3f2d09413b9107b5c02f4"
dependencies = [
"phf",
]
[[package]]
name = "encode_unicode"
version = "0.3.6"
@@ -1684,9 +1693,9 @@ dependencies = [
[[package]]
name = "eyeball-im"
version = "0.5.1"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1c02432230060cae0621e15803e073976d22974e0f013c9cb28a4ea1b484629"
checksum = "ad276eb017655257443d34f27455f60e8b02b839c6ebcaa8d6f06cc498784e8f"
dependencies = [
"futures-core",
"imbl",
@@ -1696,9 +1705,9 @@ dependencies = [
[[package]]
name = "eyeball-im-util"
version = "0.7.0"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f63a70e454238b5f66a0a0544c3e6a38be765cb01f34da9b94a2f3ecd8777cf8"
checksum = "eac7f06ce388e4f64876ad3836b275d0972ab64ae8bd8456862d5ebdb7bec4f5"
dependencies = [
"arrayvec",
"eyeball-im",
@@ -2476,9 +2485,9 @@ dependencies = [
[[package]]
name = "imbl"
version = "3.0.0"
version = "4.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc3be8d8cd36f33a46b1849f31f837c44d9fa87223baee3b4bd96b8f11df81eb"
checksum = "5ae128b3bc67ed43ec0a7bb1c337a9f026717628b3c4033f07ded1da3e854951"
dependencies = [
"bitmaps",
"imbl-sized-chunks",
@@ -2490,9 +2499,9 @@ dependencies = [
[[package]]
name = "imbl-sized-chunks"
version = "0.1.2"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "144006fb58ed787dcae3f54575ff4349755b00ccc99f4b4873860b654be1ed63"
checksum = "8f4241005618a62f8d57b2febd02510fb96e0137304728543dfc5fd6f052c22d"
dependencies = [
"bitmaps",
]
@@ -3505,6 +3514,7 @@ dependencies = [
"async-stream",
"async_cell",
"chrono",
"emojis",
"eyeball",
"eyeball-im",
"eyeball-im-util",
@@ -3531,7 +3541,9 @@ dependencies = [
"tokio-stream",
"tracing",
"unicode-normalization",
"unicode-segmentation",
"uniffi",
"url",
"wiremock",
]
@@ -4376,7 +4388,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3"
dependencies = [
"anyhow",
"itertools 0.10.5",
"itertools 0.13.0",
"proc-macro2",
"quote",
"syn",
@@ -6149,9 +6161,9 @@ dependencies = [
[[package]]
name = "unicode-segmentation"
version = "1.11.0"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
[[package]]
name = "unicode-truncate"

View File

@@ -34,8 +34,8 @@ base64 = "0.22.1"
byteorder = "1.5.0"
chrono = "0.4.38"
eyeball = { version = "0.8.8", features = ["tracing"] }
eyeball-im = { version = "0.5.1", features = ["tracing"] }
eyeball-im-util = "0.7.0"
eyeball-im = { version = "0.6.0", features = ["tracing"] }
eyeball-im-util = "0.8.0"
futures-core = "0.3.31"
futures-executor = "0.3.21"
futures-util = "0.3.31"
@@ -44,7 +44,7 @@ growable-bloom-filter = "2.1.1"
hkdf = "0.12.4"
hmac = "0.12.1"
http = "1.1.0"
imbl = "3.0.0"
imbl = "4.0.1"
indexmap = "2.6.0"
insta = { version = "1.41.1", features = ["json"] }
itertools = "0.13.0"

View File

@@ -26,11 +26,9 @@ The rust-sdk consists of multiple crates that can be picked at your convenience:
## Status
The library is in an alpha state, things that are implemented generally work but
the API will change in breaking ways.
The library is considered production ready and backs multiple client implementations such as Element X [[1]](https://github.com/element-hq/element-x-ios) [[2]](https://github.com/element-hq/element-x-android) and [Fractal](https://gitlab.gnome.org/World/fractal). Client developers should feel confident to build upon it.
If you are interested in using the matrix-sdk now is the time to try it out and
provide feedback.
Development of the SDK has been primarily sponsored by Element though accepts contributions from all.
## Bindings

View File

@@ -2,8 +2,8 @@ use std::sync::Arc;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use matrix_sdk::{
authentication::matrix::{MatrixSession, MatrixSessionTokens},
config::StoreConfig,
matrix_auth::{MatrixSession, MatrixSessionTokens},
Client, RoomInfo, RoomState, StateChanges,
};
use matrix_sdk_base::{store::MemoryStore, SessionMeta, StateStore as _};

View File

@@ -680,15 +680,20 @@ pub struct EncryptionSettings {
impl From<EncryptionSettings> for RustEncryptionSettings {
fn from(v: EncryptionSettings) -> Self {
let sharing_strategy = if v.only_allow_trusted_devices {
CollectStrategy::OnlyTrustedDevices
} else if v.error_on_verified_user_problem {
CollectStrategy::ErrorOnVerifiedUserProblem
} else {
CollectStrategy::AllDevices
};
RustEncryptionSettings {
algorithm: v.algorithm.into(),
rotation_period: Duration::from_secs(v.rotation_period),
rotation_period_msgs: v.rotation_period_msgs,
history_visibility: v.history_visibility.into(),
sharing_strategy: CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: v.only_allow_trusted_devices,
error_on_verified_user_problem: v.error_on_verified_user_problem,
},
sharing_strategy,
}
}
}

View File

@@ -5,7 +5,7 @@ use std::{
};
use matrix_sdk::{
oidc::{
authentication::oidc::{
registrations::OidcRegistrationsError,
types::{
iana::oauth::OAuthClientAuthenticationMethod,

View File

@@ -7,11 +7,7 @@ use std::{
use anyhow::{anyhow, Context as _};
use matrix_sdk::{
media::{
MediaFileHandle as SdkMediaFileHandle, MediaFormat, MediaRequestParameters,
MediaThumbnailSettings,
},
oidc::{
authentication::oidc::{
registrations::{ClientId, OidcRegistrations},
requests::account_management::AccountManagementActionFull,
types::{
@@ -23,6 +19,10 @@ use matrix_sdk::{
},
OidcAuthorizationData, OidcSession,
},
media::{
MediaFileHandle as SdkMediaFileHandle, MediaFormat, MediaRequestParameters,
MediaThumbnailSettings,
},
reqwest::StatusCode,
ruma::{
api::client::{
@@ -1535,10 +1535,13 @@ impl Session {
match auth_api {
// Build the session from the regular Matrix Auth Session.
AuthApi::Matrix(a) => {
let matrix_sdk::matrix_auth::MatrixSession {
let matrix_sdk::authentication::matrix::MatrixSession {
meta: matrix_sdk::SessionMeta { user_id, device_id },
tokens:
matrix_sdk::matrix_auth::MatrixSessionTokens { access_token, refresh_token },
matrix_sdk::authentication::matrix::MatrixSessionTokens {
access_token,
refresh_token,
},
} = a.session().context("Missing session")?;
Ok(Session {
@@ -1553,10 +1556,10 @@ impl Session {
}
// Build the session from the OIDC UserSession.
AuthApi::Oidc(api) => {
let matrix_sdk::oidc::UserSession {
let matrix_sdk::authentication::oidc::UserSession {
meta: matrix_sdk::SessionMeta { user_id, device_id },
tokens:
matrix_sdk::oidc::OidcSessionTokens {
matrix_sdk::authentication::oidc::OidcSessionTokens {
access_token,
refresh_token,
latest_id_token,
@@ -1617,12 +1620,12 @@ impl TryFrom<Session> for AuthSession {
.transpose()
.context("OIDC latest_id_token is invalid.")?;
let user_session = matrix_sdk::oidc::UserSession {
let user_session = matrix_sdk::authentication::oidc::UserSession {
meta: matrix_sdk::SessionMeta {
user_id: user_id.try_into()?,
device_id: device_id.into(),
},
tokens: matrix_sdk::oidc::OidcSessionTokens {
tokens: matrix_sdk::authentication::oidc::OidcSessionTokens {
access_token,
refresh_token,
latest_id_token,
@@ -1639,12 +1642,12 @@ impl TryFrom<Session> for AuthSession {
Ok(AuthSession::Oidc(session.into()))
} else {
// Create a regular Matrix Session.
let session = matrix_sdk::matrix_auth::MatrixSession {
let session = matrix_sdk::authentication::matrix::MatrixSession {
meta: matrix_sdk::SessionMeta {
user_id: user_id.try_into()?,
device_id: device_id.into(),
},
tokens: matrix_sdk::matrix_auth::MatrixSessionTokens {
tokens: matrix_sdk::authentication::matrix::MatrixSessionTokens {
access_token,
refresh_token,
},

View File

@@ -1,15 +1,15 @@
use std::{collections::HashMap, fmt, fmt::Display};
use matrix_sdk::{
encryption::CryptoStoreError, event_cache::EventCacheError, oidc::OidcError, reqwest,
room::edit::EditError, send_queue::RoomSendQueueError, HttpError, IdParseError,
authentication::oidc::OidcError, encryption::CryptoStoreError, event_cache::EventCacheError,
reqwest, room::edit::EditError, send_queue::RoomSendQueueError, HttpError, IdParseError,
NotificationSettingsError as SdkNotificationSettingsError,
QueueWedgeError as SdkQueueWedgeError, StoreError,
};
use matrix_sdk_ui::{encryption_sync_service, notification_client, sync_service, timeline};
use uniffi::UnexpectedUniFFICallbackError;
use crate::room_list::RoomListError;
use crate::{room_list::RoomListError, timeline::FocusEventError};
#[derive(Debug, thiserror::Error)]
pub enum ClientError {
@@ -161,6 +161,12 @@ impl From<NotYetImplemented> for ClientError {
}
}
// Allows `?` (and `.into()`) to convert a timeline focus error into the
// generic FFI-level `ClientError`.
impl From<FocusEventError> for ClientError {
    fn from(e: FocusEventError) -> Self {
        Self::new(e)
    }
}
/// Bindings version of the sdk type replacing OwnedUserId/DeviceIds with simple
/// String.
///

View File

@@ -14,6 +14,7 @@ mod error;
mod event;
mod helpers;
mod identity_status_change;
mod live_location_share;
mod notification;
mod notification_settings;
mod platform;

View File

@@ -0,0 +1,32 @@
// Copyright 2024 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::ruma::LocationContent;
/// A user's last known location within a live location share.
#[derive(uniffi::Record)]
pub struct LastLocation {
    /// The most recent location content of the user.
    pub location: LocationContent,
    /// The timestamp of the location update, in milliseconds since the Unix
    /// epoch — presumably derived from a `MilliSecondsSinceUnixEpoch`; TODO
    /// confirm against the producer in `Room::subscribe_to_live_location_shares`.
    pub ts: u64,
}
/// Details of a user's live location share.
#[derive(uniffi::Record)]
pub struct LiveLocationShare {
    /// The user's last known location.
    pub last_location: LastLocation,
    /// The live status of the live location share.
    // NOTE: crate-visible only; not exposed over FFI despite the record being
    // a uniffi Record.
    pub(crate) is_live: bool,
    /// The user ID of the person sharing their live location.
    pub user_id: String,
}

View File

@@ -4,14 +4,13 @@ use anyhow::{Context, Result};
use futures_util::{pin_mut, StreamExt};
use matrix_sdk::{
crypto::LocalTrust,
event_cache::paginator::PaginatorError,
room::{
edit::EditedContent, power_levels::RoomPowerLevelChanges, Room as SdkRoom, RoomMemberRole,
},
ComposerDraft as SdkComposerDraft, ComposerDraftType as SdkComposerDraftType,
RoomHero as SdkRoomHero, RoomMemberships, RoomState,
};
use matrix_sdk_ui::timeline::{default_event_filter, PaginationError, RoomExt, TimelineFocus};
use matrix_sdk_ui::timeline::{default_event_filter, RoomExt};
use mime::Mime;
use ruma::{
api::client::room::report_content,
@@ -29,19 +28,23 @@ use ruma::{
EventId, Int, OwnedDeviceId, OwnedUserId, RoomAliasId, UserId,
};
use tokio::sync::RwLock;
use tracing::error;
use tracing::{error, warn};
use super::RUNTIME;
use crate::{
chunk_iterator::ChunkIterator,
client::{JoinRule, RoomVisibility},
error::{ClientError, MediaInfoError, NotYetImplemented, RoomError},
event::{MessageLikeEventType, RoomMessageEventMessageType, StateEventType},
event::{MessageLikeEventType, StateEventType},
identity_status_change::IdentityStatusChange,
live_location_share::{LastLocation, LiveLocationShare},
room_info::RoomInfo,
room_member::RoomMember,
ruma::{ImageInfo, Mentions, NotifyType},
timeline::{DateDividerMode, FocusEventError, ReceiptType, SendHandle, Timeline},
ruma::{ImageInfo, LocationContent, Mentions, NotifyType},
timeline::{
configuration::{AllowedMessageTypes, TimelineConfiguration},
ReceiptType, SendHandle, Timeline,
},
utils::u64_to_uint,
TaskHandle,
};
@@ -87,10 +90,6 @@ impl Room {
#[matrix_sdk_ffi_macros::export]
impl Room {
pub fn id(&self) -> String {
self.inner.room_id().to_string()
}
/// Returns the room's name from the state event if available, otherwise
/// compute a room name based on the room's nature (DM or not) and number of
/// members.
@@ -200,115 +199,44 @@ impl Room {
}
}
/// Returns a timeline focused on the given event.
///
/// Note: this timeline is independent from that returned with
/// [`Self::timeline`], and as such it is not cached.
pub async fn timeline_focused_on_event(
/// Build a new timeline instance with the given configuration.
pub async fn timeline_with_configuration(
&self,
event_id: String,
num_context_events: u16,
internal_id_prefix: Option<String>,
) -> Result<Arc<Timeline>, FocusEventError> {
let parsed_event_id = EventId::parse(&event_id).map_err(|err| {
FocusEventError::InvalidEventId { event_id: event_id.clone(), err: err.to_string() }
})?;
let room = &self.inner;
let mut builder = matrix_sdk_ui::timeline::Timeline::builder(room);
if let Some(internal_id_prefix) = internal_id_prefix {
builder = builder.with_internal_id_prefix(internal_id_prefix);
}
let timeline = match builder
.with_focus(TimelineFocus::Event { target: parsed_event_id, num_context_events })
.build()
.await
{
Ok(t) => t,
Err(err) => {
if let matrix_sdk_ui::timeline::Error::PaginationError(
PaginationError::Paginator(PaginatorError::EventNotFound(..)),
) = err
{
return Err(FocusEventError::EventNotFound { event_id: event_id.to_string() });
}
return Err(FocusEventError::Other { msg: err.to_string() });
}
};
Ok(Timeline::new(timeline))
}
pub async fn pinned_events_timeline(
&self,
internal_id_prefix: Option<String>,
max_events_to_load: u16,
max_concurrent_requests: u16,
) -> Result<Arc<Timeline>, ClientError> {
let room = &self.inner;
let mut builder = matrix_sdk_ui::timeline::Timeline::builder(room);
if let Some(internal_id_prefix) = internal_id_prefix {
builder = builder.with_internal_id_prefix(internal_id_prefix);
}
let timeline = builder
.with_focus(TimelineFocus::PinnedEvents { max_events_to_load, max_concurrent_requests })
.build()
.await?;
Ok(Timeline::new(timeline))
}
/// A timeline instance that can be configured to only include RoomMessage
/// type events and filter those further based on their message type.
///
/// Virtual timeline items will still be provided and the
/// `default_event_filter` will be applied before everything else.
///
/// # Arguments
///
/// * `internal_id_prefix` - An optional String that will be prepended to
/// all the timeline item's internal IDs, making it possible to
/// distinguish different timeline instances from each other.
///
/// * `allowed_message_types` - A list of `RoomMessageEventMessageType` that
/// will be allowed to appear in the timeline
pub async fn message_filtered_timeline(
&self,
internal_id_prefix: Option<String>,
allowed_message_types: Vec<RoomMessageEventMessageType>,
date_divider_mode: DateDividerMode,
configuration: TimelineConfiguration,
) -> Result<Arc<Timeline>, ClientError> {
let mut builder = matrix_sdk_ui::timeline::Timeline::builder(&self.inner);
if let Some(internal_id_prefix) = internal_id_prefix {
builder = builder.with_focus(configuration.focus.try_into()?);
if let AllowedMessageTypes::Only { types } = configuration.allowed_message_types {
builder = builder.event_filter(move |event, room_version_id| {
default_event_filter(event, room_version_id)
&& match event {
AnySyncTimelineEvent::MessageLike(msg) => match msg.original_content() {
Some(AnyMessageLikeEventContent::RoomMessage(content)) => {
types.contains(&content.msgtype.into())
}
_ => false,
},
_ => false,
}
});
}
if let Some(internal_id_prefix) = configuration.internal_id_prefix {
builder = builder.with_internal_id_prefix(internal_id_prefix);
}
builder = builder.with_date_divider_mode(date_divider_mode.into());
builder = builder.event_filter(move |event, room_version_id| {
default_event_filter(event, room_version_id)
&& match event {
AnySyncTimelineEvent::MessageLike(msg) => match msg.original_content() {
Some(AnyMessageLikeEventContent::RoomMessage(content)) => {
allowed_message_types.contains(&content.msgtype.into())
}
_ => false,
},
_ => false,
}
});
builder = builder.with_date_divider_mode(configuration.date_divider_mode.into());
let timeline = builder.build().await?;
Ok(Timeline::new(timeline))
}
pub fn id(&self) -> String {
self.inner.room_id().to_string()
}
pub fn is_encrypted(&self) -> Result<bool, ClientError> {
Ok(RUNTIME.block_on(self.inner.is_encrypted())?)
}
@@ -1042,6 +970,75 @@ impl Room {
let visibility = self.inner.privacy_settings().get_room_visibility().await?;
Ok(visibility.into())
}
/// Start the current user's live location share in the room.
///
/// `duration_millis` is how long the share should stay live — milliseconds
/// per the parameter name; TODO confirm against the SDK's
/// `start_live_location_share`.
pub async fn start_live_location_share(&self, duration_millis: u64) -> Result<(), ClientError> {
    self.inner.start_live_location_share(duration_millis, None).await?;
    Ok(())
}
/// Stop the current user's live location share in the room.
///
/// # Errors
///
/// Returns a [`ClientError`] if stopping the share fails. Previously this
/// used `.expect(...)`, which panicked across the FFI boundary instead of
/// surfacing the error through the declared `Result` return type.
pub async fn stop_live_location_share(&self) -> Result<(), ClientError> {
    self.inner.stop_live_location_share().await?;
    Ok(())
}
/// Send the current user's live location beacon in the room.
///
/// `geo_uri` is the location to broadcast, as a `geo:` URI string.
///
/// # Errors
///
/// Returns a [`ClientError`] if sending the beacon fails. Previously this
/// used `.expect(...)`, which panicked across the FFI boundary instead of
/// surfacing the error through the declared `Result` return type.
pub async fn send_live_location(&self, geo_uri: String) -> Result<(), ClientError> {
    self.inner.send_location_beacon(geo_uri).await?;
    Ok(())
}
/// Subscribes to live location shares in this room, using a `listener` to
/// be notified of the changes.
///
/// The current live location shares will be emitted immediately when
/// subscribing, along with a [`TaskHandle`] to cancel the subscription.
pub fn subscribe_to_live_location_shares(
    self: Arc<Self>,
    listener: Box<dyn LiveLocationShareListener>,
) -> Arc<TaskHandle> {
    let room = self.inner.clone();
    // The subscription runs as a background task; dropping the returned
    // TaskHandle aborts it.
    Arc::new(TaskHandle::new(RUNTIME.spawn(async move {
        let subscription = room.observe_live_location_shares();
        let mut stream = subscription.subscribe();
        let mut pinned_stream = pin!(stream);
        while let Some(event) = pinned_stream.next().await {
            // Convert the SDK's location into the FFI-level LocationContent.
            // Only the geo URI is carried over; the other fields have no
            // counterpart in the observed event, so they stay empty/None.
            let last_location = LocationContent {
                body: "".to_owned(),
                geo_uri: event.last_location.location.uri.clone().to_string(),
                description: None,
                zoom_level: None,
                asset: None,
            };
            // Without the beacon_info state we cannot determine liveness, so
            // the event is skipped (with a warning) rather than guessed at.
            let Some(beacon_info) = event.beacon_info else {
                warn!("Live location share is missing the associated beacon_info state, skipping event.");
                continue;
            };
            listener.call(vec![LiveLocationShare {
                last_location: LastLocation {
                    location: last_location,
                    // assumes `ts.0` is milliseconds since the Unix epoch —
                    // TODO confirm the SDK timestamp type.
                    ts: event.last_location.ts.0.into(),
                },
                is_live: beacon_info.is_live(),
                user_id: event.user_id.to_string(),
            }])
        }
    })))
}
}
/// A listener for receiving new live location shares in a room.
#[matrix_sdk_ffi_macros::export(callback_interface)]
pub trait LiveLocationShareListener: Sync + Send {
    /// Invoked with the live location shares emitted by the subscription
    /// created via `Room::subscribe_to_live_location_shares`.
    fn call(&self, live_location_shares: Vec<LiveLocationShare>);
}
impl From<matrix_sdk::room::knock_requests::KnockRequest> for KnockRequest {

View File

@@ -0,0 +1,85 @@
use ruma::EventId;
use super::FocusEventError;
use crate::{error::ClientError, event::RoomMessageEventMessageType};
/// What a timeline should be focused on when it is built.
///
/// FFI-level mirror of [`matrix_sdk_ui::timeline::TimelineFocus`]; see the
/// `TryFrom` impl below for the conversion.
#[derive(uniffi::Enum)]
pub enum TimelineFocus {
    /// Follow the room's live timeline.
    Live,
    /// Focus on the given event, with `num_context_events` events of
    /// surrounding context.
    Event { event_id: String, num_context_events: u16 },
    /// Show the room's pinned events.
    PinnedEvents { max_events_to_load: u16, max_concurrent_requests: u16 },
}
impl TryFrom<TimelineFocus> for matrix_sdk_ui::timeline::TimelineFocus {
    type Error = ClientError;

    /// Converts the FFI-level focus description into the SDK's own
    /// timeline focus type.
    ///
    /// Fails with [`FocusEventError::InvalidEventId`] (wrapped in a
    /// [`ClientError`]) when the supplied event ID cannot be parsed.
    fn try_from(
        value: TimelineFocus,
    ) -> Result<matrix_sdk_ui::timeline::TimelineFocus, Self::Error> {
        match value {
            TimelineFocus::Live => Ok(Self::Live),
            TimelineFocus::Event { event_id, num_context_events } => {
                // Validate the event ID up front, reporting a dedicated error
                // variant so callers can distinguish a malformed ID.
                match EventId::parse(&event_id) {
                    Ok(target) => Ok(Self::Event { target, num_context_events }),
                    Err(err) => Err(FocusEventError::InvalidEventId {
                        event_id,
                        err: err.to_string(),
                    }
                    .into()),
                }
            }
            TimelineFocus::PinnedEvents { max_events_to_load, max_concurrent_requests } => {
                Ok(Self::PinnedEvents { max_events_to_load, max_concurrent_requests })
            }
        }
    }
}
/// Changes how date dividers get inserted, either in between each day or in
/// between each month
#[derive(uniffi::Enum)]
pub enum DateDividerMode {
    /// Insert a divider between events sent on different days.
    Daily,
    /// Insert a divider between events sent in different months.
    Monthly,
}
// Infallible one-to-one mapping onto the SDK's equivalent enum.
impl From<DateDividerMode> for matrix_sdk_ui::timeline::DateDividerMode {
    fn from(value: DateDividerMode) -> Self {
        match value {
            DateDividerMode::Daily => Self::Daily,
            DateDividerMode::Monthly => Self::Monthly,
        }
    }
}
/// Restricts which room-message types may appear in a timeline (applied on
/// top of the default event filter).
#[derive(uniffi::Enum)]
pub enum AllowedMessageTypes {
    /// No additional filtering: all message types are allowed.
    All,
    /// Only the listed message types are allowed.
    Only { types: Vec<RoomMessageEventMessageType> },
}
/// Various options used to configure the timeline's behavior.
#[derive(uniffi::Record)]
pub struct TimelineConfiguration {
    /// What should the timeline focus on?
    pub focus: TimelineFocus,
    /// A list of [`RoomMessageEventMessageType`] that will be allowed to appear
    /// in the timeline
    pub allowed_message_types: AllowedMessageTypes,
    /// An optional String that will be prepended to
    /// all the timeline item's internal IDs, making it possible to
    /// distinguish different timeline instances from each other.
    pub internal_id_prefix: Option<String>,
    /// How often to insert date dividers
    pub date_divider_mode: DateDividerMode,
}

View File

@@ -19,8 +19,6 @@ use as_variant::as_variant;
use content::{InReplyToDetails, RepliedToEventDetails};
use eyeball_im::VectorDiff;
use futures_util::{pin_mut, StreamExt as _};
#[cfg(doc)]
use matrix_sdk::crypto::CollectStrategy;
use matrix_sdk::{
attachment::{
AttachmentConfig, AttachmentInfo, BaseAudioInfo, BaseFileInfo, BaseImageInfo,
@@ -63,8 +61,6 @@ use tracing::{error, warn};
use uuid::Uuid;
use self::content::{Reaction, ReactionSenderData, TimelineItemContent};
#[cfg(doc)]
use crate::client_builder::ClientBuilder;
use crate::{
client::ProgressWatcher,
error::{ClientError, RoomError},
@@ -79,6 +75,7 @@ use crate::{
RUNTIME,
};
pub mod configuration;
mod content;
pub use content::MessageContent;
@@ -215,7 +212,7 @@ pub struct UploadParameters {
#[matrix_sdk_ffi_macros::export]
impl Timeline {
pub async fn add_listener(&self, listener: Box<dyn TimelineListener>) -> Arc<TaskHandle> {
let (timeline_items, timeline_stream) = self.inner.subscribe_batched().await;
let (timeline_items, timeline_stream) = self.inner.subscribe().await;
Arc::new(TaskHandle::new(RUNTIME.spawn(async move {
pin_mut!(timeline_stream);
@@ -270,16 +267,16 @@ impl Timeline {
/// Paginate backwards, whether we are in focused mode or in live mode.
///
/// Returns whether we hit the end of the timeline or not.
/// Returns whether we hit the start of the timeline or not.
pub async fn paginate_backwards(&self, num_events: u16) -> Result<bool, ClientError> {
Ok(self.inner.paginate_backwards(num_events).await?)
}
/// Paginate forwards, when in focused mode.
/// Paginate forwards, whether we are in focused mode or in live mode.
///
/// Returns whether we hit the end of the timeline or not.
pub async fn focused_paginate_forwards(&self, num_events: u16) -> Result<bool, ClientError> {
Ok(self.inner.focused_paginate_forwards(num_events).await?)
pub async fn paginate_forwards(&self, num_events: u16) -> Result<bool, ClientError> {
Ok(self.inner.paginate_forwards(num_events).await?)
}
pub async fn send_read_receipt(
@@ -1321,21 +1318,8 @@ impl LazyTimelineItemProvider {
fn get_send_handle(&self) -> Option<Arc<SendHandle>> {
self.0.local_echo_send_handle().map(|handle| Arc::new(SendHandle::new(handle)))
}
}
/// Changes how date dividers get inserted, either in between each day or in
/// between each month
#[derive(Debug, Clone, uniffi::Enum)]
pub enum DateDividerMode {
Daily,
Monthly,
}
impl From<DateDividerMode> for matrix_sdk_ui::timeline::DateDividerMode {
fn from(value: DateDividerMode) -> Self {
match value {
DateDividerMode::Daily => Self::Daily,
DateDividerMode::Monthly => Self::Monthly,
}
fn contains_only_emojis(&self) -> bool {
self.0.contains_only_emojis()
}
}

View File

@@ -30,6 +30,11 @@ All notable changes to this project will be documented in this file.
cached value from the previous successful computation. If you need a sync
variant, consider using `Room::cached_display_name()`.
([#4470](https://github.com/matrix-org/matrix-rust-sdk/pull/4470))
- [**breaking**]: The reexported types `SyncTimelineEvent` and `TimelineEvent`
have been fused into a single type `TimelineEvent`, and its field
`push_actions` has been made `Option`al (it is set to `None` when we couldn't
compute the push actions, because we lacked some information).
([#4568](https://github.com/matrix-org/matrix-rust-sdk/pull/4568))
## [0.9.0] - 2024-12-18

View File

@@ -68,7 +68,7 @@ use crate::latest_event::{is_suitable_for_latest_event, LatestEvent, PossibleLat
#[cfg(feature = "e2e-encryption")]
use crate::RoomMemberships;
use crate::{
deserialized_responses::{DisplayName, RawAnySyncOrStrippedTimelineEvent, SyncTimelineEvent},
deserialized_responses::{DisplayName, RawAnySyncOrStrippedTimelineEvent, TimelineEvent},
error::{Error, Result},
event_cache::store::EventCacheStoreLock,
response_processors::AccountDataProcessor,
@@ -347,9 +347,9 @@ impl BaseClient {
Ok(())
}
/// Attempt to decrypt the given raw event into a `SyncTimelineEvent`.
/// Attempt to decrypt the given raw event into a [`TimelineEvent`].
///
/// In the case of a decryption error, returns a `SyncTimelineEvent`
/// In the case of a decryption error, returns a [`TimelineEvent`]
/// representing the decryption error; in the case of problems with our
/// application, returns `Err`.
///
@@ -359,7 +359,7 @@ impl BaseClient {
&self,
event: &Raw<AnySyncTimelineEvent>,
room_id: &RoomId,
) -> Result<Option<SyncTimelineEvent>> {
) -> Result<Option<TimelineEvent>> {
let olm = self.olm_machine().await;
let Some(olm) = olm.as_ref() else { return Ok(None) };
@@ -372,7 +372,7 @@ impl BaseClient {
.await?
{
RoomEventDecryptionResult::Decrypted(decrypted) => {
let event: SyncTimelineEvent = decrypted.into();
let event: TimelineEvent = decrypted.into();
if let Ok(AnySyncTimelineEvent::MessageLike(e)) = event.raw().deserialize() {
match &e {
@@ -394,7 +394,7 @@ impl BaseClient {
event
}
RoomEventDecryptionResult::UnableToDecrypt(utd_info) => {
SyncTimelineEvent::new_utd_event(event.clone(), utd_info)
TimelineEvent::new_utd_event(event.clone(), utd_info)
}
};
@@ -423,7 +423,7 @@ impl BaseClient {
for raw_event in events {
// Start by assuming we have a plaintext event. We'll replace it with a
// decrypted or UTD event below if necessary.
let mut event = SyncTimelineEvent::new(raw_event);
let mut event = TimelineEvent::new(raw_event);
match event.raw().deserialize() {
Ok(e) => {
@@ -535,7 +535,7 @@ impl BaseClient {
},
);
}
event.push_actions = actions.to_owned();
event.push_actions = Some(actions.to_owned());
}
}
Err(e) => {

View File

@@ -14,12 +14,12 @@
//! Event cache store and common types shared with `matrix_sdk::event_cache`.
use matrix_sdk_common::deserialized_responses::SyncTimelineEvent;
use matrix_sdk_common::deserialized_responses::TimelineEvent;
pub mod store;
/// The kind of event the event storage holds.
pub type Event = SyncTimelineEvent;
pub type Event = TimelineEvent;
/// The kind of gap the event storage holds.
#[derive(Clone, Debug)]

View File

@@ -18,7 +18,7 @@ use assert_matches::assert_matches;
use async_trait::async_trait;
use matrix_sdk_common::{
deserialized_responses::{
AlgorithmInfo, DecryptedRoomEvent, EncryptionInfo, SyncTimelineEvent, TimelineEventKind,
AlgorithmInfo, DecryptedRoomEvent, EncryptionInfo, TimelineEvent, TimelineEventKind,
VerificationState,
},
linked_chunk::{
@@ -42,7 +42,7 @@ use crate::{
/// correctly stores event data.
///
/// Keep in sync with [`check_test_event`].
pub fn make_test_event(room_id: &RoomId, content: &str) -> SyncTimelineEvent {
pub fn make_test_event(room_id: &RoomId, content: &str) -> TimelineEvent {
let encryption_info = EncryptionInfo {
sender: (*ALICE).into(),
sender_device: None,
@@ -60,13 +60,13 @@ pub fn make_test_event(room_id: &RoomId, content: &str) -> SyncTimelineEvent {
.into_raw_timeline()
.cast();
SyncTimelineEvent {
TimelineEvent {
kind: TimelineEventKind::Decrypted(DecryptedRoomEvent {
event,
encryption_info,
unsigned_encryption_info: None,
}),
push_actions: vec![Action::Notify],
push_actions: Some(vec![Action::Notify]),
}
}
@@ -75,9 +75,9 @@ pub fn make_test_event(room_id: &RoomId, content: &str) -> SyncTimelineEvent {
///
/// Keep in sync with [`make_test_event`].
#[track_caller]
pub fn check_test_event(event: &SyncTimelineEvent, text: &str) {
pub fn check_test_event(event: &TimelineEvent, text: &str) {
// Check push actions.
let actions = &event.push_actions;
let actions = event.push_actions.as_ref().unwrap();
assert_eq!(actions.len(), 1);
assert_matches!(&actions[0], Action::Notify);

View File

@@ -1,7 +1,7 @@
//! Utilities for working with events to decide whether they are suitable for
//! use as a [crate::Room::latest_event].
use matrix_sdk_common::deserialized_responses::SyncTimelineEvent;
use matrix_sdk_common::deserialized_responses::TimelineEvent;
#[cfg(feature = "e2e-encryption")]
use ruma::{
events::{
@@ -164,7 +164,7 @@ pub fn is_suitable_for_latest_event<'a>(
#[derive(Clone, Debug, Serialize)]
pub struct LatestEvent {
/// The actual event.
event: SyncTimelineEvent,
event: TimelineEvent,
/// The member profile of the event's sender.
#[serde(skip_serializing_if = "Option::is_none")]
@@ -178,7 +178,7 @@ pub struct LatestEvent {
#[derive(Deserialize)]
struct SerializedLatestEvent {
/// The actual event.
event: SyncTimelineEvent,
event: TimelineEvent,
/// The member profile of the event's sender.
#[serde(skip_serializing_if = "Option::is_none")]
@@ -211,7 +211,7 @@ impl<'de> Deserialize<'de> for LatestEvent {
Err(err) => variant_errors.push(err),
}
match serde_json::from_str::<SyncTimelineEvent>(raw.get()) {
match serde_json::from_str::<TimelineEvent>(raw.get()) {
Ok(value) => {
return Ok(LatestEvent {
event: value,
@@ -230,13 +230,13 @@ impl<'de> Deserialize<'de> for LatestEvent {
impl LatestEvent {
/// Create a new [`LatestEvent`] without the sender's profile.
pub fn new(event: SyncTimelineEvent) -> Self {
pub fn new(event: TimelineEvent) -> Self {
Self { event, sender_profile: None, sender_name_is_ambiguous: None }
}
/// Create a new [`LatestEvent`] with maybe the sender's profile.
pub fn new_with_sender_details(
event: SyncTimelineEvent,
event: TimelineEvent,
sender_profile: Option<MinimalRoomMemberEvent>,
sender_name_is_ambiguous: Option<bool>,
) -> Self {
@@ -244,17 +244,17 @@ impl LatestEvent {
}
/// Transform [`Self`] into an event.
pub fn into_event(self) -> SyncTimelineEvent {
pub fn into_event(self) -> TimelineEvent {
self.event
}
/// Get a reference to the event.
pub fn event(&self) -> &SyncTimelineEvent {
pub fn event(&self) -> &TimelineEvent {
&self.event
}
/// Get a mutable reference to the event.
pub fn event_mut(&mut self) -> &mut SyncTimelineEvent {
pub fn event_mut(&mut self) -> &mut TimelineEvent {
&mut self.event
}
@@ -301,7 +301,7 @@ mod tests {
use assert_matches::assert_matches;
#[cfg(feature = "e2e-encryption")]
use assert_matches2::assert_let;
use matrix_sdk_common::deserialized_responses::SyncTimelineEvent;
use matrix_sdk_common::deserialized_responses::TimelineEvent;
use ruma::serde::Raw;
#[cfg(feature = "e2e-encryption")]
use ruma::{
@@ -596,7 +596,7 @@ mod tests {
latest_event: LatestEvent,
}
let event = SyncTimelineEvent::new(
let event = TimelineEvent::new(
Raw::from_json_string(json!({ "event_id": "$1" }).to_string()).unwrap(),
);

View File

@@ -123,7 +123,7 @@ use std::{
};
use eyeball_im::Vector;
use matrix_sdk_common::{deserialized_responses::SyncTimelineEvent, ring_buffer::RingBuffer};
use matrix_sdk_common::{deserialized_responses::TimelineEvent, ring_buffer::RingBuffer};
use ruma::{
events::{
poll::{start::PollStartEventContent, unstable_start::UnstablePollStartEventContent},
@@ -202,7 +202,7 @@ impl RoomReadReceipts {
///
/// Returns whether a new event triggered a new unread/notification/mention.
#[inline(always)]
fn process_event(&mut self, event: &SyncTimelineEvent, user_id: &UserId) {
fn process_event(&mut self, event: &TimelineEvent, user_id: &UserId) {
if marks_as_unread(event.raw(), user_id) {
self.num_unread += 1;
}
@@ -210,7 +210,11 @@ impl RoomReadReceipts {
let mut has_notify = false;
let mut has_mention = false;
for action in &event.push_actions {
let Some(actions) = event.push_actions.as_ref() else {
return;
};
for action in actions.iter() {
if !has_notify && action.should_notify() {
self.num_notifications += 1;
has_notify = true;
@@ -236,7 +240,7 @@ impl RoomReadReceipts {
&mut self,
receipt_event_id: &EventId,
user_id: &UserId,
events: impl IntoIterator<Item = &'a SyncTimelineEvent>,
events: impl IntoIterator<Item = &'a TimelineEvent>,
) -> bool {
let mut counting_receipts = false;
@@ -269,11 +273,11 @@ impl RoomReadReceipts {
pub trait PreviousEventsProvider: Send + Sync {
/// Returns the list of known timeline events, in sync order, for the given
/// room.
fn for_room(&self, room_id: &RoomId) -> Vector<SyncTimelineEvent>;
fn for_room(&self, room_id: &RoomId) -> Vector<TimelineEvent>;
}
impl PreviousEventsProvider for () {
fn for_room(&self, _: &RoomId) -> Vector<SyncTimelineEvent> {
fn for_room(&self, _: &RoomId) -> Vector<TimelineEvent> {
Vector::new()
}
}
@@ -292,7 +296,7 @@ struct ReceiptSelector {
impl ReceiptSelector {
fn new(
all_events: &Vector<SyncTimelineEvent>,
all_events: &Vector<TimelineEvent>,
latest_active_receipt_event: Option<&EventId>,
) -> Self {
let event_id_to_pos = Self::create_sync_index(all_events.iter());
@@ -310,7 +314,7 @@ impl ReceiptSelector {
/// Create a mapping of `event_id` -> sync order for all events that have an
/// `event_id`.
fn create_sync_index<'a>(
events: impl Iterator<Item = &'a SyncTimelineEvent> + 'a,
events: impl Iterator<Item = &'a TimelineEvent> + 'a,
) -> BTreeMap<OwnedEventId, usize> {
// TODO: this should be cached and incrementally updated.
BTreeMap::from_iter(
@@ -405,7 +409,7 @@ impl ReceiptSelector {
/// Try to match an implicit receipt, that is, the one we get for events we
/// sent ourselves.
#[instrument(skip_all)]
fn try_match_implicit(&mut self, user_id: &UserId, new_events: &[SyncTimelineEvent]) {
fn try_match_implicit(&mut self, user_id: &UserId, new_events: &[TimelineEvent]) {
for ev in new_events {
// Get the `sender` field, if any, or skip this event.
let Ok(Some(sender)) = ev.raw().get_field::<OwnedUserId>("sender") else { continue };
@@ -432,8 +436,8 @@ impl ReceiptSelector {
/// Returns true if there's an event common to both groups of events, based on
/// their event id.
fn events_intersects<'a>(
previous_events: impl Iterator<Item = &'a SyncTimelineEvent>,
new_events: &[SyncTimelineEvent],
previous_events: impl Iterator<Item = &'a TimelineEvent>,
new_events: &[TimelineEvent],
) -> bool {
let previous_events_ids = BTreeSet::from_iter(previous_events.filter_map(|ev| ev.event_id()));
new_events
@@ -454,8 +458,8 @@ pub(crate) fn compute_unread_counts(
user_id: &UserId,
room_id: &RoomId,
receipt_event: Option<&ReceiptEventContent>,
previous_events: Vector<SyncTimelineEvent>,
new_events: &[SyncTimelineEvent],
previous_events: Vector<TimelineEvent>,
new_events: &[TimelineEvent],
read_receipts: &mut RoomReadReceipts,
) {
debug!(?read_receipts, "Starting.");
@@ -620,7 +624,7 @@ mod tests {
use std::{num::NonZeroUsize, ops::Not as _};
use eyeball_im::Vector;
use matrix_sdk_common::{deserialized_responses::SyncTimelineEvent, ring_buffer::RingBuffer};
use matrix_sdk_common::{deserialized_responses::TimelineEvent, ring_buffer::RingBuffer};
use matrix_sdk_test::event_factory::EventFactory;
use ruma::{
event_id,
@@ -720,13 +724,13 @@ mod tests {
#[test]
fn test_count_unread_and_mentions() {
fn make_event(user_id: &UserId, push_actions: Vec<Action>) -> SyncTimelineEvent {
fn make_event(user_id: &UserId, push_actions: Vec<Action>) -> TimelineEvent {
let mut ev = EventFactory::new()
.text_msg("A")
.sender(user_id)
.event_id(event_id!("$ida"))
.into_sync();
ev.push_actions = push_actions;
.into_event();
ev.push_actions = Some(push_actions);
ev
}
@@ -801,7 +805,7 @@ mod tests {
// When provided with one event, that's not the receipt event, we don't count
// it.
fn make_event(event_id: &EventId) -> SyncTimelineEvent {
fn make_event(event_id: &EventId) -> TimelineEvent {
EventFactory::new()
.text_msg("A")
.sender(user_id!("@bob:example.org"))
@@ -915,8 +919,8 @@ mod tests {
let mut previous_events = Vector::new();
let f = EventFactory::new();
let ev1 = f.text_msg("A").sender(other_user_id).event_id(receipt_event_id).into_sync();
let ev2 = f.text_msg("A").sender(other_user_id).event_id(event_id!("$2")).into_sync();
let ev1 = f.text_msg("A").sender(other_user_id).event_id(receipt_event_id).into_event();
let ev2 = f.text_msg("A").sender(other_user_id).event_id(event_id!("$2")).into_event();
let receipt_event = f
.read_receipts()
@@ -940,7 +944,8 @@ mod tests {
previous_events.push_back(ev1);
previous_events.push_back(ev2);
let new_event = f.text_msg("A").sender(other_user_id).event_id(event_id!("$3")).into_sync();
let new_event =
f.text_msg("A").sender(other_user_id).event_id(event_id!("$3")).into_event();
compute_unread_counts(
user_id,
room_id,
@@ -954,7 +959,7 @@ mod tests {
assert_eq!(read_receipts.num_unread, 2);
}
fn make_test_events(user_id: &UserId) -> Vector<SyncTimelineEvent> {
fn make_test_events(user_id: &UserId) -> Vector<TimelineEvent> {
let f = EventFactory::new().sender(user_id);
let ev1 = f.text_msg("With the lights out, it's less dangerous").event_id(event_id!("$1"));
let ev2 = f.text_msg("Here we are now, entertain us").event_id(event_id!("$2"));
@@ -1130,7 +1135,7 @@ mod tests {
let events = make_test_events(uid);
// An event with no id.
let ev6 = EventFactory::new().text_msg("yolo").sender(uid).no_event_id().into_sync();
let ev6 = EventFactory::new().text_msg("yolo").sender(uid).no_event_id().into_event();
let index = ReceiptSelector::create_sync_index(events.iter().chain(&[ev6]));
@@ -1197,8 +1202,8 @@ mod tests {
fn test_receipt_selector_handle_pending_receipts_noop() {
let sender = user_id!("@bob:example.org");
let f = EventFactory::new().sender(sender);
let ev1 = f.text_msg("yo").event_id(event_id!("$1")).into_sync();
let ev2 = f.text_msg("well?").event_id(event_id!("$2")).into_sync();
let ev1 = f.text_msg("yo").event_id(event_id!("$1")).into_event();
let ev2 = f.text_msg("well?").event_id(event_id!("$2")).into_event();
let events: Vector<_> = vec![ev1, ev2].into();
{
@@ -1233,8 +1238,8 @@ mod tests {
fn test_receipt_selector_handle_pending_receipts_doesnt_match_known_events() {
let sender = user_id!("@bob:example.org");
let f = EventFactory::new().sender(sender);
let ev1 = f.text_msg("yo").event_id(event_id!("$1")).into_sync();
let ev2 = f.text_msg("well?").event_id(event_id!("$2")).into_sync();
let ev1 = f.text_msg("yo").event_id(event_id!("$1")).into_event();
let ev2 = f.text_msg("well?").event_id(event_id!("$2")).into_event();
let events: Vector<_> = vec![ev1, ev2].into();
{
@@ -1270,8 +1275,8 @@ mod tests {
fn test_receipt_selector_handle_pending_receipts_matches_known_events_no_initial() {
let sender = user_id!("@bob:example.org");
let f = EventFactory::new().sender(sender);
let ev1 = f.text_msg("yo").event_id(event_id!("$1")).into_sync();
let ev2 = f.text_msg("well?").event_id(event_id!("$2")).into_sync();
let ev1 = f.text_msg("yo").event_id(event_id!("$1")).into_event();
let ev2 = f.text_msg("well?").event_id(event_id!("$2")).into_event();
let events: Vector<_> = vec![ev1, ev2].into();
{
@@ -1312,8 +1317,8 @@ mod tests {
fn test_receipt_selector_handle_pending_receipts_matches_known_events_with_initial() {
let sender = user_id!("@bob:example.org");
let f = EventFactory::new().sender(sender);
let ev1 = f.text_msg("yo").event_id(event_id!("$1")).into_sync();
let ev2 = f.text_msg("well?").event_id(event_id!("$2")).into_sync();
let ev1 = f.text_msg("yo").event_id(event_id!("$1")).into_event();
let ev2 = f.text_msg("well?").event_id(event_id!("$2")).into_event();
let events: Vector<_> = vec![ev1, ev2].into();
{
@@ -1491,10 +1496,10 @@ mod tests {
f.text_msg("A mulatto, an albino")
.sender(&myself)
.event_id(event_id!("$6"))
.into_sync(),
.into_event(),
);
events.push_back(
f.text_msg("A mosquito, my libido").sender(bob).event_id(event_id!("$7")).into_sync(),
f.text_msg("A mosquito, my libido").sender(bob).event_id(event_id!("$7")).into_event(),
);
let mut selector = ReceiptSelector::new(&events, None);
@@ -1520,15 +1525,15 @@ mod tests {
f.text_msg("A mulatto, an albino")
.sender(user_id)
.event_id(event_id!("$6"))
.into_sync(),
.into_event(),
);
// And others by Bob,
events.push_back(
f.text_msg("A mosquito, my libido").sender(bob).event_id(event_id!("$7")).into_sync(),
f.text_msg("A mosquito, my libido").sender(bob).event_id(event_id!("$7")).into_event(),
);
events.push_back(
f.text_msg("A denial, a denial").sender(bob).event_id(event_id!("$8")).into_sync(),
f.text_msg("A denial, a denial").sender(bob).event_id(event_id!("$8")).into_event(),
);
let events: Vec<_> = events.into_iter().collect();

View File

@@ -2164,7 +2164,7 @@ mod tests {
};
use assign::assign;
use matrix_sdk_common::deserialized_responses::SyncTimelineEvent;
use matrix_sdk_common::deserialized_responses::TimelineEvent;
use matrix_sdk_test::{
async_test,
event_factory::EventFactory,
@@ -2241,7 +2241,7 @@ mod tests {
last_prev_batch: Some("pb".to_owned()),
sync_info: SyncInfo::FullySynced,
encryption_state_synced: true,
latest_event: Some(Box::new(LatestEvent::new(SyncTimelineEvent::new(
latest_event: Some(Box::new(LatestEvent::new(TimelineEvent::new(
Raw::from_json_string(json!({"sender": "@u:i.uk"}).to_string()).unwrap(),
)))),
base_info: Box::new(
@@ -3324,7 +3324,7 @@ mod tests {
#[cfg(feature = "e2e-encryption")]
fn make_latest_event(event_id: &str) -> Box<LatestEvent> {
Box::new(LatestEvent::new(SyncTimelineEvent::new(
Box::new(LatestEvent::new(TimelineEvent::new(
Raw::from_json_string(json!({ "event_id": event_id }).to_string()).unwrap(),
)))
}

View File

@@ -21,7 +21,7 @@ use std::ops::Deref;
use std::{borrow::Cow, collections::BTreeMap};
#[cfg(feature = "e2e-encryption")]
use matrix_sdk_common::deserialized_responses::SyncTimelineEvent;
use matrix_sdk_common::deserialized_responses::TimelineEvent;
use ruma::{
api::client::sync::sync_events::v3::{self, InvitedRoom, KnockedRoom},
events::{
@@ -690,7 +690,7 @@ impl BaseClient {
async fn cache_latest_events(
room: &Room,
room_info: &mut RoomInfo,
events: &[SyncTimelineEvent],
events: &[TimelineEvent],
changes: Option<&StateChanges>,
store: Option<&Store>,
) {
@@ -900,7 +900,7 @@ mod tests {
use std::sync::{Arc, RwLock as SyncRwLock};
use assert_matches::assert_matches;
use matrix_sdk_common::deserialized_responses::SyncTimelineEvent;
use matrix_sdk_common::deserialized_responses::TimelineEvent;
#[cfg(feature = "e2e-encryption")]
use matrix_sdk_common::{
deserialized_responses::{UnableToDecryptInfo, UnableToDecryptReason},
@@ -2606,7 +2606,7 @@ mod tests {
}
#[cfg(feature = "e2e-encryption")]
async fn choose_event_to_cache(events: &[SyncTimelineEvent]) -> Option<SyncTimelineEvent> {
async fn choose_event_to_cache(events: &[TimelineEvent]) -> Option<TimelineEvent> {
let room = make_room();
let mut room_info = room.clone_info();
cache_latest_events(&room, &mut room_info, events, None, None).await;
@@ -2615,11 +2615,11 @@ mod tests {
}
#[cfg(feature = "e2e-encryption")]
fn rawev_id(event: SyncTimelineEvent) -> String {
fn rawev_id(event: TimelineEvent) -> String {
event.event_id().unwrap().to_string()
}
fn ev_id(event: Option<SyncTimelineEvent>) -> String {
fn ev_id(event: Option<TimelineEvent>) -> String {
event.unwrap().event_id().unwrap().to_string()
}
@@ -2629,7 +2629,7 @@ mod tests {
}
#[cfg(feature = "e2e-encryption")]
fn evs_ids(events: &[SyncTimelineEvent]) -> Vec<String> {
fn evs_ids(events: &[TimelineEvent]) -> Vec<String> {
events.iter().map(|e| e.event_id().unwrap().to_string()).collect()
}
@@ -2661,13 +2661,13 @@ mod tests {
}
#[cfg(feature = "e2e-encryption")]
fn make_event(typ: &str, id: &str) -> SyncTimelineEvent {
SyncTimelineEvent::new(make_raw_event(typ, id))
fn make_event(typ: &str, id: &str) -> TimelineEvent {
TimelineEvent::new(make_raw_event(typ, id))
}
#[cfg(feature = "e2e-encryption")]
fn make_encrypted_event(id: &str) -> SyncTimelineEvent {
SyncTimelineEvent::new_utd_event(
fn make_encrypted_event(id: &str) -> TimelineEvent {
TimelineEvent::new_utd_event(
Raw::from_json_string(
json!({
"type": "m.room.encrypted",

View File

@@ -19,7 +19,7 @@ use std::{
sync::Arc,
};
use matrix_sdk_common::deserialized_responses::SyncTimelineEvent;
use matrix_sdk_common::deserialized_responses::TimelineEvent;
use ruma::{
events::{
direct::OwnedDirectUserIdentifier,
@@ -76,7 +76,7 @@ pub struct RoomInfoV1 {
sync_info: SyncInfo,
#[serde(default = "encryption_state_default")] // see fn docs for why we use this default
encryption_state_synced: bool,
latest_event: Option<SyncTimelineEvent>,
latest_event: Option<TimelineEvent>,
base_info: BaseRoomInfoV1,
}

View File

@@ -16,7 +16,7 @@
use std::{collections::BTreeMap, fmt};
use matrix_sdk_common::{debug::DebugRawEvent, deserialized_responses::SyncTimelineEvent};
use matrix_sdk_common::{debug::DebugRawEvent, deserialized_responses::TimelineEvent};
use ruma::{
api::client::sync::sync_events::{
v3::{InvitedRoom as InvitedRoomUpdate, KnockedRoom as KnockedRoomUpdate},
@@ -236,7 +236,7 @@ pub struct Timeline {
pub prev_batch: Option<String>,
/// A list of events.
pub events: Vec<SyncTimelineEvent>,
pub events: Vec<TimelineEvent>,
}
impl Timeline {

View File

@@ -6,6 +6,11 @@ All notable changes to this project will be documented in this file.
## [Unreleased] - ReleaseDate
- [**breaking**]: `SyncTimelineEvent` and `TimelineEvent` have been fused into a single type
`TimelineEvent`, and its field `push_actions` has been made `Option`al (it is set to `None` when
we couldn't compute the push actions, because we lacked some information).
([#4568](https://github.com/matrix-org/matrix-rust-sdk/pull/4568))
## [0.9.0] - 2024-12-18
### Bug Fixes

View File

@@ -14,8 +14,10 @@
use std::{collections::BTreeMap, fmt};
#[cfg(doc)]
use ruma::events::AnyTimelineEvent;
use ruma::{
events::{AnyMessageLikeEvent, AnySyncTimelineEvent, AnyTimelineEvent},
events::{AnyMessageLikeEvent, AnySyncTimelineEvent},
push::Action,
serde::{
AsRefStr, AsStrAsRefStr, DebugAsRefStr, DeserializeFromCowStr, FromString, JsonObject, Raw,
@@ -311,13 +313,13 @@ pub struct EncryptionInfo {
//
// 🚨 Note about this type, please read! 🚨
//
// `SyncTimelineEvent` is heavily used across the SDK crates. In some cases, we
// `TimelineEvent` is heavily used across the SDK crates. In some cases, we
// are reaching a [`recursion_limit`] when the compiler is trying to figure out
// if `SyncTimelineEvent` implements `Sync` when it's embedded in other types.
// if `TimelineEvent` implements `Sync` when it's embedded in other types.
//
// We want to help the compiler so that one doesn't need to increase the
// `recursion_limit`. We stop the recursive check by (un)safely implement `Sync`
// and `Send` on `SyncTimelineEvent` directly.
// and `Send` on `TimelineEvent` directly.
//
// See
// https://github.com/matrix-org/matrix-rust-sdk/pull/3749#issuecomment-2312939823
@@ -325,22 +327,24 @@ pub struct EncryptionInfo {
//
// [`recursion_limit`]: https://doc.rust-lang.org/reference/attributes/limits.html#the-recursion_limit-attribute
#[derive(Clone, Debug, Serialize)]
pub struct SyncTimelineEvent {
pub struct TimelineEvent {
/// The event itself, together with any information on decryption.
pub kind: TimelineEventKind,
/// The push actions associated with this event.
#[serde(skip_serializing_if = "Vec::is_empty")]
pub push_actions: Vec<Action>,
///
/// If it's set to `None`, then it means we couldn't compute those actions.
#[serde(skip_serializing_if = "Option::is_none")]
pub push_actions: Option<Vec<Action>>,
}
// See https://github.com/matrix-org/matrix-rust-sdk/pull/3749#issuecomment-2312939823.
#[cfg(not(feature = "test-send-sync"))]
unsafe impl Send for SyncTimelineEvent {}
unsafe impl Send for TimelineEvent {}
// See https://github.com/matrix-org/matrix-rust-sdk/pull/3749#issuecomment-2312939823.
#[cfg(not(feature = "test-send-sync"))]
unsafe impl Sync for SyncTimelineEvent {}
unsafe impl Sync for TimelineEvent {}
#[cfg(feature = "test-send-sync")]
#[test]
@@ -348,19 +352,19 @@ unsafe impl Sync for SyncTimelineEvent {}
fn test_send_sync_for_sync_timeline_event() {
fn assert_send_sync<T: Send + Sync>() {}
assert_send_sync::<SyncTimelineEvent>();
assert_send_sync::<TimelineEvent>();
}
impl SyncTimelineEvent {
/// Create a new `SyncTimelineEvent` from the given raw event.
impl TimelineEvent {
/// Create a new [`TimelineEvent`] from the given raw event.
///
/// This is a convenience constructor for a plaintext event when you don't
/// need to set `push_action`, for example inside a test.
pub fn new(event: Raw<AnySyncTimelineEvent>) -> Self {
Self { kind: TimelineEventKind::PlainText { event }, push_actions: vec![] }
Self { kind: TimelineEventKind::PlainText { event }, push_actions: None }
}
/// Create a new `SyncTimelineEvent` from the given raw event and push
/// Create a new [`TimelineEvent`] from the given raw event and push
/// actions.
///
/// This is a convenience constructor for a plaintext event, for example
@@ -369,23 +373,23 @@ impl SyncTimelineEvent {
event: Raw<AnySyncTimelineEvent>,
push_actions: Vec<Action>,
) -> Self {
Self { kind: TimelineEventKind::PlainText { event }, push_actions }
Self { kind: TimelineEventKind::PlainText { event }, push_actions: Some(push_actions) }
}
/// Create a new `SyncTimelineEvent` to represent the given decryption
/// Create a new [`TimelineEvent`] to represent the given decryption
/// failure.
pub fn new_utd_event(event: Raw<AnySyncTimelineEvent>, utd_info: UnableToDecryptInfo) -> Self {
Self { kind: TimelineEventKind::UnableToDecrypt { event, utd_info }, push_actions: vec![] }
Self { kind: TimelineEventKind::UnableToDecrypt { event, utd_info }, push_actions: None }
}
/// Get the event id of this `SyncTimelineEvent` if the event has any valid
/// Get the event id of this [`TimelineEvent`] if the event has any valid
/// id.
pub fn event_id(&self) -> Option<OwnedEventId> {
self.kind.event_id()
}
/// Returns a reference to the (potentially decrypted) Matrix event inside
/// this `TimelineEvent`.
/// this [`TimelineEvent`].
pub fn raw(&self) -> &Raw<AnySyncTimelineEvent> {
self.kind.raw()
}
@@ -416,21 +420,14 @@ impl SyncTimelineEvent {
}
}
impl From<TimelineEvent> for SyncTimelineEvent {
fn from(o: TimelineEvent) -> Self {
Self { kind: o.kind, push_actions: o.push_actions.unwrap_or_default() }
}
}
impl From<DecryptedRoomEvent> for SyncTimelineEvent {
impl From<DecryptedRoomEvent> for TimelineEvent {
fn from(decrypted: DecryptedRoomEvent) -> Self {
let timeline_event: TimelineEvent = decrypted.into();
timeline_event.into()
Self { kind: TimelineEventKind::Decrypted(decrypted), push_actions: None }
}
}
impl<'de> Deserialize<'de> for SyncTimelineEvent {
/// Custom deserializer for [`SyncTimelineEvent`], to support older formats.
impl<'de> Deserialize<'de> for TimelineEvent {
/// Custom deserializer for [`TimelineEvent`], to support older formats.
///
/// Ideally we might use an untagged enum and then convert from that;
/// however, that doesn't work due to a [serde bug](https://github.com/serde-rs/json/issues/497).
@@ -451,7 +448,7 @@ impl<'de> Deserialize<'de> for SyncTimelineEvent {
let v0: SyncTimelineEventDeserializationHelperV0 =
serde_json::from_value(Value::Object(value)).map_err(|e| {
serde::de::Error::custom(format!(
"Unable to deserialize V0-format SyncTimelineEvent: {}",
"Unable to deserialize V0-format TimelineEvent: {}",
e
))
})?;
@@ -462,7 +459,7 @@ impl<'de> Deserialize<'de> for SyncTimelineEvent {
let v1: SyncTimelineEventDeserializationHelperV1 =
serde_json::from_value(Value::Object(value)).map_err(|e| {
serde::de::Error::custom(format!(
"Unable to deserialize V1-format SyncTimelineEvent: {}",
"Unable to deserialize V1-format TimelineEvent: {}",
e
))
})?;
@@ -471,74 +468,7 @@ impl<'de> Deserialize<'de> for SyncTimelineEvent {
}
}
/// Represents a matrix room event that has been returned from a Matrix
/// client-server API endpoint such as `/messages`, after initial processing.
///
/// The "initial processing" includes an attempt to decrypt encrypted events, so
/// the main thing this adds over [`AnyTimelineEvent`] is information on
/// encryption.
///
/// Previously, this differed from [`SyncTimelineEvent`] by wrapping an
/// [`AnyTimelineEvent`] instead of an [`AnySyncTimelineEvent`], but nowadays
/// they are essentially identical, and one of them should probably be removed.
#[derive(Clone, Debug)]
pub struct TimelineEvent {
/// The event itself, together with any information on decryption.
pub kind: TimelineEventKind,
/// The push actions associated with this event, if we had sufficient
/// context to compute them.
pub push_actions: Option<Vec<Action>>,
}
impl TimelineEvent {
/// Create a new `TimelineEvent` from the given raw event.
///
/// This is a convenience constructor for a plaintext event when you don't
/// need to set `push_action`, for example inside a test.
pub fn new(event: Raw<AnyTimelineEvent>) -> Self {
Self {
// This conversion is unproblematic since a `SyncTimelineEvent` is just a
// `TimelineEvent` without the `room_id`. By converting the raw value in
// this way, we simply cause the `room_id` field in the json to be
// ignored by a subsequent deserialization.
kind: TimelineEventKind::PlainText { event: event.cast() },
push_actions: None,
}
}
/// Create a new `TimelineEvent` to represent the given decryption failure.
pub fn new_utd_event(event: Raw<AnySyncTimelineEvent>, utd_info: UnableToDecryptInfo) -> Self {
Self { kind: TimelineEventKind::UnableToDecrypt { event, utd_info }, push_actions: None }
}
/// Returns a reference to the (potentially decrypted) Matrix event inside
/// this `TimelineEvent`.
pub fn raw(&self) -> &Raw<AnySyncTimelineEvent> {
self.kind.raw()
}
/// If the event was a decrypted event that was successfully decrypted, get
/// its encryption info. Otherwise, `None`.
pub fn encryption_info(&self) -> Option<&EncryptionInfo> {
self.kind.encryption_info()
}
/// Takes ownership of this `TimelineEvent`, returning the (potentially
/// decrypted) Matrix event within.
pub fn into_raw(self) -> Raw<AnySyncTimelineEvent> {
self.kind.into_raw()
}
}
impl From<DecryptedRoomEvent> for TimelineEvent {
fn from(decrypted: DecryptedRoomEvent) -> Self {
Self { kind: TimelineEventKind::Decrypted(decrypted), push_actions: None }
}
}
/// The event within a [`TimelineEvent`] or [`SyncTimelineEvent`], together with
/// encryption data.
/// The event within a [`TimelineEvent`], together with encryption data.
#[derive(Clone, Serialize, Deserialize)]
pub enum TimelineEventKind {
/// A successfully-decrypted encrypted event.
@@ -877,9 +807,9 @@ impl fmt::Debug for PrivOwnedStr {
}
}
/// Deserialization helper for [`SyncTimelineEvent`], for the modern format.
/// Deserialization helper for [`TimelineEvent`], for the modern format.
///
/// This has the exact same fields as [`SyncTimelineEvent`] itself, but has a
/// This has the exact same fields as [`TimelineEvent`] itself, but has a
/// regular `Deserialize` implementation.
#[derive(Debug, Deserialize)]
struct SyncTimelineEventDeserializationHelperV1 {
@@ -891,14 +821,14 @@ struct SyncTimelineEventDeserializationHelperV1 {
push_actions: Vec<Action>,
}
impl From<SyncTimelineEventDeserializationHelperV1> for SyncTimelineEvent {
impl From<SyncTimelineEventDeserializationHelperV1> for TimelineEvent {
fn from(value: SyncTimelineEventDeserializationHelperV1) -> Self {
let SyncTimelineEventDeserializationHelperV1 { kind, push_actions } = value;
SyncTimelineEvent { kind, push_actions }
TimelineEvent { kind, push_actions: Some(push_actions) }
}
}
/// Deserialization helper for [`SyncTimelineEvent`], for an older format.
/// Deserialization helper for [`TimelineEvent`], for an older format.
#[derive(Deserialize)]
struct SyncTimelineEventDeserializationHelperV0 {
/// The actual event.
@@ -919,7 +849,7 @@ struct SyncTimelineEventDeserializationHelperV0 {
unsigned_encryption_info: Option<BTreeMap<UnsignedEventLocation, UnsignedDecryptionResult>>,
}
impl From<SyncTimelineEventDeserializationHelperV0> for SyncTimelineEvent {
impl From<SyncTimelineEventDeserializationHelperV0> for TimelineEvent {
fn from(value: SyncTimelineEventDeserializationHelperV0) -> Self {
let SyncTimelineEventDeserializationHelperV0 {
event,
@@ -946,7 +876,7 @@ impl From<SyncTimelineEventDeserializationHelperV0> for SyncTimelineEvent {
None => TimelineEventKind::PlainText { event },
};
SyncTimelineEvent { kind, push_actions }
TimelineEvent { kind, push_actions: Some(push_actions) }
}
}
@@ -957,17 +887,15 @@ mod tests {
use assert_matches::assert_matches;
use insta::{assert_json_snapshot, with_settings};
use ruma::{
device_id, event_id,
events::{room::message::RoomMessageEventContent, AnySyncTimelineEvent},
serde::Raw,
user_id, DeviceKeyAlgorithm,
device_id, event_id, events::room::message::RoomMessageEventContent, serde::Raw, user_id,
DeviceKeyAlgorithm,
};
use serde::Deserialize;
use serde_json::json;
use super::{
AlgorithmInfo, DecryptedRoomEvent, DeviceLinkProblem, EncryptionInfo, ShieldState,
ShieldStateCode, SyncTimelineEvent, TimelineEvent, TimelineEventKind, UnableToDecryptInfo,
ShieldStateCode, TimelineEvent, TimelineEventKind, UnableToDecryptInfo,
UnableToDecryptReason, UnsignedDecryptionResult, UnsignedEventLocation, VerificationLevel,
VerificationState, WithheldCode,
};
@@ -985,7 +913,7 @@ mod tests {
#[test]
fn sync_timeline_debug_content() {
let room_event = SyncTimelineEvent::new(Raw::new(&example_event()).unwrap().cast());
let room_event = TimelineEvent::new(Raw::new(&example_event()).unwrap().cast());
let debug_s = format!("{room_event:?}");
assert!(
!debug_s.contains("secret"),
@@ -993,18 +921,6 @@ mod tests {
);
}
#[test]
fn room_event_to_sync_room_event() {
let room_event = TimelineEvent::new(Raw::new(&example_event()).unwrap().cast());
let converted_room_event: SyncTimelineEvent = room_event.into();
let converted_event: AnySyncTimelineEvent =
converted_room_event.raw().deserialize().unwrap();
assert_eq!(converted_event.event_id(), "$xxxxx:example.org");
assert_eq!(converted_event.sender(), "@carl:example.com");
}
#[test]
fn old_verification_state_to_new_migration() {
#[derive(Deserialize)]
@@ -1115,7 +1031,7 @@ mod tests {
#[test]
fn sync_timeline_event_serialisation() {
let room_event = SyncTimelineEvent {
let room_event = TimelineEvent {
kind: TimelineEventKind::Decrypted(DecryptedRoomEvent {
event: Raw::new(&example_event()).unwrap().cast(),
encryption_info: EncryptionInfo {
@@ -1177,7 +1093,7 @@ mod tests {
);
// And it can be properly deserialized from the new format.
let event: SyncTimelineEvent = serde_json::from_value(serialized).unwrap();
let event: TimelineEvent = serde_json::from_value(serialized).unwrap();
assert_eq!(event.event_id(), Some(event_id!("$xxxxx:example.org").to_owned()));
assert_matches!(
event.encryption_info().unwrap().algorithm_info,
@@ -1206,7 +1122,7 @@ mod tests {
"verification_state": "Verified",
},
});
let event: SyncTimelineEvent = serde_json::from_value(serialized).unwrap();
let event: TimelineEvent = serde_json::from_value(serialized).unwrap();
assert_eq!(event.event_id(), Some(event_id!("$xxxxx:example.org").to_owned()));
assert_matches!(
event.encryption_info().unwrap().algorithm_info,
@@ -1239,7 +1155,7 @@ mod tests {
"RelationsReplace": {"UnableToDecrypt": {"session_id": "xyz"}}
}
});
let event: SyncTimelineEvent = serde_json::from_value(serialized).unwrap();
let event: TimelineEvent = serde_json::from_value(serialized).unwrap();
assert_eq!(event.event_id(), Some(event_id!("$xxxxx:example.org").to_owned()));
assert_matches!(
event.encryption_info().unwrap().algorithm_info,
@@ -1305,7 +1221,7 @@ mod tests {
assert!(result.is_ok());
// should have migrated to the new format
let event: SyncTimelineEvent = result.unwrap();
let event: TimelineEvent = result.unwrap();
assert_matches!(
event.kind,
TimelineEventKind::UnableToDecrypt { utd_info, .. }=> {
@@ -1447,7 +1363,7 @@ mod tests {
#[test]
fn snapshot_test_sync_timeline_event() {
let room_event = SyncTimelineEvent {
let room_event = TimelineEvent {
kind: TimelineEventKind::Decrypted(DecryptedRoomEvent {
event: Raw::new(&example_event()).unwrap().cast(),
encryption_info: EncryptionInfo {

View File

@@ -28,6 +28,7 @@ pub mod failures_cache;
pub mod linked_chunk;
pub mod locks;
pub mod ring_buffer;
pub mod sleep;
pub mod store_locks;
pub mod timeout;
pub mod tracing_timer;

View File

@@ -0,0 +1,49 @@
// Copyright 2024 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
/// Sleep for the specified duration.
///
/// This is a cross-platform sleep implementation that works on both wasm32 and
/// non-wasm32 targets.
pub async fn sleep(duration: Duration) {
#[cfg(not(target_arch = "wasm32"))]
tokio::time::sleep(duration).await;
#[cfg(target_arch = "wasm32")]
gloo_timers::future::TimeoutFuture::new(u32::try_from(duration.as_millis()).unwrap_or_else(
|_| {
tracing::error!("Sleep duration too long, sleeping for u32::MAX ms");
u32::MAX
},
))
.await;
}
#[cfg(test)]
mod tests {
use matrix_sdk_test_macros::async_test;
use super::*;
#[cfg(target_arch = "wasm32")]
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[async_test]
async fn test_sleep() {
// Just test that it doesn't panic
sleep(Duration::from_millis(1)).await;
}
}

View File

@@ -46,11 +46,12 @@ use std::{
time::Duration,
};
use tokio::{sync::Mutex, time::sleep};
use tokio::sync::Mutex;
use tracing::{debug, error, info, instrument, trace};
use crate::{
executor::{spawn, JoinHandle},
sleep::sleep,
SendOutsideWasm,
};

View File

@@ -41,7 +41,7 @@ impl Error for ElapsedError {}
/// an error.
pub async fn timeout<F, T>(future: F, duration: Duration) -> Result<T, ElapsedError>
where
F: Future<Output = T> + Unpin,
F: Future<Output = T>,
{
#[cfg(not(target_arch = "wasm32"))]
return tokio_timeout(duration, future).await.map_err(|_| ElapsedError(()));
@@ -51,7 +51,7 @@ where
let timeout_future =
TimeoutFuture::new(u32::try_from(duration.as_millis()).expect("Overlong duration"));
match select(future, timeout_future).await {
match select(std::pin::pin!(future), timeout_future).await {
Either::Left((res, _)) => Ok(res),
Either::Right((_, _)) => Err(ElapsedError(())),
}

View File

@@ -8,6 +8,11 @@ All notable changes to this project will be documented in this file.
### Features
- [**breaking**] `CollectStrategy::DeviceBasedStrategy` is now split into three
separate strategies (`AllDevices`, `ErrorOnVerifiedUserProblem`,
`OnlyTrustedDevices`), to make the behaviour clearer.
([#4581](https://github.com/matrix-org/matrix-rust-sdk/pull/4581))
- Accept stable identifier `sender_device_keys` for MSC4147 (Including device
keys with Olm-encrypted events).
([#4420](https://github.com/matrix-org/matrix-rust-sdk/pull/4420))

View File

@@ -396,12 +396,20 @@ mod tests {
use js_option::JsOption;
use matrix_sdk_test::async_test;
use ruma::{
api::client::keys::get_keys::v3::Response as KeysQueryResponse, assign,
encryption::DeviceKeys, events::AnyToDeviceEvent, room_id, serde::Raw, user_id, DeviceId,
RoomId, TransactionId, UserId,
api::client::{
dehydrated_device::put_dehydrated_device,
keys::get_keys::v3::Response as KeysQueryResponse,
},
assign,
encryption::DeviceKeys,
events::AnyToDeviceEvent,
room_id,
serde::Raw,
user_id, DeviceId, RoomId, TransactionId, UserId,
};
use crate::{
dehydrated_devices::DehydratedDevice,
machine::{
test_helpers::{create_session, get_prepared_machine_test_helper},
tests::to_device_requests_to_content,
@@ -625,24 +633,18 @@ mod tests {
let alice = get_olm_machine().await;
let dehydrated_device = alice.dehydrated_devices().create().await.unwrap();
let mut request =
legacy_dehydrated_device_keys_for_upload(&dehydrated_device, &pickle_key()).await;
let mut transaction = dehydrated_device.store.transaction().await;
let account = transaction.account().await.unwrap();
account.generate_fallback_key_if_needed();
let (device_keys, mut one_time_keys, _fallback_keys) = account.keys_for_upload();
let device_keys = device_keys.unwrap();
let device_data = account.legacy_dehydrate(&pickle_key().inner);
let device_id = account.device_id().to_owned();
transaction.commit().await.unwrap();
let (key_id, one_time_key) = one_time_keys
let (key_id, one_time_key) = request
.one_time_keys
.pop_first()
.expect("The dehydrated device creation request should contain a one-time key");
let device_id = request.device_id;
// Ensure that we know about the public keys of the dehydrated device.
receive_device_keys(&alice, user_id(), &device_id, device_keys.to_raw()).await;
receive_device_keys(&alice, user_id(), &device_id, request.device_keys).await;
// Create a 1-to-1 Olm session with the dehydrated device.
create_session(&alice, user_id(), &device_id, key_id, one_time_key).await;
@@ -666,7 +668,7 @@ mod tests {
// Rehydrate the device.
let rehydrated = bob
.dehydrated_devices()
.rehydrate(&pickle_key(), &device_id, device_data)
.rehydrate(&pickle_key(), &device_id, request.device_data)
.await
.expect("We should be able to rehydrate the device");
@@ -696,4 +698,35 @@ mod tests {
"The session ids of the imported room key and the outbound group session should match"
);
}
/// Duplicates the behaviour of [`DehydratedDevice::keys_for_upload`],
/// except that it calls [`Account::legacy_dehydrate`] instead of
/// [`Account::dehydrate`].
async fn legacy_dehydrated_device_keys_for_upload(
    dehydrated_device: &DehydratedDevice,
    pickle_key: &DehydratedDeviceKey,
) -> put_dehydrated_device::unstable::Request {
    let mut txn = dehydrated_device.store.transaction().await;
    let account = txn.account().await.unwrap();
    account.generate_fallback_key_if_needed();

    // Gather the key material that goes into the upload request.
    let (device_keys, one_time_keys, fallback_keys) = account.keys_for_upload();
    let mut device_keys = device_keys.unwrap();

    // Cross-sign the device keys with our private identity.
    dehydrated_device
        .store
        .private_identity()
        .lock()
        .await
        .sign_device_keys(&mut device_keys)
        .await
        .expect("Should be able to cross-sign a device");

    let device_id = account.device_id().to_owned();
    // This is the legacy (pre-MSC3814-stable) pickling path under test.
    let device_data = account.legacy_dehydrate(pickle_key.inner.as_ref());
    txn.commit().await.unwrap();

    assign!(put_dehydrated_device::unstable::Request::new(device_id, device_data, device_keys.to_raw()), {
        one_time_keys, fallback_keys
    })
}
}

View File

@@ -374,9 +374,7 @@ pub enum SetRoomSettingsError {
pub enum SessionRecipientCollectionError {
/// One or more verified users has one or more unsigned devices.
///
/// Happens only with [`CollectStrategy::DeviceBasedStrategy`] when
/// [`error_on_verified_user_problem`](`CollectStrategy::DeviceBasedStrategy::error_on_verified_user_problem`)
/// is true.
/// Happens only with [`CollectStrategy::ErrorOnVerifiedUserProblem`].
///
/// In order to resolve this, the caller can set the trust level of the
/// affected devices to [`LocalTrust::Ignored`] or
@@ -388,9 +386,8 @@ pub enum SessionRecipientCollectionError {
/// One or more users was previously verified, but they have changed their
/// identity.
///
/// Happens only with [`CollectStrategy::DeviceBasedStrategy`] when
/// [`error_on_verified_user_problem`](`CollectStrategy::DeviceBasedStrategy::error_on_verified_user_problem`)
/// is true, or with [`CollectStrategy::IdentityBasedStrategy`].
/// Happens only with [`CollectStrategy::ErrorOnVerifiedUserProblem`] or
/// [`CollectStrategy::IdentityBasedStrategy`].
///
/// In order to resolve this, the user can:
///

View File

@@ -852,9 +852,14 @@ impl OlmMachine {
let mut decrypted =
transaction.account().await?.decrypt_to_device_event(&self.inner.store, event).await?;
// Handle the decrypted event, e.g. fetch out Megolm sessions out of
// the event.
self.handle_decrypted_to_device_event(transaction.cache(), &mut decrypted, changes).await?;
// We ignore all to-device events from dehydrated devices - we should not
// receive any
if !self.to_device_event_is_from_dehydrated_device(&decrypted, &event.sender).await? {
// Handle the decrypted event, e.g. fetch out Megolm sessions out of
// the event.
self.handle_decrypted_to_device_event(transaction.cache(), &mut decrypted, changes)
.await?;
}
Ok(decrypted)
}
@@ -1259,13 +1264,20 @@ impl OlmMachine {
}
}
/// Decrypt the supplied to-device event (if needed, and if we can) and
/// handle it.
///
/// Return the same event, decrypted if possible and needed.
///
/// If we can identify that this to-device event came from a dehydrated
/// device, this method does not process it, and returns `None`.
#[instrument(skip_all, fields(sender, event_type, message_id))]
async fn receive_to_device_event(
&self,
transaction: &mut StoreTransaction,
changes: &mut Changes,
mut raw_event: Raw<AnyToDeviceEvent>,
) -> Raw<AnyToDeviceEvent> {
) -> Option<Raw<AnyToDeviceEvent>> {
Self::record_message_id(&raw_event);
let event: ToDeviceEvents = match raw_event.deserialize_as() {
@@ -1274,7 +1286,7 @@ impl OlmMachine {
// Skip invalid events.
warn!("Received an invalid to-device event: {e}");
return raw_event;
return Some(raw_event);
}
};
@@ -1299,10 +1311,30 @@ impl OlmMachine {
}
}
return raw_event;
return Some(raw_event);
}
};
// We ignore all to-device events from dehydrated devices - we should not
// receive any
match self.to_device_event_is_from_dehydrated_device(&decrypted, &e.sender).await {
Ok(true) => {
warn!(
sender = ?e.sender,
session = ?decrypted.session,
"Received a to-device event from a dehydrated device. This is unexpected: ignoring event"
);
return None;
}
Ok(false) => {}
Err(err) => {
error!(
error = ?err,
"Couldn't check whether event is from dehydrated device",
);
}
}
// New sessions modify the account so we need to save that
// one as well.
match decrypted.session {
@@ -1336,7 +1368,41 @@ impl OlmMachine {
e => self.handle_to_device_event(changes, &e).await,
}
raw_event
Some(raw_event)
}
/// Decide whether a decrypted to-device event was sent from a dehydrated
/// device.
///
/// This accepts an [`OlmDecryptionInfo`] because it deals with a decrypted
/// event.
async fn to_device_event_is_from_dehydrated_device(
    &self,
    decrypted: &OlmDecryptionInfo,
    sender_user_id: &UserId,
) -> OlmResult<bool> {
    // First, check any device info embedded in the to-device message itself.
    //
    // There is no need to check whether the embedded device keys are signed
    // correctly - any to-device message that claims to be from a dehydrated
    // device is weird, so we will drop it.
    let claims_to_be_dehydrated = decrypted
        .result
        .event
        .sender_device_keys()
        .is_some_and(|device_keys| device_keys.dehydrated.unwrap_or(false));

    if claims_to_be_dehydrated {
        return Ok(true);
    }

    // Otherwise, consult our existing list of devices, in case the sender is
    // sending us incorrect information embedded in the to-device message,
    // but we know better.
    let known_device = self
        .store()
        .get_device_from_curve_key(sender_user_id, decrypted.result.sender_key)
        .await?;

    Ok(known_device.is_some_and(|d| d.is_dehydrated()))
}
/// Handle a to-device and one-time key counts from a sync response.
@@ -1377,6 +1443,14 @@ impl OlmMachine {
Ok((events, room_key_updates))
}
/// Initial processing of the changes specified within a sync response.
///
/// Returns the to-device events (decrypted where needed and where possible)
/// and the processed set of changes.
///
/// If any of the to-device events in the supplied changes were sent from
/// dehydrated devices, these are not processed, and are omitted from
/// the returned list, as per MSC3814.
pub(crate) async fn preprocess_sync_changes(
&self,
transaction: &mut StoreTransaction,
@@ -1412,7 +1486,10 @@ impl OlmMachine {
for raw_event in sync_changes.to_device_events {
let raw_event =
Box::pin(self.receive_to_device_event(transaction, &mut changes, raw_event)).await;
events.push(raw_event);
if let Some(raw_event) = raw_event {
events.push(raw_event);
}
}
let changed_sessions = self

View File

@@ -34,7 +34,7 @@ use ruma::{
use serde_json::json;
use crate::{
store::Changes,
store::{Changes, MemoryStore},
types::{events::ToDeviceEvent, requests::AnyOutgoingRequest},
CrossSigningBootstrapRequests, DeviceData, OlmMachine,
};
@@ -102,6 +102,23 @@ pub async fn get_machine_after_query_test_helper() -> (OlmMachine, OneTimeKeys)
(machine, otk)
}
/// Build a pair of [`OlmMachine`]s where Alice's machine is backed by the
/// supplied [`MemoryStore`] and uses the given device ID.
///
/// Bob's machine is created via [`get_prepared_machine_test_helper`]; the two
/// machines are given each other's device data before being returned, along
/// with Bob's one-time keys.
pub async fn get_machine_pair_using_store(
    alice: &UserId,
    bob: &UserId,
    use_fallback_key: bool,
    alice_store: MemoryStore,
    alice_device_id: &DeviceId,
) -> (OlmMachine, OlmMachine, OneTimeKeys) {
    let (bob_machine, one_time_keys) =
        get_prepared_machine_test_helper(bob, use_fallback_key).await;

    let alice_machine = OlmMachine::with_store(alice, alice_device_id, alice_store, None)
        .await
        .expect("Failed to create OlmMachine from supplied store");

    store_each_others_device_data(&alice_machine, &bob_machine).await;

    (alice_machine, bob_machine, one_time_keys)
}
pub async fn get_machine_pair(
alice: &UserId,
bob: &UserId,
@@ -112,12 +129,34 @@ pub async fn get_machine_pair(
let alice_device = alice_device_id();
let alice = OlmMachine::new(alice, alice_device).await;
let alice_device = DeviceData::from_machine_test_helper(&alice).await.unwrap();
let bob_device = DeviceData::from_machine_test_helper(&bob).await.unwrap();
store_each_others_device_data(&alice, &bob).await;
(alice, bob, otk)
}
/// Store alice's device data in bob's store and vice versa, so that each
/// machine knows about the other's device.
async fn store_each_others_device_data(alice: &OlmMachine, bob: &OlmMachine) {
    let alice_data = DeviceData::from_machine_test_helper(alice).await.unwrap();
    let bob_data = DeviceData::from_machine_test_helper(bob).await.unwrap();

    bob.store().save_device_data(&[alice_data]).await.unwrap();
    alice.store().save_device_data(&[bob_data]).await.unwrap();
}
(alice, bob, otk)
/// Return a pair of [`OlmMachine`]s, with an olm session created on Alice's
/// side, but with no message yet sent.
///
/// Alice's `OlmMachine` is created using the [`MemoryStore`] and device ID
/// provided.
pub async fn get_machine_pair_with_session_using_store(
    alice: &UserId,
    bob: &UserId,
    use_fallback_key: bool,
    alice_store: MemoryStore,
    alice_device_id: &DeviceId,
) -> (OlmMachine, OlmMachine) {
    let (alice_machine, bob_machine, one_time_keys) =
        get_machine_pair_using_store(alice, bob, use_fallback_key, alice_store, alice_device_id)
            .await;
    build_session_for_pair(alice_machine, bob_machine, one_time_keys).await
}
/// Return a pair of [`OlmMachine`]s, with an olm session created on Alice's
@@ -127,8 +166,20 @@ pub async fn get_machine_pair_with_session(
bob: &UserId,
use_fallback_key: bool,
) -> (OlmMachine, OlmMachine) {
let (alice, bob, mut one_time_keys) = get_machine_pair(alice, bob, use_fallback_key).await;
let (alice, bob, one_time_keys) = get_machine_pair(alice, bob, use_fallback_key).await;
build_session_for_pair(alice, bob, one_time_keys).await
}
/// Create a session for the two supplied Olm machines to communicate.
async fn build_session_for_pair(
alice: OlmMachine,
bob: OlmMachine,
mut one_time_keys: BTreeMap<
ruma::OwnedKeyId<ruma::OneTimeKeyAlgorithm, ruma::OneTimeKeyName>,
Raw<OneTimeKey>,
>,
) -> (OlmMachine, OlmMachine) {
let (device_key_id, one_time_key) = one_time_keys.pop_first().unwrap();
let one_time_keys = BTreeMap::from([(

View File

@@ -38,7 +38,7 @@ use ruma::{
room_id,
serde::Raw,
uint, user_id, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch,
OneTimeKeyAlgorithm, TransactionId, UserId,
OneTimeKeyAlgorithm, RoomId, TransactionId, UserId,
};
use serde_json::json;
use vodozemac::{
@@ -48,17 +48,21 @@ use vodozemac::{
use super::CrossSigningBootstrapRequests;
use crate::{
error::EventError,
error::{EventError, OlmResult},
machine::{
test_helpers::{
get_machine_after_query_test_helper, get_machine_pair_with_session,
get_machine_pair_with_session_using_store,
get_machine_pair_with_setup_sessions_test_helper, get_prepared_machine_test_helper,
},
EncryptionSyncChanges, OlmMachine,
},
olm::{BackedUpRoomKey, ExportedRoomKey, SenderData, VerifyJson},
session_manager::CollectStrategy,
store::{BackupDecryptionKey, Changes, CryptoStore, MemoryStore},
store::{
BackupDecryptionKey, Changes, CryptoStore, DeviceChanges, MemoryStore, PendingChanges,
RoomKeyInfo,
},
types::{
events::{
room::encrypted::{EncryptedToDeviceEvent, ToDeviceEncryptedEventContent},
@@ -70,7 +74,7 @@ use crate::{
},
utilities::json_convert,
verification::tests::bob_id,
Account, DecryptionSettings, DeviceData, EncryptionSettings, MegolmError, OlmError,
Account, DecryptionSettings, DeviceData, EncryptionSettings, LocalTrust, MegolmError, OlmError,
RoomEventDecryptionResult, TrustRequirement,
};
@@ -388,33 +392,10 @@ async fn test_missing_sessions_calculation() {
#[async_test]
async fn test_room_key_sharing() {
let (alice, bob) = get_machine_pair_with_session(alice_id(), user_id(), false).await;
let room_id = room_id!("!test:example.org");
let to_device_requests = alice
.share_room_key(room_id, iter::once(bob.user_id()), EncryptionSettings::default())
.await
.unwrap();
let event = ToDeviceEvent::new(
alice.user_id().to_owned(),
to_device_requests_to_content(to_device_requests),
);
let event = json_convert(&event).unwrap();
let alice_session =
alice.inner.group_session_manager.get_outbound_group_session(room_id).unwrap();
let (decrypted, room_key_updates) = bob
.receive_sync_changes(EncryptionSyncChanges {
to_device_events: vec![event],
changed_devices: &Default::default(),
one_time_keys_counts: &Default::default(),
unused_fallback_keys: None,
next_batch_token: None,
})
.await
.unwrap();
let (decrypted, room_key_updates) =
send_room_key_to_device(&alice, &bob, room_id).await.unwrap();
let event = decrypted[0].deserialize().unwrap();
@@ -425,6 +406,9 @@ async fn test_room_key_sharing() {
panic!("expected RoomKeyEvent found {event:?}");
}
let alice_session =
alice.inner.group_session_manager.get_outbound_group_session(room_id).unwrap();
let session = bob.store().get_inbound_group_session(room_id, alice_session.session_id()).await;
assert!(session.unwrap().is_some());
@@ -434,6 +418,101 @@ async fn test_room_key_sharing() {
assert_eq!(room_key_updates[0].session_id, alice_session.session_id());
}
#[async_test]
async fn test_to_device_messages_from_dehydrated_devices_are_ignored() {
    // Given alice's device is dehydrated
    let (alice, bob) = create_dehydrated_machine_and_pair().await;

    // When we send a to-device message from alice to bob
    // (Note: we send a room_key message, but it could be any to-device message.)
    let room_id = room_id!("!test:example.org");
    let (to_device_events, room_key_updates) =
        send_room_key_to_device(&alice, &bob, room_id).await.unwrap();

    // Then the to-device message was discarded, because it was from a dehydrated
    // device
    assert!(to_device_events.is_empty());

    // And the room key was not imported as a session
    let outbound_session =
        alice.inner.group_session_manager.get_outbound_group_session(room_id).unwrap();
    let inbound_session =
        bob.store().get_inbound_group_session(room_id, outbound_session.session_id()).await;
    assert!(inbound_session.unwrap().is_none());
    assert!(room_key_updates.is_empty());
}
/// "Send" a to-device message containing a room key from sender to receiver.
///
/// (Actually constructs the JSON of a to-device message from `sender` and
/// feeds it in to `receiver`'s `receive_sync_changes` method.)
///
/// Returns the return value of `receive_sync_changes`, which is a tuple of
/// (decrypted to-device events, updated room keys).
async fn send_room_key_to_device(
    sender: &OlmMachine,
    receiver: &OlmMachine,
    room_id: &RoomId,
) -> OlmResult<(Vec<Raw<AnyToDeviceEvent>>, Vec<RoomKeyInfo>)> {
    // Have the sender share a room key with the receiver...
    let requests = sender
        .share_room_key(room_id, iter::once(receiver.user_id()), EncryptionSettings::default())
        .await
        .unwrap();

    // ...and turn the resulting requests into a raw to-device event.
    let to_device_event = ToDeviceEvent::new(
        sender.user_id().to_owned(),
        to_device_requests_to_content(requests),
    );
    let raw_event = json_convert(&to_device_event).unwrap();

    let sync_changes = EncryptionSyncChanges {
        to_device_events: vec![raw_event],
        changed_devices: &Default::default(),
        one_time_keys_counts: &Default::default(),
        unused_fallback_keys: None,
        next_batch_token: None,
    };

    receiver.receive_sync_changes(sync_changes).await
}
/// Create an alice, bob pair where alice's device is dehydrated. Create a
/// session for messages from alice to bob, and ensure bob knows alice's device
/// is dehydrated.
async fn create_dehydrated_machine_and_pair() -> (OlmMachine, OlmMachine) {
    // Create a store holding info about an account that is linked to a dehydrated
    // device. This should never happen in real life, so we have to poke the
    // info into the store directly.
    let alice_store = MemoryStore::new();
    let alice_dehydrated_account = Account::new_dehydrated(alice_id());

    // NOTE(review): `alice_static_account` is never read after this mutation
    // (it is a clone, so the flag does not propagate to the account) — dead
    // code, or should it feed into the device data below? Confirm.
    let mut alice_static_account = alice_dehydrated_account.static_data().clone();
    alice_static_account.dehydrated = true;

    // Mark alice's device as verified so that it is a valid recipient.
    let alice_device = DeviceData::from_account(&alice_dehydrated_account);
    let alice_dehydrated_device_id = alice_device.device_id().to_owned();
    alice_device.set_trust_state(LocalTrust::Verified);

    let changes = Changes {
        devices: DeviceChanges { new: vec![alice_device], ..Default::default() },
        ..Default::default()
    };
    // Fixed typo in the expect message: "same" -> "save".
    alice_store.save_changes(changes).await.expect("Failed to save changes to the store");
    alice_store
        .save_pending_changes(PendingChanges { account: Some(alice_dehydrated_account) })
        .await
        .expect("Failed to save pending changes to the store");

    // Create the alice machine using the store we have made (and also create a
    // normal bob machine)
    get_machine_pair_with_session_using_store(
        alice_id(),
        user_id(),
        false,
        alice_store,
        &alice_dehydrated_device_id,
    )
    .await
}
#[async_test]
async fn test_request_missing_secrets() {
let (alice, _) = get_machine_pair_with_session(alice_id(), bob_id(), false).await;
@@ -594,10 +673,7 @@ async fn test_withheld_unverified() {
let encryption_settings = EncryptionSettings::default();
let encryption_settings = EncryptionSettings {
sharing_strategy: CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: true,
error_on_verified_user_problem: false,
},
sharing_strategy: CollectStrategy::OnlyTrustedDevices,
..encryption_settings
};
@@ -851,7 +927,7 @@ async fn test_query_ratcheted_key() {
.await
.unwrap()
.expect("should exist")
.set_trust_state(crate::LocalTrust::Verified);
.set_trust_state(LocalTrust::Verified);
alice.create_outbound_group_session_with_defaults_test_helper(room_id).await.unwrap();

View File

@@ -788,10 +788,7 @@ mod tests {
let settings = EncryptionSettings::new(
content.clone(),
HistoryVisibility::Joined,
CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: false,
error_on_verified_user_problem: false,
},
CollectStrategy::AllDevices,
);
assert_eq!(settings.rotation_period, ROTATION_PERIOD);
@@ -803,10 +800,7 @@ mod tests {
let settings = EncryptionSettings::new(
content,
HistoryVisibility::Shared,
CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: false,
error_on_verified_user_problem: false,
},
CollectStrategy::AllDevices,
);
assert_eq!(settings.rotation_period, Duration::from_millis(3600));

View File

@@ -1163,10 +1163,7 @@ mod tests {
.any(|d| d.user_id() == user_id && d.device_id() == device_id));
let settings = EncryptionSettings {
sharing_strategy: CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: true,
error_on_verified_user_problem: false,
},
sharing_strategy: CollectStrategy::OnlyTrustedDevices,
..Default::default()
};
let users = [user_id].into_iter();
@@ -1226,10 +1223,7 @@ mod tests {
let users = keys_claim.one_time_keys.keys().map(Deref::deref);
let settings = EncryptionSettings {
sharing_strategy: CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: true,
error_on_verified_user_problem: false,
},
sharing_strategy: CollectStrategy::OnlyTrustedDevices,
..Default::default()
};

View File

@@ -35,40 +35,43 @@ use crate::{Device, UserIdentity};
/// Strategy to collect the devices that should receive room keys for the
/// current discussion.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
#[cfg_attr(feature = "uniffi", derive(uniffi::Enum))]
#[serde(from = "CollectStrategyDeserializationHelper")]
pub enum CollectStrategy {
/// Device based sharing strategy.
DeviceBasedStrategy {
/// If `true`, devices that are not trusted will be excluded from the
/// conversation. A device is trusted if any of the following is true:
/// - It was manually marked as trusted.
/// - It was marked as verified via interactive verification.
/// - It is signed by its owner identity, and this identity has been
/// trusted via interactive verification.
/// - It is the current own device of the user.
only_allow_trusted_devices: bool,
/// Share with all (unblacklisted) devices.
#[default]
AllDevices,
/// If `true`, and a verified user has an unsigned device, key sharing
/// will fail with a
/// [`SessionRecipientCollectionError::VerifiedUserHasUnsignedDevice`].
///
/// If `true`, and a verified user has replaced their identity, key
/// sharing will fail with a
/// [`SessionRecipientCollectionError::VerifiedUserChangedIdentity`].
///
/// Otherwise, keys are shared with unsigned devices as normal.
///
/// Once the problematic devices are blacklisted or whitelisted the
/// caller can retry to share a second time.
#[serde(default)]
error_on_verified_user_problem: bool,
},
/// Share with all devices, except errors for *verified* users cause sharing
/// to fail with an error.
///
/// In this strategy, if a verified user has an unsigned device,
/// key sharing will fail with a
/// [`SessionRecipientCollectionError::VerifiedUserHasUnsignedDevice`].
/// If a verified user has replaced their identity, key
/// sharing will fail with a
/// [`SessionRecipientCollectionError::VerifiedUserChangedIdentity`].
///
/// Otherwise, keys are shared with unsigned devices as normal.
///
/// Once the problematic devices are blacklisted or whitelisted the
/// caller can retry to share a second time.
ErrorOnVerifiedUserProblem,
/// Share based on identity. Only distribute to devices signed by their
/// owner. If a user has no published identity he will not receive
/// any room keys.
IdentityBasedStrategy,
/// Only share keys with devices that we "trust". A device is trusted if any
/// of the following is true:
/// - It was manually marked as trusted.
/// - It was marked as verified via interactive verification.
/// - It is signed by its owner identity, and this identity has been
/// trusted via interactive verification.
/// - It is the current own device of the user.
OnlyTrustedDevices,
}
impl CollectStrategy {
@@ -78,11 +81,47 @@ impl CollectStrategy {
}
}
impl Default for CollectStrategy {
fn default() -> Self {
CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: false,
error_on_verified_user_problem: false,
/// Deserialization helper for [`CollectStrategy`].
///
/// Mirrors every current [`CollectStrategy`] variant, plus the legacy
/// `DeviceBasedStrategy` form, so that data serialized before the enum was
/// split still deserializes (see the `From` impl below for the mapping).
#[derive(Deserialize)]
enum CollectStrategyDeserializationHelper {
    /// `AllDevices`, `ErrorOnVerifiedUserProblem` and `OnlyTrustedDevices` used
    /// to be implemented as a single strategy with flags.
    DeviceBasedStrategy {
        // Both flags default to `false` when absent from the stored data.
        #[serde(default)]
        error_on_verified_user_problem: bool,
        #[serde(default)]
        only_allow_trusted_devices: bool,
    },
    /// Corresponds to [`CollectStrategy::AllDevices`].
    AllDevices,
    /// Corresponds to [`CollectStrategy::ErrorOnVerifiedUserProblem`].
    ErrorOnVerifiedUserProblem,
    /// Corresponds to [`CollectStrategy::IdentityBasedStrategy`].
    IdentityBasedStrategy,
    /// Corresponds to [`CollectStrategy::OnlyTrustedDevices`].
    OnlyTrustedDevices,
}
impl From<CollectStrategyDeserializationHelper> for CollectStrategy {
    fn from(value: CollectStrategyDeserializationHelper) -> Self {
        use CollectStrategyDeserializationHelper::*;

        match value {
            // Map the legacy flag-based strategy onto the dedicated variants.
            // `only_allow_trusted_devices` takes precedence over
            // `error_on_verified_user_problem`.
            DeviceBasedStrategy { only_allow_trusted_devices, error_on_verified_user_problem } => {
                if only_allow_trusted_devices {
                    CollectStrategy::OnlyTrustedDevices
                } else if error_on_verified_user_problem {
                    CollectStrategy::ErrorOnVerifiedUserProblem
                } else {
                    CollectStrategy::AllDevices
                }
            }
            AllDevices => CollectStrategy::AllDevices,
            ErrorOnVerifiedUserProblem => CollectStrategy::ErrorOnVerifiedUserProblem,
            IdentityBasedStrategy => CollectStrategy::IdentityBasedStrategy,
            OnlyTrustedDevices => CollectStrategy::OnlyTrustedDevices,
        }
    }
}
@@ -93,7 +132,7 @@ impl Default for CollectStrategy {
/// (`should_rotate`) and the list of users/devices that should receive
/// (`devices`) or not the session, including withheld reason
/// `withheld_devices`.
#[derive(Debug)]
#[derive(Debug, Default)]
pub(crate) struct CollectRecipientsResult {
/// If true the outbound group session should be rotated
pub should_rotate: bool,
@@ -118,8 +157,7 @@ pub(crate) async fn collect_session_recipients(
outbound: &OutboundGroupSession,
) -> OlmResult<CollectRecipientsResult> {
let users: BTreeSet<&UserId> = users.collect();
let mut devices: BTreeMap<OwnedUserId, Vec<DeviceData>> = Default::default();
let mut withheld_devices: Vec<(DeviceData, WithheldCode)> = Default::default();
let mut result = CollectRecipientsResult::default();
let mut verified_users_with_new_identities: Vec<OwnedUserId> = Default::default();
trace!(?users, ?settings, "Calculating group session recipients");
@@ -144,82 +182,64 @@ pub(crate) async fn collect_session_recipients(
// 4. The encryption algorithm changed.
//
// This is calculated in the following code and stored in this variable.
let mut should_rotate = user_left || visibility_changed || algorithm_changed;
result.should_rotate = user_left || visibility_changed || algorithm_changed;
let own_identity = store.get_user_identity(store.user_id()).await?.and_then(|i| i.into_own());
// Get the recipient and withheld devices, based on the collection strategy.
match settings.sharing_strategy {
CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices,
error_on_verified_user_problem,
} => {
CollectStrategy::AllDevices => {
for user_id in users {
trace!(
"CollectStrategy::AllDevices: Considering recipient devices for user {}",
user_id
);
let user_devices = store.get_device_data_for_user_filtered(user_id).await?;
let recipient_devices =
split_devices_for_user_for_all_devices_strategy(user_devices);
update_recipients_for_user(&mut result, outbound, user_id, recipient_devices);
}
}
CollectStrategy::ErrorOnVerifiedUserProblem => {
let mut unsigned_devices_of_verified_users: BTreeMap<OwnedUserId, Vec<OwnedDeviceId>> =
Default::default();
for user_id in users {
trace!("Considering recipient devices for user {}", user_id);
trace!("CollectStrategy::ErrorOnVerifiedUserProblem: Considering recipient devices for user {}", user_id);
let user_devices = store.get_device_data_for_user_filtered(user_id).await?;
// We only need the user identity if `only_allow_trusted_devices` or
// `error_on_verified_user_problem` is set.
let device_owner_identity =
if only_allow_trusted_devices || error_on_verified_user_problem {
store.get_user_identity(user_id).await?
} else {
None
};
let device_owner_identity = store.get_user_identity(user_id).await?;
if error_on_verified_user_problem
&& has_identity_verification_violation(
own_identity.as_ref(),
device_owner_identity.as_ref(),
)
{
if has_identity_verification_violation(
own_identity.as_ref(),
device_owner_identity.as_ref(),
) {
verified_users_with_new_identities.push(user_id.to_owned());
// No point considering the individual devices of this user.
continue;
}
let recipient_devices = split_devices_for_user(
user_devices,
&own_identity,
&device_owner_identity,
only_allow_trusted_devices,
error_on_verified_user_problem,
);
// If `error_on_verified_user_problem` is set, then
// `unsigned_of_verified_user` may be populated. If so, add an entry to the
// list of users with unsigned devices.
if !recipient_devices.unsigned_of_verified_user.is_empty() {
unsigned_devices_of_verified_users.insert(
user_id.to_owned(),
recipient_devices
.unsigned_of_verified_user
.into_iter()
.map(|d| d.device_id().to_owned())
.collect(),
let recipient_devices =
split_devices_for_user_for_error_on_verified_user_problem_strategy(
user_devices,
&own_identity,
&device_owner_identity,
);
}
// If we haven't already concluded that the session should be
// rotated for other reasons, we also need to check whether any
// of the devices in the session got deleted or blacklisted in the
// meantime. If so, we should also rotate the session.
if !should_rotate {
should_rotate = is_session_overshared_for_user(
outbound,
user_id,
&recipient_devices.allowed_devices,
)
match recipient_devices {
ErrorOnVerifiedUserProblemResult::UnsignedDevicesOfVerifiedUser(devices) => {
unsigned_devices_of_verified_users.insert(user_id.to_owned(), devices);
}
ErrorOnVerifiedUserProblemResult::Devices(recipient_devices) => {
update_recipients_for_user(
&mut result,
outbound,
user_id,
recipient_devices,
);
}
}
devices
.entry(user_id.to_owned())
.or_default()
.extend(recipient_devices.allowed_devices);
withheld_devices.extend(recipient_devices.denied_devices_with_code);
}
// If `error_on_verified_user_problem` is set, then
@@ -251,7 +271,7 @@ pub(crate) async fn collect_session_recipients(
}
for user_id in users {
trace!("Considering recipient devices for user {}", user_id);
trace!("CollectStrategy::IdentityBasedStrategy: Considering recipient devices for user {}", user_id);
let user_devices = store.get_device_data_for_user_filtered(user_id).await?;
let device_owner_identity = store.get_user_identity(user_id).await?;
@@ -265,28 +285,28 @@ pub(crate) async fn collect_session_recipients(
continue;
}
let recipient_devices = split_recipients_withhelds_for_user_based_on_identity(
let recipient_devices = split_devices_for_user_for_identity_based_strategy(
user_devices,
&device_owner_identity,
);
// If we haven't already concluded that the session should be
// rotated for other reasons, we also need to check whether any
// of the devices in the session got deleted or blacklisted in the
// meantime. If so, we should also rotate the session.
if !should_rotate {
should_rotate = is_session_overshared_for_user(
outbound,
user_id,
&recipient_devices.allowed_devices,
)
}
update_recipients_for_user(&mut result, outbound, user_id, recipient_devices);
}
}
devices
.entry(user_id.to_owned())
.or_default()
.extend(recipient_devices.allowed_devices);
withheld_devices.extend(recipient_devices.denied_devices_with_code);
CollectStrategy::OnlyTrustedDevices => {
for user_id in users {
trace!("CollectStrategy::OnlyTrustedDevices: Considering recipient devices for user {}", user_id);
let user_devices = store.get_device_data_for_user_filtered(user_id).await?;
let device_owner_identity = store.get_user_identity(user_id).await?;
let recipient_devices = split_devices_for_user_for_only_trusted_devices(
user_devices,
&own_identity,
&device_owner_identity,
);
update_recipients_for_user(&mut result, outbound, user_id, recipient_devices);
}
}
}
@@ -301,18 +321,43 @@ pub(crate) async fn collect_session_recipients(
));
}
if should_rotate {
if result.should_rotate {
debug!(
should_rotate,
result.should_rotate,
user_left,
visibility_changed,
algorithm_changed,
"Rotating room key to protect room history",
);
}
trace!(should_rotate, "Done calculating group session recipients");
trace!(result.should_rotate, "Done calculating group session recipients");
Ok(CollectRecipientsResult { should_rotate, devices, withheld_devices })
Ok(result)
}
/// Update this [`CollectRecipientsResult`] with the device list for a specific
/// user.
fn update_recipients_for_user(
recipients: &mut CollectRecipientsResult,
outbound: &OutboundGroupSession,
user_id: &UserId,
recipient_devices: RecipientDevicesForUser,
) {
// If we haven't already concluded that the session should be
// rotated for other reasons, we also need to check whether any
// of the devices in the session got deleted or blacklisted in the
// meantime. If so, we should also rotate the session.
if !recipients.should_rotate {
recipients.should_rotate =
is_session_overshared_for_user(outbound, user_id, &recipient_devices.allowed_devices)
}
recipients
.devices
.entry(user_id.to_owned())
.or_default()
.extend(recipient_devices.allowed_devices);
recipients.withheld_devices.extend(recipient_devices.denied_devices_with_code);
}
/// Check if the session has been shared with a device belonging to the given
@@ -368,83 +413,132 @@ fn is_session_overshared_for_user(
should_rotate
}
/// Result type for [`split_devices_for_user`].
/// Result type for [`split_devices_for_user_for_all_devices_strategy`],
/// [`split_devices_for_user_for_error_on_verified_user_problem_strategy`],
/// [`split_devices_for_user_for_identity_based_strategy`],
/// [`split_devices_for_user_for_only_trusted_devices`].
///
/// A partitioning of the devices for a given user.
#[derive(Default)]
struct DeviceBasedRecipientDevices {
struct RecipientDevicesForUser {
/// Devices that should receive the room key.
allowed_devices: Vec<DeviceData>,
/// Devices that should receive a withheld code.
denied_devices_with_code: Vec<(DeviceData, WithheldCode)>,
/// Devices that should cause the transmission to fail, due to being an
/// unsigned device belonging to a verified user. Only populated by
/// [`split_devices_for_user`], when
/// `error_on_verified_user_problem` is set.
unsigned_of_verified_user: Vec<DeviceData>,
}
/// Result type for
/// [`split_devices_for_user_for_error_on_verified_user_problem_strategy`].
enum ErrorOnVerifiedUserProblemResult {
/// We found devices that should cause the transmission to fail, due to
/// being an unsigned device belonging to a verified user. Only
/// populated when `error_on_verified_user_problem` is set.
UnsignedDevicesOfVerifiedUser(Vec<OwnedDeviceId>),
/// There were no unsigned devices of verified users.
Devices(RecipientDevicesForUser),
}
/// Partition the list of a user's devices according to whether they should
/// receive the key, for [`CollectStrategy::DeviceBasedStrategy`].
/// receive the key, for [`CollectStrategy::AllDevices`].
fn split_devices_for_user_for_all_devices_strategy(
user_devices: HashMap<OwnedDeviceId, DeviceData>,
) -> RecipientDevicesForUser {
let (left, right) = user_devices.into_values().partition_map(|d| {
if d.is_blacklisted() {
Either::Right((d, WithheldCode::Blacklisted))
} else {
Either::Left(d)
}
});
RecipientDevicesForUser { allowed_devices: left, denied_devices_with_code: right }
}
/// Partition the list of a user's devices according to whether they should
/// receive the key, for [`CollectStrategy::ErrorOnVerifiedUserProblem`].
///
/// We split the list into three buckets:
/// This function returns one of two values:
///
/// * the devices that should receive the room key.
/// * A list of the devices that should cause the transmission to fail due to
/// being unsigned. In this case, we don't bother to return the rest of the
/// devices, because we assume transmission will fail.
///
/// * the devices that should receive a withheld code.
///
/// * If `error_on_verified_user_problem` is set, the devices that should cause
/// the transmission to fail due to being unsigned. (If
/// `error_on_verified_user_problem` is unset, these devices are otherwise
/// partitioned into `allowed_devices`.)
fn split_devices_for_user(
/// * Otherwise, returns a [`RecipientDevicesForUser`] which lists, separately,
/// the devices that should receive the room key, and those that should
/// receive a withheld code.
fn split_devices_for_user_for_error_on_verified_user_problem_strategy(
user_devices: HashMap<OwnedDeviceId, DeviceData>,
own_identity: &Option<OwnUserIdentityData>,
device_owner_identity: &Option<UserIdentityData>,
only_allow_trusted_devices: bool,
error_on_verified_user_problem: bool,
) -> DeviceBasedRecipientDevices {
let mut recipient_devices: DeviceBasedRecipientDevices = Default::default();
) -> ErrorOnVerifiedUserProblemResult {
let mut recipient_devices = RecipientDevicesForUser::default();
// We construct unsigned_devices_of_verified_users lazily, because chances are
// we won't need it.
let mut unsigned_devices_of_verified_users: Option<Vec<OwnedDeviceId>> = None;
for d in user_devices.into_values() {
if d.is_blacklisted() {
recipient_devices.denied_devices_with_code.push((d, WithheldCode::Blacklisted));
} else if d.local_trust_state() == LocalTrust::Ignored {
// Ignore the trust state of that device and share
recipient_devices.allowed_devices.push(d);
} else if only_allow_trusted_devices && !d.is_verified(own_identity, device_owner_identity)
{
recipient_devices.denied_devices_with_code.push((d, WithheldCode::Unverified));
} else if error_on_verified_user_problem
&& is_unsigned_device_of_verified_user(
own_identity.as_ref(),
device_owner_identity.as_ref(),
&d,
)
{
recipient_devices.unsigned_of_verified_user.push(d)
} else {
recipient_devices.allowed_devices.push(d);
match handle_device_for_user_for_error_on_verified_user_problem_strategy(
&d,
own_identity.as_ref(),
device_owner_identity.as_ref(),
) {
ErrorOnVerifiedUserProblemDeviceDecision::Ok => {
recipient_devices.allowed_devices.push(d)
}
ErrorOnVerifiedUserProblemDeviceDecision::Withhold(code) => {
recipient_devices.denied_devices_with_code.push((d, code))
}
ErrorOnVerifiedUserProblemDeviceDecision::UnsignedOfVerified => {
unsigned_devices_of_verified_users
.get_or_insert_with(Vec::default)
.push(d.device_id().to_owned())
}
}
}
recipient_devices
if let Some(devices) = unsigned_devices_of_verified_users {
ErrorOnVerifiedUserProblemResult::UnsignedDevicesOfVerifiedUser(devices)
} else {
ErrorOnVerifiedUserProblemResult::Devices(recipient_devices)
}
}
/// Result type for [`split_recipients_withhelds_for_user_based_on_identity`].
#[derive(Default)]
struct IdentityBasedRecipientDevices {
/// Devices that should receive the room key.
allowed_devices: Vec<DeviceData>,
/// Devices that should receive a withheld code.
denied_devices_with_code: Vec<(DeviceData, WithheldCode)>,
/// Result type for
/// [`handle_device_for_user_for_error_on_verified_user_problem_strategy`].
enum ErrorOnVerifiedUserProblemDeviceDecision {
Ok,
Withhold(WithheldCode),
UnsignedOfVerified,
}
fn split_recipients_withhelds_for_user_based_on_identity(
fn handle_device_for_user_for_error_on_verified_user_problem_strategy(
device: &DeviceData,
own_identity: Option<&OwnUserIdentityData>,
device_owner_identity: Option<&UserIdentityData>,
) -> ErrorOnVerifiedUserProblemDeviceDecision {
if device.is_blacklisted() {
ErrorOnVerifiedUserProblemDeviceDecision::Withhold(WithheldCode::Blacklisted)
} else if device.local_trust_state() == LocalTrust::Ignored {
// Ignore the trust state of that device and share
ErrorOnVerifiedUserProblemDeviceDecision::Ok
} else if is_unsigned_device_of_verified_user(own_identity, device_owner_identity, device) {
ErrorOnVerifiedUserProblemDeviceDecision::UnsignedOfVerified
} else {
ErrorOnVerifiedUserProblemDeviceDecision::Ok
}
}
fn split_devices_for_user_for_identity_based_strategy(
user_devices: HashMap<OwnedDeviceId, DeviceData>,
device_owner_identity: &Option<UserIdentityData>,
) -> IdentityBasedRecipientDevices {
) -> RecipientDevicesForUser {
match device_owner_identity {
None => {
// withheld all the users devices, we need to have an identity for this
// distribution mode
IdentityBasedRecipientDevices {
RecipientDevicesForUser {
allowed_devices: Vec::default(),
denied_devices_with_code: user_devices
.into_values()
@@ -464,7 +558,7 @@ fn split_recipients_withhelds_for_user_based_on_identity(
Either::Right((d, WithheldCode::Unverified))
}
});
IdentityBasedRecipientDevices {
RecipientDevicesForUser {
allowed_devices: recipients,
denied_devices_with_code: withheld_recipients,
}
@@ -472,6 +566,27 @@ fn split_recipients_withhelds_for_user_based_on_identity(
}
}
/// Partition the list of a user's devices according to whether they should
/// receive the key, for [`CollectStrategy::OnlyTrustedDevices`].
fn split_devices_for_user_for_only_trusted_devices(
user_devices: HashMap<OwnedDeviceId, DeviceData>,
own_identity: &Option<OwnUserIdentityData>,
device_owner_identity: &Option<UserIdentityData>,
) -> RecipientDevicesForUser {
let (left, right) = user_devices.into_values().partition_map(|d| {
match (
d.local_trust_state(),
d.is_cross_signing_trusted(own_identity, device_owner_identity),
) {
(LocalTrust::BlackListed, _) => Either::Right((d, WithheldCode::Blacklisted)),
(LocalTrust::Ignored | LocalTrust::Verified, _) => Either::Left(d),
(LocalTrust::Unset, false) => Either::Right((d, WithheldCode::Unverified)),
(LocalTrust::Unset, true) => Either::Left(d),
}
});
RecipientDevicesForUser { allowed_devices: left, denied_devices_with_code: right }
}
fn is_unsigned_device_of_verified_user(
own_identity: Option<&OwnUserIdentityData>,
device_owner_identity: Option<&UserIdentityData>,
@@ -517,6 +632,7 @@ mod tests {
use assert_matches::assert_matches;
use assert_matches2::assert_let;
use insta::assert_snapshot;
use matrix_sdk_common::deserialized_responses::WithheldCode;
use matrix_sdk_test::{
async_test, test_json,
@@ -579,17 +695,65 @@ mod tests {
machine
}
/// Assert that [`CollectStrategy::AllDevices`] retains the same
/// serialization format.
#[test]
fn test_serialize_device_based_strategy() {
let encryption_settings = all_devices_strategy_settings();
let serialized = serde_json::to_string(&encryption_settings).unwrap();
assert_snapshot!(serialized);
}
/// [`CollectStrategy::AllDevices`] used to be known as
/// `DeviceBasedStrategy`. Check we can still deserialize the old
/// representation.
#[test]
fn test_deserialize_old_device_based_strategy() {
let settings: EncryptionSettings = serde_json::from_value(json!({
"algorithm": "m.megolm.v1.aes-sha2",
"rotation_period":{"secs":604800,"nanos":0},
"rotation_period_msgs":100,
"history_visibility":"shared",
"sharing_strategy":{"DeviceBasedStrategy":{"only_allow_trusted_devices":false,"error_on_verified_user_problem":false}},
})).unwrap();
assert_matches!(settings.sharing_strategy, CollectStrategy::AllDevices);
}
/// [`CollectStrategy::ErrorOnVerifiedUserProblem`] used to be represented
/// as a variant on the former `DeviceBasedStrategy`. Check we can still
/// deserialize the old representation.
#[test]
fn test_deserialize_old_error_on_verified_user_problem() {
let settings: EncryptionSettings = serde_json::from_value(json!({
"algorithm": "m.megolm.v1.aes-sha2",
"rotation_period":{"secs":604800,"nanos":0},
"rotation_period_msgs":100,
"history_visibility":"shared",
"sharing_strategy":{"DeviceBasedStrategy":{"only_allow_trusted_devices":false,"error_on_verified_user_problem":true}},
})).unwrap();
assert_matches!(settings.sharing_strategy, CollectStrategy::ErrorOnVerifiedUserProblem);
}
/// [`CollectStrategy::OnlyTrustedDevices`] used to be represented as a
/// variant on the former `DeviceBasedStrategy`. Check we can still
/// deserialize the old representation.
#[test]
fn test_deserialize_old_only_trusted_devices_strategy() {
let settings: EncryptionSettings = serde_json::from_value(json!({
"algorithm": "m.megolm.v1.aes-sha2",
"rotation_period":{"secs":604800,"nanos":0},
"rotation_period_msgs":100,
"history_visibility":"shared",
"sharing_strategy":{"DeviceBasedStrategy":{"only_allow_trusted_devices":true,"error_on_verified_user_problem":false}},
})).unwrap();
assert_matches!(settings.sharing_strategy, CollectStrategy::OnlyTrustedDevices);
}
#[async_test]
async fn test_share_with_per_device_strategy_to_all() {
let machine = set_up_test_machine().await;
let encryption_settings = EncryptionSettings {
sharing_strategy: CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: false,
error_on_verified_user_problem: false,
},
..Default::default()
};
let encryption_settings = all_devices_strategy_settings();
let group_session = create_test_outbound_group_session(&machine, &encryption_settings);
@@ -623,33 +787,11 @@ mod tests {
}
#[async_test]
async fn test_share_with_per_device_strategy_only_trusted() {
test_share_only_trusted_helper(false).await;
}
/// Variation of [`test_share_with_per_device_strategy_only_trusted`] to
/// test the interaction between
/// [`only_allow_trusted_devices`](`CollectStrategy::DeviceBasedStrategy::only_allow_trusted_devices`) and
/// [`error_on_verified_user_problem`](`CollectStrategy::DeviceBasedStrategy::error_on_verified_user_problem`).
///
/// (Given that untrusted devices are ignored, we do not expect
/// [`collect_session_recipients`] to return an error, despite the presence
/// of unsigned devices.)
#[async_test]
async fn test_share_with_per_device_strategy_only_trusted_error_on_unsigned_of_verified() {
test_share_only_trusted_helper(true).await;
}
/// Common helper for [`test_share_with_per_device_strategy_only_trusted`]
/// and [`test_share_with_per_device_strategy_only_trusted_error_on_unsigned_of_verified`].
async fn test_share_only_trusted_helper(error_on_verified_user_problem: bool) {
async fn test_share_with_only_trusted_strategy() {
let machine = set_up_test_machine().await;
let encryption_settings = EncryptionSettings {
sharing_strategy: CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: true,
error_on_verified_user_problem,
},
sharing_strategy: CollectStrategy::OnlyTrustedDevices,
..Default::default()
};
@@ -1090,10 +1232,7 @@ mod tests {
async fn test_share_with_identity_strategy() {
let machine = set_up_test_machine().await;
let strategy = CollectStrategy::new_identity_based();
let encryption_settings =
EncryptionSettings { sharing_strategy: strategy.clone(), ..Default::default() };
let encryption_settings = identity_based_strategy_settings();
let group_session = create_test_outbound_group_session(&machine, &encryption_settings);
@@ -1169,10 +1308,7 @@ mod tests {
let fake_room_id = room_id!("!roomid:localhost");
let encryption_settings = EncryptionSettings {
sharing_strategy: CollectStrategy::new_identity_based(),
..Default::default()
};
let encryption_settings = identity_based_strategy_settings();
let request_result = machine
.share_room_key(
@@ -1295,10 +1431,7 @@ mod tests {
let fake_room_id = room_id!("!roomid:localhost");
// We share the key using the identity-based strategy.
let encryption_settings = EncryptionSettings {
sharing_strategy: CollectStrategy::new_identity_based(),
..Default::default()
};
let encryption_settings = identity_based_strategy_settings();
let request_result = machine
.share_room_key(
@@ -1390,10 +1523,7 @@ mod tests {
async fn test_should_rotate_based_on_visibility() {
let machine = set_up_test_machine().await;
let strategy = CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: false,
error_on_verified_user_problem: false,
};
let strategy = CollectStrategy::AllDevices;
let encryption_settings = EncryptionSettings {
sharing_strategy: strategy.clone(),
@@ -1439,14 +1569,7 @@ mod tests {
let machine = set_up_test_machine().await;
let fake_room_id = room_id!("!roomid:localhost");
let strategy = CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: false,
error_on_verified_user_problem: false,
};
let encryption_settings =
EncryptionSettings { sharing_strategy: strategy.clone(), ..Default::default() };
let encryption_settings = all_devices_strategy_settings();
let requests = machine
.share_room_key(
@@ -1538,13 +1661,24 @@ mod tests {
machine
}
/// [`EncryptionSettings`] with `error_on_verified_user_problem` set
/// [`EncryptionSettings`] with [`CollectStrategy::AllDevices`]
fn all_devices_strategy_settings() -> EncryptionSettings {
EncryptionSettings { sharing_strategy: CollectStrategy::AllDevices, ..Default::default() }
}
/// [`EncryptionSettings`] with
/// [`CollectStrategy::ErrorOnVerifiedUserProblem`]
fn error_on_verification_problem_encryption_settings() -> EncryptionSettings {
EncryptionSettings {
sharing_strategy: CollectStrategy::DeviceBasedStrategy {
only_allow_trusted_devices: false,
error_on_verified_user_problem: true,
},
sharing_strategy: CollectStrategy::ErrorOnVerifiedUserProblem,
..Default::default()
}
}
/// [`EncryptionSettings`] with [`CollectStrategy::IdentityBasedStrategy`]
fn identity_based_strategy_settings() -> EncryptionSettings {
EncryptionSettings {
sharing_strategy: CollectStrategy::IdentityBasedStrategy,
..Default::default()
}
}

View File

@@ -0,0 +1,5 @@
---
source: crates/matrix-sdk-crypto/src/session_manager/group_sessions/share_strategy.rs
expression: serialized
---
{"algorithm":"m.megolm.v1.aes-sha2","rotation_period":{"secs":604800,"nanos":0},"rotation_period_msgs":100,"history_visibility":"shared","sharing_strategy":"AllDevices"}

View File

@@ -36,7 +36,7 @@ CREATE TABLE "events" (
-- `OwnedEventId` for events, can be null if malformed.
"event_id" TEXT,
-- JSON serialized `SyncTimelineEvent` (encrypted value).
-- JSON serialized `TimelineEvent` (encrypted value).
"content" BLOB NOT NULL,
-- Position (index) in the chunk.
"position" INTEGER NOT NULL,

View File

@@ -24,6 +24,18 @@ All notable changes to this project will be documented in this file.
implement `Into<PathBuf>` also implement `Into<AttachmentSource>`.
([#4451](https://github.com/matrix-org/matrix-rust-sdk/pull/4451))
### Refactor
- [**breaking**] `Timeline::paginate_forwards` and `Timeline::paginate_backwards`
are unified to work on a live or focused timeline.
`Timeline::live_paginate_*` and `Timeline::focused_paginate_*` have been
removed ([#4584](https://github.com/matrix-org/matrix-rust-sdk/pull/4584)).
- [**breaking**] `Timeline::subscribe_batched` replaces
`Timeline::subscribe`. `subscribe` has been removed in
[#4567](https://github.com/matrix-org/matrix-rust-sdk/pull/4567),
and `subscribe_batched` has been renamed to `subscribe` in
[#4585](https://github.com/matrix-org/matrix-rust-sdk/pull/4585).
## [0.9.0] - 2024-12-18
### Bug Fixes
@@ -63,7 +75,6 @@ All notable changes to this project will be documented in this file.
- `EncryptionSyncService` and `Notification` are using
`Client::cross_process_store_locks_holder_name`.
### Refactor
- [**breaking**] `Timeline::edit` now takes a `RoomMessageEventContentWithoutRelation`.

View File

@@ -51,6 +51,9 @@ tracing = { workspace = true, features = ["attributes"] }
unicode-normalization = { workspace = true }
uniffi = { workspace = true, optional = true }
emojis = "0.6.4"
unicode-segmentation = "1.12.0"
[dev-dependencies]
anyhow = { workspace = true }
assert-json-diff = { workspace = true }
@@ -61,6 +64,7 @@ matrix-sdk = { workspace = true, features = ["testing", "sqlite"] }
matrix-sdk-test = { workspace = true }
stream_assert = { workspace = true }
tempfile = { workspace = true }
url = { workspace = true }
wiremock = { workspace = true }
[lints]

View File

@@ -31,7 +31,7 @@ use std::{pin::Pin, time::Duration};
use async_stream::stream;
use futures_core::stream::Stream;
use futures_util::{pin_mut, StreamExt};
use matrix_sdk::{Client, SlidingSync, LEASE_DURATION_MS};
use matrix_sdk::{sleep::sleep, Client, SlidingSync, LEASE_DURATION_MS};
use matrix_sdk_base::sliding_sync::http;
use ruma::assign;
use tokio::sync::OwnedMutexGuard;
@@ -174,7 +174,7 @@ impl EncryptionSyncService {
LEASE_DURATION_MS
);
tokio::time::sleep(Duration::from_millis(LEASE_DURATION_MS.into())).await;
sleep(Duration::from_millis(LEASE_DURATION_MS.into())).await;
lock_guard = self
.client

View File

@@ -18,7 +18,9 @@ use std::{
};
use futures_util::{pin_mut, StreamExt as _};
use matrix_sdk::{room::Room, Client, ClientBuildError, SlidingSyncList, SlidingSyncMode};
use matrix_sdk::{
room::Room, sleep::sleep, Client, ClientBuildError, SlidingSyncList, SlidingSyncMode,
};
use matrix_sdk_base::{
deserialized_responses::TimelineEvent, sliding_sync::http, RoomState, StoreError,
};
@@ -212,7 +214,7 @@ impl NotificationClient {
for _ in 0..3 {
trace!("waiting for decryption…");
tokio::time::sleep(Duration::from_millis(wait)).await;
sleep(Duration::from_millis(wait)).await;
let new_event = room.decrypt_event(raw_event.cast_ref()).await?;

View File

@@ -63,8 +63,8 @@ use async_stream::stream;
use eyeball::Subscriber;
use futures_util::{pin_mut, Stream, StreamExt};
use matrix_sdk::{
event_cache::EventCacheError, Client, Error as SlidingSyncError, SlidingSync, SlidingSyncList,
SlidingSyncMode,
event_cache::EventCacheError, timeout::timeout, Client, Error as SlidingSyncError, SlidingSync,
SlidingSyncList, SlidingSyncMode,
};
use matrix_sdk_base::sliding_sync::http;
pub use room::*;
@@ -72,7 +72,6 @@ pub use room_list::*;
use ruma::{assign, directory::RoomTypeFilter, events::StateEventType, OwnedRoomId, RoomId, UInt};
pub use state::*;
use thiserror::Error;
use tokio::time::timeout;
use tracing::debug;
use crate::timeline;
@@ -328,7 +327,7 @@ impl RoomListService {
};
// `state.next().await` has a maximum of `yield_delay` time to execute…
let next_state = match timeout(yield_delay, state.next()).await {
let next_state = match timeout(state.next(), yield_delay).await {
// A new state has been received before `yield_delay` time. The new
// `sync_indicator` value won't be yielded.
Ok(next_state) => next_state,
@@ -470,8 +469,8 @@ mod tests {
use assert_matches::assert_matches;
use futures_util::{pin_mut, StreamExt};
use matrix_sdk::{
authentication::matrix::{MatrixSession, MatrixSessionTokens},
config::RequestConfig,
matrix_auth::{MatrixSession, MatrixSessionTokens},
reqwest::Url,
sliding_sync::Version as SlidingSyncVersion,
Client, SlidingSyncMode,

View File

@@ -177,7 +177,7 @@ impl RoomList {
Box::new(new_sorter_recency()),
Box::new(new_sorter_name())
]))
.dynamic_limit_with_initial_value(page_size, limit_stream.clone());
.dynamic_head_with_initial_value(page_size, limit_stream.clone());
// Clearing the stream before chaining with the real stream.
yield stream::once(ready(vec![VectorDiff::Reset { values }]))

View File

@@ -18,12 +18,11 @@
//! This is an opiniated way to run both APIs, with high-level callbacks that
//! should be called in reaction to user actions and/or system events.
//!
//! The sync service will signal errors via its
//! [`state`](SyncService::state) that the user
//! MUST observe. Whenever an error/termination is observed, the user MUST call
//! [`SyncService::start()`] again to restart the room list sync.
//! The sync service will signal errors via its [`state`](SyncService::state)
//! that the user MUST observe. Whenever an error/termination is observed, the
//! user MUST call [`SyncService::start()`] again to restart the room list sync.
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use eyeball::{SharedObservable, Subscriber};
use futures_core::Future;
@@ -47,9 +46,9 @@ use crate::{
/// Current state of the application.
///
/// This is a high-level state indicating what's the status of the underlying
/// syncs. The application starts in `Running` mode, and then hits a terminal
/// state `Terminated` (if it gracefully exited) or `Error` (in case any of the
/// underlying syncs ran into an error).
/// syncs. The application starts in [`State::Running`] mode, and then hits a
/// terminal state [`State::Terminated`] (if it gracefully exited) or
/// [`State::Error`] (in case any of the underlying syncs ran into an error).
///
/// It is the responsibility of the caller to restart the application using the
/// [`SyncService::start`] method, in case it terminated, gracefully or not.
@@ -67,146 +66,144 @@ pub enum State {
Error,
}
pub struct SyncService {
/// Room list service used to synchronize the rooms state.
room_list_service: Arc<RoomListService>,
/// Encryption sync taking care of e2ee events.
encryption_sync_service: Arc<EncryptionSyncService>,
/// What's the state of this sync service?
state: SharedObservable<State>,
/// Use a mutex everytime to modify the `state` value, otherwise it would be
/// possible to have race conditions when starting or pausing the
/// service multiple times really quickly.
modifying_state: AsyncMutex<()>,
/// Task running the room list service.
room_list_task: Arc<Mutex<Option<JoinHandle<()>>>>,
/// Task running the encryption sync.
encryption_sync_task: Arc<Mutex<Option<JoinHandle<()>>>>,
/// Global lock to allow using at most one `EncryptionSyncService` at all
/// times.
///
/// This ensures that there's only one ever existing in the application's
/// lifetime (under the assumption that there is at most one
/// `SyncService` per application).
encryption_sync_permit: Arc<AsyncMutex<EncryptionSyncPermit>>,
/// Scheduler task ensuring proper termination.
///
/// This task is waiting for a `TerminationReport` from any of the other two
/// tasks, or from a user request via [`Self::stop()`]. It makes sure
/// that the two services are properly shut up and just interrupted.
///
/// This is set at the same time as the other two tasks.
scheduler_task: Arc<Mutex<Option<JoinHandle<()>>>>,
/// `TerminationReport` sender for the [`Self::stop()`] function.
///
/// This is set at the same time as all the tasks in [`Self::start()`].
scheduler_sender: Mutex<Option<Sender<TerminationReport>>>,
/// A supervisor responsible for managing two sync tasks: one for handling the
/// room list and another for supporting end-to-end encryption.
///
/// The two sync tasks are spawned as child tasks and are contained within the
/// supervising task, which is stored in the [`SyncTaskSupervisor::task`] field.
///
/// The supervisor ensures the two child tasks are managed as a single unit,
/// allowing for them to be shutdown in unison.
struct SyncTaskSupervisor {
/// The supervising task that manages and contains the two sync child tasks.
task: JoinHandle<()>,
/// [`TerminationReport`] sender for the [`SyncTaskSupervisor::shutdown()`]
/// function.
termination_sender: Sender<TerminationReport>,
}
impl SyncService {
/// Create a new builder for configuring an `SyncService`.
pub fn builder(client: Client) -> SyncServiceBuilder {
SyncServiceBuilder::new(client)
impl SyncTaskSupervisor {
async fn new(
inner: &SyncServiceInner,
room_list_service: Arc<RoomListService>,
encryption_sync_permit: Arc<AsyncMutex<EncryptionSyncPermit>>,
) -> Self {
let (sender, receiver) = tokio::sync::mpsc::channel(16);
let (room_list_task, encryption_sync_task) = Self::spawn_child_tasks(
inner,
room_list_service.clone(),
encryption_sync_permit,
sender.clone(),
)
.await;
let task = spawn(Self::spawn_supervisor_task(
inner,
room_list_service,
room_list_task,
encryption_sync_task,
receiver,
));
Self { task, termination_sender: sender }
}
/// Get the underlying `RoomListService` instance for easier access to its
/// methods.
pub fn room_list_service(&self) -> Arc<RoomListService> {
self.room_list_service.clone()
}
/// Returns the state of the sync service.
pub fn state(&self) -> Subscriber<State> {
self.state.subscribe()
}
/// The role of the scheduler task is to wait for a termination message
/// (`TerminationReport`), sent either because we wanted to stop both
/// syncs, or because one of the syncs failed (in which case we'll stop
/// the other one too).
fn spawn_scheduler_task(
&self,
/// The role of the supervisor task is to wait for a termination message
/// ([`TerminationReport`]), sent either because we wanted to stop both
/// syncs, or because one of the syncs failed (in which case we'll stop the
/// other one too).
fn spawn_supervisor_task(
inner: &SyncServiceInner,
room_list_service: Arc<RoomListService>,
room_list_task: JoinHandle<()>,
encryption_sync_task: JoinHandle<()>,
mut receiver: Receiver<TerminationReport>,
) -> impl Future<Output = ()> {
let encryption_sync_task = self.encryption_sync_task.clone();
let encryption_sync = self.encryption_sync_service.clone();
let room_list_service = self.room_list_service.clone();
let room_list_task = self.room_list_task.clone();
let state = self.state.clone();
let encryption_sync = inner.encryption_sync_service.clone();
let state = inner.state.clone();
async move {
let Some(report) = receiver.recv().await else {
let report = if let Some(report) = receiver.recv().await {
report
} else {
info!("internal channel has been closed?");
return;
// We should still stop the child tasks in the unlikely scenario that our
// receiver died.
TerminationReport::supervisor_error()
};
// If one service failed, make sure to request stopping the other one.
let (stop_room_list, stop_encryption) = match &report.origin {
TerminationOrigin::EncryptionSync => (true, false),
TerminationOrigin::RoomList => (false, true),
TerminationOrigin::Scheduler => (true, true),
TerminationOrigin::Supervisor => (true, true),
};
// Stop both services, and wait for the streams to properly finish: at some
// point they'll return `None` and will exit their infinite loops,
// and their tasks will gracefully terminate.
// point they'll return `None` and will exit their infinite loops, and their
// tasks will gracefully terminate.
if stop_room_list {
if let Err(err) = room_list_service.stop_sync() {
warn!(?report, "unable to stop room list service: {err:#}");
}
if report.has_expired {
room_list_service.expire_sync_session().await;
}
}
{
let task = room_list_task.lock().unwrap().take();
if let Some(task) = task {
if let Err(err) = task.await {
error!("when awaiting room list service: {err:#}");
}
}
if let Err(err) = room_list_task.await {
error!("when awaiting room list service: {err:#}");
}
if stop_encryption {
if let Err(err) = encryption_sync.stop_sync() {
warn!(?report, "unable to stop encryption sync: {err:#}");
}
if report.has_expired {
encryption_sync.expire_sync_session().await;
}
}
{
let task = encryption_sync_task.lock().unwrap().take();
if let Some(task) = task {
if let Err(err) = task.await {
error!("when awaiting encryption sync: {err:#}");
}
}
if let Err(err) = encryption_sync_task.await {
error!("when awaiting encryption sync: {err:#}");
}
if report.is_error {
if report.has_expired {
if stop_room_list {
room_list_service.expire_sync_session().await;
}
if stop_encryption {
encryption_sync.expire_sync_session().await;
}
}
state.set(State::Error);
} else if matches!(report.origin, TerminationOrigin::Scheduler) {
} else if matches!(report.origin, TerminationOrigin::Supervisor) {
state.set(State::Idle);
} else {
state.set(State::Terminated);
}
}
.instrument(tracing::span!(Level::WARN, "scheduler task"))
.instrument(tracing::span!(Level::WARN, "supervisor task"))
}
async fn spawn_child_tasks(
inner: &SyncServiceInner,
room_list_service: Arc<RoomListService>,
encryption_sync_permit: Arc<AsyncMutex<EncryptionSyncPermit>>,
sender: Sender<TerminationReport>,
) -> (JoinHandle<()>, JoinHandle<()>) {
// First, take care of the room list.
let room_list_task = spawn(Self::room_list_sync_task(room_list_service, sender.clone()));
// Then, take care of the encryption sync.
let sync_permit_guard = encryption_sync_permit.clone().lock_owned().await;
let encryption_sync_task = spawn(Self::encryption_sync_task(
inner.encryption_sync_service.clone(),
sender.clone(),
sync_permit_guard,
));
(room_list_task, encryption_sync_task)
}
fn check_if_expired(err: &matrix_sdk::Error) -> bool {
err.client_api_error_kind() == Some(&ruma::api::client::error::ErrorKind::UnknownPos)
}
async fn encryption_sync_task(
@@ -214,28 +211,29 @@ impl SyncService {
sender: Sender<TerminationReport>,
sync_permit_guard: OwnedMutexGuard<EncryptionSyncPermit>,
) {
use encryption_sync_service::Error;
let encryption_sync_stream = encryption_sync.sync(sync_permit_guard);
pin_mut!(encryption_sync_stream);
let (is_error, has_expired) = loop {
let res = encryption_sync_stream.next().await;
match res {
match encryption_sync_stream.next().await {
Some(Ok(())) => {
// Carry on.
}
Some(Err(err)) => {
// If the encryption sync error was an expired session, also expire the
// room list sync.
let has_expired = if let encryption_sync_service::Error::SlidingSync(err) = &err
{
err.client_api_error_kind()
== Some(&ruma::api::client::error::ErrorKind::UnknownPos)
let has_expired = if let Error::SlidingSync(err) = &err {
Self::check_if_expired(err)
} else {
false
};
if !has_expired {
error!("Error while processing encryption in sync service: {err:#}");
}
break (true, has_expired);
}
None => {
@@ -261,27 +259,29 @@ impl SyncService {
room_list_service: Arc<RoomListService>,
sender: Sender<TerminationReport>,
) {
use room_list_service::Error;
let room_list_stream = room_list_service.sync();
pin_mut!(room_list_stream);
let (is_error, has_expired) = loop {
let res = room_list_stream.next().await;
match res {
match room_list_stream.next().await {
Some(Ok(())) => {
// Carry on.
}
Some(Err(err)) => {
// If the room list error was an expired session, also expire the
// encryption sync.
let has_expired = if let room_list_service::Error::SlidingSync(err) = &err {
err.client_api_error_kind()
== Some(&ruma::api::client::error::ErrorKind::UnknownPos)
let has_expired = if let Error::SlidingSync(err) = &err {
Self::check_if_expired(err)
} else {
false
};
if !has_expired {
error!("Error while processing room list in sync service: {err:#}");
}
break (true, has_expired);
}
None => {
@@ -299,6 +299,129 @@ impl SyncService {
}
}
async fn shutdown(self) -> Result<(), Error> {
match self
.termination_sender
.send(TerminationReport {
is_error: false,
has_expired: false,
origin: TerminationOrigin::Supervisor,
})
.await
{
Ok(_) => self.task.await.map_err(|err| {
error!("couldn't finish supervisor task: {err}");
Error::InternalSupervisorError
}),
Err(err) => {
error!("when sending termination report: {err}");
// Let's abort the task if it won't shut down properly, otherwise we would have
// left it as a detached task.
self.task.abort();
Err(Error::InternalSupervisorError)
}
}
}
}
struct SyncServiceInner {
encryption_sync_service: Arc<EncryptionSyncService>,
state: SharedObservable<State>,
/// Supervisor task ensuring proper termination.
///
/// This task is waiting for a [`TerminationReport`] from any of the other
/// two tasks, or from a user request via [`SyncService::stop()`]. It
/// makes sure that the two services are properly shut up and just
/// interrupted.
///
/// This is set at the same time as the other two tasks.
supervisor: Option<SyncTaskSupervisor>,
}
/// A high level manager for your Matrix syncing needs.
///
/// The [`SyncService`] is responsible for managing real-time synchronization
/// with a Matrix server. It can initiate and maintain the necessary
/// synchronization tasks for you.
///
/// **Note**: The [`SyncService`] requires a server with support for [MSC4186],
/// otherwise it will fail with an 404 `M_UNRECOGNIZED` request error.
///
/// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186/
///
/// # Example
///
/// ```no_run
/// use matrix_sdk::Client;
/// use matrix_sdk_ui::sync_service::{State, SyncService};
/// # use url::Url;
/// # async {
/// let homeserver = Url::parse("http://example.com")?;
/// let client = Client::new(homeserver).await?;
///
/// client
/// .matrix_auth()
/// .login_username("example", "wordpass")
/// .initial_device_display_name("My bot")
/// .await?;
///
/// let sync_service = SyncService::builder(client).build().await?;
/// let mut state = sync_service.state();
///
/// while let Some(state) = state.next().await {
/// match state {
/// State::Idle => eprintln!("The sync service is idle."),
/// State::Running => eprintln!("The sync has started to run."),
/// State::Terminated => {
/// eprintln!("The sync service has been gracefully terminated");
/// break;
/// }
/// State::Error => {
/// eprintln!("The sync service has run into an error");
/// break;
/// }
/// }
/// }
/// # anyhow::Ok(()) };
/// ```
pub struct SyncService {
inner: Arc<AsyncMutex<SyncServiceInner>>,
/// Room list service used to synchronize the rooms state.
room_list_service: Arc<RoomListService>,
/// What's the state of this sync service? This field is replicated from the
/// [`SyncServiceInner`] struct, but it should not be modified in this
/// struct. It's re-exposed here so we can subscribe to the state without
/// taking the lock on the `inner` field.
state: SharedObservable<State>,
/// Global lock to allow using at most one [`EncryptionSyncService`] at all
/// times.
///
/// This ensures that there's only one ever existing in the application's
/// lifetime (under the assumption that there is at most one [`SyncService`]
/// per application).
encryption_sync_permit: Arc<AsyncMutex<EncryptionSyncPermit>>,
}
impl SyncService {
/// Create a new builder for configuring an `SyncService`.
pub fn builder(client: Client) -> SyncServiceBuilder {
SyncServiceBuilder::new(client)
}
/// Get the underlying `RoomListService` instance for easier access to its
/// methods.
pub fn room_list_service(&self) -> Arc<RoomListService> {
self.room_list_service.clone()
}
/// Returns the state of the sync service.
pub fn state(&self) -> Subscriber<State> {
self.state.subscribe()
}
/// Start (or restart) the underlying sliding syncs.
///
/// This can be called multiple times safely:
@@ -306,35 +429,26 @@ impl SyncService {
/// - if the stream has been aborted before, it will be properly cleaned up
/// and restarted.
pub async fn start(&self) {
let _guard = self.modifying_state.lock().await;
let mut inner = self.inner.lock().await;
// Only (re)start the tasks if any was stopped.
if matches!(self.state.get(), State::Running) {
// It was already true, so we can skip the restart.
return;
match inner.state.get() {
// If we're already running, there's nothing to do.
State::Running => (),
State::Idle | State::Terminated | State::Error => {
trace!("starting sync service");
inner.supervisor = Some(
SyncTaskSupervisor::new(
&inner,
self.room_list_service.clone(),
self.encryption_sync_permit.clone(),
)
.await,
);
inner.state.set(State::Running);
}
}
trace!("starting sync service");
let (sender, receiver) = tokio::sync::mpsc::channel(16);
// First, take care of the room list.
*self.room_list_task.lock().unwrap() =
Some(spawn(Self::room_list_sync_task(self.room_list_service.clone(), sender.clone())));
// Then, take care of the encryption sync.
let sync_permit_guard = self.encryption_sync_permit.clone().lock_owned().await;
*self.encryption_sync_task.lock().unwrap() = Some(spawn(Self::encryption_sync_task(
self.encryption_sync_service.clone(),
sender.clone(),
sync_permit_guard,
)));
// Spawn the scheduler task.
*self.scheduler_sender.lock().unwrap() = Some(sender);
*self.scheduler_task.lock().unwrap() = Some(spawn(self.spawn_scheduler_task(receiver)));
self.state.set(State::Running);
}
/// Stop the underlying sliding syncs.
@@ -344,52 +458,29 @@ impl SyncService {
/// necessary.
#[instrument(skip_all)]
pub async fn stop(&self) -> Result<(), Error> {
let _guard = self.modifying_state.lock().await;
let mut inner = self.inner.lock().await;
match self.state.get() {
match inner.state.get() {
State::Idle | State::Terminated | State::Error => {
// No need to stop if we were not running.
return Ok(());
}
State::Running => {}
};
State::Running => (),
}
trace!("pausing sync service");
// First, request to stop the two underlying syncs; we'll look at the results
// later, so that we're in a clean state independently of the request to
// stop.
// later, so that we're in a clean state independently of the request to stop.
let sender = self.scheduler_sender.lock().unwrap().clone();
sender
.ok_or_else(|| {
error!("missing sender");
Error::InternalSchedulerError
})?
.send(TerminationReport {
is_error: false,
has_expired: false,
origin: TerminationOrigin::Scheduler,
})
.await
.map_err(|err| {
error!("when sending termination report: {err}");
Error::InternalSchedulerError
})?;
// Remove the supervisor from our inner state and request the tasks to be
// shutdown.
let supervisor = inner.supervisor.take().ok_or_else(|| {
error!("The supervisor was not properly started up");
Error::InternalSupervisorError
})?;
let scheduler_task = self.scheduler_task.lock().unwrap().take();
scheduler_task
.ok_or_else(|| {
error!("missing scheduler task");
Error::InternalSchedulerError
})?
.await
.map_err(|err| {
error!("couldn't finish scheduler task: {err}");
Error::InternalSchedulerError
})?;
Ok(())
supervisor.shutdown().await
}
/// Attempt to get a permit to use an `EncryptionSyncService` at a given
@@ -406,7 +497,7 @@ impl SyncService {
enum TerminationOrigin {
EncryptionSync,
RoomList,
Scheduler,
Supervisor,
}
#[derive(Debug)]
@@ -416,15 +507,22 @@ struct TerminationReport {
origin: TerminationOrigin,
}
impl TerminationReport {
fn supervisor_error() -> Self {
TerminationReport {
is_error: true,
has_expired: false,
origin: TerminationOrigin::Supervisor,
}
}
}
// Testing helpers, mostly.
#[doc(hidden)]
impl SyncService {
/// Return the existential states of internal tasks.
pub fn task_states(&self) -> (bool, bool) {
(
self.encryption_sync_task.lock().unwrap().is_some(),
self.room_list_task.lock().unwrap().is_some(),
)
/// Is the task supervisor running?
pub async fn is_supervisor_running(&self) -> bool {
self.inner.lock().await.supervisor.is_some()
}
}
@@ -457,11 +555,11 @@ impl SyncServiceBuilder {
self
}
/// Finish setting up the `SyncService`.
/// Finish setting up the [`SyncService`].
///
/// This creates the underlying sliding syncs, and will *not* start them in
/// the background. The resulting `SyncService` must be kept alive as
/// long as the sliding syncs are supposed to run.
/// the background. The resulting [`SyncService`] must be kept alive as long
/// as the sliding syncs are supposed to run.
pub async fn build(self) -> Result<SyncService, Error> {
let encryption_sync_permit = Arc::new(AsyncMutex::new(EncryptionSyncPermit::new()));
@@ -476,16 +574,18 @@ impl SyncServiceBuilder {
.await?,
);
let room_list_service = Arc::new(room_list);
let state = SharedObservable::new(State::Idle);
Ok(SyncService {
room_list_service: Arc::new(room_list),
encryption_sync_service: encryption_sync,
encryption_sync_task: Arc::new(Mutex::new(None)),
room_list_task: Arc::new(Mutex::new(None)),
scheduler_task: Arc::new(Mutex::new(None)),
scheduler_sender: Mutex::new(None),
state: SharedObservable::new(State::Idle),
modifying_state: AsyncMutex::new(()),
state: state.clone(),
room_list_service,
encryption_sync_permit,
inner: Arc::new(AsyncMutex::new(SyncServiceInner {
supervisor: None,
encryption_sync_service: encryption_sync,
state,
})),
})
}
}
@@ -501,6 +601,7 @@ pub enum Error {
#[error(transparent)]
EncryptionSync(#[from] encryption_sync_service::Error),
#[error("the scheduler channel has run into an unexpected error")]
InternalSchedulerError,
/// An error had occurred in the sync task supervisor, likely due to a bug.
#[error("the supervisor channel has run into an unexpected error")]
InternalSupervisorError,
}

View File

@@ -20,14 +20,14 @@ use eyeball_im_util::vector::VectorObserverExt;
use futures_core::Stream;
use imbl::Vector;
#[cfg(test)]
use matrix_sdk::crypto::OlmMachine;
use matrix_sdk::{crypto::OlmMachine, SendOutsideWasm};
use matrix_sdk::{
deserialized_responses::{SyncTimelineEvent, TimelineEventKind as SdkTimelineEventKind},
deserialized_responses::{TimelineEvent, TimelineEventKind as SdkTimelineEventKind},
event_cache::{paginator::Paginator, RoomEventCache},
send_queue::{
LocalEcho, LocalEchoContent, RoomSendQueueUpdate, SendHandle, SendReactionHandle,
},
Result, Room, SendOutsideWasm,
Result, Room,
};
use ruma::{
api::client::receipt::create_receipt::v3::ReceiptType as SendReceiptType,
@@ -396,7 +396,7 @@ impl<P: RoomDataProvider> TimelineController<P> {
pub(crate) async fn reload_pinned_events(
&self,
) -> Result<Vec<SyncTimelineEvent>, PinnedEventsLoaderError> {
) -> Result<Vec<TimelineEvent>, PinnedEventsLoaderError> {
let focus_guard = self.focus.read().await;
if let TimelineFocusData::PinnedEvents { loader } = &*focus_guard {
@@ -477,13 +477,13 @@ impl<P: RoomDataProvider> TimelineController<P> {
self.state.read().await.items.clone_items()
}
#[cfg(test)]
pub(super) async fn subscribe(
&self,
) -> (
Vector<Arc<TimelineItem>>,
impl Stream<Item = VectorDiff<Arc<TimelineItem>>> + SendOutsideWasm,
) {
trace!("Creating timeline items signal");
let state = self.state.read().await;
(state.items.clone_items(), state.items.subscribe().into_stream())
}
@@ -491,7 +491,6 @@ impl<P: RoomDataProvider> TimelineController<P> {
pub(super) async fn subscribe_batched(
&self,
) -> (Vector<Arc<TimelineItem>>, impl Stream<Item = Vec<VectorDiff<Arc<TimelineItem>>>>) {
trace!("Creating timeline items signal");
let state = self.state.read().await;
(state.items.clone_items(), state.items.subscribe().into_batched_stream())
}
@@ -504,7 +503,6 @@ impl<P: RoomDataProvider> TimelineController<P> {
U: Clone,
F: Fn(Arc<TimelineItem>) -> Option<U>,
{
trace!("Creating timeline items signal");
self.state.read().await.items.subscribe().filter_map(f)
}
@@ -662,7 +660,7 @@ impl<P: RoomDataProvider> TimelineController<P> {
) -> HandleManyEventsResult
where
Events: IntoIterator + ExactSizeIterator,
<Events as IntoIterator>::Item: Into<SyncTimelineEvent>,
<Events as IntoIterator>::Item: Into<TimelineEvent>,
{
if events.len() == 0 {
return Default::default();
@@ -675,7 +673,7 @@ impl<P: RoomDataProvider> TimelineController<P> {
/// Handle updates on events as [`VectorDiff`]s.
pub(super) async fn handle_remote_events_with_diffs(
&self,
diffs: Vec<VectorDiff<SyncTimelineEvent>>,
diffs: Vec<VectorDiff<TimelineEvent>>,
origin: RemoteEventOrigin,
) {
if diffs.is_empty() {
@@ -710,7 +708,7 @@ impl<P: RoomDataProvider> TimelineController<P> {
origin: RemoteEventOrigin,
) where
Events: IntoIterator + ExactSizeIterator,
<Events as IntoIterator>::Item: Into<SyncTimelineEvent>,
<Events as IntoIterator>::Item: Into<TimelineEvent>,
{
let mut state = self.state.write().await;

View File

@@ -22,9 +22,8 @@ use std::{
use eyeball_im::VectorDiff;
use itertools::Itertools as _;
use matrix_sdk::{
deserialized_responses::SyncTimelineEvent, ring_buffer::RingBuffer, send_queue::SendHandle,
deserialized_responses::TimelineEvent, ring_buffer::RingBuffer, send_queue::SendHandle,
};
use matrix_sdk_base::deserialized_responses::TimelineEvent;
#[cfg(test)]
use ruma::events::receipt::ReceiptEventContent;
use ruma::{
@@ -139,7 +138,7 @@ impl TimelineState {
) -> HandleManyEventsResult
where
Events: IntoIterator + ExactSizeIterator,
<Events as IntoIterator>::Item: Into<SyncTimelineEvent>,
<Events as IntoIterator>::Item: Into<TimelineEvent>,
RoomData: RoomDataProvider,
{
if events.len() == 0 {
@@ -157,7 +156,7 @@ impl TimelineState {
/// Handle updates on events as [`VectorDiff`]s.
pub(super) async fn handle_remote_events_with_diffs<RoomData>(
&mut self,
diffs: Vec<VectorDiff<SyncTimelineEvent>>,
diffs: Vec<VectorDiff<TimelineEvent>>,
origin: RemoteEventOrigin,
room_data: &RoomData,
settings: &TimelineSettings,
@@ -280,7 +279,7 @@ impl TimelineState {
let handle_one_res = txn
.handle_remote_event(
event.into(),
event,
TimelineItemPosition::UpdateAt { timeline_item_index: idx },
room_data_provider,
settings,
@@ -331,7 +330,7 @@ impl TimelineState {
) -> HandleManyEventsResult
where
Events: IntoIterator,
Events::Item: Into<SyncTimelineEvent>,
Events::Item: Into<TimelineEvent>,
RoomData: RoomDataProvider,
{
let mut txn = self.transaction();
@@ -397,7 +396,7 @@ impl TimelineStateTransaction<'_> {
) -> HandleManyEventsResult
where
Events: IntoIterator,
Events::Item: Into<SyncTimelineEvent>,
Events::Item: Into<TimelineEvent>,
RoomData: RoomDataProvider,
{
let mut total = HandleManyEventsResult::default();
@@ -439,7 +438,7 @@ impl TimelineStateTransaction<'_> {
/// Handle updates on events as [`VectorDiff`]s.
pub(super) async fn handle_remote_events_with_diffs<RoomData>(
&mut self,
diffs: Vec<VectorDiff<SyncTimelineEvent>>,
diffs: Vec<VectorDiff<TimelineEvent>>,
origin: RemoteEventOrigin,
room_data_provider: &RoomData,
settings: &TimelineSettings,
@@ -559,13 +558,13 @@ impl TimelineStateTransaction<'_> {
/// Returns the number of timeline updates that were made.
async fn handle_remote_event<P: RoomDataProvider>(
&mut self,
event: SyncTimelineEvent,
event: TimelineEvent,
position: TimelineItemPosition,
room_data_provider: &P,
settings: &TimelineSettings,
date_divider_adjuster: &mut DateDividerAdjuster,
) -> HandleEventResult {
let SyncTimelineEvent { push_actions, kind } = event;
let TimelineEvent { push_actions, kind } = event;
let encryption_info = kind.encryption_info().cloned();
let (raw, utd_info) = match kind {
@@ -758,7 +757,9 @@ impl TimelineStateTransaction<'_> {
} else {
Default::default()
},
is_highlighted: push_actions.iter().any(Action::is_highlight),
is_highlighted: push_actions
.as_ref()
.is_some_and(|actions| actions.iter().any(Action::is_highlight)),
flow: Flow::Remote {
event_id: event_id.clone(),
raw_event: raw,

View File

@@ -211,7 +211,7 @@ impl TimelineEventKind {
Self::UnableToDecrypt { content, utd_cause }
} else {
// If we get here, it means that some part of the code has created a
// `SyncTimelineEvent` containing an `m.room.encrypted` event
// `TimelineEvent` containing an `m.room.encrypted` event
// without decrypting it. Possibly this means that encryption has not been
// configured.
// We treat it the same as any other message-like event.

View File

@@ -36,6 +36,7 @@ use ruma::{
OwnedUserId, RoomId, RoomVersionId, TransactionId, UserId,
};
use tracing::warn;
use unicode_segmentation::UnicodeSegmentation;
mod content;
mod local;
@@ -127,14 +128,17 @@ impl EventTimelineItem {
Self { sender, sender_profile, timestamp, content, reactions, kind, is_room_encrypted }
}
/// If the supplied low-level `SyncTimelineEvent` is suitable for use as the
/// `latest_event` in a message preview, wrap it as an `EventTimelineItem`.
/// If the supplied low-level [`TimelineEvent`] is suitable for use as the
/// `latest_event` in a message preview, wrap it as an
/// `EventTimelineItem`.
///
/// **Note:** Timeline items created via this constructor do **not** produce
/// the correct ShieldState when calling
/// [`get_shield`][EventTimelineItem::get_shield]. This is because they are
/// intended for display in the room list which a) is unlikely to show
/// shields and b) would incur a significant performance overhead.
///
/// [`TimelineEvent`]: matrix_sdk::deserialized_responses::TimelineEvent
pub async fn from_latest_event(
client: Client,
room_id: &RoomId,
@@ -601,6 +605,69 @@ impl EventTimelineItem {
pub fn local_echo_send_handle(&self) -> Option<SendHandle> {
as_variant!(self.handle(), TimelineItemHandle::Local(handle) => handle.clone())
}
/// Some clients may want to know if a particular text message or media
/// caption contains only emojis so that they can render them bigger for
/// added effect.
///
/// This function provides that feature with the following
/// behavior/limitations:
/// - ignores leading and trailing white spaces
/// - fails texts bigger than 5 graphemes for performance reasons
/// - checks the body only for [`MessageType::Text`]
/// - only checks the caption for [`MessageType::Audio`],
/// [`MessageType::File`], [`MessageType::Image`], and
/// [`MessageType::Video`] if present
/// - all other message types will not match
///
/// # Examples
/// # fn render_timeline_item(timeline_item: TimelineItem) {
/// if timeline_item.contains_only_emojis() {
/// // e.g. increase the font size
/// }
/// # }
///
/// See `test_emoji_detection` for more examples.
pub fn contains_only_emojis(&self) -> bool {
let body = match self.content() {
TimelineItemContent::Message(msg) => match msg.msgtype() {
MessageType::Text(text) => Some(text.body.as_str()),
MessageType::Audio(audio) => audio.caption(),
MessageType::File(file) => file.caption(),
MessageType::Image(image) => image.caption(),
MessageType::Video(video) => video.caption(),
_ => None,
},
TimelineItemContent::RedactedMessage
| TimelineItemContent::Sticker(_)
| TimelineItemContent::UnableToDecrypt(_)
| TimelineItemContent::MembershipChange(_)
| TimelineItemContent::ProfileChange(_)
| TimelineItemContent::OtherState(_)
| TimelineItemContent::FailedToParseMessageLike { .. }
| TimelineItemContent::FailedToParseState { .. }
| TimelineItemContent::Poll(_)
| TimelineItemContent::CallInvite
| TimelineItemContent::CallNotify => None,
};
if let Some(body) = body {
// Collect the graphemes after trimming white spaces.
let graphemes = body.trim().graphemes(true).collect::<Vec<&str>>();
// Limit the check to 5 graphemes for performance and security
// reasons. This will probably be used for every new message so we
// want it to be fast and we don't want to allow a DoS attack by
// sending a huge message.
if graphemes.len() > 5 {
return false;
}
graphemes.iter().all(|g| emojis::get(g).is_some())
} else {
false
}
}
}
impl From<LocalEventTimelineItem> for EventTimelineItemKind {
@@ -754,7 +821,7 @@ mod tests {
use assert_matches2::assert_let;
use matrix_sdk::test_utils::logged_in_client;
use matrix_sdk_base::{
deserialized_responses::SyncTimelineEvent, latest_event::LatestEvent, sliding_sync::http,
deserialized_responses::TimelineEvent, latest_event::LatestEvent, sliding_sync::http,
MinimalStateEvent, OriginalMinimalStateEvent,
};
use matrix_sdk_test::{
@@ -844,7 +911,7 @@ mod tests {
client.process_sliding_sync_test_helper(&response).await.unwrap();
// When we construct a timeline event from it
let event = SyncTimelineEvent::new(raw_event.cast());
let event = TimelineEvent::new(raw_event.cast());
let timeline_item =
EventTimelineItem::from_latest_event(client, room_id, LatestEvent::new(event))
.await
@@ -891,7 +958,7 @@ mod tests {
.event_id(original_event_id)
.bundled_relations(relations)
.server_ts(42)
.into_sync();
.into_event();
let client = logged_in_client(None).await;
@@ -947,7 +1014,7 @@ mod tests {
.event_id(original_event_id)
.bundled_relations(relations)
.sender(user_id)
.into_sync();
.into_event();
let client = logged_in_client(None).await;
@@ -1057,6 +1124,48 @@ mod tests {
);
}
#[async_test]
async fn test_emoji_detection() {
let room_id = room_id!("!q:x.uk");
let user_id = user_id!("@t:o.uk");
let client = logged_in_client(None).await;
let mut event = message_event(room_id, user_id, "🤷‍♂️ No boost 🤷‍♂️", "", 0);
let mut timeline_item =
EventTimelineItem::from_latest_event(client.clone(), room_id, LatestEvent::new(event))
.await
.unwrap();
assert!(!timeline_item.contains_only_emojis());
// Ignores leading and trailing white spaces
event = message_event(room_id, user_id, " 🚀 ", "", 0);
timeline_item =
EventTimelineItem::from_latest_event(client.clone(), room_id, LatestEvent::new(event))
.await
.unwrap();
assert!(timeline_item.contains_only_emojis());
// Too many
event = message_event(room_id, user_id, "👨👩👦1⃣🚀👳🏾🪩👍👍🏻🫱🏼🫲🏾🙂👋", "", 0);
timeline_item =
EventTimelineItem::from_latest_event(client.clone(), room_id, LatestEvent::new(event))
.await
.unwrap();
assert!(!timeline_item.contains_only_emojis());
// Works with combined emojis
event = message_event(room_id, user_id, "👨👩👦1⃣👳🏾👍🏻🫱🏼🫲🏾", "", 0);
timeline_item =
EventTimelineItem::from_latest_event(client.clone(), room_id, LatestEvent::new(event))
.await
.unwrap();
assert!(timeline_item.contains_only_emojis());
}
fn member_event(
room_id: &RoomId,
user_id: &UserId,
@@ -1121,8 +1230,8 @@ mod tests {
body: &str,
formatted_body: &str,
ts: u64,
) -> SyncTimelineEvent {
SyncTimelineEvent::new(sync_timeline_event!({
) -> TimelineEvent {
TimelineEvent::new(sync_timeline_event!({
"event_id": "$eventid6",
"sender": user_id,
"origin_server_ts": ts,

View File

@@ -266,25 +266,14 @@ impl Timeline {
}
}
/// Get the current timeline items, and a stream of changes.
/// Get the current timeline items, along with a stream of updates of
/// timeline items.
///
/// You can poll this stream to receive updates. See
/// [`futures_util::StreamExt`] for a high-level API on top of [`Stream`].
/// The stream produces `Vec<VectorDiff<_>>`, which means multiple updates
/// at once. There are no delays, it consumes as many updates as possible
/// and batches them.
pub async fn subscribe(
&self,
) -> (Vector<Arc<TimelineItem>>, impl Stream<Item = VectorDiff<Arc<TimelineItem>>>) {
let (items, stream) = self.controller.subscribe().await;
let stream = TimelineStream::new(stream, self.drop_handle.clone());
(items, stream)
}
/// Get the current timeline items, and a batched stream of changes.
///
/// In contrast to [`subscribe`](Self::subscribe), this stream can yield
/// multiple diffs at once. The batching is done such that no arbitrary
/// delays are added.
pub async fn subscribe_batched(
&self,
) -> (Vector<Arc<TimelineItem>>, impl Stream<Item = Vec<VectorDiff<Arc<TimelineItem>>>>) {
let (items, stream) = self.controller.subscribe_batched().await;
let stream = TimelineStream::new(stream, self.drop_handle.clone());

View File

@@ -36,26 +36,20 @@ impl super::Timeline {
if self.controller.is_live().await {
Ok(self.live_paginate_backwards(num_events).await?)
} else {
Ok(self.focused_paginate_backwards(num_events).await?)
Ok(self.controller.focused_paginate_backwards(num_events).await?)
}
}
/// Assuming the timeline is focused on an event, starts a forwards
/// pagination.
/// Add more events to the end of the timeline.
///
/// Returns whether we hit the end of the timeline.
#[instrument(skip_all)]
pub async fn focused_paginate_forwards(&self, num_events: u16) -> Result<bool, Error> {
Ok(self.controller.focused_paginate_forwards(num_events).await?)
}
/// Assuming the timeline is focused on an event, starts a backwards
/// pagination.
///
/// Returns whether we hit the start of the timeline.
#[instrument(skip(self), fields(room_id = ?self.room().room_id()))]
pub async fn focused_paginate_backwards(&self, num_events: u16) -> Result<bool, Error> {
Ok(self.controller.focused_paginate_backwards(num_events).await?)
#[instrument(skip_all, fields(room_id = ?self.room().room_id()))]
pub async fn paginate_forwards(&self, num_events: u16) -> Result<bool, Error> {
if self.controller.is_live().await {
Ok(true)
} else {
Ok(self.controller.focused_paginate_forwards(num_events).await?)
}
}
/// Paginate backwards in live mode.
@@ -64,8 +58,7 @@ impl super::Timeline {
/// on a specific event.
///
/// Returns whether we hit the start of the timeline.
#[instrument(skip_all, fields(room_id = ?self.room().room_id()))]
pub async fn live_paginate_backwards(&self, batch_size: u16) -> event_cache::Result<bool> {
async fn live_paginate_backwards(&self, batch_size: u16) -> event_cache::Result<bool> {
let pagination = self.event_cache.pagination();
let result = pagination

View File

@@ -19,7 +19,7 @@ use matrix_sdk::{
config::RequestConfig, event_cache::paginator::PaginatorError, BoxFuture, Room,
SendOutsideWasm, SyncOutsideWasm,
};
use matrix_sdk_base::deserialized_responses::SyncTimelineEvent;
use matrix_sdk_base::deserialized_responses::TimelineEvent;
use ruma::{events::relation::RelationType, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId};
use thiserror::Error;
use tracing::{debug, warn};
@@ -55,9 +55,9 @@ impl PinnedEventsLoader {
/// `max_concurrent_requests` allows, to avoid overwhelming the server.
///
/// It returns a `Result` with either a
/// chronologically sorted list of retrieved `SyncTimelineEvent`s
/// or a `PinnedEventsLoaderError`.
pub async fn load_events(&self) -> Result<Vec<SyncTimelineEvent>, PinnedEventsLoaderError> {
/// chronologically sorted list of retrieved [`TimelineEvent`]s
/// or a [`PinnedEventsLoaderError`].
pub async fn load_events(&self) -> Result<Vec<TimelineEvent>, PinnedEventsLoaderError> {
let pinned_event_ids: Vec<OwnedEventId> = self
.room
.pinned_event_ids()
@@ -74,7 +74,7 @@ impl PinnedEventsLoader {
let request_config = Some(RequestConfig::default().retry_limit(3));
let mut loaded_events: Vec<SyncTimelineEvent> =
let mut loaded_events: Vec<TimelineEvent> =
stream::iter(pinned_event_ids.into_iter().map(|event_id| {
let provider = self.room.clone();
let relations_filter =
@@ -132,7 +132,7 @@ pub trait PinnedEventsRoom: SendOutsideWasm + SyncOutsideWasm {
event_id: &'a EventId,
request_config: Option<RequestConfig>,
related_event_filters: Option<Vec<RelationType>>,
) -> BoxFuture<'a, Result<(SyncTimelineEvent, Vec<SyncTimelineEvent>), PaginatorError>>;
) -> BoxFuture<'a, Result<(TimelineEvent, Vec<TimelineEvent>), PaginatorError>>;
/// Get the pinned event ids for a room.
fn pinned_event_ids(&self) -> Option<Vec<OwnedEventId>>;
@@ -150,7 +150,7 @@ impl PinnedEventsRoom for Room {
event_id: &'a EventId,
request_config: Option<RequestConfig>,
related_event_filters: Option<Vec<RelationType>>,
) -> BoxFuture<'a, Result<(SyncTimelineEvent, Vec<SyncTimelineEvent>), PaginatorError>> {
) -> BoxFuture<'a, Result<(TimelineEvent, Vec<TimelineEvent>), PaginatorError>> {
Box::pin(async move {
if let Ok((cache, _handles)) = self.event_cache().await {
if let Some(ret) = cache.event_with_relations(event_id, related_event_filters).await
@@ -163,7 +163,7 @@ impl PinnedEventsRoom for Room {
debug!("Loading pinned event {event_id} from HS");
self.event(event_id, request_config)
.await
.map(|e| (e.into(), Vec::new()))
.map(|e| (e, Vec::new()))
.map_err(|err| PaginatorError::SdkError(Box::new(err)))
})
}

View File

@@ -16,7 +16,7 @@ use assert_matches::assert_matches;
use assert_matches2::assert_let;
use eyeball_im::VectorDiff;
use futures_util::StreamExt;
use matrix_sdk::deserialized_responses::SyncTimelineEvent;
use matrix_sdk::deserialized_responses::TimelineEvent;
use matrix_sdk_test::{
async_test, event_factory::PreviousMembership, sync_timeline_event, ALICE, BOB, CAROL,
};
@@ -89,7 +89,7 @@ async fn test_replace_with_initial_events_and_read_marker() {
.with_settings(TimelineSettings { track_read_receipts: true, ..Default::default() });
let f = &timeline.factory;
let ev = f.text_msg("hey").sender(*ALICE).into_sync();
let ev = f.text_msg("hey").sender(*ALICE).into_event();
timeline
.controller
@@ -104,7 +104,7 @@ async fn test_replace_with_initial_events_and_read_marker() {
assert!(items[0].is_date_divider());
assert_eq!(items[1].as_event().unwrap().content().as_message().unwrap().body(), "hey");
let ev = f.text_msg("yo").sender(*BOB).into_sync();
let ev = f.text_msg("yo").sender(*BOB).into_event();
timeline
.controller
.replace_with_initial_remote_events([ev].into_iter(), RemoteEventOrigin::Sync)
@@ -122,7 +122,7 @@ async fn test_sticker() {
let mut stream = timeline.subscribe_events().await;
timeline
.handle_live_event(SyncTimelineEvent::new(sync_timeline_event!({
.handle_live_event(TimelineEvent::new(sync_timeline_event!({
"content": {
"body": "Happy sticker",
"info": {
@@ -276,9 +276,9 @@ async fn test_internal_id_prefix() {
let timeline = TestTimeline::with_internal_id_prefix("le_prefix_".to_owned());
let f = &timeline.factory;
let ev_a = f.text_msg("A").sender(*ALICE).into_sync();
let ev_b = f.text_msg("B").sender(*BOB).into_sync();
let ev_c = f.text_msg("C").sender(*CAROL).into_sync();
let ev_a = f.text_msg("A").sender(*ALICE).into_event();
let ev_b = f.text_msg("B").sender(*BOB).into_event();
let ev_c = f.text_msg("C").sender(*CAROL).into_event();
timeline
.controller
@@ -445,7 +445,7 @@ async fn test_replace_with_initial_events_when_batched() {
.with_settings(TimelineSettings::default());
let f = &timeline.factory;
let ev = f.text_msg("hey").sender(*ALICE).into_sync();
let ev = f.text_msg("hey").sender(*ALICE).into_event();
timeline
.controller
@@ -460,7 +460,7 @@ async fn test_replace_with_initial_events_when_batched() {
assert!(items[0].is_date_divider());
assert_eq!(items[1].as_event().unwrap().content().as_message().unwrap().body(), "hey");
let ev = f.text_msg("yo").sender(*BOB).into_sync();
let ev = f.text_msg("yo").sender(*BOB).into_event();
timeline
.controller
.replace_with_initial_remote_events([ev].into_iter(), RemoteEventOrigin::Sync)

View File

@@ -251,7 +251,7 @@ async fn test_no_read_marker_with_local_echo() {
.sender(user_id!("@a:b.c"))
.event_id(event_id)
.server_ts(MilliSecondsSinceUnixEpoch::now())
.into_sync()]
.into_event()]
.into_iter(),
RemoteEventOrigin::Sync,
)

View File

@@ -19,7 +19,7 @@ use eyeball_im::VectorDiff;
use matrix_sdk::deserialized_responses::{
AlgorithmInfo, EncryptionInfo, VerificationLevel, VerificationState,
};
use matrix_sdk_base::deserialized_responses::{DecryptedRoomEvent, SyncTimelineEvent};
use matrix_sdk_base::deserialized_responses::{DecryptedRoomEvent, TimelineEvent};
use matrix_sdk_test::{async_test, ALICE};
use ruma::{
event_id,
@@ -178,7 +178,7 @@ async fn test_edit_updates_encryption_info() {
verification_state: VerificationState::Verified,
};
let original_event: SyncTimelineEvent = DecryptedRoomEvent {
let original_event: TimelineEvent = DecryptedRoomEvent {
event: original_event.cast(),
encryption_info: encryption_info.clone(),
unsigned_encryption_info: None,
@@ -207,7 +207,7 @@ async fn test_edit_updates_encryption_info() {
.into_raw_timeline();
encryption_info.verification_state =
VerificationState::Unverified(VerificationLevel::UnverifiedIdentity);
let edit_event: SyncTimelineEvent = DecryptedRoomEvent {
let edit_event: TimelineEvent = DecryptedRoomEvent {
event: edit_event.cast(),
encryption_info: encryption_info.clone(),
unsigned_encryption_info: None,

View File

@@ -30,7 +30,7 @@ use matrix_sdk::{
crypto::{decrypt_room_key_export, types::events::UtdCause, OlmMachine},
test_utils::test_client_builder,
};
use matrix_sdk_base::deserialized_responses::{SyncTimelineEvent, UnableToDecryptReason};
use matrix_sdk_base::deserialized_responses::{TimelineEvent, UnableToDecryptReason};
use matrix_sdk_test::{async_test, ALICE, BOB};
use ruma::{
assign, event_id,
@@ -750,7 +750,7 @@ async fn test_retry_decryption_updates_response() {
}
}
fn utd_event_with_unsigned(unsigned: serde_json::Value) -> SyncTimelineEvent {
fn utd_event_with_unsigned(unsigned: serde_json::Value) -> TimelineEvent {
let raw = Raw::from_json(
to_raw_value(&json!({
"event_id": "$myevent",
@@ -770,7 +770,7 @@ fn utd_event_with_unsigned(unsigned: serde_json::Value) -> SyncTimelineEvent {
.unwrap(),
);
SyncTimelineEvent::new_utd_event(
TimelineEvent::new_utd_event(
raw,
matrix_sdk::deserialized_responses::UnableToDecryptInfo {
session_id: Some("SESSION_ID".into()),

View File

@@ -17,7 +17,7 @@ use std::sync::Arc;
use assert_matches::assert_matches;
use assert_matches2::assert_let;
use eyeball_im::VectorDiff;
use matrix_sdk::deserialized_responses::SyncTimelineEvent;
use matrix_sdk::deserialized_responses::TimelineEvent;
use matrix_sdk_test::{async_test, sync_timeline_event, ALICE, BOB};
use ruma::events::{
room::{
@@ -141,7 +141,7 @@ async fn test_hide_failed_to_parse() {
// m.room.message events must have a msgtype and body in content, so this
// event with an empty content object should fail to deserialize.
timeline
.handle_live_event(SyncTimelineEvent::new(sync_timeline_event!({
.handle_live_event(TimelineEvent::new(sync_timeline_event!({
"content": {},
"event_id": "$eeG0HA0FAZ37wP8kXlNkxx3I",
"origin_server_ts": 10,
@@ -153,7 +153,7 @@ async fn test_hide_failed_to_parse() {
// Similar to above, the m.room.member state event must also not have an
// empty content object.
timeline
.handle_live_event(SyncTimelineEvent::new(sync_timeline_event!({
.handle_live_event(TimelineEvent::new(sync_timeline_event!({
"content": {},
"event_id": "$d5G0HA0FAZ37wP8kXlNkxx3I",
"origin_server_ts": 2179,

View File

@@ -14,7 +14,7 @@
use assert_matches2::assert_let;
use eyeball_im::VectorDiff;
use matrix_sdk::deserialized_responses::SyncTimelineEvent;
use matrix_sdk::deserialized_responses::TimelineEvent;
use matrix_sdk_test::{async_test, sync_timeline_event, ALICE, BOB};
use ruma::{
events::{room::message::MessageType, MessageLikeEventType, StateEventType},
@@ -60,7 +60,7 @@ async fn test_invalid_event_content() {
// m.room.message events must have a msgtype and body in content, so this
// event with an empty content object should fail to deserialize.
timeline
.handle_live_event(SyncTimelineEvent::new(sync_timeline_event!({
.handle_live_event(TimelineEvent::new(sync_timeline_event!({
"content": {},
"event_id": "$eeG0HA0FAZ37wP8kXlNkxx3I",
"origin_server_ts": 10,
@@ -79,7 +79,7 @@ async fn test_invalid_event_content() {
// Similar to above, the m.room.member state event must also not have an
// empty content object.
timeline
.handle_live_event(SyncTimelineEvent::new(sync_timeline_event!({
.handle_live_event(TimelineEvent::new(sync_timeline_event!({
"content": {},
"event_id": "$d5G0HA0FAZ37wP8kXlNkxx3I",
"origin_server_ts": 2179,
@@ -107,7 +107,7 @@ async fn test_invalid_event() {
// This event is missing the sender field which the homeserver must add to
// all timeline events. Because the event is malformed, it will be ignored.
timeline
.handle_live_event(SyncTimelineEvent::new(sync_timeline_event!({
.handle_live_event(TimelineEvent::new(sync_timeline_event!({
"content": {
"body": "hello world",
"msgtype": "m.text"

View File

@@ -27,7 +27,7 @@ use futures_core::Stream;
use indexmap::IndexMap;
use matrix_sdk::{
config::RequestConfig,
deserialized_responses::{SyncTimelineEvent, TimelineEvent},
deserialized_responses::TimelineEvent,
event_cache::paginator::{PaginableRoom, PaginatorError},
room::{EventWithContextResponse, Messages, MessagesOptions},
send_queue::RoomSendQueueUpdate,
@@ -173,7 +173,7 @@ impl TestTimeline {
self.controller.items().await.len()
}
async fn handle_live_event(&self, event: impl Into<SyncTimelineEvent>) {
async fn handle_live_event(&self, event: impl Into<TimelineEvent>) {
let event = event.into();
self.controller
.add_events_at(
@@ -297,7 +297,7 @@ impl PinnedEventsRoom for TestRoomDataProvider {
_event_id: &'a EventId,
_request_config: Option<RequestConfig>,
_related_event_filters: Option<Vec<RelationType>>,
) -> BoxFuture<'a, Result<(SyncTimelineEvent, Vec<SyncTimelineEvent>), PaginatorError>> {
) -> BoxFuture<'a, Result<(TimelineEvent, Vec<TimelineEvent>), PaginatorError>> {
unimplemented!();
}

View File

@@ -18,7 +18,7 @@ use assert_matches2::{assert_let, assert_matches};
use eyeball_im::VectorDiff;
use futures_core::Stream;
use futures_util::{FutureExt as _, StreamExt as _};
use matrix_sdk::deserialized_responses::SyncTimelineEvent;
use matrix_sdk::deserialized_responses::TimelineEvent;
use matrix_sdk_test::{async_test, event_factory::EventFactory, sync_timeline_event, ALICE, BOB};
use ruma::{
event_id, events::AnyMessageLikeEventContent, server_name, uint, EventId,
@@ -151,7 +151,7 @@ async fn test_redact_reaction_success() {
// When that redaction is confirmed by the server,
timeline
.handle_live_event(SyncTimelineEvent::new(sync_timeline_event!({
.handle_live_event(TimelineEvent::new(sync_timeline_event!({
"sender": *ALICE,
"type": "m.room.redaction",
"event_id": "$idb",
@@ -198,9 +198,9 @@ async fn test_initial_reaction_timestamp_is_stored() {
// Reaction comes first.
f.reaction(&message_event_id, REACTION_KEY)
.server_ts(reaction_timestamp)
.into_sync(),
.into_event(),
// Event comes next.
f.text_msg("A").event_id(&message_event_id).into_sync(),
f.text_msg("A").event_id(&message_event_id).into_event(),
]
.into_iter(),
TimelineNewItemPosition::End { origin: RemoteEventOrigin::Sync },

View File

@@ -1,6 +1,6 @@
use assert_matches::assert_matches;
use eyeball_im::VectorDiff;
use matrix_sdk_base::deserialized_responses::{ShieldState, ShieldStateCode, SyncTimelineEvent};
use matrix_sdk_base::deserialized_responses::{ShieldState, ShieldStateCode, TimelineEvent};
use matrix_sdk_test::{async_test, sync_timeline_event, ALICE};
use ruma::{
event_id,
@@ -97,7 +97,7 @@ async fn test_local_sent_in_clear_shield() {
// When the remote echo comes in.
timeline
.handle_live_event(SyncTimelineEvent::new(sync_timeline_event!({
.handle_live_event(TimelineEvent::new(sync_timeline_event!({
"content": {
"body": "Local message",
"msgtype": "m.text",

View File

@@ -27,6 +27,7 @@ use growable_bloom_filter::{GrowableBloom, GrowableBloomBuilder};
use matrix_sdk::{
crypto::types::events::UtdCause,
executor::{spawn, JoinHandle},
sleep::sleep,
Client,
};
use matrix_sdk_base::{StateStoreDataKey, StateStoreDataValue, StoreError};
@@ -34,10 +35,7 @@ use ruma::{
time::{Duration, Instant},
EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedServerName, UserId,
};
use tokio::{
sync::{Mutex as AsyncMutex, MutexGuard},
time::sleep,
};
use tokio::sync::{Mutex as AsyncMutex, MutexGuard};
use tracing::error;
/// A generic interface which methods get called whenever we observe a

View File

@@ -8,8 +8,8 @@ use std::{
use futures_util::{pin_mut, StreamExt as _};
use matrix_sdk::{
authentication::matrix::{MatrixSession, MatrixSessionTokens},
config::RequestConfig,
matrix_auth::{MatrixSession, MatrixSessionTokens},
test_utils::{logged_in_client_with_server, test_client_builder_with_server},
SessionMeta,
};

View File

@@ -55,6 +55,7 @@ async fn mock_sync(server: &MockServer, response_body: impl Serialize, since: Op
///
/// Note: pass `events_before` in the normal order, I'll revert the order for
/// you.
// TODO: replace with MatrixMockServer
#[allow(clippy::too_many_arguments)] // clippy you've got such a fixed mindset
async fn mock_context(
server: &MockServer,
@@ -86,6 +87,7 @@ async fn mock_context(
///
/// Note: pass `chunk` in the correct order: topological for forward pagination,
/// reverse topological for backwards pagination.
// TODO: replace with MatrixMockServer
async fn mock_messages(
server: &MockServer,
start: String,

View File

@@ -73,19 +73,19 @@ async fn test_sync_service_state() -> anyhow::Result<()> {
// At first, the sync service is sleeping.
assert_eq!(state_stream.get(), State::Idle);
assert!(server.received_requests().await.unwrap().is_empty());
assert_eq!(sync_service.task_states(), (false, false));
assert!(!sync_service.is_supervisor_running().await);
assert!(sync_service.try_get_encryption_sync_permit().is_some());
// After starting, the sync service is, well, running.
sync_service.start().await;
assert_next_matches!(state_stream, State::Running);
assert_eq!(sync_service.task_states(), (true, true));
assert!(sync_service.is_supervisor_running().await);
assert!(sync_service.try_get_encryption_sync_permit().is_none());
// Restarting while started doesn't change the current state.
sync_service.start().await;
assert_pending!(state_stream);
assert_eq!(sync_service.task_states(), (true, true));
assert!(sync_service.is_supervisor_running().await);
assert!(sync_service.try_get_encryption_sync_permit().is_none());
// Let the server respond a few times.
@@ -94,7 +94,7 @@ async fn test_sync_service_state() -> anyhow::Result<()> {
// Pausing will stop both syncs, after a bit of delay.
sync_service.stop().await?;
assert_next_matches!(state_stream, State::Idle);
assert_eq!(sync_service.task_states(), (false, false));
assert!(!sync_service.is_supervisor_running().await);
assert!(sync_service.try_get_encryption_sync_permit().is_some());
let mut num_encryption_sync_requests: i32 = 0;
@@ -149,7 +149,7 @@ async fn test_sync_service_state() -> anyhow::Result<()> {
// the same position than just before being stopped.
sync_service.start().await;
assert_next_matches!(state_stream, State::Running);
assert_eq!(sync_service.task_states(), (true, true));
assert!(sync_service.is_supervisor_running().await);
assert!(sync_service.try_get_encryption_sync_permit().is_none());
tokio::time::sleep(Duration::from_millis(100)).await;

View File

@@ -19,8 +19,8 @@ use assert_matches2::assert_let;
use eyeball_im::VectorDiff;
use futures_util::StreamExt;
use matrix_sdk::{
assert_next_matches_with_timeout, config::SyncSettings, executor::spawn,
ruma::MilliSecondsSinceUnixEpoch, test_utils::logged_in_client_with_server,
config::SyncSettings, executor::spawn, ruma::MilliSecondsSinceUnixEpoch,
test_utils::logged_in_client_with_server,
};
use matrix_sdk_test::{
async_test, event_factory::EventFactory, mocks::mock_encryption_state, JoinedRoomBuilder,
@@ -33,7 +33,7 @@ use ruma::{
room_id, uint, user_id,
};
use serde_json::json;
use stream_assert::assert_next_matches;
use stream_assert::{assert_next_matches, assert_pending};
use tokio::task::yield_now;
use wiremock::{
matchers::{header, method, path_regex},
@@ -83,7 +83,10 @@ async fn test_echo() {
timeline.send(RoomMessageEventContent::text_plain("Hello, World!").into()).await
});
assert_let!(Some(VectorDiff::PushBack { value: local_echo }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: local_echo } = &timeline_updates[0]);
let item = local_echo.as_event().unwrap();
assert_matches!(item.send_state(), Some(EventSendState::NotSentYet));
assert_let!(TimelineItemContent::Message(msg) = item.content());
@@ -92,15 +95,16 @@ async fn test_echo() {
assert!(item.event_id().is_none());
let txn_id = item.transaction_id().unwrap();
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
// Wait for the sending to finish and assert everything was successful
send_hdl.await.unwrap().unwrap();
assert_let!(
Some(VectorDiff::Set { index: 1, value: sent_confirmation }) = timeline_stream.next().await
);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Set { index: 1, value: sent_confirmation } = &timeline_updates[0]);
let item = sent_confirmation.as_event().unwrap();
assert_matches!(item.send_state(), Some(EventSendState::Sent { .. }));
assert_eq!(item.event_id(), Some(event_id));
@@ -120,19 +124,24 @@ async fn test_echo() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 4);
// Local echo is replaced with the remote echo.
assert_next_matches!(timeline_stream, VectorDiff::Remove { index: 1 });
let remote_echo =
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => value);
assert_let!(VectorDiff::Remove { index: 1 } = &timeline_updates[0]);
assert_let!(VectorDiff::PushFront { value: remote_echo } = &timeline_updates[1]);
let item = remote_echo.as_event().unwrap();
assert!(item.is_own());
assert_eq!(item.timestamp(), MilliSecondsSinceUnixEpoch(uint!(152038280)));
// The date divider is also replaced.
let date_divider =
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => value);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[2]);
assert!(date_divider.is_date_divider());
assert_next_matches!(timeline_stream, VectorDiff::Remove { index: 2 });
assert_let!(VectorDiff::Remove { index: 2 } = &timeline_updates[3]);
assert_pending!(timeline_stream);
}
#[async_test]
@@ -251,14 +260,16 @@ async fn test_dedup_by_event_id_late() {
timeline.send(RoomMessageEventContent::text_plain("Hello, World!").into()).await.unwrap();
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// Timeline: [local echo]
let local_echo =
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => value);
assert_let!(VectorDiff::PushBack { value: local_echo } = &timeline_updates[0]);
let item = local_echo.as_event().unwrap();
assert_matches!(item.send_state(), Some(EventSendState::NotSentYet));
// Timeline: [date-divider, local echo]
let date_divider = assert_next_matches_with_timeout!( timeline_stream, VectorDiff::PushFront { value } => value);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
let f = EventFactory::new();
@@ -275,21 +286,29 @@ async fn test_dedup_by_event_id_late() {
mock_sync(&server, sync_builder.build_json_sync_response(), None).await;
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// Timeline: [remote-echo, date-divider, local echo]
let remote_echo =
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => value);
assert_let!(VectorDiff::PushFront { value: remote_echo } = &timeline_updates[0]);
let item = remote_echo.as_event().unwrap();
assert_eq!(item.event_id(), Some(event_id));
// Timeline: [date-divider, remote-echo, date-divider, local echo]
let date_divider = assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushFront { value } => value);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// Local echo and its date divider are removed.
// Timeline: [date-divider, remote-echo, date-divider]
assert_matches!(timeline_stream.next().await, Some(VectorDiff::Remove { index: 3 }));
assert_let!(VectorDiff::Remove { index: 3 } = &timeline_updates[0]);
// Timeline: [date-divider, remote-echo]
assert_matches!(timeline_stream.next().await, Some(VectorDiff::Remove { index: 2 }));
assert_let!(VectorDiff::Remove { index: 2 } = &timeline_updates[1]);
assert_pending!(timeline_stream);
}
#[async_test]

View File

@@ -58,7 +58,7 @@ use ruma::{
OwnedRoomId,
};
use serde_json::json;
use stream_assert::assert_next_matches;
use stream_assert::{assert_next_matches, assert_pending};
use tokio::{task::yield_now, time::sleep};
use wiremock::{
matchers::{header, method, path_regex},
@@ -98,7 +98,10 @@ async fn test_edit() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: first }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: first } = &timeline_updates[0]);
let item = first.as_event().unwrap();
assert_eq!(item.read_receipts().len(), 1, "implicit read receipt");
assert_matches!(item.latest_edit_json(), None);
@@ -107,7 +110,7 @@ async fn test_edit() {
assert_matches!(msg.in_reply_to(), None);
assert!(!msg.is_edited());
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
sync_builder.add_joined_room(
@@ -124,7 +127,10 @@ async fn test_edit() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: second }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 4);
assert_let!(VectorDiff::PushBack { value: second } = &timeline_updates[0]);
let item = second.as_event().unwrap();
assert!(item.event_id().is_some());
assert!(!item.is_own());
@@ -140,7 +146,7 @@ async fn test_edit() {
// No more implicit read receipt in Alice's message, because they edited
// something after the second event.
assert_let!(Some(VectorDiff::Set { index: 1, value: item }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[1]);
let item = item.as_event().unwrap();
assert_matches!(item.latest_edit_json(), None);
assert_let!(TimelineItemContent::Message(msg) = item.content());
@@ -151,7 +157,7 @@ async fn test_edit() {
assert_eq!(item.read_receipts().len(), 0, "no more implicit read receipt");
// ... so Alice's read receipt moves to Bob's message.
assert_let!(Some(VectorDiff::Set { index: 2, value: second }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 2, value: second } = &timeline_updates[2]);
let item = second.as_event().unwrap();
assert!(item.event_id().is_some());
assert!(!item.is_own());
@@ -159,7 +165,7 @@ async fn test_edit() {
assert_eq!(item.read_receipts().len(), 2, "should carry alice and bob's read receipts");
// The text changes in Alice's message.
assert_let!(Some(VectorDiff::Set { index: 1, value: edit }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 1, value: edit } = &timeline_updates[3]);
let item = edit.as_event().unwrap();
assert_matches!(item.latest_edit_json(), Some(_));
assert_let!(TimelineItemContent::Message(edited) = item.content());
@@ -189,19 +195,25 @@ async fn test_edit_local_echo() {
// Redacting a local event works.
timeline.send(RoomMessageEventContent::text_plain("hello, just you").into()).await.unwrap();
assert_let!(Some(VectorDiff::PushBack { value: item }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: item } = &timeline_updates[0]);
let internal_id = item.unique_id();
let item = item.as_event().unwrap();
assert_matches!(item.send_state(), Some(EventSendState::NotSentYet));
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
// We haven't set a route for sending events, so this will fail.
assert_let!(Some(VectorDiff::Set { index: 1, value: item }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let item = item.as_event().unwrap();
assert!(item.is_local_echo());
@@ -212,7 +224,7 @@ async fn test_edit_local_echo() {
Some(EventSendState::SendingFailed { is_recoverable: false, .. })
);
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
// Set up the success response before editing, since edit causes an immediate
// retry (the room's send queue is not blocked, since the one event it couldn't
@@ -229,8 +241,11 @@ async fn test_edit_local_echo() {
.await
.unwrap();
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// Observe local echo being replaced.
assert_let!(Some(VectorDiff::Set { index: 1, value: item }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
assert_eq!(item.unique_id(), internal_id);
@@ -246,8 +261,11 @@ async fn test_edit_local_echo() {
// Re-enable the room's queue.
timeline.room().send_queue().set_enabled(true);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// Observe the event being sent, and replacing the local echo.
assert_let!(Some(VectorDiff::Set { index: 1, value: item }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let item = item.as_event().unwrap();
assert!(item.is_local_echo());
@@ -256,7 +274,7 @@ async fn test_edit_local_echo() {
assert_eq!(edit_message.body(), "hello, world");
// No new updates.
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
}
#[async_test]
@@ -761,17 +779,22 @@ async fn test_edit_local_echo_with_unsupported_content() {
timeline.send(RoomMessageEventContent::text_plain("hello, just you").into()).await.unwrap();
assert_let!(Some(VectorDiff::PushBack { value: item }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: item } = &timeline_updates[0]);
let item = item.as_event().unwrap();
assert_matches!(item.send_state(), Some(EventSendState::NotSentYet));
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
// We haven't set a route for sending events, so this will fail.
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(Some(VectorDiff::Set { index: 1, value: item }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let item = item.as_event().unwrap();
assert!(item.is_local_echo());
@@ -782,7 +805,7 @@ async fn test_edit_local_echo_with_unsupported_content() {
Some(EventSendState::SendingFailed { is_recoverable: false, .. })
);
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
// Set up the success response before editing, since edit causes an immediate
// retry (the room's send queue is not blocked, since the one event it couldn't
@@ -814,7 +837,10 @@ async fn test_edit_local_echo_with_unsupported_content() {
.await
.unwrap();
assert_let!(Some(VectorDiff::PushBack { value: item }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::PushBack { value: item } = &timeline_updates[0]);
let item = item.as_event().unwrap();
assert_matches!(item.send_state(), Some(EventSendState::NotSentYet));
@@ -832,6 +858,8 @@ async fn test_edit_local_echo_with_unsupported_content() {
// We couldn't edit the local echo, since their content types didn't match
assert_matches!(edit_err, Error::EditError(EditError::ContentMismatch { .. }));
assert_pending!(timeline_stream);
}
struct PendingEditHelper {
@@ -879,7 +907,7 @@ impl PendingEditHelper {
.mount()
.await;
self.timeline.live_paginate_backwards(batch_size).await.unwrap();
self.timeline.paginate_backwards(batch_size).await.unwrap();
}
}
@@ -905,7 +933,7 @@ async fn test_pending_edit() {
.await;
// Nothing happens.
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
// But when I receive the original event after a bit…
h.handle_sync(
@@ -914,8 +942,11 @@ async fn test_pending_edit() {
)
.await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// Then I get the edited content immediately.
assert_let!(Some(VectorDiff::PushBack { value }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[0]);
let event = value.as_event().unwrap();
let latest_edit_json = event.latest_edit_json().expect("we should have an edit json");
@@ -926,12 +957,11 @@ async fn test_pending_edit() {
assert_eq!(msg.body(), "[edit]");
// The date divider.
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
// And nothing else.
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
}
#[async_test]
@@ -963,7 +993,7 @@ async fn test_pending_edit_overrides() {
.await;
// Nothing happens.
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
// And then I receive the original event after a bit…
h.handle_sync(
@@ -972,19 +1002,21 @@ async fn test_pending_edit_overrides() {
)
.await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// Then I get the latest edited content immediately.
assert_let!(Some(VectorDiff::PushBack { value }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[0]);
let msg = value.as_event().unwrap().content().as_message().unwrap();
assert!(msg.is_edited());
assert_eq!(msg.body(), "bonjour");
// The date divider.
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[1]);
assert!(value.is_date_divider());
// And nothing else.
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
}
#[async_test]
@@ -1020,19 +1052,21 @@ async fn test_pending_edit_from_backpagination() {
)
.await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// Then I get the latest edited content immediately.
assert_let!(Some(VectorDiff::PushBack { value }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[0]);
let msg = value.as_event().unwrap().content().as_message().unwrap();
assert!(msg.is_edited());
assert_eq!(msg.body(), "hello");
// The date divider.
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[1]);
assert!(value.is_date_divider());
// And nothing else.
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
}
#[async_test]
@@ -1073,7 +1107,7 @@ async fn test_pending_edit_from_backpagination_doesnt_override_pending_edit_from
.await;
// Nothing happens.
assert_matches!(timeline_stream.next().now_or_never(), None);
assert_pending!(timeline_stream);
// And then I receive the original event after a bit…
h.handle_sync(
@@ -1082,20 +1116,22 @@ async fn test_pending_edit_from_backpagination_doesnt_override_pending_edit_from
)
.await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// Then I get the edit from the sync, even if the back-pagination happened
// after.
assert_let!(Some(VectorDiff::PushBack { value }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[0]);
let msg = value.as_event().unwrap().content().as_message().unwrap();
assert!(msg.is_edited());
assert_eq!(msg.body(), "[edit]");
// The date divider.
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[1]);
assert!(value.is_date_divider());
// And nothing else.
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
}
#[async_test]
@@ -1131,7 +1167,7 @@ async fn test_pending_poll_edit() {
.await;
// Nothing happens.
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
// But when I receive the original event after a bit…
let event_content = NewUnstablePollStartEventContent::new(UnstablePollStartContentBlock::new(
@@ -1149,8 +1185,11 @@ async fn test_pending_poll_edit() {
)
.await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// Then I get the edited content immediately.
assert_let!(Some(VectorDiff::PushBack { value }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[0]);
let poll = as_variant!(value.as_event().unwrap().content(), TimelineItemContent::Poll).unwrap();
assert!(poll.is_edit());
@@ -1160,12 +1199,11 @@ async fn test_pending_poll_edit() {
assert_eq!(results.answers[1].text, "No");
// The date divider.
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[1]);
assert!(value.is_date_divider());
// And nothing else.
assert!(timeline_stream.next().now_or_never().is_none());
assert_pending!(timeline_stream);
}
#[async_test]

View File

@@ -19,10 +19,7 @@ use std::time::Duration;
use assert_matches2::assert_let;
use eyeball_im::VectorDiff;
use futures_util::StreamExt;
use matrix_sdk::{
assert_next_matches_with_timeout, config::SyncSettings,
test_utils::logged_in_client_with_server,
};
use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server};
use matrix_sdk_test::{
async_test, event_factory::EventFactory, mocks::mock_encryption_state, JoinedRoomBuilder,
SyncResponseBuilder, ALICE, BOB,
@@ -57,13 +54,13 @@ async fn test_new_focused() {
target_event,
Some("prev1".to_owned()),
vec![
f.text_msg("i tried so hard").sender(*ALICE).into_timeline(),
f.text_msg("and got so far").sender(*ALICE).into_timeline(),
f.text_msg("i tried so hard").sender(*ALICE).into_event(),
f.text_msg("and got so far").sender(*ALICE).into_event(),
],
f.text_msg("in the end").event_id(target_event).sender(*BOB).into_timeline(),
f.text_msg("in the end").event_id(target_event).sender(*BOB).into_event(),
vec![
f.text_msg("it doesn't even").sender(*ALICE).into_timeline(),
f.text_msg("matter").sender(*ALICE).into_timeline(),
f.text_msg("it doesn't even").sender(*ALICE).into_event(),
f.text_msg("matter").sender(*ALICE).into_event(),
],
Some("next1".to_owned()),
vec![],
@@ -117,35 +114,39 @@ async fn test_new_focused() {
None,
vec![
// reversed manually here
f.text_msg("And even though I tried, it all fell apart").sender(*BOB).into_timeline(),
f.text_msg("I kept everything inside").sender(*BOB).into_timeline(),
f.text_msg("And even though I tried, it all fell apart").sender(*BOB).into_event(),
f.text_msg("I kept everything inside").sender(*BOB).into_event(),
],
vec![],
)
.await;
let hit_start = timeline.focused_paginate_backwards(20).await.unwrap();
let hit_start = timeline.paginate_backwards(20).await.unwrap();
assert!(hit_start);
server.reset().await;
assert_let!(Some(VectorDiff::PushFront { value: message }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 4);
assert_let!(VectorDiff::PushFront { value: message } = &timeline_updates[0]);
assert_eq!(
message.as_event().unwrap().content().as_message().unwrap().body(),
"And even though I tried, it all fell apart"
);
assert_let!(Some(VectorDiff::PushFront { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: message } = &timeline_updates[1]);
assert_eq!(
message.as_event().unwrap().content().as_message().unwrap().body(),
"I kept everything inside"
);
// Date divider post processing.
assert_let!(Some(VectorDiff::PushFront { value: item }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: item } = &timeline_updates[2]);
assert!(item.is_date_divider());
assert_let!(Some(VectorDiff::Remove { index }) = timeline_stream.next().await);
assert_eq!(index, 3);
assert_let!(VectorDiff::Remove { index } = &timeline_updates[3]);
assert_eq!(*index, 3);
// Now trigger a forward pagination.
mock_messages(
@@ -153,25 +154,28 @@ async fn test_new_focused() {
"next1".to_owned(),
Some("next2".to_owned()),
vec![
f.text_msg("I had to fall, to lose it all").sender(*BOB).into_timeline(),
f.text_msg("But in the end, it doesn't event matter").sender(*BOB).into_timeline(),
f.text_msg("I had to fall, to lose it all").sender(*BOB).into_event(),
f.text_msg("But in the end, it doesn't event matter").sender(*BOB).into_event(),
],
vec![],
)
.await;
let hit_start = timeline.focused_paginate_forwards(20).await.unwrap();
let hit_start = timeline.paginate_forwards(20).await.unwrap();
assert!(!hit_start); // because we gave it another next2 token.
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[0]);
assert_eq!(
message.as_event().unwrap().content().as_message().unwrap().body(),
"I had to fall, to lose it all"
);
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[1]);
assert_eq!(
message.as_event().unwrap().content().as_message().unwrap().body(),
"But in the end, it doesn't event matter"
@@ -204,7 +208,7 @@ async fn test_focused_timeline_reacts() {
target_event,
None,
vec![],
f.text_msg("yolo").event_id(target_event).sender(*BOB).into_timeline(),
f.text_msg("yolo").event_id(target_event).sender(*BOB).into_event(),
vec![],
None,
vec![],
@@ -250,7 +254,10 @@ async fn test_focused_timeline_reacts() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
let item = assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Set { index: 1, value: item } => item);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let event_item = item.as_event().unwrap();
// Text hasn't changed.
@@ -286,7 +293,7 @@ async fn test_focused_timeline_local_echoes() {
target_event,
None,
vec![],
f.text_msg("yolo").event_id(target_event).sender(*BOB).into_timeline(),
f.text_msg("yolo").event_id(target_event).sender(*BOB).into_event(),
vec![],
None,
vec![],
@@ -322,8 +329,11 @@ async fn test_focused_timeline_local_echoes() {
// Add a reaction to the focused event, which will cause a local echo to happen.
timeline.toggle_reaction(&event_item.identifier(), "").await.unwrap();
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// We immediately get the local echo for the reaction.
let item = assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Set { index: 1, value: item } => item);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let event_item = item.as_event().unwrap();
// Text hasn't changed.
@@ -362,7 +372,7 @@ async fn test_focused_timeline_doesnt_show_local_echoes() {
target_event,
None,
vec![],
f.text_msg("yolo").event_id(target_event).sender(*BOB).into_timeline(),
f.text_msg("yolo").event_id(target_event).sender(*BOB).into_event(),
vec![],
None,
vec![],

View File

@@ -19,7 +19,6 @@ use assert_matches2::assert_let;
use eyeball_im::VectorDiff;
use futures_util::StreamExt;
use matrix_sdk::{
assert_let_timeout,
config::SyncSettings,
test_utils::{logged_in_client_with_server, mocks::MatrixMockServer},
};
@@ -40,7 +39,7 @@ use ruma::{
owned_event_id, room_id, user_id, MilliSecondsSinceUnixEpoch,
};
use serde_json::json;
use stream_assert::{assert_next_matches, assert_pending};
use stream_assert::assert_pending;
use wiremock::{
matchers::{header, method, path_regex},
Mock, ResponseTemplate,
@@ -113,17 +112,18 @@ async fn test_reaction() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 4);
// The new message starts with their author's read receipt.
assert_let_timeout!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next());
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[0]);
let event_item = message.as_event().unwrap();
assert_matches!(event_item.content(), TimelineItemContent::Message(_));
assert_eq!(event_item.read_receipts().len(), 1);
// The new message is getting the reaction, which implies an implicit read
// receipt that's obtained first.
assert_let_timeout!(
Some(VectorDiff::Set { index: 0, value: updated_message }) = timeline_stream.next()
);
assert_let!(VectorDiff::Set { index: 0, value: updated_message } = &timeline_updates[1]);
let event_item = updated_message.as_event().unwrap();
assert_let!(TimelineItemContent::Message(msg) = event_item.content());
assert!(!msg.is_edited());
@@ -131,9 +131,7 @@ async fn test_reaction() {
assert_eq!(event_item.reactions().len(), 0);
// Then the reaction is taken into account.
assert_let_timeout!(
Some(VectorDiff::Set { index: 0, value: updated_message }) = timeline_stream.next()
);
assert_let!(VectorDiff::Set { index: 0, value: updated_message } = &timeline_updates[2]);
let event_item = updated_message.as_event().unwrap();
assert_let!(TimelineItemContent::Message(msg) = event_item.content());
assert!(!msg.is_edited());
@@ -145,9 +143,7 @@ async fn test_reaction() {
assert_eq!(senders.as_slice(), [user_id!("@bob:example.org")]);
// The date divider.
assert_let_timeout!(
Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next()
);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[3]);
assert!(date_divider.is_date_divider());
sync_builder.add_joined_room(JoinedRoomBuilder::new(room_id).add_timeline_event(
@@ -165,13 +161,16 @@ async fn test_reaction() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let_timeout!(
Some(VectorDiff::Set { index: 1, value: updated_message }) = timeline_stream.next()
);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Set { index: 1, value: updated_message } = &timeline_updates[0]);
let event_item = updated_message.as_event().unwrap();
assert_let!(TimelineItemContent::Message(msg) = event_item.content());
assert!(!msg.is_edited());
assert_eq!(event_item.reactions().len(), 0);
assert_pending!(timeline_stream);
}
#[async_test]
@@ -226,11 +225,16 @@ async fn test_redacted_message() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: first }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: first } = &timeline_updates[0]);
assert_matches!(first.as_event().unwrap().content(), TimelineItemContent::RedactedMessage);
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
assert_pending!(timeline_stream);
}
#[async_test]
@@ -258,13 +262,16 @@ async fn test_redact_message() {
)
.await;
assert_let!(Some(VectorDiff::PushBack { value: first }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: first } = &timeline_updates[0]);
assert_eq!(
first.as_event().unwrap().content().as_message().unwrap().body(),
"buy my bitcoins bro"
);
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
// Redacting a remote event works.
@@ -278,14 +285,20 @@ async fn test_redact_message() {
.await
.unwrap();
assert_let!(Some(VectorDiff::PushBack { value: second }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::PushBack { value: second } = &timeline_updates[0]);
let second = second.as_event().unwrap();
assert_matches!(second.send_state(), Some(EventSendState::NotSentYet));
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// We haven't set a route for sending events, so this will fail.
assert_let!(Some(VectorDiff::Set { index, value: second }) = timeline_stream.next().await);
assert_eq!(index, 2);
assert_let!(VectorDiff::Set { index, value: second } = &timeline_updates[0]);
assert_eq!(*index, 2);
let second = second.as_event().unwrap();
assert!(second.is_local_echo());
@@ -294,8 +307,13 @@ async fn test_redact_message() {
// Let's redact the local echo.
timeline.redact(&second.identifier(), None).await.unwrap();
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// Observe local echo being removed.
assert_matches!(timeline_stream.next().await, Some(VectorDiff::Remove { index: 2 }));
assert_let!(VectorDiff::Remove { index: 2 } = &timeline_updates[0]);
assert_pending!(timeline_stream);
}
#[async_test]
@@ -320,21 +338,27 @@ async fn test_redact_local_sent_message() {
.await
.unwrap();
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// Assert the local event is in the timeline now and is not sent yet.
assert_let_timeout!(Some(VectorDiff::PushBack { value: item }) = timeline_stream.next());
assert_let!(VectorDiff::PushBack { value: item } = &timeline_updates[0]);
let event = item.as_event().unwrap();
assert!(event.is_local_echo());
assert_matches!(event.send_state(), Some(EventSendState::NotSentYet));
// As well as a date divider.
assert_let_timeout!(
Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next()
);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// We receive an update in the timeline from the send queue.
assert_let_timeout!(Some(VectorDiff::Set { index, value: item }) = timeline_stream.next());
assert_eq!(index, 1);
assert_let!(VectorDiff::Set { index, value: item } = &timeline_updates[0]);
assert_eq!(*index, 1);
assert_pending!(timeline_stream);
// Check the event is sent but still considered local.
let event = item.as_event().unwrap();
@@ -415,10 +439,13 @@ async fn test_read_marker() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[0]);
assert_matches!(message.as_event().unwrap().content(), TimelineItemContent::Message(_));
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
sync_builder.add_joined_room(
@@ -448,13 +475,16 @@ async fn test_read_marker() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[0]);
assert_matches!(message.as_event().unwrap().content(), TimelineItemContent::Message(_));
assert_let!(
Some(VectorDiff::Insert { index: 2, value: marker }) = timeline_stream.next().await
);
assert_let!(VectorDiff::Insert { index: 2, value: marker } = &timeline_updates[1]);
assert_matches!(marker.as_virtual().unwrap(), VirtualTimelineItem::ReadMarker);
assert_pending!(timeline_stream);
}
#[async_test]
@@ -499,12 +529,15 @@ async fn test_sync_highlighted() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: first }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: first } = &timeline_updates[0]);
let remote_event = first.as_event().unwrap();
// Own events don't trigger push rules.
assert!(!remote_event.is_highlighted());
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
sync_builder.add_joined_room(JoinedRoomBuilder::new(room_id).add_timeline_event(
@@ -525,10 +558,15 @@ async fn test_sync_highlighted() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: second }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::PushBack { value: second } = &timeline_updates[0]);
let remote_event = second.as_event().unwrap();
// `m.room.tombstone` should be highlighted by default.
assert!(remote_event.is_highlighted());
assert_pending!(timeline_stream);
}
#[async_test]
@@ -772,21 +810,24 @@ async fn test_timeline_without_encryption_can_update() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 3);
// Previous timeline event now has a shield
assert_next_matches!(stream, VectorDiff::Set { index, value } => {
assert_eq!(index, 1);
assert!(value.as_event().unwrap().get_shield(false).is_some());
});
assert_let!(VectorDiff::Set { index, value } = &timeline_updates[0]);
assert_eq!(*index, 1);
assert!(value.as_event().unwrap().get_shield(false).is_some());
// Room encryption event is received
assert_next_matches!(stream, VectorDiff::PushBack { value } => {
assert_let!(TimelineItemContent::OtherState(other_state) = value.as_event().unwrap().content());
assert_let!(AnyOtherFullStateEventContent::RoomEncryption(_) = other_state.content());
assert!(value.as_event().unwrap().get_shield(false).is_some());
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[1]);
assert_let!(TimelineItemContent::OtherState(other_state) = value.as_event().unwrap().content());
assert_let!(AnyOtherFullStateEventContent::RoomEncryption(_) = other_state.content());
assert!(value.as_event().unwrap().get_shield(false).is_some());
// New message event is received and has a shield
assert_next_matches!(stream, VectorDiff::PushBack { value } => {
assert!(value.as_event().unwrap().get_shield(false).is_some());
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[2]);
assert!(value.as_event().unwrap().get_shield(false).is_some());
assert_pending!(stream);
}

View File

@@ -76,7 +76,7 @@ async fn test_back_pagination() {
.await;
let paginate = async {
timeline.live_paginate_backwards(10).await.unwrap();
timeline.paginate_backwards(10).await.unwrap();
server.reset().await;
};
let observe_paginating = async {
@@ -84,9 +84,11 @@ async fn test_back_pagination() {
};
join(paginate, observe_paginating).await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
// `m.room.name`
{
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[0]);
assert_let!(TimelineItemContent::OtherState(state) = message.as_event().unwrap().content());
assert_eq!(state.state_key(), "");
assert_let!(
@@ -101,13 +103,13 @@ async fn test_back_pagination() {
// `m.room.name` receives an update
{
assert_let!(Some(VectorDiff::Set { index, .. }) = timeline_stream.next().await);
assert_eq!(index, 0);
assert_let!(VectorDiff::Set { index, .. } = &timeline_updates[1]);
assert_eq!(*index, 0);
}
// `m.room.message`: “the world is big”
{
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[2]);
assert_let!(TimelineItemContent::Message(msg) = message.as_event().unwrap().content());
assert_let!(MessageType::Text(text) = msg.msgtype());
assert_eq!(text.body, "the world is big");
@@ -115,7 +117,7 @@ async fn test_back_pagination() {
// `m.room.message`: “hello world”
{
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[3]);
assert_let!(TimelineItemContent::Message(msg) = message.as_event().unwrap().content());
assert_let!(MessageType::Text(text) = msg.msgtype());
assert_eq!(text.body, "hello world");
@@ -123,9 +125,7 @@ async fn test_back_pagination() {
// Date divider is updated.
{
assert_let!(
Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await
);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[4]);
assert!(date_divider.is_date_divider());
}
@@ -145,7 +145,7 @@ async fn test_back_pagination() {
.mount(&server)
.await;
let hit_start = timeline.live_paginate_backwards(10).await.unwrap();
let hit_start = timeline.paginate_backwards(10).await.unwrap();
assert!(hit_start);
assert_next_eq!(
back_pagination_status,
@@ -218,12 +218,14 @@ async fn test_back_pagination_highlighted() {
.mount(&server)
.await;
timeline.live_paginate_backwards(10).await.unwrap();
timeline.paginate_backwards(10).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
// `m.room.tombstone`
{
assert_let!(Some(VectorDiff::PushBack { value: second }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: second } = &timeline_updates[0]);
let remote_event = second.as_event().unwrap();
// `m.room.tombstone` should be highlighted by default.
assert!(remote_event.is_highlighted());
@@ -231,7 +233,7 @@ async fn test_back_pagination_highlighted() {
// `m.room.message`
{
assert_let!(Some(VectorDiff::PushBack { value: first }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: first } = &timeline_updates[1]);
let remote_event = first.as_event().unwrap();
// Own events don't trigger push rules.
assert!(!remote_event.is_highlighted());
@@ -239,9 +241,7 @@ async fn test_back_pagination_highlighted() {
// Date divider
{
assert_let!(
Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await
);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[2]);
assert!(date_divider.is_date_divider());
}
@@ -289,7 +289,7 @@ async fn test_wait_for_token() {
mock_sync(&server, sync_builder.build_json_sync_response(), None).await;
let paginate = async {
timeline.live_paginate_backwards(10).await.unwrap();
timeline.paginate_backwards(10).await.unwrap();
};
let observe_paginating = async {
assert_eq!(back_pagination_status.next().await, Some(LiveBackPaginationStatus::Paginating));
@@ -354,10 +354,10 @@ async fn test_dedup_pagination() {
// If I try to paginate twice at the same time,
let paginate_1 = async {
timeline.live_paginate_backwards(10).await.unwrap();
timeline.paginate_backwards(10).await.unwrap();
};
let paginate_2 = async {
timeline.live_paginate_backwards(10).await.unwrap();
timeline.paginate_backwards(10).await.unwrap();
};
timeout(Duration::from_secs(5), join(paginate_1, paginate_2)).await.unwrap();
@@ -443,7 +443,7 @@ async fn test_timeline_reset_while_paginating() {
let (_, mut back_pagination_status) = timeline.live_back_pagination_status().await.unwrap();
let paginate = async { timeline.live_paginate_backwards(10).await.unwrap() };
let paginate = async { timeline.paginate_backwards(10).await.unwrap() };
let observe_paginating = async {
let mut seen_paginating = false;
@@ -608,7 +608,7 @@ async fn test_empty_chunk() {
.await;
let paginate = async {
timeline.live_paginate_backwards(10).await.unwrap();
timeline.paginate_backwards(10).await.unwrap();
server.reset().await;
};
let observe_paginating = async {
@@ -616,9 +616,11 @@ async fn test_empty_chunk() {
};
join(paginate, observe_paginating).await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
// `m.room.name`
{
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[0]);
assert_let!(TimelineItemContent::OtherState(state) = message.as_event().unwrap().content());
assert_eq!(state.state_key(), "");
assert_let!(
@@ -633,13 +635,13 @@ async fn test_empty_chunk() {
// `m.room.name` is updated
{
assert_let!(Some(VectorDiff::Set { index, .. }) = timeline_stream.next().await);
assert_eq!(index, 0);
assert_let!(VectorDiff::Set { index, .. } = &timeline_updates[1]);
assert_eq!(*index, 0);
}
// `m.room.message`: “the world is big”
{
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[2]);
assert_let!(TimelineItemContent::Message(msg) = message.as_event().unwrap().content());
assert_let!(MessageType::Text(text) = msg.msgtype());
assert_eq!(text.body, "the world is big");
@@ -647,7 +649,7 @@ async fn test_empty_chunk() {
// `m.room.name`: “hello world”
{
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[3]);
assert_let!(TimelineItemContent::Message(msg) = message.as_event().unwrap().content());
assert_let!(MessageType::Text(text) = msg.msgtype());
assert_eq!(text.body, "hello world");
@@ -655,9 +657,7 @@ async fn test_empty_chunk() {
// Date divider
{
assert_let!(
Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await
);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[4]);
assert!(date_divider.is_date_divider());
}
@@ -719,16 +719,18 @@ async fn test_until_num_items_with_empty_chunk() {
.await;
let paginate = async {
timeline.live_paginate_backwards(10).await.unwrap();
timeline.paginate_backwards(10).await.unwrap();
};
let observe_paginating = async {
assert_eq!(back_pagination_status.next().await, Some(LiveBackPaginationStatus::Paginating));
};
join(paginate, observe_paginating).await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
// `m.room.name`
{
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[0]);
assert_let!(TimelineItemContent::OtherState(state) = message.as_event().unwrap().content());
assert_eq!(state.state_key(), "");
assert_let!(
@@ -743,13 +745,13 @@ async fn test_until_num_items_with_empty_chunk() {
// `m.room.name` is updated
{
assert_let!(Some(VectorDiff::Set { index, .. }) = timeline_stream.next().await);
assert_eq!(index, 0);
assert_let!(VectorDiff::Set { index, .. } = &timeline_updates[1]);
assert_eq!(*index, 0);
}
// `m.room.message`: “the world is big”
{
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[2]);
assert_let!(TimelineItemContent::Message(msg) = message.as_event().unwrap().content());
assert_let!(MessageType::Text(text) = msg.msgtype());
assert_eq!(text.body, "the world is big");
@@ -757,7 +759,7 @@ async fn test_until_num_items_with_empty_chunk() {
// `m.room.name`: “hello world”
{
assert_let!(Some(VectorDiff::PushBack { value: message }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: message } = &timeline_updates[3]);
assert_let!(TimelineItemContent::Message(msg) = message.as_event().unwrap().content());
assert_let!(MessageType::Text(text) = msg.msgtype());
assert_eq!(text.body, "hello world");
@@ -765,20 +767,18 @@ async fn test_until_num_items_with_empty_chunk() {
// Date divider
{
assert_let!(
Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await
);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[4]);
assert!(date_divider.is_date_divider());
}
timeline.live_paginate_backwards(10).await.unwrap();
timeline.paginate_backwards(10).await.unwrap();
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
// `m.room.name`: “hello room then”
{
assert_let!(
Some(VectorDiff::Insert { index, value: message }) = timeline_stream.next().await
);
assert_eq!(index, 1);
assert_let!(VectorDiff::Insert { index, value: message } = &timeline_updates[0]);
assert_eq!(*index, 1);
assert_let!(TimelineItemContent::Message(msg) = message.as_event().unwrap().content());
assert_let!(MessageType::Text(text) = msg.msgtype());
assert_eq!(text.body, "hello room then");
@@ -821,7 +821,7 @@ async fn test_back_pagination_aborted() {
let paginate = spawn({
let timeline = timeline.clone();
async move {
timeline.live_paginate_backwards(10).await.unwrap();
timeline.paginate_backwards(10).await.unwrap();
}
});

View File

@@ -1,9 +1,10 @@
use std::{ops::ControlFlow, time::Duration};
use assert_matches::assert_matches;
use assert_matches2::assert_let;
use eyeball_im::VectorDiff;
use futures_util::StreamExt as _;
use matrix_sdk::{
assert_next_matches_with_timeout,
config::SyncSettings,
event_cache::{BackPaginationOutcome, TimelineHasBeenResetWhilePaginating},
test_utils::{
@@ -106,22 +107,32 @@ async fn test_new_pinned_events_are_added_on_sync() {
.await
.expect("Room should be synced");
// If the test runs fast, we receive 1 update, then 4 updates. If the test runs
// slow, we receive 5 updates directly. Let's solve this flakiness with a
// `sleep`.
sleep(Duration::from_millis(500)).await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 5);
// The item is added automatically
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().event_id().unwrap(), event_id!("$2"));
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[0]);
assert_eq!(value.as_event().unwrap().event_id().unwrap(), event_id!("$2"));
// The list is reloaded, so it's reset
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Clear);
assert_let!(VectorDiff::Clear = &timeline_updates[1]);
// Then the loaded list items are added
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().event_id().unwrap(), event_id!("$1"));
});
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().event_id().unwrap(), event_id!("$2"));
});
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[2]);
assert_eq!(value.as_event().unwrap().event_id().unwrap(), event_id!("$1"));
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[3]);
assert_eq!(value.as_event().unwrap().event_id().unwrap(), event_id!("$2"));
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[4]);
assert!(value.is_date_divider());
assert_pending!(timeline_stream);
}
#[async_test]
@@ -175,16 +186,20 @@ async fn test_new_pinned_event_ids_reload_the_timeline() {
.await
.expect("Sync failed");
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Clear);
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().event_id().unwrap(), event_id!("$1"));
});
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().event_id().unwrap(), event_id!("$2"));
});
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 4);
assert_let!(VectorDiff::Clear = &timeline_updates[0]);
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[1]);
assert_eq!(value.as_event().unwrap().event_id().unwrap(), event_id!("$1"));
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[2]);
assert_eq!(value.as_event().unwrap().event_id().unwrap(), event_id!("$2"));
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[3]);
assert!(value.is_date_divider());
assert_pending!(timeline_stream);
// Reload timeline with no pinned event
@@ -195,7 +210,10 @@ async fn test_new_pinned_event_ids_reload_the_timeline() {
.await
.expect("Sync failed");
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Clear);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Clear = &timeline_updates[0]);
assert_pending!(timeline_stream);
}
@@ -532,26 +550,30 @@ async fn test_edited_events_are_reflected_in_sync() {
.await
.expect("Sync failed");
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 4);
// The list is reloaded, so it's reset
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Clear);
assert_let!(VectorDiff::Clear = &timeline_updates[0]);
// Then the loaded list items are added
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$1"));
});
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[1]);
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$1"));
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[2]);
assert!(value.is_date_divider());
// The edit replaces the original event
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Set { index, value } => {
assert_eq!(index, 1);
match value.as_event().unwrap().content() {
TimelineItemContent::Message(m) => {
assert_eq!(m.body(), "* edited message!")
}
_ => panic!("Should be a message event"),
assert_let!(VectorDiff::Set { index, value } = &timeline_updates[3]);
assert_eq!(*index, 1);
match value.as_event().unwrap().content() {
TimelineItemContent::Message(m) => {
assert_eq!(m.body(), "* edited message!")
}
});
_ => panic!("Should be a message event"),
}
assert_pending!(timeline_stream);
}
@@ -610,21 +632,25 @@ async fn test_redacted_events_are_reflected_in_sync() {
.await
.expect("Sync failed");
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 4);
// The list is reloaded, so it's reset
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Clear);
assert_let!(VectorDiff::Clear = &timeline_updates[0]);
// Then the loaded list items are added
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$1"));
});
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[1]);
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$1"));
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[2]);
assert!(value.is_date_divider());
// The redaction replaces the original event
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Set { index, value } => {
assert_eq!(index, 1);
assert_matches!(value.as_event().unwrap().content(), TimelineItemContent::RedactedMessage);
});
assert_let!(VectorDiff::Set { index, value } = &timeline_updates[3]);
assert_eq!(*index, 1);
assert_matches!(value.as_event().unwrap().content(), TimelineItemContent::RedactedMessage);
assert_pending!(timeline_stream);
}
@@ -687,26 +713,30 @@ async fn test_edited_events_survive_pinned_event_ids_change() {
.await
.expect("Sync failed");
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 4);
// The list is reloaded, so it's reset
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Clear);
assert_let!(VectorDiff::Clear = &timeline_updates[0]);
// Then the loaded list items are added
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$1"));
});
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[1]);
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$1"));
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[2]);
assert!(value.is_date_divider());
// The edit replaces the original event
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Set { index, value } => {
assert_eq!(index, 1);
match value.as_event().unwrap().content() {
TimelineItemContent::Message(m) => {
assert_eq!(m.body(), "edited message!")
}
_ => panic!("Should be a message event"),
assert_let!(VectorDiff::Set { index, value } = &timeline_updates[3]);
assert_eq!(*index, 1);
match value.as_event().unwrap().content() {
TimelineItemContent::Message(m) => {
assert_eq!(m.body(), "edited message!")
}
});
_ => panic!("Should be a message event"),
}
assert_pending!(timeline_stream);
let new_pinned_event = f
@@ -726,36 +756,44 @@ async fn test_edited_events_survive_pinned_event_ids_change() {
.await
.expect("Sync failed");
// If the test runs fast, we receive 1 update, then 5 updates. If the test runs
// slow, we receive 6 updates directly. Let's solve this flakiness with a
// `sleep`.
sleep(Duration::from_millis(500)).await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 6);
// New item gets added
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$3"));
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[0]);
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$3"));
// The list is reloaded, so it's reset
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Clear);
assert_let!(VectorDiff::Clear = &timeline_updates[1]);
// Then the loaded list items are added
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$1"));
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[2]);
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$1"));
// The edit replaces the original event
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::Set { index, value } => {
assert_eq!(index, 0);
match value.as_event().unwrap().content() {
TimelineItemContent::Message(m) => {
assert_eq!(m.body(), "edited message!")
}
_ => panic!("Should be a message event"),
assert_let!(VectorDiff::Set { index, value } = &timeline_updates[3]);
assert_eq!(*index, 0);
match value.as_event().unwrap().content() {
TimelineItemContent::Message(m) => {
assert_eq!(m.body(), "edited message!")
}
});
_ => panic!("Should be a message event"),
}
// The new pinned event is added
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushBack { value } => {
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$3"));
});
assert_next_matches_with_timeout!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[4]);
let event = value.as_event().unwrap();
assert_eq!(event.event_id().unwrap(), event_id!("$3"));
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[5]);
assert!(value.is_date_divider());
assert_pending!(timeline_stream);
}

View File

@@ -335,9 +335,14 @@ async fn test_clear_with_echoes() {
// Wait for the first message to fail. Don't use time, but listen for the first
// timeline item diff to get back signalling the error.
let _date_divider = timeline_stream.next().await;
let _local_echo = timeline_stream.next().await;
let _local_echo_replaced_with_failure = timeline_stream.next().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
// 2 updates: date divider and local echo.
assert_eq!(timeline_updates.len(), 2);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
// 1 updates: local echo replaced with failure.
assert_eq!(timeline_updates.len(), 1);
}
// Next message will take "forever" to send.
@@ -440,35 +445,36 @@ async fn test_no_duplicate_date_divider() {
// Let the send queue handle the event.
yield_now().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 3);
// Local echoes are available as soon as `timeline.send` returns.
assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().content().as_message().unwrap().body(), "First!");
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[0]);
assert_eq!(value.as_event().unwrap().content().as_message().unwrap().body(), "First!");
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[1]);
assert!(value.is_date_divider());
assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().content().as_message().unwrap().body(), "Second.");
});
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[2]);
assert_eq!(value.as_event().unwrap().content().as_message().unwrap().body(), "Second.");
// Wait 200ms for the first msg, 100ms for the second, 200ms for overhead.
sleep(Duration::from_millis(500)).await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// The first item should be updated first.
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 1, value } => {
let value = value.as_event().unwrap();
assert_eq!(value.content().as_message().unwrap().body(), "First!");
assert_eq!(value.event_id().unwrap(), "$PyHxV5mYzjetBUT3qZq7V95GOzxb02EP");
});
assert_let!(VectorDiff::Set { index: 1, value } = &timeline_updates[0]);
let value = value.as_event().unwrap();
assert_eq!(value.content().as_message().unwrap().body(), "First!");
assert_eq!(value.event_id().unwrap(), "$PyHxV5mYzjetBUT3qZq7V95GOzxb02EP");
// Then the second one.
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 2, value } => {
let value = value.as_event().unwrap();
assert_eq!(value.content().as_message().unwrap().body(), "Second.");
assert_eq!(value.event_id().unwrap(), "$5E2kLK/Sg342bgBU9ceEIEPYpbFaqJpZ");
});
assert_let!(VectorDiff::Set { index: 2, value } = &timeline_updates[1]);
let value = value.as_event().unwrap();
assert_eq!(value.content().as_message().unwrap().body(), "Second.");
assert_eq!(value.event_id().unwrap(), "$5E2kLK/Sg342bgBU9ceEIEPYpbFaqJpZ");
assert_pending!(timeline_stream);
@@ -496,31 +502,32 @@ async fn test_no_duplicate_date_divider() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 6);
// The first message is removed -> [DD Second]
assert_next_matches!(timeline_stream, VectorDiff::Remove { index: 1 });
assert_let!(VectorDiff::Remove { index: 1 } = &timeline_updates[0]);
// The first message is reinserted -> [First DD Second]
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
let value = value.as_event().unwrap();
assert_eq!(value.content().as_message().unwrap().body(), "First!");
assert_eq!(value.event_id().unwrap(), "$PyHxV5mYzjetBUT3qZq7V95GOzxb02EP");
});
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[1]);
let value = value.as_event().unwrap();
assert_eq!(value.content().as_message().unwrap().body(), "First!");
assert_eq!(value.event_id().unwrap(), "$PyHxV5mYzjetBUT3qZq7V95GOzxb02EP");
// The second message is replaced -> [First Second DD]
assert_next_matches!(timeline_stream, VectorDiff::Remove { index: 2 });
assert_next_matches!(timeline_stream, VectorDiff::Insert { index: 1, value } => {
let value = value.as_event().unwrap();
assert_eq!(value.content().as_message().unwrap().body(), "Second.");
assert_eq!(value.event_id().unwrap(), "$5E2kLK/Sg342bgBU9ceEIEPYpbFaqJpZ");
});
assert_let!(VectorDiff::Remove { index: 2 } = &timeline_updates[2]);
assert_let!(VectorDiff::Insert { index: 1, value } = &timeline_updates[3]);
let value = value.as_event().unwrap();
assert_eq!(value.content().as_message().unwrap().body(), "Second.");
assert_eq!(value.event_id().unwrap(), "$5E2kLK/Sg342bgBU9ceEIEPYpbFaqJpZ");
// A new date divider is inserted -> [DD First Second DD]
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[4]);
assert!(value.is_date_divider());
// The useless date divider is removed. -> [DD First Second]
assert_next_matches!(timeline_stream, VectorDiff::Remove { index: 3 });
assert_let!(VectorDiff::Remove { index: 3 } = &timeline_updates[5]);
assert_pending!(timeline_stream);
}

View File

@@ -17,10 +17,7 @@ use std::{sync::Mutex, time::Duration};
use assert_matches2::{assert_let, assert_matches};
use eyeball_im::VectorDiff;
use futures_util::{FutureExt as _, StreamExt as _};
use matrix_sdk::{
assert_next_matches_with_timeout,
test_utils::{logged_in_client_with_server, mocks::MatrixMockServer},
};
use matrix_sdk::test_utils::{logged_in_client_with_server, mocks::MatrixMockServer};
use matrix_sdk_test::{
async_test, event_factory::EventFactory, mocks::mock_encryption_state, JoinedRoomBuilder,
SyncResponseBuilder, ALICE,
@@ -28,6 +25,7 @@ use matrix_sdk_test::{
use matrix_sdk_ui::timeline::{ReactionStatus, RoomExt as _};
use ruma::{event_id, events::room::message::RoomMessageEventContent, room_id};
use serde_json::json;
use stream_assert::assert_pending;
use wiremock::{
matchers::{header, method, path_regex},
Mock, ResponseTemplate,
@@ -64,12 +62,15 @@ async fn test_abort_before_being_sent() {
)
.await;
assert_let!(Some(VectorDiff::PushBack { value: first }) = stream.next().await);
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: first } = &timeline_updates[0]);
let item = first.as_event().unwrap();
let item_id = item.identifier();
assert_eq!(item.content().as_message().unwrap().body(), "hello");
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
// Now we try to add two reactions to this message…
@@ -98,7 +99,10 @@ async fn test_abort_before_being_sent() {
// First toggle (local echo).
{
assert_let!(Some(VectorDiff::Set { index: 1, value: item }) = stream.next().await);
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 1);
@@ -107,14 +111,17 @@ async fn test_abort_before_being_sent() {
ReactionStatus::LocalToRemote(_)
);
assert!(stream.next().now_or_never().is_none());
assert_pending!(stream);
}
// We toggle another reaction at the same time…
timeline.toggle_reaction(&item_id, "🥰").await.unwrap();
{
assert_let!(Some(VectorDiff::Set { index: 1, value: item }) = stream.next().await);
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 2);
@@ -135,7 +142,10 @@ async fn test_abort_before_being_sent() {
timeline.toggle_reaction(&item_id, "👍").await.unwrap();
{
assert_let!(Some(VectorDiff::Set { index: 1, value: item }) = stream.next().await);
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 1);
@@ -152,7 +162,10 @@ async fn test_abort_before_being_sent() {
timeline.toggle_reaction(&item_id, "🥰").await.unwrap();
{
assert_let!(Some(VectorDiff::Set { index: 1, value: item }) = stream.next().await);
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let reactions = item.as_event().unwrap().reactions();
assert!(reactions.is_empty());
@@ -165,7 +178,7 @@ async fn test_abort_before_being_sent() {
// redaction of the reaction. In our case, we're done here.
tokio::time::sleep(Duration::from_millis(300)).await;
assert!(stream.next().now_or_never().is_none());
assert_pending!(stream);
}
#[async_test]
@@ -206,20 +219,24 @@ async fn test_redact_failed() {
let _response = client.sync_once(Default::default()).await.unwrap();
server.reset().await;
let item_id = assert_next_matches_with_timeout!(stream, VectorDiff::PushBack { value: item } => {
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 3);
let item_id = {
assert_let!(VectorDiff::PushBack { value: item } = &timeline_updates[0]);
let item = item.as_event().unwrap();
assert_eq!(item.content().as_message().unwrap().body(), "hello");
assert!(item.reactions().is_empty());
item.identifier()
});
};
assert_next_matches_with_timeout!(stream, VectorDiff::Set { index: 0, value: item } => {
assert_eq!(item.as_event().unwrap().reactions().len(), 1);
});
assert_let!(VectorDiff::Set { index: 0, value: item } = &timeline_updates[1]);
assert_eq!(item.as_event().unwrap().reactions().len(), 1);
assert_next_matches_with_timeout!(stream, VectorDiff::PushFront { value: date_divider } => {
assert!(date_divider.is_date_divider());
});
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[2]);
assert!(date_divider.is_date_divider());
// Now, redact the annotation we previously added.
@@ -235,18 +252,19 @@ async fn test_redact_failed() {
// We toggle the reaction, which fails with an error.
timeline.toggle_reaction(&item_id, "😆").await.unwrap_err();
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// The local echo is removed (assuming the redaction works)…
assert_next_matches_with_timeout!(stream, VectorDiff::Set { index: 1, value: item } => {
assert!(item.as_event().unwrap().reactions().is_empty());
});
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
assert!(item.as_event().unwrap().reactions().is_empty());
// …then added back, after redaction failed.
assert_next_matches_with_timeout!(stream, VectorDiff::Set { index: 1, value: item } => {
assert_eq!(item.as_event().unwrap().reactions().len(), 1);
});
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[1]);
assert_eq!(item.as_event().unwrap().reactions().len(), 1);
tokio::time::sleep(Duration::from_millis(150)).await;
assert!(stream.next().now_or_never().is_none());
assert_pending!(stream);
}
#[async_test]
@@ -300,73 +318,88 @@ async fn test_local_reaction_to_local_echo() {
// Send a local event.
let _ = timeline.send(RoomMessageEventContent::text_plain("lol").into()).await.unwrap();
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 2);
// Receive a local echo.
let item_id = assert_next_matches_with_timeout!(stream, VectorDiff::PushBack { value: item } => {
let item_id = {
assert_let!(VectorDiff::PushBack { value: item } = &timeline_updates[0]);
let item = item.as_event().unwrap();
assert!(item.is_local_echo());
assert_eq!(item.content().as_message().unwrap().body(), "lol");
assert!(item.reactions().is_empty());
item.identifier()
});
};
// Good ol' date divider.
assert_next_matches_with_timeout!(stream, VectorDiff::PushFront { value: date_divider } => {
assert!(date_divider.is_date_divider());
});
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
// Add a reaction before the remote echo comes back.
let key1 = "🤣";
timeline.toggle_reaction(&item_id, key1).await.unwrap();
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// The reaction is added to the local echo.
assert_next_matches_with_timeout!(stream, VectorDiff::Set { index: 1, value: item } => {
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 1);
let reaction_info = reactions.get(key1).unwrap().get(user_id).unwrap();
assert_matches!(&reaction_info.status, ReactionStatus::LocalToLocal(..));
});
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 1);
let reaction_info = reactions.get(key1).unwrap().get(user_id).unwrap();
assert_matches!(&reaction_info.status, ReactionStatus::LocalToLocal(..));
// Add another reaction.
let key2 = "😈";
timeline.toggle_reaction(&item_id, key2).await.unwrap();
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// Also comes as a local echo.
assert_next_matches_with_timeout!(stream, VectorDiff::Set { index: 1, value: item } => {
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 2);
let reaction_info = reactions.get(key2).unwrap().get(user_id).unwrap();
assert_matches!(&reaction_info.status, ReactionStatus::LocalToLocal(..));
});
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 2);
let reaction_info = reactions.get(key2).unwrap().get(user_id).unwrap();
assert_matches!(&reaction_info.status, ReactionStatus::LocalToLocal(..));
// Remove second reaction. It's immediately removed, since it was a local echo,
// and it wasn't being sent.
timeline.toggle_reaction(&item_id, key2).await.unwrap();
assert_next_matches_with_timeout!(stream, VectorDiff::Set { index: 1, value: item } => {
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 1);
let reaction_info = reactions.get(key1).unwrap().get(user_id).unwrap();
assert_matches!(&reaction_info.status, ReactionStatus::LocalToLocal(..));
});
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 1);
let reaction_info = reactions.get(key1).unwrap().get(user_id).unwrap();
assert_matches!(&reaction_info.status, ReactionStatus::LocalToLocal(..));
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// Now, wait for the remote echo for the message itself.
assert_next_matches_with_timeout!(stream, 2000, VectorDiff::Set { index: 1, value: item } => {
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 1);
let reaction_info = reactions.get(key1).unwrap().get(user_id).unwrap();
// TODO(bnjbvr): why not LocalToRemote here?
assert_matches!(&reaction_info.status, ReactionStatus::LocalToLocal(..));
});
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 1);
let reaction_info = reactions.get(key1).unwrap().get(user_id).unwrap();
// TODO(bnjbvr): why not LocalToRemote here?
assert_matches!(&reaction_info.status, ReactionStatus::LocalToLocal(..));
assert_let!(Some(timeline_updates) = stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// And then the remote echo for the reaction itself.
assert_next_matches_with_timeout!(stream, VectorDiff::Set { index: 1, value: item } => {
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 1);
let reaction_info = reactions.get(key1).unwrap().get(user_id).unwrap();
assert_matches!(&reaction_info.status, ReactionStatus::RemoteToRemote(..));
});
assert_let!(VectorDiff::Set { index: 1, value: item } = &timeline_updates[0]);
let reactions = item.as_event().unwrap().reactions();
assert_eq!(reactions.len(), 1);
let reaction_info = reactions.get(key1).unwrap().get(user_id).unwrap();
assert_matches!(&reaction_info.status, ReactionStatus::RemoteToRemote(..));
// And we're done.
tokio::time::sleep(Duration::from_millis(150)).await;
assert!(stream.next().now_or_never().is_none());
assert_pending!(stream);
}

View File

@@ -132,8 +132,11 @@ async fn test_read_receipts_updates() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 5);
// We don't list the read receipt of our own user on events.
assert_let!(Some(VectorDiff::PushBack { value: first_item }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: first_item } = &timeline_updates[0]);
let first_event = first_item.as_event().unwrap();
assert!(first_event.read_receipts().is_empty());
@@ -144,25 +147,23 @@ async fn test_read_receipts_updates() {
assert_pending!(own_receipts_subscriber);
// Implicit read receipt of @alice:localhost.
assert_let!(Some(VectorDiff::PushBack { value: second_item }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: second_item } = &timeline_updates[1]);
let second_event = second_item.as_event().unwrap();
assert_eq!(second_event.read_receipts().len(), 1);
// Read receipt of @alice:localhost is moved to third event.
assert_let!(
Some(VectorDiff::Set { index: 1, value: second_item }) = timeline_stream.next().await
);
assert_let!(VectorDiff::Set { index: 1, value: second_item } = &timeline_updates[2]);
let second_event = second_item.as_event().unwrap();
assert!(second_event.read_receipts().is_empty());
assert_let!(Some(VectorDiff::PushBack { value: third_item }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: third_item } = &timeline_updates[3]);
let third_event = third_item.as_event().unwrap();
assert_eq!(third_event.read_receipts().len(), 1);
let (alice_receipt_event_id, _) = timeline.latest_user_read_receipt(alice).await.unwrap();
assert_eq!(alice_receipt_event_id, third_event_id);
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[4]);
assert!(date_divider.is_date_divider());
// Read receipt on unknown event is ignored.
@@ -254,9 +255,10 @@ async fn test_read_receipts_updates() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(
Some(VectorDiff::Set { index: 3, value: third_item }) = timeline_stream.next().await
);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(VectorDiff::Set { index: 3, value: third_item } = &timeline_updates[0]);
let third_event = third_item.as_event().unwrap();
assert_eq!(third_event.read_receipts().len(), 2);
@@ -291,6 +293,7 @@ async fn test_read_receipts_updates() {
assert_ready!(own_receipts_subscriber);
assert_pending!(own_receipts_subscriber);
assert_pending!(timeline_stream);
}
#[async_test]
@@ -377,8 +380,11 @@ async fn test_read_receipts_updates_on_filtered_events() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 4);
// We don't list the read receipt of our own user on events.
assert_let!(Some(VectorDiff::PushBack { value: item_a }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: item_a } = &timeline_updates[0]);
let event_a = item_a.as_event().unwrap();
assert!(event_a.read_receipts().is_empty());
@@ -389,7 +395,7 @@ async fn test_read_receipts_updates_on_filtered_events() {
assert_eq!(own_receipt_timeline_event, event_a_id);
// Implicit read receipt of @bob:localhost.
assert_let!(Some(VectorDiff::Set { index: 0, value: item_a }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 0, value: item_a } = &timeline_updates[1]);
let event_a = item_a.as_event().unwrap();
assert_eq!(event_a.read_receipts().len(), 1);
@@ -402,7 +408,7 @@ async fn test_read_receipts_updates_on_filtered_events() {
assert_eq!(bob_receipt_timeline_event, event_a.event_id().unwrap());
// Implicit read receipt of @alice:localhost.
assert_let!(Some(VectorDiff::PushBack { value: item_c }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: item_c } = &timeline_updates[2]);
let event_c = item_c.as_event().unwrap();
assert_eq!(event_c.read_receipts().len(), 1);
@@ -412,7 +418,7 @@ async fn test_read_receipts_updates_on_filtered_events() {
timeline.latest_user_read_receipt_timeline_event_id(*ALICE).await.unwrap();
assert_eq!(alice_receipt_timeline_event, event_c_id);
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[3]);
assert!(date_divider.is_date_divider());
// Read receipt on filtered event.
@@ -463,11 +469,14 @@ async fn test_read_receipts_updates_on_filtered_events() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::Set { index: 1, value: item_a }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::Set { index: 1, value: item_a } = &timeline_updates[0]);
let event_a = item_a.as_event().unwrap();
assert!(event_a.read_receipts().is_empty());
assert_let!(Some(VectorDiff::Set { index: 2, value: item_c }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 2, value: item_c } = &timeline_updates[1]);
let event_c = item_c.as_event().unwrap();
assert_eq!(event_c.read_receipts().len(), 2);
@@ -505,6 +514,7 @@ async fn test_read_receipts_updates_on_filtered_events() {
let own_receipt_timeline_event =
timeline.latest_user_read_receipt_timeline_event_id(own_user_id).await.unwrap();
assert_eq!(own_receipt_timeline_event, event_c_id);
assert_pending!(timeline_stream);
}
#[async_test]

View File

@@ -24,7 +24,7 @@ use ruma::{
owned_event_id, room_id,
};
use serde_json::json;
use stream_assert::assert_next_matches;
use stream_assert::{assert_next_matches, assert_pending};
use tokio::task::yield_now;
use wiremock::{
matchers::{header, method, path_regex},
@@ -71,17 +71,20 @@ async fn test_in_reply_to_details() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: first }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 3);
assert_let!(VectorDiff::PushBack { value: first } = &timeline_updates[0]);
assert_matches!(first.as_event().unwrap().content(), TimelineItemContent::Message(_));
assert_let!(Some(VectorDiff::PushBack { value: second }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushBack { value: second } = &timeline_updates[1]);
let second_event = second.as_event().unwrap();
assert_let!(TimelineItemContent::Message(message) = second_event.content());
let in_reply_to = message.in_reply_to().unwrap();
assert_eq!(in_reply_to.event_id, event_id!("$event1"));
assert_matches!(in_reply_to.event, TimelineDetails::Ready(_));
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[2]);
assert!(date_divider.is_date_divider());
// Add an reply to an unknown event to the timeline
@@ -95,11 +98,12 @@ async fn test_in_reply_to_details() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(
Some(VectorDiff::Set { value: _read_receipt_update, .. }) = timeline_stream.next().await
);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(Some(VectorDiff::PushBack { value: third }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { value: _read_receipt_update, .. } = &timeline_updates[0]);
assert_let!(VectorDiff::PushBack { value: third } = &timeline_updates[1]);
let third_event = third.as_event().unwrap();
assert_let!(TimelineItemContent::Message(message) = third_event.content());
let in_reply_to = message.in_reply_to().unwrap();
@@ -124,12 +128,15 @@ async fn test_in_reply_to_details() {
timeline.fetch_details_for_event(third_event.event_id().unwrap()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::Set { index: 3, value: third }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::Set { index: 3, value: third } = &timeline_updates[0]);
assert_let!(TimelineItemContent::Message(message) = third.as_event().unwrap().content());
assert_matches!(message.in_reply_to().unwrap().event, TimelineDetails::Pending);
assert_eq!(*third.unique_id(), unique_id);
assert_let!(Some(VectorDiff::Set { index: 3, value: third }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 3, value: third } = &timeline_updates[1]);
assert_let!(TimelineItemContent::Message(message) = third.as_event().unwrap().content());
assert_matches!(message.in_reply_to().unwrap().event, TimelineDetails::Error(_));
assert_eq!(*third.unique_id(), unique_id);
@@ -153,15 +160,20 @@ async fn test_in_reply_to_details() {
timeline.fetch_details_for_event(third_event.event_id().unwrap()).await.unwrap();
assert_let!(Some(VectorDiff::Set { index: 3, value: third }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::Set { index: 3, value: third } = &timeline_updates[0]);
assert_let!(TimelineItemContent::Message(message) = third.as_event().unwrap().content());
assert_matches!(message.in_reply_to().unwrap().event, TimelineDetails::Pending);
assert_eq!(*third.unique_id(), unique_id);
assert_let!(Some(VectorDiff::Set { index: 3, value: third }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 3, value: third } = &timeline_updates[1]);
assert_let!(TimelineItemContent::Message(message) = third.as_event().unwrap().content());
assert_matches!(message.in_reply_to().unwrap().event, TimelineDetails::Ready(_));
assert_eq!(*third.unique_id(), unique_id);
assert_pending!(timeline_stream);
}
#[async_test]

View File

@@ -18,7 +18,7 @@ use anyhow::{Context as _, Result};
use assert_matches::assert_matches;
use assert_matches2::assert_let;
use eyeball_im::{Vector, VectorDiff};
use futures_util::{pin_mut, FutureExt, Stream, StreamExt};
use futures_util::{pin_mut, Stream, StreamExt};
use matrix_sdk::{
test_utils::logged_in_client_with_server, Client, SlidingSync, SlidingSyncList,
SlidingSyncListBuilder, SlidingSyncMode, UpdateSummary,
@@ -71,17 +71,17 @@ pub(crate) use timeline_event;
macro_rules! assert_timeline_stream {
// `--- date divider ---`
( @_ [ $stream:ident ] [ --- date divider --- ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
( @_ [ $iterator:ident ] [ --- date divider --- ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
assert_timeline_stream!(
@_
[ $stream ]
[ $iterator ]
[ $( $rest )* ]
[
$( $accumulator )*
{
assert_matches!(
$stream.next().now_or_never(),
Some(Some(VectorDiff::PushBack { value })) => {
$iterator .next(),
Some(VectorDiff::PushBack { value }) => {
assert_matches!(
**value,
TimelineItemKind::Virtual(
@@ -96,17 +96,17 @@ macro_rules! assert_timeline_stream {
};
// `append "$event_id"`
( @_ [ $stream:ident ] [ append $event_id:literal ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
( @_ [ $iterator:ident ] [ append $event_id:literal ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
assert_timeline_stream!(
@_
[ $stream ]
[ $iterator ]
[ $( $rest )* ]
[
$( $accumulator )*
{
assert_matches!(
$stream.next().now_or_never(),
Some(Some(VectorDiff::PushBack { value })) => {
$iterator .next(),
Some(VectorDiff::PushBack { value }) => {
assert_matches!(
&**value,
TimelineItemKind::Event(event_timeline_item) => {
@@ -121,17 +121,17 @@ macro_rules! assert_timeline_stream {
};
// `prepend --- date divider ---`
( @_ [ $stream:ident ] [ prepend --- date divider --- ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
( @_ [ $iterator:ident ] [ prepend --- date divider --- ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
assert_timeline_stream!(
@_
[ $stream ]
[ $iterator ]
[ $( $rest )* ]
[
$( $accumulator )*
{
assert_matches!(
$stream.next().now_or_never(),
Some(Some(VectorDiff::PushFront { value })) => {
$iterator .next(),
Some(VectorDiff::PushFront { value }) => {
assert_matches!(
&**value,
TimelineItemKind::Virtual(VirtualTimelineItem::DateDivider(_)) => {}
@@ -145,17 +145,17 @@ macro_rules! assert_timeline_stream {
// `insert [$nth] "$event_id"`
( @_ [ $stream:ident ] [ insert [$index:literal] $event_id:literal ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
( @_ [ $iterator:ident ] [ insert [$index:literal] $event_id:literal ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
assert_timeline_stream!(
@_
[ $stream ]
[ $iterator ]
[ $( $rest )* ]
[
$( $accumulator )*
{
assert_matches!(
$stream.next().now_or_never(),
Some(Some(VectorDiff::Insert { index: $index, value })) => {
$iterator .next(),
Some(VectorDiff::Insert { index: $index, value }) => {
assert_matches!(
&**value,
TimelineItemKind::Event(event_timeline_item) => {
@@ -170,17 +170,17 @@ macro_rules! assert_timeline_stream {
};
// `update [$nth] "$event_id"`
( @_ [ $stream:ident ] [ update [$index:literal] $event_id:literal ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
( @_ [ $iterator:ident ] [ update [$index:literal] $event_id:literal ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
assert_timeline_stream!(
@_
[ $stream ]
[ $iterator ]
[ $( $rest )* ]
[
$( $accumulator )*
{
assert_matches!(
$stream.next().now_or_never(),
Some(Some(VectorDiff::Set { index: $index, value })) => {
$iterator .next(),
Some(VectorDiff::Set { index: $index, value }) => {
assert_matches!(
&**value,
TimelineItemKind::Event(event_timeline_item) => {
@@ -195,17 +195,17 @@ macro_rules! assert_timeline_stream {
};
// `remove [$nth]`
( @_ [ $stream:ident ] [ remove [$index:literal] ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
( @_ [ $iterator:ident ] [ remove [$index:literal] ; $( $rest:tt )* ] [ $( $accumulator:tt )* ] ) => {
assert_timeline_stream!(
@_
[ $stream ]
[ $iterator ]
[ $( $rest )* ]
[
$( $accumulator )*
{
assert_matches!(
$stream.next().now_or_never(),
Some(Some(VectorDiff::Remove { index: $index }))
$iterator .next(),
Some(VectorDiff::Remove { index: $index })
);
}
]
@@ -217,7 +217,13 @@ macro_rules! assert_timeline_stream {
};
( [ $stream:ident ] $( $all:tt )* ) => {
assert_timeline_stream!( @_ [ $stream ] [ $( $all )* ] [] )
let mut timeline_updates = $stream
.next()
.await
.expect("Failed to poll the stream")
.into_iter();
assert_timeline_stream!( @_ [ timeline_updates ] [ $( $all )* ] [] )
};
}
@@ -273,7 +279,7 @@ async fn timeline_test_helper(
client: &Client,
sliding_sync: &SlidingSync,
room_id: &RoomId,
) -> Result<(Vector<Arc<TimelineItem>>, impl Stream<Item = VectorDiff<Arc<TimelineItem>>>)> {
) -> Result<(Vector<Arc<TimelineItem>>, impl Stream<Item = Vec<VectorDiff<Arc<TimelineItem>>>>)> {
let sliding_sync_room = sliding_sync.get_room(room_id).await.unwrap();
let room_id = sliding_sync_room.room_id();
@@ -498,11 +504,12 @@ async fn test_timeline_read_receipts_are_updated_live() -> Result<()> {
}
};
assert_let!(
Some(Some(VectorDiff::Set { index: 2, value })) = timeline_stream.next().now_or_never()
);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
assert_let!(TimelineItemKind::Event(event_timeline_item) = &**value);
assert_let!(VectorDiff::Set { index: 2, value } = &timeline_updates[0]);
assert_let!(TimelineItemKind::Event(event_timeline_item) = &***value);
assert_eq!(event_timeline_item.event_id().unwrap().as_str(), "$x2:bar.org");
let read_receipts = event_timeline_item.read_receipts();

View File

@@ -17,7 +17,7 @@ use std::time::Duration;
use assert_matches::assert_matches;
use assert_matches2::assert_let;
use eyeball_im::VectorDiff;
use futures_util::{pin_mut, StreamExt};
use futures_util::StreamExt;
use matrix_sdk::{config::SyncSettings, test_utils::logged_in_client_with_server};
use matrix_sdk_test::{
async_test, event_factory::EventFactory, mocks::mock_encryption_state, sync_timeline_event,
@@ -30,7 +30,7 @@ use ruma::{
room_id, user_id,
};
use serde_json::json;
use stream_assert::{assert_next_matches, assert_pending};
use stream_assert::assert_pending;
use crate::mock_sync;
@@ -52,7 +52,7 @@ async fn test_batched() {
let room = client.get_room(room_id).unwrap();
let timeline = room.timeline_builder().event_filter(|_, _| true).build().await.unwrap();
let (_, mut timeline_stream) = timeline.subscribe_batched().await;
let (_, mut timeline_stream) = timeline.subscribe().await;
let hdl = tokio::spawn(async move {
let next_batch = timeline_stream.next().await.unwrap();
@@ -113,7 +113,10 @@ async fn test_event_filter() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: first }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 2);
assert_let!(VectorDiff::PushBack { value: first } = &timeline_updates[0]);
let first_event = first.as_event().unwrap();
assert_eq!(first_event.event_id(), Some(first_event_id));
assert_eq!(first_event.read_receipts().len(), 1, "implicit read receipt");
@@ -122,7 +125,7 @@ async fn test_event_filter() {
assert_matches!(msg.msgtype(), MessageType::Text(_));
assert!(!msg.is_edited());
assert_let!(Some(VectorDiff::PushFront { value: date_divider }) = timeline_stream.next().await);
assert_let!(VectorDiff::PushFront { value: date_divider } = &timeline_updates[1]);
assert!(date_divider.is_date_divider());
let second_event_id = event_id!("$Ga6Y2l0gKY");
@@ -165,18 +168,21 @@ async fn test_event_filter() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(VectorDiff::PushBack { value: second }) = timeline_stream.next().await);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 3);
assert_let!(VectorDiff::PushBack { value: second } = &timeline_updates[0]);
let second_event = second.as_event().unwrap();
assert_eq!(second_event.event_id(), Some(second_event_id));
assert_eq!(second_event.read_receipts().len(), 1, "implicit read receipt");
// The implicit read receipt of Alice is moving from Alice's message...
assert_let!(Some(VectorDiff::Set { index: 1, value: first }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 1, value: first } = &timeline_updates[1]);
assert_eq!(first.as_event().unwrap().read_receipts().len(), 0, "no more implicit read receipt");
// … to Alice's edit. But since this item isn't visible, it's lost in the weeds!
// The edit is applied to the first event.
assert_let!(Some(VectorDiff::Set { index: 1, value: first }) = timeline_stream.next().await);
assert_let!(VectorDiff::Set { index: 1, value: first } = &timeline_updates[2]);
let first_event = first.as_event().unwrap();
assert!(first_event.read_receipts().is_empty());
assert_matches!(first_event.latest_edit_json(), Some(_));
@@ -184,6 +190,8 @@ async fn test_event_filter() {
assert_let!(MessageType::Text(text) = msg.msgtype());
assert_eq!(text.body, "hi");
assert!(msg.is_edited());
assert_pending!(timeline_stream);
}
#[async_test]
@@ -203,8 +211,7 @@ async fn test_timeline_is_reset_when_a_user_is_ignored_or_unignored() {
let room = client.get_room(room_id).unwrap();
let timeline = room.timeline_builder().build().await.unwrap();
let (_, timeline_stream) = timeline.subscribe().await;
pin_mut!(timeline_stream);
let (_, mut timeline_stream) = timeline.subscribe().await;
let alice = user_id!("@alice:example.org");
let bob = user_id!("@bob:example.org");
@@ -228,21 +235,24 @@ async fn test_timeline_is_reset_when_a_user_is_ignored_or_unignored() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(first_event_id));
});
assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(second_event_id));
});
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 0, value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(first_event_id));
});
assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(third_event_id));
});
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 5);
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[0]);
assert_eq!(value.as_event().unwrap().event_id(), Some(first_event_id));
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[1]);
assert_eq!(value.as_event().unwrap().event_id(), Some(second_event_id));
assert_let!(VectorDiff::Set { index: 0, value } = &timeline_updates[2]);
assert_eq!(value.as_event().unwrap().event_id(), Some(first_event_id));
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[3]);
assert_eq!(value.as_event().unwrap().event_id(), Some(third_event_id));
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[4]);
assert!(value.is_date_divider());
assert_pending!(timeline_stream);
sync_builder.add_global_account_data_event(GlobalAccountDataTestEvent::Custom(json!({
@@ -258,9 +268,11 @@ async fn test_timeline_is_reset_when_a_user_is_ignored_or_unignored() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 1);
// The timeline has been emptied.
assert_next_matches!(timeline_stream, VectorDiff::Clear);
assert_pending!(timeline_stream);
assert_let!(VectorDiff::Clear = &timeline_updates[0]);
let fourth_event_id = event_id!("$YTQwYl2pl4");
let fifth_event_id = event_id!("$YTQwYl2pl5");
@@ -278,21 +290,25 @@ async fn test_timeline_is_reset_when_a_user_is_ignored_or_unignored() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 5);
// Timeline receives events as before.
assert_next_matches!(timeline_stream, VectorDiff::Clear); // TODO: Remove `RoomEventCacheUpdate::Clear` as it creates double
// `VectorDiff::Clear`.
assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(fourth_event_id));
});
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 0, value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(fourth_event_id));
});
assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(fifth_event_id));
});
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::Clear = &timeline_updates[0]); // TODO: Remove `RoomEventCacheUpdate::Clear` as it creates double
// `VectorDiff::Clear`.
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[1]);
assert_eq!(value.as_event().unwrap().event_id(), Some(fourth_event_id));
assert_let!(VectorDiff::Set { index: 0, value } = &timeline_updates[2]);
assert_eq!(value.as_event().unwrap().event_id(), Some(fourth_event_id));
assert_let!(VectorDiff::PushBack { value } = &timeline_updates[3]);
assert_eq!(value.as_event().unwrap().event_id(), Some(fifth_event_id));
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[4]);
assert!(value.is_date_divider());
assert_pending!(timeline_stream);
}
@@ -313,8 +329,7 @@ async fn test_profile_updates() {
let room = client.get_room(room_id).unwrap();
let timeline = room.timeline_builder().build().await.unwrap();
let (_, timeline_stream) = timeline.subscribe().await;
pin_mut!(timeline_stream);
let (_, mut timeline_stream) = timeline.subscribe().await;
let alice = "@alice:example.org";
let bob = "@bob:example.org";
@@ -351,19 +366,21 @@ async fn test_profile_updates() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
let item_1 = assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => value);
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 3);
assert_let!(VectorDiff::PushBack { value: item_1 } = &timeline_updates[0]);
let event_1_item = item_1.as_event().unwrap();
assert_eq!(event_1_item.event_id(), Some(event_1_id));
assert_matches!(event_1_item.sender_profile(), TimelineDetails::Unavailable);
let item_2 = assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => value);
assert_let!(VectorDiff::PushBack { value: item_2 } = &timeline_updates[1]);
let event_2_item = item_2.as_event().unwrap();
assert_eq!(event_2_item.event_id(), Some(event_2_id));
assert_matches!(event_2_item.sender_profile(), TimelineDetails::Unavailable);
assert_next_matches!(timeline_stream, VectorDiff::PushFront { value } => {
assert!(value.is_date_divider());
});
assert_let!(VectorDiff::PushFront { value } = &timeline_updates[2]);
assert!(value.is_date_divider());
assert_pending!(timeline_stream);
@@ -412,13 +429,15 @@ async fn test_profile_updates() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 8);
// Read receipt change.
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 2, value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(event_2_id));
});
assert_let!(VectorDiff::Set { index: 2, value } = &timeline_updates[0]);
assert_eq!(value.as_event().unwrap().event_id(), Some(event_2_id));
// The events are added.
let item_3 = assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => value);
assert_let!(VectorDiff::PushBack { value: item_3 } = &timeline_updates[1]);
let event_3_item = item_3.as_event().unwrap();
assert_eq!(event_3_item.event_id(), Some(event_3_id));
let profile =
@@ -427,11 +446,10 @@ async fn test_profile_updates() {
assert!(!profile.display_name_ambiguous);
// Read receipt change.
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 1, value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(event_1_id));
});
assert_let!(VectorDiff::Set { index: 1, value } = &timeline_updates[2]);
assert_eq!(value.as_event().unwrap().event_id(), Some(event_1_id));
let item_4 = assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => value);
assert_let!(VectorDiff::PushBack { value: item_4 } = &timeline_updates[3]);
let event_4_item = item_4.as_event().unwrap();
assert_eq!(event_4_item.event_id(), Some(event_4_id));
let profile =
@@ -440,11 +458,10 @@ async fn test_profile_updates() {
assert!(!profile.display_name_ambiguous);
// Read receipt change.
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 4, value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(event_4_id));
});
assert_let!(VectorDiff::Set { index: 4, value } = &timeline_updates[4]);
assert_eq!(value.as_event().unwrap().event_id(), Some(event_4_id));
let item_5 = assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => value);
assert_let!(VectorDiff::PushBack { value: item_5 } = &timeline_updates[5]);
let event_5_item = item_5.as_event().unwrap();
assert_eq!(event_5_item.event_id(), Some(event_5_id));
let profile =
@@ -453,8 +470,7 @@ async fn test_profile_updates() {
assert!(!profile.display_name_ambiguous);
// The profiles changed.
let item_1 =
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 1, value } => value);
assert_let!(VectorDiff::Set { index: 1, value: item_1 } = &timeline_updates[6]);
let event_1_item = item_1.as_event().unwrap();
assert_eq!(event_1_item.event_id(), Some(event_1_id));
let profile =
@@ -462,8 +478,7 @@ async fn test_profile_updates() {
assert_eq!(profile.display_name.as_deref(), Some("Alice"));
assert!(!profile.display_name_ambiguous);
let item_2 =
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 2, value } => value);
assert_let!(VectorDiff::Set { index: 2, value: item_2 } = &timeline_updates[7]);
let event_2_item = item_2.as_event().unwrap();
assert_eq!(event_2_item.event_id(), Some(event_2_id));
let profile =
@@ -471,8 +486,6 @@ async fn test_profile_updates() {
assert_eq!(profile.display_name.as_deref(), Some("Member"));
assert!(!profile.display_name_ambiguous);
assert_pending!(timeline_stream);
// Change name to be ambiguous.
let event_6_id = event_id!("$YTQwYl2pl6");
@@ -494,13 +507,15 @@ async fn test_profile_updates() {
let _response = client.sync_once(sync_settings.clone()).await.unwrap();
server.reset().await;
assert_let!(Some(timeline_updates) = timeline_stream.next().await);
assert_eq!(timeline_updates.len(), 7);
// Read receipt change.
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 5, value } => {
assert_eq!(value.as_event().unwrap().event_id(), Some(event_5_id));
});
assert_let!(VectorDiff::Set { index: 5, value } = &timeline_updates[0]);
assert_eq!(value.as_event().unwrap().event_id(), Some(event_5_id));
// The event is added.
let item_6 = assert_next_matches!(timeline_stream, VectorDiff::PushBack { value } => value);
assert_let!(VectorDiff::PushBack { value: item_6 } = &timeline_updates[1]);
let event_6_item = item_6.as_event().unwrap();
assert_eq!(event_6_item.event_id(), Some(event_6_id));
let profile =
@@ -509,8 +524,7 @@ async fn test_profile_updates() {
assert!(profile.display_name_ambiguous);
// The profiles changed.
let item_1 =
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 1, value } => value);
assert_let!(VectorDiff::Set { index: 1, value: item_1 } = &timeline_updates[2]);
let event_1_item = item_1.as_event().unwrap();
assert_eq!(event_1_item.event_id(), Some(event_1_id));
let profile =
@@ -518,8 +532,7 @@ async fn test_profile_updates() {
assert_eq!(profile.display_name.as_deref(), Some("Member"));
assert!(profile.display_name_ambiguous);
let item_2 =
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 2, value } => value);
assert_let!(VectorDiff::Set { index: 2, value: item_2 } = &timeline_updates[3]);
let event_2_item = item_2.as_event().unwrap();
assert_eq!(event_2_item.event_id(), Some(event_2_id));
let profile =
@@ -527,8 +540,7 @@ async fn test_profile_updates() {
assert_eq!(profile.display_name.as_deref(), Some("Member"));
assert!(profile.display_name_ambiguous);
let item_3 =
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 3, value } => value);
assert_let!(VectorDiff::Set { index: 3, value: item_3 } = &timeline_updates[4]);
let event_3_item = item_3.as_event().unwrap();
assert_eq!(event_3_item.event_id(), Some(event_3_id));
let profile =
@@ -536,8 +548,7 @@ async fn test_profile_updates() {
assert_eq!(profile.display_name.as_deref(), Some("Member"));
assert!(profile.display_name_ambiguous);
let item_4 =
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 4, value } => value);
assert_let!(VectorDiff::Set { index: 4, value: item_4 } = &timeline_updates[5]);
let event_4_item = item_4.as_event().unwrap();
assert_eq!(event_4_item.event_id(), Some(event_4_id));
let profile =
@@ -545,8 +556,7 @@ async fn test_profile_updates() {
assert_eq!(profile.display_name.as_deref(), Some("Member"));
assert!(profile.display_name_ambiguous);
let item_5 =
assert_next_matches!(timeline_stream, VectorDiff::Set { index: 5, value } => value);
assert_let!(VectorDiff::Set { index: 5, value: item_5 } = &timeline_updates[6]);
let event_5_item = item_5.as_event().unwrap();
assert_eq!(event_5_item.event_id(), Some(event_5_id));
let profile =

View File

@@ -33,6 +33,11 @@ All notable changes to this project will be documented in this file.
### Refactor
- [**breaking**]: The reexported types `SyncTimelineEvent` and `TimelineEvent` have been fused into a single type
`TimelineEvent`, and its field `push_actions` has been made `Option`al (it is set to `None` when
we couldn't compute the push actions, because we lacked some information).
([#4568](https://github.com/matrix-org/matrix-rust-sdk/pull/4568))
- [**breaking**] Move the optional `RequestConfig` argument of the
`Client::send()` method to the `with_request_config()` builder method. You
should call `Client::send(request).with_request_config(request_config).await`
@@ -51,6 +56,12 @@ All notable changes to this project will be documented in this file.
- [**breaking**] `Recovery::are_we_the_last_man_standing()` has been renamed to `is_last_device()`.
([#4522](https://github.com/matrix-org/matrix-rust-sdk/pull/4522))
- [**breaking**] The `matrix_auth` module is now at `authentication::matrix`.
([#4575](https://github.com/matrix-org/matrix-rust-sdk/pull/4575))
- [**breaking**] The `oidc` module is now at `authentication::oidc`.
([#4575](https://github.com/matrix-org/matrix-rust-sdk/pull/4575))
## [0.9.0] - 2024-12-18
### Bug Fixes

View File

@@ -760,7 +760,7 @@ impl MatrixAuth {
/// ```no_run
/// use futures_util::StreamExt;
/// use matrix_sdk::Client;
/// # fn persist_session(_: &matrix_sdk::matrix_auth::MatrixSession) {};
/// # fn persist_session(_: &matrix_sdk::authentication::matrix::MatrixSession) {};
/// # async {
/// let homeserver = "http://example.com";
/// let client = Client::builder()
@@ -835,7 +835,7 @@ impl MatrixAuth {
///
/// ```no_run
/// use matrix_sdk::{
/// matrix_auth::{MatrixSession, MatrixSessionTokens},
/// authentication::matrix::{MatrixSession, MatrixSessionTokens},
/// ruma::{device_id, user_id},
/// Client, SessionMeta,
/// };
@@ -881,7 +881,7 @@ impl MatrixAuth {
/// ```
///
/// [`login`]: #method.login
/// [`LoginBuilder::send()`]: crate::matrix_auth::LoginBuilder::send
/// [`LoginBuilder::send()`]: crate::authentication::matrix::LoginBuilder::send
#[instrument(skip_all)]
pub async fn restore_session(&self, session: MatrixSession) -> Result<()> {
debug!("Restoring Matrix auth session");
@@ -959,7 +959,7 @@ impl MatrixAuth {
///
/// ```
/// use matrix_sdk::{
/// matrix_auth::{MatrixSession, MatrixSessionTokens},
/// authentication::matrix::{MatrixSession, MatrixSessionTokens},
/// SessionMeta,
/// };
/// use ruma::{device_id, user_id};

View File

@@ -14,21 +14,20 @@
//! Types and functions related to authentication in Matrix.
// TODO:(pixlwave) Move AuthenticationService from the FFI into this module.
// TODO:(poljar) Move the oidc and matrix_auth modules under this module.
use std::sync::Arc;
use as_variant::as_variant;
use matrix_sdk_base::SessionMeta;
use tokio::sync::{broadcast, Mutex, OnceCell};
pub mod matrix;
#[cfg(feature = "experimental-oidc")]
use crate::oidc::{self, Oidc, OidcAuthData, OidcCtx};
use crate::{
matrix_auth::{self, MatrixAuth, MatrixAuthData},
Client, RefreshTokenError, SessionChange,
};
pub mod oidc;
use self::matrix::{MatrixAuth, MatrixAuthData};
#[cfg(feature = "experimental-oidc")]
use self::oidc::{Oidc, OidcAuthData, OidcCtx};
use crate::{Client, RefreshTokenError, SessionChange};
#[cfg(all(feature = "experimental-oidc", feature = "e2e-encryption", not(target_arch = "wasm32")))]
pub mod qrcode;
@@ -36,8 +35,8 @@ pub mod qrcode;
/// Session tokens, for any kind of authentication.
#[allow(missing_debug_implementations, clippy::large_enum_variant)]
pub enum SessionTokens {
/// Tokens for a [`matrix_auth`] session.
Matrix(matrix_auth::MatrixSessionTokens),
/// Tokens for a [`matrix`] session.
Matrix(matrix::MatrixSessionTokens),
#[cfg(feature = "experimental-oidc")]
/// Tokens for an [`oidc`] session.
Oidc(oidc::OidcSessionTokens),
@@ -103,7 +102,7 @@ pub enum AuthApi {
#[non_exhaustive]
pub enum AuthSession {
/// A session using the native Matrix authentication API.
Matrix(matrix_auth::MatrixSession),
Matrix(matrix::MatrixSession),
/// A session using the OpenID Connect API.
#[cfg(feature = "experimental-oidc")]
@@ -148,8 +147,8 @@ impl AuthSession {
}
}
impl From<matrix_auth::MatrixSession> for AuthSession {
fn from(session: matrix_auth::MatrixSession) -> Self {
impl From<matrix::MatrixSession> for AuthSession {
fn from(session: matrix::MatrixSession) -> Self {
Self::Matrix(session)
}
}

View File

@@ -37,7 +37,7 @@ use mas_oidc_client::{
use url::Url;
use super::{OidcBackend, OidcError, RefreshedSessionTokens};
use crate::oidc::{AuthorizationCode, OidcSessionTokens};
use crate::authentication::oidc::{AuthorizationCode, OidcSessionTokens};
pub(crate) const ISSUER_URL: &str = "https://oidc.example.com/issuer";
pub(crate) const AUTHORIZATION_URL: &str = "https://oidc.example.com/authorization";

View File

@@ -42,7 +42,7 @@ use url::Url;
use super::{OidcBackend, OidcError, RefreshedSessionTokens};
use crate::{
oidc::{rng, AuthorizationCode, OidcSessionTokens},
authentication::oidc::{rng, AuthorizationCode, OidcSessionTokens},
Client,
};

View File

@@ -264,7 +264,7 @@ mod tests {
use super::compute_session_hash;
use crate::{
oidc::{
authentication::oidc::{
backend::mock::{MockImpl, ISSUER_URL},
cross_process::SessionHash,
tests,

View File

@@ -217,11 +217,11 @@ pub use self::{
use self::{
backend::{server::OidcServer, OidcBackend},
cross_process::{CrossProcessRefreshLockGuard, CrossProcessRefreshManager},
registrations::{ClientId, OidcRegistrations},
};
use crate::{
authentication::{qrcode::LoginWithQrCode, AuthData},
client::SessionChange,
oidc::registrations::{ClientId, OidcRegistrations},
Client, HttpError, RefreshTokenError, Result,
};

View File

@@ -26,14 +26,11 @@ use wiremock::{
use super::{
backend::mock::{MockImpl, AUTHORIZATION_URL, ISSUER_URL},
registrations::{ClientId, OidcRegistrations},
AuthorizationCode, AuthorizationError, AuthorizationResponse, Oidc, OidcError, OidcSession,
OidcSessionTokens, RedirectUriQueryParseError, UserSession,
};
use crate::{
oidc::registrations::{ClientId, OidcRegistrations},
test_utils::test_client_builder,
Client, Error,
};
use crate::{test_utils::test_client_builder, Client, Error};
const CLIENT_ID: &str = "test_client_id";
const REDIRECT_URI_STRING: &str = "http://matrix.example.com/oidc/callback";

View File

@@ -34,7 +34,7 @@ use super::{
SecureChannelError,
};
#[cfg(doc)]
use crate::oidc::Oidc;
use crate::authentication::oidc::Oidc;
use crate::{
authentication::qrcode::{
messages::QrAuthMessage, secure_channel::EstablishedSecureChannel, QRCodeLoginError,

View File

@@ -33,8 +33,8 @@ use url::Url;
pub use vodozemac::ecies::{Error as EciesError, MessageDecodeError};
#[cfg(doc)]
use crate::oidc::Oidc;
use crate::{oidc::CrossProcessRefreshLockError, HttpError};
use crate::authentication::oidc::Oidc;
use crate::{authentication::oidc::CrossProcessRefreshLockError, HttpError};
mod login;
mod messages;
@@ -113,7 +113,7 @@ pub enum DeviceAuhorizationOidcError {
/// A generic OIDC error happened while we were attempting to register the
/// device with the OIDC provider.
#[error(transparent)]
Oidc(#[from] crate::oidc::OidcError),
Oidc(#[from] crate::authentication::oidc::OidcError),
/// The issuer URL failed to be parsed.
#[error(transparent)]

View File

@@ -32,7 +32,7 @@ use openidconnect::{
use vodozemac::Curve25519PublicKey;
use super::DeviceAuhorizationOidcError;
use crate::{http_client::HttpClient, oidc::OidcSessionTokens};
use crate::{authentication::oidc::OidcSessionTokens, http_client::HttpClient};
// Obtain the device_authorization_url from the OIDC metadata provider.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]

View File

@@ -28,14 +28,14 @@ use tokio::sync::{broadcast, Mutex, OnceCell};
use tracing::{debug, field::debug, instrument, Span};
use super::{Client, ClientInner};
#[cfg(feature = "experimental-oidc")]
use crate::authentication::oidc::OidcCtx;
#[cfg(feature = "e2e-encryption")]
use crate::crypto::{CollectStrategy, TrustRequirement};
#[cfg(feature = "e2e-encryption")]
use crate::encryption::EncryptionSettings;
#[cfg(not(target_arch = "wasm32"))]
use crate::http_client::HttpSettings;
#[cfg(feature = "experimental-oidc")]
use crate::oidc::OidcCtx;
use crate::{
authentication::AuthCtx, client::ClientServerCapabilities, config::RequestConfig,
error::RumaApiError, http_client::HttpClient, send_queue::SendQueueData,

View File

@@ -35,7 +35,7 @@ use tracing::trace;
use super::super::Client;
#[cfg(feature = "experimental-oidc")]
use crate::oidc::OidcError;
use crate::authentication::oidc::OidcError;
use crate::{
config::RequestConfig,
error::{HttpError, HttpResult},

View File

@@ -74,9 +74,11 @@ use url::Url;
use self::futures::SendRequest;
#[cfg(feature = "experimental-oidc")]
use crate::oidc::Oidc;
use crate::authentication::oidc::Oidc;
use crate::{
authentication::{AuthCtx, AuthData, ReloadSessionCallback, SaveSessionCallback},
authentication::{
matrix::MatrixAuth, AuthCtx, AuthData, ReloadSessionCallback, SaveSessionCallback,
},
config::RequestConfig,
deduplicating_handler::DeduplicatingHandler,
error::{HttpError, HttpResult},
@@ -86,7 +88,6 @@ use crate::{
EventHandlerStore, ObservableEventHandler, SyncEvent,
},
http_client::HttpClient,
matrix_auth::MatrixAuth,
notification_settings::NotificationSettings,
room_preview::RoomPreview,
send_queue::SendQueueData,

Some files were not shown because too many files have changed in this diff Show More