Merge branch 'main' into valere/msc_2399

This commit is contained in:
Damir Jelić
2023-03-21 16:41:55 +01:00
46 changed files with 2536 additions and 1731 deletions

View File

@@ -209,7 +209,7 @@ jobs:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Rust
uses: dtolnay/rust-toolchain@nightly
uses: dtolnay/rust-toolchain@stable
- name: Install aarch64-apple-ios target
run: rustup target install aarch64-apple-ios

63
Cargo.lock generated
View File

@@ -540,12 +540,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitmaps"
version = "2.1.0"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2"
dependencies = [
"typenum",
]
checksum = "703642b98a00b3b90513279a8ede3fcfa479c126c5fb46e78f3051522f021403"
[[package]]
name = "blake3"
@@ -1614,12 +1611,12 @@ dependencies = [
[[package]]
name = "eyeball-im"
version = "0.1.0"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bb8a6cfd1f5947d0426dcb753723318d5922c738e905be7af167547565f81d9"
checksum = "29e6dff0ac9894dcc183064377dfeb4137bcffa9f9ec3dbc10f8e7fba34c0ac7"
dependencies = [
"futures-core",
"im",
"imbl",
"tokio",
"tokio-stream",
]
@@ -2220,21 +2217,6 @@ dependencies = [
"unicode-normalization",
]
[[package]]
name = "im"
version = "15.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0acd33ff0285af998aaf9b57342af478078f53492322fafc47450e09397e0e9"
dependencies = [
"bitmaps",
"rand_core 0.6.4",
"rand_xoshiro",
"serde",
"sized-chunks",
"typenum",
"version_check",
]
[[package]]
name = "image"
version = "0.23.14"
@@ -2272,6 +2254,29 @@ dependencies = [
"tiff 0.8.1",
]
[[package]]
name = "imbl"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2806b69cd9f4664844027b64465eacb444c67c1db9c778e341adff0c25cdb0d"
dependencies = [
"bitmaps",
"imbl-sized-chunks",
"rand_core 0.6.4",
"rand_xoshiro",
"serde",
"version_check",
]
[[package]]
name = "imbl-sized-chunks"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6957ea0b2541c5ca561d3ef4538044af79f8a05a1eb3a3b148936aaceaa1076"
dependencies = [
"bitmaps",
]
[[package]]
name = "indenter"
version = "0.3.3"
@@ -2746,8 +2751,8 @@ dependencies = [
"gloo-timers",
"http",
"hyper",
"im",
"image 0.24.5",
"imbl",
"indexmap",
"matrix-sdk-base",
"matrix-sdk-common",
@@ -4925,16 +4930,6 @@ version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de"
[[package]]
name = "sized-chunks"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e"
dependencies = [
"bitmaps",
"typenum",
]
[[package]]
name = "slab"
version = "0.4.7"

View File

@@ -28,7 +28,7 @@ byteorder = "1.4.3"
ctor = "0.1.26"
dashmap = "5.2.0"
eyeball = "0.4.0"
eyeball-im = "0.1.0"
eyeball-im = "0.2.0"
futures-util = { version = "0.3.26", default-features = false, features = ["alloc"] }
http = "0.2.6"
ruma = { git = "https://github.com/ruma/ruma", rev = "8eea3e05490fa9a318f9ed66c3a75272e6ef0ee5", features = ["client-api-c"] }

View File

@@ -1,64 +0,0 @@
@testable import MatrixRustSDK
import XCTest
class AuthenticationServiceTests: XCTestCase {
var service: AuthenticationService!
override func setUp() {
service = AuthenticationService(basePath: FileManager.default.temporaryDirectory.path,
passphrase: nil,
customSlidingSyncProxy: nil)
}
func testValidServers() {
XCTAssertNoThrow(try service.configureHomeserver(serverNameOrHomeserverUrl: "matrix.org"))
XCTAssertNoThrow(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://matrix.org"))
XCTAssertNoThrow(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://matrix.org/"))
}
func testInvalidCharacters() {
XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "hello!@$£%^world"),
"A server name with invalid characters should not succeed to build.") { error in
guard case AuthenticationError.InvalidServerName = error else { XCTFail("Expected invalid name error."); return }
}
}
func textNonExistentDomain() {
XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "somesillylinkthatdoesntexist.com"),
"A server name that doesn't exist should not succeed.") { error in
guard case AuthenticationError.Generic = error else { XCTFail("Expected generic error."); return }
}
XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://somesillylinkthatdoesntexist.com"),
"A server URL that doesn't exist should not succeed.") { error in
guard case AuthenticationError.Generic = error else { XCTFail("Expected generic error."); return }
}
}
func testValidDomainWithoutServer() {
XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://google.com"),
"Google should not succeed as it doesn't host a homeserver.") { error in
guard case AuthenticationError.Generic = error else { XCTFail("Expected generic error."); return }
}
}
func testServerWithoutSlidingSync() {
XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "envs.net"),
"Envs should not succeed as it doesn't advertise a sliding sync proxy.") { error in
guard case AuthenticationError.SlidingSyncNotAvailable = error else { XCTFail("Expected sliding sync error."); return }
}
}
func testHomeserverURL() {
XCTAssertThrowsError(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://matrix-client.matrix.org"),
"Directly using a homeserver should not succeed as a sliding sync proxy won't be found.") { error in
guard case AuthenticationError.SlidingSyncNotAvailable = error else { XCTFail("Expected sliding sync error."); return }
}
}
func testHomeserverURLWithProxyOverride() {
service = AuthenticationService(basePath: FileManager.default.temporaryDirectory.path,
passphrase: nil, customSlidingSyncProxy: "https://slidingsync.proxy")
XCTAssertNoThrow(try service.configureHomeserver(serverNameOrHomeserverUrl: "https://matrix-client.matrix.org"),
"Directly using a homeserver should succeed what a custom sliding sync proxy has been set.")
}
}

View File

@@ -12,7 +12,6 @@ mod error;
mod logger;
mod machine;
mod responses;
mod uniffi_api;
mod users;
mod verification;
@@ -47,7 +46,6 @@ use ruma::{
};
use serde::{Deserialize, Serialize};
use tokio::runtime::Runtime;
use uniffi_api::*;
pub use users::UserIdentity;
pub use verification::{
CancelInfo, ConfirmVerificationResult, QrCode, QrCodeListener, QrCodeState,
@@ -839,6 +837,18 @@ fn parse_user_id(user_id: &str) -> Result<OwnedUserId, CryptoStoreError> {
ruma::UserId::parse(user_id).map_err(|e| CryptoStoreError::InvalidUserId(user_id.to_owned(), e))
}
/// Returns the version of the `matrix-sdk-crypto` crate in use.
#[uniffi::export]
fn version() -> String {
    matrix_sdk_crypto::VERSION.to_owned()
}

/// Returns the version of the `vodozemac` cryptographic library in use.
#[uniffi::export]
fn vodozemac_version() -> String {
    vodozemac::VERSION.to_owned()
}
uniffi::include_scaffolding!("olm");
mod uniffi_types {
pub use crate::{
backup_recovery_key::{

View File

@@ -28,7 +28,7 @@ interface MigrationError {
};
callback interface Logger {
void log(string logLine);
void log(string log_line);
};
callback interface ProgressListener {

View File

@@ -1,5 +0,0 @@
#![allow(clippy::all, warnings)]
use crate::*;
uniffi::include_scaffolding!("olm");

View File

@@ -1,2 +1,2 @@
version-tag-prefix "matrix-sdk-crypto-js-v"
version-tag-prefix "matrix-sdk-crypto-js-"
version-git-message "matrix-sdk-crypto-js v%s"

View File

@@ -1,6 +1,6 @@
{
"name": "@matrix-org/matrix-sdk-crypto-js",
"version": "0.1.0-alpha.4",
"version": "0.1.0-alpha.5",
"homepage": "https://github.com/matrix-org/matrix-rust-sdk",
"description": "Matrix encryption library, for JavaScript",
"license": "Apache-2.0",

View File

@@ -38,8 +38,30 @@ pub mod types;
pub mod verification;
pub mod vodozemac;
use js_sys::JsString;
use wasm_bindgen::prelude::*;
/// Object containing the versions of the Rust libraries we are using.
#[wasm_bindgen(getter_with_clone)]
#[derive(Debug)]
pub struct Versions {
/// The version of the vodozemac crate.
#[wasm_bindgen(readonly)]
pub vodozemac: JsString,
/// The version of the matrix-sdk-crypto crate.
#[wasm_bindgen(readonly)]
pub matrix_sdk_crypto: JsString,
}
/// Get the versions of the Rust libraries we are using.
#[wasm_bindgen(js_name = "getVersions")]
pub fn get_versions() -> Versions {
Versions {
vodozemac: matrix_sdk_crypto::vodozemac::VERSION.into(),
matrix_sdk_crypto: matrix_sdk_crypto::VERSION.into(),
}
}
/// Run some stuff when the Wasm module is instantiated.
///
/// Right now, it does the following:

View File

@@ -7,7 +7,6 @@ const {
EncryptionSettings,
EventId,
InboundGroupSession,
KeysClaimRequest,
KeysQueryRequest,
KeysUploadRequest,
MaybeSignature,
@@ -16,16 +15,29 @@ const {
RequestType,
RoomId,
RoomMessageRequest,
ShieldColor,
SignatureUploadRequest,
ToDeviceRequest,
UserId,
UserIdentity,
VerificationRequest,
ShieldColor,
VerificationState,
Versions,
getVersions,
} = require("../pkg/matrix_sdk_crypto_js");
const { addMachineToMachine } = require("./helper");
require("fake-indexeddb/auto");
describe("Versions", () => {
test("can find out the crate versions", async () => {
const versions = getVersions();
expect(versions).toBeInstanceOf(Versions);
expect(versions.vodozemac).toBeDefined();
expect(versions.matrix_sdk_crypto).toBeDefined();
});
});
describe(OlmMachine.name, () => {
test("can be instantiated with the async initializer", async () => {
expect(await OlmMachine.initialize(new UserId("@foo:bar.org"), new DeviceId("baz"))).toBeInstanceOf(OlmMachine);

View File

@@ -16,6 +16,8 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
//#![warn(missing_docs, missing_debug_implementations)]
use napi_derive::napi;
pub mod attachment;
pub mod encryption;
mod errors;
@@ -31,4 +33,25 @@ pub mod tracing;
pub mod types;
pub mod vodozemac;
/// Object containing the versions of the Rust libraries we are using.
#[napi]
pub struct Versions {
/// The version of the vodozemac crate.
#[napi(getter)]
pub vodozemac: String,
/// The version of the matrix-sdk-crypto crate.
#[napi(getter)]
pub matrix_sdk_crypto: String,
}
/// Get the versions of the Rust libraries we are using.
#[napi(js_name = "getVersions")]
pub fn get_versions() -> Versions {
Versions {
vodozemac: matrix_sdk_crypto::vodozemac::VERSION.to_owned(),
matrix_sdk_crypto: matrix_sdk_crypto::VERSION.to_owned(),
}
}
use crate::errors::into_err;

View File

@@ -14,8 +14,10 @@ const {
VerificationState,
CrossSigningStatus,
MaybeSignature,
StoreType,
ShieldColor,
StoreType,
Versions,
getVersions,
} = require("../");
const path = require("path");
const os = require("os");
@@ -28,6 +30,16 @@ describe("StoreType", () => {
});
});
describe("Versions", () => {
test("can find out the crate versions", async () => {
const versions = getVersions();
expect(versions).toBeInstanceOf(Versions);
expect(versions.vodozemac).toBeDefined();
expect(versions.matrixSdkCrypto).toBeDefined();
});
});
describe(OlmMachine.name, () => {
test("cannot be instantiated with the constructor", () => {
expect(() => {

View File

@@ -32,14 +32,17 @@ callback interface SlidingSyncObserver {
};
enum SlidingSyncState {
/// Hasn't started yet
"Cold",
/// We are quickly preloading a preview of the most important rooms
"Preload",
/// Sliding Sync has not started to load anything yet.
"NotLoaded",
/// Sliding Sync has been preloaded, i.e. restored from a cache for example.
"Preloaded",
/// We are trying to load all remaining rooms, might be in batches
"CatchingUp",
/// We are all caught up and now only sync the live responses.
"Live",
/// Updates are received from the loaded rooms, and new rooms are being fetched
/// in background
"PartiallyLoaded",
/// Updates are received for all the loaded rooms, and all rooms have been
/// loaded!
"FullyLoaded",
};
enum SlidingSyncMode {
@@ -209,15 +212,7 @@ enum MembershipState {
"Leave",
};
dictionary RoomMember {
string user_id;
string? display_name;
string? avatar_url;
MembershipState membership;
boolean is_name_ambiguous;
i64 power_level;
i64 normalized_power_level;
};
interface RoomMember { };
interface Room {
[Throws=ClientError]

View File

@@ -3,6 +3,7 @@ use std::sync::{Arc, RwLock};
use anyhow::{anyhow, Context};
use matrix_sdk::{
media::{MediaFileHandle as SdkMediaFileHandle, MediaFormat, MediaRequest, MediaThumbnailSize},
room::Room as SdkRoom,
ruma::{
api::client::{
account::whoami,
@@ -251,7 +252,7 @@ impl Client {
/// The OIDC Provider that is trusted by the homeserver. `None` when
/// not configured.
pub async fn authentication_issuer(&self) -> Option<String> {
self.client.authentication_issuer().await.map(|server| server.to_string())
self.client.authentication_issuer().await
}
/// The sliding sync proxy that is trusted by the homeserver. `None` when
@@ -497,6 +498,29 @@ impl Client {
pub fn rooms(&self) -> Vec<Arc<Room>> {
self.client.rooms().into_iter().map(|room| Arc::new(Room::new(room))).collect()
}
pub fn get_dm_room(&self, user_id: String) -> Result<Option<Arc<Room>>, ClientError> {
let user_id = UserId::parse(user_id)?;
let sdk_room = self.client.get_dm_room(&user_id).map(SdkRoom::Joined);
let dm = sdk_room.map(|room| Arc::new(Room::new(room)));
Ok(dm)
}
pub fn ignore_user(&self, user_id: String) -> Result<(), ClientError> {
RUNTIME.block_on(async move {
let user_id = UserId::parse(user_id)?;
self.client.account().ignore_user(&user_id).await?;
Ok(())
})
}
pub fn unignore_user(&self, user_id: String) -> Result<(), ClientError> {
RUNTIME.block_on(async move {
let user_id = UserId::parse(user_id)?;
self.client.account().unignore_user(&user_id).await?;
Ok(())
})
}
}
impl Client {

View File

@@ -28,17 +28,16 @@ pub mod client_builder;
mod helpers;
pub mod notification_service;
pub mod room;
pub mod room_member;
pub mod session_verification;
pub mod sliding_sync;
pub mod timeline;
mod uniffi_api;
use client::Client;
use client_builder::ClientBuilder;
use matrix_sdk::{encryption::CryptoStoreError, HttpError, IdParseError};
use once_cell::sync::Lazy;
use tokio::runtime::Runtime;
pub use uniffi_api::*;
pub static RUNTIME: Lazy<Runtime> =
Lazy::new(|| Runtime::new().expect("Can't start Tokio runtime"));
@@ -49,7 +48,7 @@ pub use matrix_sdk::{
};
pub use self::{
authentication_service::*, client::*, notification_service::*, room::*,
authentication_service::*, client::*, notification_service::*, room::*, room_member::*,
session_verification::*, sliding_sync::*, timeline::*,
};
@@ -97,6 +96,8 @@ impl From<serde_json::Error> for ClientError {
pub use platform::*;
uniffi::include_scaffolding!("api");
mod uniffi_types {
pub use matrix_sdk::ruma::events::room::{message::RoomMessageEventContent, MediaSource};
@@ -109,7 +110,8 @@ mod uniffi_types {
PusherKind, Session,
},
client_builder::ClientBuilder,
room::{Membership, MembershipState, Room, RoomMember},
room::{Membership, Room},
room_member::{MembershipState, RoomMember},
session_verification::{SessionVerificationController, SessionVerificationEmoji},
sliding_sync::{
RequiredState, RoomListEntry, SlidingSync, SlidingSyncBuilder, SlidingSyncList,

View File

@@ -24,7 +24,7 @@ use mime::Mime;
use tracing::error;
use super::RUNTIME;
use crate::{TimelineDiff, TimelineItem, TimelineListener};
use crate::{RoomMember, TimelineDiff, TimelineItem, TimelineListener};
#[derive(uniffi::Enum)]
pub enum Membership {
@@ -40,56 +40,6 @@ pub struct Room {
timeline: TimelineLock,
}
#[derive(Clone)]
pub enum MembershipState {
/// The user is banned.
Ban,
/// The user has been invited.
Invite,
/// The user has joined.
Join,
/// The user has requested to join.
Knock,
/// The user has left.
Leave,
}
#[derive(uniffi::Object)]
pub struct RoomMember {
pub user_id: String,
pub display_name: Option<String>,
pub avatar_url: Option<String>,
pub membership: MembershipState,
pub is_name_ambiguous: bool,
pub power_level: i64,
pub normalized_power_level: i64,
}
impl From<matrix_sdk::ruma::events::room::member::MembershipState> for MembershipState {
fn from(m: matrix_sdk::ruma::events::room::member::MembershipState) -> Self {
match m {
matrix_sdk::ruma::events::room::member::MembershipState::Ban => MembershipState::Ban,
matrix_sdk::ruma::events::room::member::MembershipState::Invite => {
MembershipState::Invite
}
matrix_sdk::ruma::events::room::member::MembershipState::Join => MembershipState::Join,
matrix_sdk::ruma::events::room::member::MembershipState::Knock => {
MembershipState::Knock
}
matrix_sdk::ruma::events::room::member::MembershipState::Leave => {
MembershipState::Leave
}
_ => todo!(
"Handle Custom case: https://github.com/matrix-org/matrix-rust-sdk/issues/1254"
),
}
}
}
#[uniffi::export]
impl Room {
pub fn id(&self) -> String {
@@ -198,22 +148,14 @@ impl Room {
})
}
pub fn members(&self) -> Result<Vec<RoomMember>> {
pub fn members(&self) -> Result<Vec<Arc<RoomMember>>> {
let room = self.room.clone();
RUNTIME.block_on(async move {
let members = room
.members()
.await?
.iter()
.map(|m| RoomMember {
user_id: m.user_id().to_string(),
display_name: m.display_name().map(|d| d.to_owned()),
avatar_url: m.avatar_url().map(|a| a.to_string()),
membership: m.membership().to_owned().into(),
is_name_ambiguous: m.name_ambiguous(),
power_level: m.power_level(),
normalized_power_level: m.normalized_power_level(),
})
.map(|m| Arc::new(RoomMember::new(m.clone())))
.collect();
Ok(members)
})

View File

@@ -0,0 +1,110 @@
use matrix_sdk::room::RoomMember as SdkRoomMember;
use super::RUNTIME;
use crate::ClientError;
/// The membership state of a room member, as exposed over the FFI layer.
#[derive(Clone)]
pub enum MembershipState {
    /// The user is banned.
    Ban,
    /// The user has been invited.
    Invite,
    /// The user has joined.
    Join,
    /// The user has requested to join.
    Knock,
    /// The user has left.
    Leave,
}
// Conversion from the Ruma SDK's membership state into the FFI-exposed enum.
impl From<matrix_sdk::ruma::events::room::member::MembershipState> for MembershipState {
    fn from(m: matrix_sdk::ruma::events::room::member::MembershipState) -> Self {
        match m {
            matrix_sdk::ruma::events::room::member::MembershipState::Ban => MembershipState::Ban,
            matrix_sdk::ruma::events::room::member::MembershipState::Invite => {
                MembershipState::Invite
            }
            matrix_sdk::ruma::events::room::member::MembershipState::Join => MembershipState::Join,
            matrix_sdk::ruma::events::room::member::MembershipState::Knock => {
                MembershipState::Knock
            }
            matrix_sdk::ruma::events::room::member::MembershipState::Leave => {
                MembershipState::Leave
            }
            // Ruma's MembershipState is non-exhaustive; custom membership
            // states are deliberately not handled yet (see linked issue).
            _ => todo!(
                "Handle Custom case: https://github.com/matrix-org/matrix-rust-sdk/issues/1254"
            ),
        }
    }
}
/// FFI wrapper around the SDK's `RoomMember`, exposing member data over uniffi.
pub struct RoomMember {
    // The underlying SDK room member every method delegates to.
    inner: SdkRoomMember,
}

#[uniffi::export]
impl RoomMember {
    /// The Matrix user id of this member, as a string.
    pub fn user_id(&self) -> String {
        self.inner.user_id().to_string()
    }

    /// The member's display name, if one is set.
    pub fn display_name(&self) -> Option<String> {
        self.inner.display_name().map(|d| d.to_owned())
    }

    /// The member's avatar URL as a string, if one is set.
    pub fn avatar_url(&self) -> Option<String> {
        self.inner.avatar_url().map(ToString::to_string)
    }

    /// The membership state of this member in the room.
    pub fn membership(&self) -> MembershipState {
        self.inner.membership().to_owned().into()
    }

    /// Whether the member's name is ambiguous, as reported by the SDK.
    pub fn is_name_ambiguous(&self) -> bool {
        self.inner.name_ambiguous()
    }

    /// The member's power level.
    pub fn power_level(&self) -> i64 {
        self.inner.power_level()
    }

    /// The member's normalized power level (see the SDK's
    /// `normalized_power_level` for the normalization applied).
    pub fn normalized_power_level(&self) -> i64 {
        self.inner.normalized_power_level()
    }

    /// Whether the current account is ignoring this member.
    pub fn is_ignored(&self) -> bool {
        self.inner.is_ignored()
    }

    /// Whether this member is the user of the current account.
    pub fn is_account_user(&self) -> bool {
        self.inner.is_account_user()
    }

    /// Adds the room member to the current account data's ignore list
    /// which will ignore the user across all rooms.
    ///
    /// Blocks on the shared Tokio runtime until the request completes.
    pub fn ignore(&self) -> Result<(), ClientError> {
        RUNTIME.block_on(async move {
            self.inner.ignore().await?;
            Ok(())
        })
    }

    /// Removes the room member from the current account data's ignore list
    /// which will unignore the user across all rooms.
    ///
    /// Blocks on the shared Tokio runtime until the request completes.
    pub fn unignore(&self) -> Result<(), ClientError> {
        RUNTIME.block_on(async move {
            self.inner.unignore().await?;
            Ok(())
        })
    }
}

impl RoomMember {
    /// Wraps an SDK `RoomMember` for exposure over the FFI layer.
    pub fn new(room_member: SdkRoomMember) -> Self {
        RoomMember { inner: room_member }
    }
}

View File

@@ -447,19 +447,19 @@ impl SlidingSyncListBuilder {
pub fn batch_size(self: Arc<Self>, batch_size: u32) -> Arc<Self> {
let mut builder = unwrap_or_clone_arc(self);
builder.inner = builder.inner.batch_size(batch_size);
builder.inner = builder.inner.full_sync_batch_size(batch_size);
Arc::new(builder)
}
pub fn room_limit(self: Arc<Self>, limit: u32) -> Arc<Self> {
let mut builder = unwrap_or_clone_arc(self);
builder.inner = builder.inner.limit(limit);
builder.inner = builder.inner.full_sync_maximum_number_of_rooms_to_fetch(limit);
Arc::new(builder)
}
pub fn no_room_limit(self: Arc<Self>) -> Arc<Self> {
let mut builder = unwrap_or_clone_arc(self);
builder.inner = builder.inner.limit(None);
builder.inner = builder.inner.full_sync_maximum_number_of_rooms_to_fetch(None);
Arc::new(builder)
}
@@ -555,7 +555,7 @@ impl SlidingSyncList {
&self,
observer: Box<dyn SlidingSyncListRoomsCountObserver>,
) -> Arc<TaskHandle> {
let mut rooms_count_stream = self.inner.rooms_count_stream();
let mut rooms_count_stream = self.inner.maximum_number_of_rooms_stream();
Arc::new(TaskHandle::new(RUNTIME.spawn(async move {
loop {
@@ -597,7 +597,7 @@ impl SlidingSyncList {
/// Total of rooms matching the filter
pub fn current_room_count(&self) -> Option<u32> {
self.inner.rooms_count()
self.inner.maximum_number_of_rooms()
}
/// The current timeline limit

View File

@@ -1,5 +0,0 @@
#![allow(clippy::all)]
use crate::*;
uniffi::include_scaffolding!("api");

View File

@@ -41,6 +41,7 @@ pub struct RoomMember {
pub(crate) max_power_level: i64,
pub(crate) is_room_creator: bool,
pub(crate) display_name_ambiguous: bool,
pub(crate) is_ignored: bool,
}
impl RoomMember {
@@ -125,4 +126,9 @@ impl RoomMember {
pub fn membership(&self) -> &MembershipState {
self.event.membership()
}
/// Is the room member ignored by the current account user
pub fn is_ignored(&self) -> bool {
self.is_ignored
}
}

View File

@@ -21,6 +21,7 @@ use futures_util::stream::{self, StreamExt};
use ruma::{
api::client::sync::sync_events::v3::RoomSummary as RumaSummary,
events::{
ignored_user_list::IgnoredUserListEventContent,
receipt::{Receipt, ReceiptThread, ReceiptType},
room::{
create::RoomCreateEventContent, encryption::RoomEncryptionEventContent,
@@ -457,6 +458,16 @@ impl Room {
.len()
> 1;
let is_ignored = self
.store
.get_account_data_event_static::<IgnoredUserListEventContent>()
.await?
.map(|c| c.deserialize())
.transpose()?
.map(|e| e.content)
.map(|l| l.ignored_users.contains_key(member_event.user_id()))
.unwrap_or(false);
Ok(Some(RoomMember {
event: Arc::new(member_event),
profile: profile.into(),
@@ -465,6 +476,7 @@ impl Room {
max_power_level,
is_room_creator,
display_name_ambiguous: ambiguous,
is_ignored,
}))
}

View File

@@ -1,4 +1,4 @@
use std::collections::BTreeMap;
use std::{collections::BTreeMap, fmt};
use ruma::{
events::{AnySyncTimelineEvent, AnyTimelineEvent},
@@ -17,6 +17,7 @@ const UNKNOWN_DEVICE: &str = "Encrypted by an unknown or deleted device.";
/// Represents the state of verification for a decrypted message sent by a
/// device.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(from = "OldVerificationStateHelper")]
pub enum VerificationState {
/// This message is guaranteed to be authentic as it is coming from a device
/// belonging to a user that we have verified.
@@ -31,6 +32,34 @@ pub enum VerificationState {
Unverified(VerificationLevel),
}
// TODO: Remove this once we're confident that everybody that serialized these
// states uses the new enum.
/// Pre-migration shape of `VerificationState`, kept so that values serialized
/// by older releases still deserialize; `VerificationState` converts from it
/// via its `#[serde(from = "OldVerificationStateHelper")]` attribute.
#[derive(Clone, Debug, Deserialize)]
enum OldVerificationStateHelper {
    Untrusted,
    UnknownDevice,
    // Older serializations used the name "Trusted" for this variant.
    #[serde(alias = "Trusted")]
    Verified,
    Unverified(VerificationLevel),
}

impl From<OldVerificationStateHelper> for VerificationState {
    fn from(value: OldVerificationStateHelper) -> Self {
        match value {
            // This mapping isn't strictly correct but we don't know which part in the old
            // `VerificationState` enum was unverified.
            OldVerificationStateHelper::Untrusted => {
                VerificationState::Unverified(VerificationLevel::UnsignedDevice)
            }
            OldVerificationStateHelper::UnknownDevice => {
                Self::Unverified(VerificationLevel::None(DeviceLinkProblem::MissingDevice))
            }
            OldVerificationStateHelper::Verified => Self::Verified,
            OldVerificationStateHelper::Unverified(l) => Self::Unverified(l),
        }
    }
}
impl VerificationState {
/// Convert the `VerificationState` into a `ShieldState` which can be
/// directly used to decorate messages in the recommended way.
@@ -180,7 +209,7 @@ pub struct EncryptionInfo {
/// A customized version of a room event coming from a sync that holds optional
/// encryption info.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[derive(Clone, Deserialize, Serialize)]
pub struct SyncTimelineEvent {
/// The actual event.
pub event: Raw<AnySyncTimelineEvent>,
@@ -193,6 +222,14 @@ pub struct SyncTimelineEvent {
}
impl SyncTimelineEvent {
/// Create a new `SyncTimelineEvent` from the given raw event.
///
/// This is a convenience constructor for when you don't need to set
/// `encryption_info` or `push_action`, for example inside a test.
pub fn new(event: Raw<AnySyncTimelineEvent>) -> Self {
Self { event, encryption_info: None, push_actions: vec![] }
}
/// Get the event id of this `SyncTimelineEvent` if the event has any valid
/// id.
pub fn event_id(&self) -> Option<OwnedEventId> {
@@ -200,6 +237,18 @@ impl SyncTimelineEvent {
}
}
// Manual `Debug` that prints the raw event through `DebugRawEvent` (event id
// only) so that event content does not end up in debug output or logs.
#[cfg(not(tarpaulin_include))]
impl fmt::Debug for SyncTimelineEvent {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Exhaustive destructuring keeps this impl in sync with the struct:
        // adding a field without updating this impl becomes a compile error.
        let SyncTimelineEvent { event, encryption_info, push_actions } = self;
        f.debug_struct("SyncTimelineEvent")
            .field("event", &DebugRawEvent(event))
            .field("encryption_info", encryption_info)
            .field("push_actions", push_actions)
            .finish()
    }
}
impl From<Raw<AnySyncTimelineEvent>> for SyncTimelineEvent {
fn from(inner: Raw<AnySyncTimelineEvent>) -> Self {
Self { encryption_info: None, event: inner, push_actions: Vec::default() }
@@ -220,7 +269,7 @@ impl From<TimelineEvent> for SyncTimelineEvent {
}
}
#[derive(Clone, Debug)]
#[derive(Clone)]
pub struct TimelineEvent {
/// The actual event.
pub event: Raw<AnyTimelineEvent>,
@@ -241,28 +290,78 @@ impl TimelineEvent {
}
}
// Manual `Debug` that prints the raw event through `DebugRawEvent` (event id
// only) so that event content does not end up in debug output or logs.
#[cfg(not(tarpaulin_include))]
impl fmt::Debug for TimelineEvent {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Exhaustive destructuring keeps this impl in sync with the struct:
        // adding a field without updating this impl becomes a compile error.
        let TimelineEvent { event, encryption_info, push_actions } = self;
        f.debug_struct("TimelineEvent")
            .field("event", &DebugRawEvent(event))
            .field("encryption_info", encryption_info)
            .field("push_actions", push_actions)
            .finish()
    }
}
/// Debug helper that renders a `Raw` event as only its `event_id` field,
/// hiding the (potentially sensitive) rest of the event.
struct DebugRawEvent<'a, T>(&'a Raw<T>);

#[cfg(not(tarpaulin_include))]
impl<T> fmt::Debug for DebugRawEvent<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RawEvent")
            .field("event_id", &DebugEventId(self.0.get_field("event_id")))
            // `finish_non_exhaustive` signals that fields were omitted.
            .finish_non_exhaustive()
    }
}
/// Debug helper for an `event_id` extracted from a raw event: prints the id
/// when present, `Missing` when absent, or the deserialization error.
struct DebugEventId(serde_json::Result<Option<OwnedEventId>>);

#[cfg(not(tarpaulin_include))]
impl fmt::Debug for DebugEventId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.0 {
            Ok(Some(id)) => id.fmt(f),
            Ok(None) => f.write_str("Missing"),
            Err(e) => f.debug_tuple("Invalid").field(&e).finish(),
        }
    }
}
#[cfg(test)]
mod tests {
use ruma::{
events::{room::message::RoomMessageEventContent, AnySyncTimelineEvent},
serde::Raw,
};
use serde::Deserialize;
use serde_json::json;
use super::{SyncTimelineEvent, TimelineEvent};
use super::{SyncTimelineEvent, TimelineEvent, VerificationState};
use crate::deserialized_responses::{DeviceLinkProblem, VerificationLevel};
#[test]
fn room_event_to_sync_room_event() {
let event = json!({
"content": RoomMessageEventContent::text_plain("foobar"),
fn example_event() -> serde_json::Value {
json!({
"content": RoomMessageEventContent::text_plain("secret"),
"type": "m.room.message",
"event_id": "$xxxxx:example.org",
"room_id": "!someroom:example.com",
"origin_server_ts": 2189,
"sender": "@carl:example.com",
});
})
}
let room_event = TimelineEvent::new(Raw::new(&event).unwrap().cast());
#[test]
fn sync_timeline_debug_content() {
let room_event = SyncTimelineEvent::new(Raw::new(&example_event()).unwrap().cast());
let debug_s = format!("{room_event:?}");
assert!(
!debug_s.contains("secret"),
"Debug representation contains event content!\n{debug_s}"
);
}
#[test]
fn room_event_to_sync_room_event() {
let room_event = TimelineEvent::new(Raw::new(&example_event()).unwrap().cast());
let converted_room_event: SyncTimelineEvent = room_event.into();
let converted_event: AnySyncTimelineEvent =
@@ -271,4 +370,44 @@ mod tests {
assert_eq!(converted_event.event_id(), "$xxxxx:example.org");
assert_eq!(converted_event.sender(), "@carl:example.com");
}
#[test]
fn old_verification_state_to_new_migration() {
#[derive(Deserialize)]
struct State {
state: VerificationState,
}
let state = json!({
"state": "Trusted",
});
let deserialized: State =
serde_json::from_value(state).expect("We can deserialize the old trusted value");
assert_eq!(deserialized.state, VerificationState::Verified);
let state = json!({
"state": "UnknownDevice",
});
let deserialized: State =
serde_json::from_value(state).expect("We can deserialize the old unknown device value");
assert_eq!(
deserialized.state,
VerificationState::Unverified(VerificationLevel::None(
DeviceLinkProblem::MissingDevice
))
);
let state = json!({
"state": "Untrusted",
});
let deserialized: State =
serde_json::from_value(state).expect("We can deserialize the old trusted value");
assert_eq!(
deserialized.state,
VerificationState::Unverified(VerificationLevel::UnsignedDevice)
);
}
}

View File

@@ -984,6 +984,7 @@ pub(crate) mod tests {
}
#[test]
#[allow(clippy::redundant_clone)]
fn delete_a_device() {
let device = get_device();
assert!(!device.is_deleted());

View File

@@ -146,12 +146,14 @@ impl IndexeddbCryptoStore {
let name = format!("{prefix:0}::matrix-sdk-crypto");
// Open my_db v1
let mut db_req: OpenDbRequest = IdbDatabase::open_f64(&name, 2.0)?;
let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, 3)?;
db_req.set_on_upgrade_needed(Some(|evt: &IdbVersionChangeEvent| -> Result<(), JsValue> {
let old_version = evt.old_version();
// Even if the web-sys bindings expose the version as a f64, the IndexedDB API
// works with an unsigned integer.
// See <https://github.com/rustwasm/wasm-bindgen/issues/1149>
let old_version = evt.old_version() as u32;
if old_version < 1.0 {
if old_version < 1 {
// migrating to version 1
let db = evt.db();
@@ -170,20 +172,22 @@ impl IndexeddbCryptoStore {
db.create_object_store(keys::SECRET_REQUESTS_BY_INFO)?;
db.create_object_store(keys::BACKUP_KEYS)?;
} else if old_version < 1.1 {
}
if old_version < 2 {
let db = evt.db();
// We changed how we store inbound group sessions, the key used to
// be a triple of `(room_id, sender_key, session_id)`; now it's a
// tuple of `(room_id, session_id)`
//
// Let's just drop the whole object store.
let db = evt.db();
db.delete_object_store(keys::INBOUND_GROUP_SESSIONS)?;
db.create_object_store(keys::INBOUND_GROUP_SESSIONS)?;
db.create_object_store(keys::ROOM_SETTINGS)?;
}
if old_version < 2.0 {
if old_version < 3 {
let db = evt.db();
// We changed the way we store outbound session.
@@ -194,7 +198,6 @@ impl IndexeddbCryptoStore {
// Support for MSC2399 withheld codes
db.create_object_store(keys::DIRECT_WITHHELD_INFO)?;
db.create_object_store(keys::ROOM_SETTINGS)?;
}
Ok(())
@@ -250,9 +253,10 @@ impl IndexeddbCryptoStore {
pub async fn open_with_passphrase(prefix: &str, passphrase: &str) -> Result<Self> {
let name = format!("{prefix:0}::matrix-sdk-crypto-meta");
let mut db_req: OpenDbRequest = IdbDatabase::open_f64(&name, 1.0)?;
let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, 1)?;
db_req.set_on_upgrade_needed(Some(|evt: &IdbVersionChangeEvent| -> Result<(), JsValue> {
if evt.old_version() < 1.0 {
let old_version = evt.old_version() as u32;
if old_version < 1 {
// migrating to version 1
let db = evt.db();
@@ -452,7 +456,6 @@ impl_crypto_store! {
.put_key_val(&JsValue::from_str(keys::BACKUP_KEY_V1), &self.serialize_value(&a)?)?;
}
if !changes.sessions.is_empty() {
let sessions = tx.object_store(keys::SESSION)?;
@@ -464,7 +467,6 @@ impl_crypto_store! {
let key = self.encode_key(keys::SESSION, (&sender_key, session_id));
sessions.put_key_val(&key, &self.serialize_value(&pickle)?)?;
}
}

View File

@@ -79,7 +79,7 @@ eyre = { version = "0.6.8", optional = true }
futures-core = "0.3.21"
futures-util = { workspace = true }
http = { workspace = true }
im = { version = "15.1.0", features = ["serde"] }
imbl = { version = "2.0.0", features = ["serde"] }
indexmap = "1.9.1"
hyper = { version = "0.14.20", features = ["http1", "http2", "server"], optional = true }
matrix-sdk-base = { version = "0.6.0", path = "../matrix-sdk-base", default_features = false }

View File

@@ -34,12 +34,14 @@ use ruma::{
},
assign,
events::{
room::MediaSource, AnyGlobalAccountDataEventContent, GlobalAccountDataEventContent,
ignored_user_list::{IgnoredUser, IgnoredUserListEventContent},
room::MediaSource,
AnyGlobalAccountDataEventContent, GlobalAccountDataEventContent,
GlobalAccountDataEventType, StaticEventContent,
},
serde::Raw,
thirdparty::Medium,
ClientSecret, MxcUri, OwnedMxcUri, SessionId, UInt,
ClientSecret, MxcUri, OwnedMxcUri, OwnedUserId, RoomId, SessionId, UInt, UserId,
};
use serde::Deserialize;
@@ -747,6 +749,79 @@ impl Account {
Ok(self.client.send(request, None).await?)
}
/// Marks the given room with `room_id` as a "direct chat" with any
/// user in `user_ids`.
///
/// This is done by adding the `room_id` to the list of DM
/// chats for any user id in `user_ids`.
///
/// # Arguments
///
/// * `room_id` - The room id of the DM room.
/// * `user_ids` - The user ids of the invitees for the DM room.
pub(crate) async fn mark_as_dm(
&self,
room_id: &RoomId,
user_ids: &[OwnedUserId],
) -> Result<()> {
use ruma::events::direct::DirectEventContent;
// Now we need to mark the room as a DM for ourselves, we fetch the
// existing `m.direct` event and append the room to the list of DMs we
// have with this user.
let mut content = self
.account_data::<DirectEventContent>()
.await?
.map(|c| c.deserialize())
.transpose()?
.unwrap_or_default();
for user_id in user_ids {
content.entry(user_id.to_owned()).or_default().push(room_id.to_owned());
}
// TODO We should probably save the fact that we need to send this out
// because otherwise we might end up in a state where we have a DM that
// isn't marked as one.
self.set_account_data(content).await?;
Ok(())
}
/// Adds the given user ID to the account's ignore list.
pub async fn ignore_user(&self, user_id: &UserId) -> Result<()> {
let mut ignored_user_list = self.get_ignored_user_list_event_content().await?;
ignored_user_list.ignored_users.insert(user_id.to_owned(), IgnoredUser::new());
// Updating the account data
self.set_account_data(ignored_user_list).await?;
// TODO: I think I should reset all the storage and perform a new local sync
// here but I don't know how
Ok(())
}
/// Removes the given user ID from the account's ignore list.
pub async fn unignore_user(&self, user_id: &UserId) -> Result<()> {
let mut ignored_user_list = self.get_ignored_user_list_event_content().await?;
ignored_user_list.ignored_users.remove(user_id);
// Updating the account data
self.set_account_data(ignored_user_list).await?;
// TODO: I think I should reset all the storage and perform a new local sync
// here but I don't know how
Ok(())
}
async fn get_ignored_user_list_event_content(&self) -> Result<IgnoredUserListEventContent> {
let ignored_user_list = self
.account_data::<IgnoredUserListEventContent>()
.await?
.map(|c| c.deserialize())
.transpose()?
.unwrap_or_default();
Ok(ignored_user_list)
}
}
fn get_raw_content<Ev, C>(raw: Option<Raw<Ev>>) -> Result<Option<Raw<C>>> {

View File

@@ -378,7 +378,7 @@ impl ClientBuilder {
let base_client = BaseClient::with_store_config(store_config);
let http_client = HttpClient::new(inner_http_client.clone(), self.request_config);
let mut authentication_issuer: Option<Url> = None;
let mut authentication_issuer = None;
#[cfg(feature = "experimental-sliding-sync")]
let mut sliding_sync_proxy: Option<Url> = None;
let homeserver = match homeserver_cfg {
@@ -402,14 +402,16 @@ impl ClientBuilder {
err => ClientBuildError::Http(err),
})?;
if let Some(issuer) = well_known.authentication.map(|auth| auth.issuer) {
authentication_issuer = Url::parse(&issuer).ok();
}
authentication_issuer = well_known.authentication.map(|auth| auth.issuer);
#[cfg(feature = "experimental-sliding-sync")]
if let Some(proxy) = well_known.sliding_sync_proxy.map(|p| p.url) {
sliding_sync_proxy = Url::parse(&proxy).ok();
}
debug!(homserver_url = well_known.homeserver.base_url, "Discovered the homeserver");
debug!(
homeserver_url = well_known.homeserver.base_url,
"Discovered the homeserver"
);
well_known.homeserver.base_url
}

View File

@@ -71,9 +71,7 @@ use serde::de::DeserializeOwned;
use tokio::sync::broadcast;
#[cfg(not(target_arch = "wasm32"))]
use tokio::sync::OnceCell;
#[cfg(feature = "e2e-encryption")]
use tracing::error;
use tracing::{debug, field::display, info, instrument, trace, Instrument, Span};
use tracing::{debug, error, field::display, info, instrument, trace, Instrument, Span};
use url::Url;
#[cfg(feature = "e2e-encryption")]
@@ -145,7 +143,7 @@ pub(crate) struct ClientInner {
/// The URL of the homeserver to connect to.
homeserver: RwLock<Url>,
/// The OIDC Provider that is trusted by the homeserver.
authentication_issuer: Option<RwLock<Url>>,
authentication_issuer: Option<RwLock<String>>,
/// The sliding sync proxy that is trusted by the homeserver.
#[cfg(feature = "experimental-sliding-sync")]
sliding_sync_proxy: Option<RwLock<Url>>,
@@ -330,7 +328,7 @@ impl Client {
}
/// The OIDC Provider that is trusted by the homeserver.
pub async fn authentication_issuer(&self) -> Option<Url> {
pub async fn authentication_issuer(&self) -> Option<String> {
let server = self.inner.authentication_issuer.as_ref()?;
Some(server.read().await.clone())
}
@@ -1664,17 +1662,17 @@ impl Client {
self.send(request, None).await
}
/// Create a room using the `RoomBuilder` and send the request.
/// Create a room with the given parameters.
///
/// Sends a request to `/_matrix/client/r0/createRoom`, returns a
/// `create_room::Response`, this is an empty response.
/// Sends a request to `/_matrix/client/r0/createRoom`, returns the created
/// room as a [`room::Joined`] object.
///
/// # Arguments
///
/// * `room` - The easiest way to create this request is using the
/// `create_room::Request` itself.
/// If you want to create a direct message with one specific user, you can
/// use [`create_dm`][Self::create_dm], which is more convenient than
/// assembling the [`create_room::v3::Request`] yourself.
///
/// # Examples
///
/// ```no_run
/// use matrix_sdk::Client;
/// # use matrix_sdk::ruma::api::client::room::{
@@ -1691,12 +1689,40 @@ impl Client {
/// assert!(client.create_room(request).await.is_ok());
/// # });
/// ```
pub async fn create_room(&self, request: create_room::v3::Request) -> HttpResult<room::Joined> {
pub async fn create_room(&self, request: create_room::v3::Request) -> Result<room::Joined> {
let invite = request.invite.clone();
let is_direct_room = request.is_direct;
let response = self.send(request, None).await?;
let base_room =
self.base_client().get_or_create_room(&response.room_id, RoomState::Joined).await;
Ok(room::Joined::new(self, base_room).unwrap())
let joined_room = room::Joined::new(self, base_room).unwrap();
if is_direct_room && !invite.is_empty() {
if let Err(error) =
self.account().mark_as_dm(joined_room.room_id(), invite.as_slice()).await
{
// FIXME: Retry in the background
error!("Failed to mark room as DM: {error}");
}
}
Ok(joined_room)
}
/// Create a DM room.
///
/// Convenience shorthand for [`create_room`][Self::create_room] with the
/// given user being invited, the room marked `is_direct` and both the
/// creator and invitee getting the default maximum power level.
pub async fn create_dm(&self, user_id: &UserId) -> Result<room::Joined> {
self.create_room(assign!(create_room::v3::Request::new(), {
invite: vec![user_id.to_owned()],
is_direct: true,
preset: Some(create_room::v3::RoomPreset::TrustedPrivateChat),
}))
.await
}
/// Search the homeserver's directory for public rooms with a filter.

View File

@@ -477,7 +477,7 @@ impl OtherUserIdentity {
}
room.clone()
} else {
self.client.create_dm_room(self.inner.user_id().to_owned()).await?
self.client.create_dm(self.inner.user_id()).await?
};
let response = room

View File

@@ -38,8 +38,6 @@ pub use matrix_sdk_base::crypto::{
use matrix_sdk_base::crypto::{
CrossSigningStatus, OutgoingRequest, RoomMessageRequest, ToDeviceRequest,
};
#[cfg(feature = "e2e-encryption")]
use ruma::OwnedDeviceId;
use ruma::{
api::client::{
backup::add_backup_keys::v3::Response as KeysBackupResponse,
@@ -52,7 +50,7 @@ use ruma::{
},
uiaa::AuthData,
},
assign, DeviceId, OwnedUserId, TransactionId, UserId,
assign, DeviceId, OwnedDeviceId, OwnedUserId, TransactionId, UserId,
};
use tracing::{debug, instrument, trace, warn};
@@ -68,12 +66,10 @@ use crate::{
};
impl Client {
#[cfg(feature = "e2e-encryption")]
pub(crate) fn olm_machine(&self) -> Option<&matrix_sdk_base::crypto::OlmMachine> {
self.base_client().olm_machine()
}
#[cfg(feature = "e2e-encryption")]
pub(crate) async fn mark_request_as_sent(
&self,
request_id: &TransactionId,
@@ -93,7 +89,6 @@ impl Client {
/// # Panics
///
/// Panics if no key query needs to be done.
#[cfg(feature = "e2e-encryption")]
#[instrument(skip(self))]
pub(crate) async fn keys_query(
&self,
@@ -140,7 +135,6 @@ impl Client {
/// room.send(CustomEventContent { encrypted_file }, None).await?;
/// # anyhow::Ok(()) });
/// ```
#[cfg(feature = "e2e-encryption")]
pub async fn prepare_encrypted_file<'a, R: Read + ?Sized + 'a>(
&self,
content_type: &mime::Mime,
@@ -170,7 +164,6 @@ impl Client {
/// Encrypt and upload the file to be read from `reader` and construct an
/// attachment message with `body`, `content_type`, `info` and `thumbnail`.
#[cfg(feature = "e2e-encryption")]
pub(crate) async fn prepare_encrypted_attachment_message(
&self,
body: &str,
@@ -252,51 +245,11 @@ impl Client {
})
}
#[cfg(feature = "e2e-encryption")]
pub(crate) async fn create_dm_room(&self, user_id: OwnedUserId) -> Result<room::Joined> {
use ruma::{
api::client::room::create_room::v3::RoomPreset, events::direct::DirectEventContent,
};
// First we create the DM room, where we invite the user and tell the
// invitee that the room should be a DM.
let invite = vec![user_id.clone()];
let request = assign!(ruma::api::client::room::create_room::v3::Request::new(), {
invite,
is_direct: true,
preset: Some(RoomPreset::TrustedPrivateChat),
});
let room = self.create_room(request).await?;
// Now we need to mark the room as a DM for ourselves, we fetch the
// existing `m.direct` event and append the room to the list of DMs we
// have with this user.
let mut content = self
.account()
.account_data::<DirectEventContent>()
.await?
.map(|c| c.deserialize())
.transpose()?
.unwrap_or_default();
content.entry(user_id.to_owned()).or_default().push(room.room_id().to_owned());
// TODO We should probably save the fact that we need to send this out
// because otherwise we might end up in a state where we have a DM that
// isn't marked as one.
self.account().set_account_data(content).await?;
Ok(room)
}
/// Claim one-time keys creating new Olm sessions.
///
/// # Arguments
///
/// * `users` - The list of user/device pairs that we should claim keys for.
#[cfg(feature = "e2e-encryption")]
pub(crate) async fn claim_one_time_keys(
&self,
users: impl Iterator<Item = &UserId>,
@@ -325,7 +278,6 @@ impl Client {
///
/// Panics if the client isn't logged in, or if no encryption keys need to
/// be uploaded.
#[cfg(feature = "e2e-encryption")]
#[instrument(skip(self, request))]
pub(crate) async fn keys_upload(
&self,
@@ -344,7 +296,6 @@ impl Client {
Ok(response)
}
#[cfg(feature = "e2e-encryption")]
pub(crate) async fn room_send_helper(
&self,
request: &RoomMessageRequest,
@@ -359,7 +310,6 @@ impl Client {
.await
}
#[cfg(feature = "e2e-encryption")]
pub(crate) async fn send_to_device(
&self,
request: &ToDeviceRequest,
@@ -373,7 +323,6 @@ impl Client {
self.send(request, None).await
}
#[cfg(feature = "e2e-encryption")]
pub(crate) async fn send_verification_request(
&self,
request: matrix_sdk_base::crypto::OutgoingVerificationRequest,
@@ -390,8 +339,8 @@ impl Client {
Ok(())
}
#[cfg(feature = "e2e-encryption")]
fn get_dm_room(&self, user_id: &UserId) -> Option<room::Joined> {
/// Get the existing DM room with the given user, if any.
pub fn get_dm_room(&self, user_id: &UserId) -> Option<room::Joined> {
let rooms = self.joined_rooms();
// Find the room we share with the `user_id` and only with `user_id`
@@ -483,14 +432,12 @@ impl Client {
/// A high-level API to manage the client's encryption.
///
/// To get this, use [`Client::encryption()`].
#[cfg(feature = "e2e-encryption")]
#[derive(Debug, Clone)]
pub struct Encryption {
/// The underlying client.
client: Client,
}
#[cfg(feature = "e2e-encryption")]
impl Encryption {
pub(crate) fn new(client: Client) -> Self {
Self { client }

View File

@@ -64,4 +64,24 @@ impl RoomMember {
let request = MediaRequest { source: MediaSource::Plain(url.to_owned()), format };
Ok(Some(self.client.media().get_media_content(&request, true).await?))
}
/// Adds the room member to the current account data's ignore list
/// which will ignore the user across all rooms.
pub async fn ignore(&self) -> Result<()> {
self.client.account().ignore_user(self.inner.user_id()).await
}
/// Removes the room member from the current account data's ignore list
/// which will unignore the user across all rooms.
pub async fn unignore(&self) -> Result<()> {
self.client.account().unignore_user(self.inner.user_id()).await
}
/// Returns true if the member of the room is the user of the account
pub fn is_account_user(&self) -> bool {
match self.client.user_id() {
Some(id) => id == self.inner.user_id(),
None => false,
}
}
}

View File

@@ -14,7 +14,7 @@
use std::sync::Arc;
use im::Vector;
use imbl::Vector;
use matrix_sdk_base::{
deserialized_responses::{EncryptionInfo, SyncTimelineEvent},
locks::Mutex,

View File

@@ -18,7 +18,7 @@ use std::{collections::HashMap, sync::Arc};
use async_trait::async_trait;
use eyeball_im::{ObservableVector, VectorSubscriber};
use im::Vector;
use imbl::Vector;
use indexmap::{IndexMap, IndexSet};
#[cfg(feature = "e2e-encryption")]
use matrix_sdk_base::crypto::OlmMachine;

View File

@@ -20,7 +20,7 @@ use std::{pin::Pin, sync::Arc, task::Poll};
use eyeball_im::{VectorDiff, VectorSubscriber};
use futures_core::Stream;
use im::Vector;
use imbl::Vector;
use matrix_sdk_base::locks::Mutex;
use pin_project_lite::pin_project;
use ruma::{

View File

@@ -1,7 +1,7 @@
use assert_matches::assert_matches;
use eyeball_im::VectorDiff;
use futures_util::StreamExt;
use im::vector;
use imbl::vector;
use matrix_sdk_base::deserialized_responses::SyncTimelineEvent;
use matrix_sdk_test::async_test;
use ruma::{

View File

@@ -248,8 +248,9 @@ impl SlidingSyncBuilder {
{
trace!(name, "frozen for list found");
let FrozenSlidingSyncList { rooms_count, rooms_list, rooms } = frozen_list;
list.set_from_cold(rooms_count, rooms_list);
let FrozenSlidingSyncList { maximum_number_of_rooms, rooms_list, rooms } =
frozen_list;
list.set_from_cold(maximum_number_of_rooms, rooms_list);
for (key, frozen_room) in rooms.into_iter() {
rooms_found.entry(key).or_insert_with(|| {

View File

@@ -7,7 +7,7 @@ use std::{
use eyeball::unique::Observable;
use eyeball_im::ObservableVector;
use im::Vector;
use imbl::Vector;
use ruma::{api::client::sync::sync_events::v4, events::StateEventType, UInt};
use super::{Error, RoomListEntry, SlidingSyncList, SlidingSyncMode, SlidingSyncState};
@@ -22,14 +22,13 @@ pub struct SlidingSyncListBuilder {
sync_mode: SlidingSyncMode,
sort: Vec<String>,
required_state: Vec<(StateEventType, String)>,
batch_size: u32,
full_sync_batch_size: u32,
full_sync_maximum_number_of_rooms_to_fetch: Option<u32>,
send_updates_for_items: bool,
limit: Option<u32>,
filters: Option<v4::SyncRequestListFilters>,
timeline_limit: Option<UInt>,
name: Option<String>,
state: SlidingSyncState,
rooms_count: Option<u32>,
rooms_list: Vector<RoomListEntry>,
ranges: Vec<(UInt, UInt)>,
}
@@ -43,14 +42,13 @@ impl SlidingSyncListBuilder {
(StateEventType::RoomEncryption, "".to_owned()),
(StateEventType::RoomTombstone, "".to_owned()),
],
batch_size: 20,
full_sync_batch_size: 20,
full_sync_maximum_number_of_rooms_to_fetch: None,
send_updates_for_items: false,
limit: None,
filters: None,
timeline_limit: None,
name: None,
state: SlidingSyncState::default(),
rooms_count: None,
rooms_list: Vector::new(),
ranges: Vec::new(),
}
@@ -79,9 +77,20 @@ impl SlidingSyncListBuilder {
self
}
/// How many rooms request at a time when doing a full-sync catch up.
pub fn batch_size(mut self, value: u32) -> Self {
self.batch_size = value;
/// When doing a full-sync, this method defines the value by which ranges of
/// rooms will be extended.
pub fn full_sync_batch_size(mut self, value: u32) -> Self {
self.full_sync_batch_size = value;
self
}
/// When doing a full-sync, this method defines the total limit of rooms to
/// load (it can be useful for gigantic accounts).
pub fn full_sync_maximum_number_of_rooms_to_fetch(
mut self,
value: impl Into<Option<u32>>,
) -> Self {
self.full_sync_maximum_number_of_rooms_to_fetch = value.into();
self
}
@@ -92,12 +101,6 @@ impl SlidingSyncListBuilder {
self
}
/// How many rooms request a total hen doing a full-sync catch up.
pub fn limit(mut self, value: impl Into<Option<u32>>) -> Self {
self.limit = value.into();
self
}
/// Any filters to apply to the query.
pub fn filters(mut self, value: Option<v4::SyncRequestListFilters>) -> Self {
self.filters = value;
@@ -123,31 +126,31 @@ impl SlidingSyncListBuilder {
self
}
/// Set the ranges to fetch
/// Set the ranges to fetch.
pub fn ranges<U: Into<UInt>>(mut self, range: Vec<(U, U)>) -> Self {
self.ranges = range.into_iter().map(|(a, b)| (a.into(), b.into())).collect();
self
}
/// Set a single range fetch
/// Set a single range fetch.
pub fn set_range<U: Into<UInt>>(mut self, from: U, to: U) -> Self {
self.ranges = vec![(from.into(), to.into())];
self
}
/// Set the ranges to fetch
/// Set the ranges to fetch.
pub fn add_range<U: Into<UInt>>(mut self, from: U, to: U) -> Self {
self.ranges.push((from.into(), to.into()));
self
}
/// Set the ranges to fetch
/// Set the ranges to fetch.
pub fn reset_ranges(mut self) -> Self {
self.ranges = Default::default();
self.ranges.clear();
self
}
/// Build the list
/// Build the list.
pub fn build(self) -> Result<SlidingSyncList> {
let mut rooms_list = ObservableVector::new();
rooms_list.append(self.rooms_list);
@@ -156,14 +159,15 @@ impl SlidingSyncListBuilder {
sync_mode: self.sync_mode,
sort: self.sort,
required_state: self.required_state,
batch_size: self.batch_size,
full_sync_batch_size: self.full_sync_batch_size,
send_updates_for_items: self.send_updates_for_items,
limit: self.limit,
full_sync_maximum_number_of_rooms_to_fetch: self
.full_sync_maximum_number_of_rooms_to_fetch,
filters: self.filters,
timeline_limit: Arc::new(StdRwLock::new(Observable::new(self.timeline_limit))),
name: self.name.ok_or(Error::BuildMissingField("name"))?,
state: Arc::new(StdRwLock::new(Observable::new(self.state))),
rooms_count: Arc::new(StdRwLock::new(Observable::new(self.rooms_count))),
maximum_number_of_rooms: Arc::new(StdRwLock::new(Observable::new(None))),
rooms_list: Arc::new(StdRwLock::new(rooms_list)),
ranges: Arc::new(StdRwLock::new(Observable::new(self.ranges))),
is_cold: Arc::new(AtomicBool::new(false)),

View File

@@ -15,7 +15,7 @@ pub use builder::*;
use eyeball::unique::Observable;
use eyeball_im::{ObservableVector, VectorDiff};
use futures_core::Stream;
use im::Vector;
use imbl::Vector;
pub(super) use request_generator::*;
use ruma::{api::client::sync::sync_events::v4, events::StateEventType, OwnedRoomId, RoomId, UInt};
use serde::{Deserialize, Serialize};
@@ -25,7 +25,7 @@ use super::{Error, FrozenSlidingSyncRoom, SlidingSyncRoom};
use crate::Result;
/// Holding a specific filtered list within the concept of sliding sync.
/// Main entrypoint to the SlidingSync
/// Main entrypoint to the `SlidingSync`:
///
/// ```no_run
/// # use futures::executor::block_on;
@@ -51,17 +51,19 @@ pub struct SlidingSyncList {
/// Required states to return per room
required_state: Vec<(StateEventType, String)>,
/// How many rooms request at a time when doing a full-sync catch up
batch_size: u32,
/// When doing a full-sync, the ranges of rooms to load are extended by this
/// `full_sync_batch_size` size.
full_sync_batch_size: u32,
/// When doing a full-sync, it is possible to limit the total number of
/// rooms to load by using this field.
full_sync_maximum_number_of_rooms_to_fetch: Option<u32>,
/// Whether the list should send `UpdatedAt`-Diff signals for rooms
/// that have changed
/// that have changed.
send_updates_for_items: bool,
/// How many rooms request a total hen doing a full-sync catch up
limit: Option<u32>,
/// Any filters to apply to the query
/// Any filters to apply to the query.
filters: Option<v4::SyncRequestListFilters>,
/// The maximum number of timeline events to query for
@@ -70,16 +72,22 @@ pub struct SlidingSyncList {
/// Name of this list to easily recognize them
pub name: String,
/// The state this list is in
/// The state this list is in.
state: Arc<StdRwLock<Observable<SlidingSyncState>>>,
/// The total known number of rooms,
rooms_count: Arc<StdRwLock<Observable<Option<u32>>>>,
/// The total number of rooms that is possible to interact with for the
/// given list.
///
/// It's not the total rooms that have been fetched. The server tells the
/// client that it's possible to fetch this amount of rooms maximum.
/// Since this number can change according to the list filters, it's
/// observable.
maximum_number_of_rooms: Arc<StdRwLock<Observable<Option<u32>>>>,
/// The rooms in order
/// The rooms in order.
rooms_list: Arc<StdRwLock<ObservableVector<RoomListEntry>>>,
/// The ranges windows of the list
/// The ranges windows of the list.
#[allow(clippy::type_complexity)] // temporarily
ranges: Arc<StdRwLock<Observable<Vec<(UInt, UInt)>>>>,
@@ -95,12 +103,15 @@ pub struct SlidingSyncList {
impl SlidingSyncList {
pub(crate) fn set_from_cold(
&mut self,
rooms_count: Option<u32>,
maximum_number_of_rooms: Option<u32>,
rooms_list: Vector<RoomListEntry>,
) {
Observable::set(&mut self.state.write().unwrap(), SlidingSyncState::Preload);
Observable::set(&mut self.state.write().unwrap(), SlidingSyncState::Preloaded);
self.is_cold.store(true, Ordering::SeqCst);
Observable::set(&mut self.rooms_count.write().unwrap(), rooms_count);
Observable::set(
&mut self.maximum_number_of_rooms.write().unwrap(),
maximum_number_of_rooms,
);
let mut lock = self.rooms_list.write().unwrap();
lock.clear();
@@ -119,7 +130,7 @@ impl SlidingSyncList {
.sync_mode(self.sync_mode.clone())
.sort(self.sort.clone())
.required_state(self.required_state.clone())
.batch_size(self.batch_size)
.full_sync_batch_size(self.full_sync_batch_size)
.ranges(self.ranges.read().unwrap().clone())
}
@@ -195,14 +206,15 @@ impl SlidingSyncList {
ObservableVector::subscribe(&self.rooms_list.read().unwrap())
}
/// Get the current rooms count.
pub fn rooms_count(&self) -> Option<u32> {
**self.rooms_count.read().unwrap()
/// Get the maximum number of rooms. See [`Self::maximum_number_of_rooms`]
/// to learn more.
pub fn maximum_number_of_rooms(&self) -> Option<u32> {
**self.maximum_number_of_rooms.read().unwrap()
}
/// Get a stream of rooms count.
pub fn rooms_count_stream(&self) -> impl Stream<Item = Option<u32>> {
Observable::subscribe(&self.rooms_count.read().unwrap())
pub fn maximum_number_of_rooms_stream(&self) -> impl Stream<Item = Option<u32>> {
Observable::subscribe(&self.maximum_number_of_rooms.read().unwrap())
}
/// Find the current valid position of the room in the list `room_list`.
@@ -283,26 +295,32 @@ impl SlidingSyncList {
#[instrument(skip(self, ops), fields(name = self.name, ops_count = ops.len()))]
pub(super) fn handle_response(
&self,
rooms_count: u32,
maximum_number_of_rooms: u32,
ops: &Vec<v4::SyncOp>,
ranges: &Vec<(usize, usize)>,
rooms: &Vec<OwnedRoomId>,
ranges: &Vec<(UInt, UInt)>,
updated_rooms: &Vec<OwnedRoomId>,
) -> Result<bool, Error> {
let current_rooms_count = **self.rooms_count.read().unwrap();
let ranges = ranges
.iter()
.map(|(start, end)| ((*start).try_into().unwrap(), (*end).try_into().unwrap()))
.collect::<Vec<(usize, usize)>>();
if current_rooms_count.is_none()
|| current_rooms_count == Some(0)
let current_maximum_number_of_rooms = **self.maximum_number_of_rooms.read().unwrap();
if current_maximum_number_of_rooms.is_none()
|| current_maximum_number_of_rooms == Some(0)
|| self.is_cold.load(Ordering::SeqCst)
{
debug!("first run, replacing rooms list");
// first response, we do that slightly differently
let mut rooms_list = ObservableVector::new();
rooms_list
.append(iter::repeat(RoomListEntry::Empty).take(rooms_count as usize).collect());
rooms_list.append(
iter::repeat(RoomListEntry::Empty).take(maximum_number_of_rooms as usize).collect(),
);
// then we apply it
room_ops(&mut rooms_list, ops, ranges)?;
room_ops(&mut rooms_list, ops, &ranges)?;
{
let mut lock = self.rooms_list.write().unwrap();
@@ -310,7 +328,10 @@ impl SlidingSyncList {
lock.append(rooms_list.into_inner());
}
Observable::set(&mut self.rooms_count.write().unwrap(), Some(rooms_count));
Observable::set(
&mut self.maximum_number_of_rooms.write().unwrap(),
Some(maximum_number_of_rooms),
);
self.is_cold.store(false, Ordering::SeqCst);
return Ok(true);
@@ -318,7 +339,7 @@ impl SlidingSyncList {
debug!("regular update");
let mut missing = rooms_count
let mut missing = maximum_number_of_rooms
.checked_sub(self.rooms_list.read().unwrap().len() as u32)
.unwrap_or_default();
let mut changed = false;
@@ -339,7 +360,7 @@ impl SlidingSyncList {
let mut rooms_list = self.rooms_list.write().unwrap();
if !ops.is_empty() {
room_ops(&mut rooms_list, ops, ranges)?;
room_ops(&mut rooms_list, ops, &ranges)?;
changed = true;
} else {
debug!("no rooms operations found");
@@ -347,16 +368,16 @@ impl SlidingSyncList {
}
{
let mut lock = self.rooms_count.write().unwrap();
let mut lock = self.maximum_number_of_rooms.write().unwrap();
if **lock != Some(rooms_count) {
Observable::set(&mut lock, Some(rooms_count));
if **lock != Some(maximum_number_of_rooms) {
Observable::set(&mut lock, Some(maximum_number_of_rooms));
changed = true;
}
}
if self.send_updates_for_items && !rooms.is_empty() {
let found_lists = self.find_rooms_in_list(rooms);
if self.send_updates_for_items && !updated_rooms.is_empty() {
let found_lists = self.find_rooms_in_list(updated_rooms);
if !found_lists.is_empty() {
debug!("room details found");
@@ -380,22 +401,24 @@ impl SlidingSyncList {
pub(super) fn request_generator(&self) -> SlidingSyncListRequestGenerator {
match &self.sync_mode {
SlidingSyncMode::PagingFullSync => {
SlidingSyncListRequestGenerator::new_with_paging_syncup(self.clone())
SlidingSyncListRequestGenerator::new_with_paging_full_sync(self.clone())
}
SlidingSyncMode::GrowingFullSync => {
SlidingSyncListRequestGenerator::new_with_growing_syncup(self.clone())
SlidingSyncListRequestGenerator::new_with_growing_full_sync(self.clone())
}
SlidingSyncMode::Selective => SlidingSyncListRequestGenerator::new_live(self.clone()),
SlidingSyncMode::Selective => {
SlidingSyncListRequestGenerator::new_selective(self.clone())
}
}
}
}
#[derive(Serialize, Deserialize)]
pub(super) struct FrozenSlidingSyncList {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub(super) rooms_count: Option<u32>,
#[serde(default, rename = "rooms_count", skip_serializing_if = "Option::is_none")]
pub(super) maximum_number_of_rooms: Option<u32>,
#[serde(default, skip_serializing_if = "Vector::is_empty")]
pub(super) rooms_list: Vector<RoomListEntry>,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
@@ -426,7 +449,7 @@ impl FrozenSlidingSyncList {
}
FrozenSlidingSyncList {
rooms_count: **source_list.rooms_count.read().unwrap(),
maximum_number_of_rooms: **source_list.maximum_number_of_rooms.read().unwrap(),
rooms_list,
rooms,
}
@@ -599,38 +622,44 @@ fn room_ops(
/// The state the [`SlidingSyncList`] is in.
///
/// The lifetime of a SlidingSync usually starts at a `Preload`, getting a fast
/// response for the first given number of Rooms, then switches into
/// `CatchingUp` during which the list fetches the remaining rooms, usually in
/// order, some times in batches. Once that is ready, it switches into `Live`.
/// The lifetime of a `SlidingSyncList` usually starts at `NotLoaded` or
/// `Preloaded` (if it is restored from a cache). When loading rooms in a list,
/// depending of the [`SlidingSyncMode`], it moves to `PartiallyLoaded` or
/// `FullyLoaded`.
///
/// If the client has been offline for a while, though, the SlidingSync might
/// return back to `CatchingUp` at any point.
/// If the client has been offline for a while, though, the `SlidingSyncList`
/// might return back to `PartiallyLoaded` at any point.
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SlidingSyncState {
/// Hasn't started yet
/// Sliding Sync has not started to load anything yet.
#[default]
Cold,
/// We are quickly preloading a preview of the most important rooms
Preload,
/// We are trying to load all remaining rooms, might be in batches
CatchingUp,
/// We are all caught up and now only sync the live responses.
Live,
#[serde(rename = "Cold")]
NotLoaded,
/// Sliding Sync has been preloaded, i.e. restored from a cache for example.
#[serde(rename = "Preload")]
Preloaded,
/// Updates are received from the loaded rooms, and new rooms are being
/// fetched in the background.
#[serde(rename = "CatchingUp")]
PartiallyLoaded,
/// Updates are received for all the loaded rooms, and all rooms have been
/// loaded!
#[serde(rename = "Live")]
FullyLoaded,
}
/// The mode by which the the [`SlidingSyncList`] is in fetching the data.
/// How a [`SlidingSyncList`] fetches the data.
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SlidingSyncMode {
/// Fully sync all rooms in the background, page by page of `batch_size`,
/// like `0..20`, `21..40`, 41..60` etc. assuming the `batch_size` is 20.
/// like `0..=19`, `20..=39`, `40..=59` etc. assuming the `batch_size` is 20.
#[serde(alias = "FullSync")]
PagingFullSync,
/// Fully sync all rooms in the background, with a growing window of
/// `batch_size`, like `0..20`, `0..40`, `0..60` etc. assuming the
/// `batch_size`, like `0..=19`, `0..=39`, `0..=59` etc. assuming the
/// `batch_size` is 20.
GrowingFullSync,
/// Only sync the specific windows defined
/// Only sync the specific defined windows/ranges.
#[default]
Selective,
}

View File

@@ -1,104 +1,157 @@
//! The logic to generate Sliding Sync list requests.
//!
//! Depending on the [`SlidingSyncMode`], the generated requests aren't the
//! same.
//!
//! In [`SlidingSyncMode::Selective`], it's pretty straightforward:
//!
//! * There is a set of ranges,
//! * Each request asks to load the particular ranges.
//!
//! In [`SlidingSyncMode::PagingFullSync`]:
//!
//! * There is a `batch_size`,
//! * Each request asks to load a new successive range containing exactly
//! `batch_size` rooms.
//!
//! In [`SlidingSyncMode::GrowingFullSync]:
//!
//! * There is a `batch_size`,
//! * Each request asks to load a new range, always starting from 0, but where
//! the end is incremented by `batch_size` everytime.
//!
//! The number of rooms to load is capped by the
//! [`SlidingSyncList::maximum_number_of_rooms`], i.e. the real number of
//! rooms it is possible to load. This value comes from the server.
//!
//! The number of rooms to load can _also_ be capped by the
//! [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`], i.e. a
//! user-specified limit representing the maximum number of rooms the user
//! actually wants to load.
use std::cmp::min;
use eyeball::unique::Observable;
use ruma::{api::client::sync::sync_events::v4, assign, OwnedRoomId, UInt};
use tracing::{error, instrument, trace};
use tracing::{error, instrument};
use super::{Error, SlidingSyncList, SlidingSyncState};
/// The kind of request generator.
#[derive(Debug)]
enum GeneratorKind {
GrowingFullSync { position: u32, batch_size: u32, limit: Option<u32>, live: bool },
PagingFullSync { position: u32, batch_size: u32, limit: Option<u32>, live: bool },
Live,
// Growing-mode (see [`SlidingSyncMode`]).
GrowingFullSync {
// Number of fetched rooms.
number_of_fetched_rooms: u32,
// Size of the batch, used to grow the range to fetch more rooms.
batch_size: u32,
// Maximum number of rooms to fetch (see
// [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`]).
maximum_number_of_rooms_to_fetch: Option<u32>,
// Whether all rooms have been loaded.
fully_loaded: bool,
},
// Paging-mode (see [`SlidingSyncMode`]).
PagingFullSync {
// Number of fetched rooms.
number_of_fetched_rooms: u32,
// Size of the batch, used to grow the range to fetch more rooms.
batch_size: u32,
// Maximum number of rooms to fetch (see
// [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`]).
maximum_number_of_rooms_to_fetch: Option<u32>,
        // Whether all rooms have been loaded.
fully_loaded: bool,
},
// Selective-mode (see [`SlidingSyncMode`]).
Selective,
}
/// A request generator for [`SlidingSyncList`].
#[derive(Debug)]
pub(in super::super) struct SlidingSyncListRequestGenerator {
/// The parent [`SlidingSyncList`] object that has created this request
/// generator.
list: SlidingSyncList,
ranges: Vec<(usize, usize)>,
/// The current range used by this request generator.
ranges: Vec<(UInt, UInt)>,
/// The kind of request generator.
kind: GeneratorKind,
}
impl SlidingSyncListRequestGenerator {
pub(super) fn new_with_paging_syncup(list: SlidingSyncList) -> Self {
let batch_size = list.batch_size;
let limit = list.limit;
let position = list
/// Create a new request generator configured for paging-mode.
pub(super) fn new_with_paging_full_sync(list: SlidingSyncList) -> Self {
let batch_size = list.full_sync_batch_size;
let maximum_number_of_rooms_to_fetch = list.full_sync_maximum_number_of_rooms_to_fetch;
        // If a range exists, let's consider it's been used to load existing rooms. So
// let's start from the end of the range. It can be useful when we resume a sync
// for example. Otherwise let's use the default value.
let number_of_fetched_rooms = list
.ranges
.read()
.unwrap()
.first()
.map(|(_start, end)| u32::try_from(*end).unwrap())
.map(|(_start, end)| u32::try_from(*end).unwrap().saturating_add(1))
.unwrap_or_default();
Self {
list,
ranges: Default::default(),
kind: GeneratorKind::PagingFullSync { position, batch_size, limit, live: false },
ranges: Vec::new(),
kind: GeneratorKind::PagingFullSync {
number_of_fetched_rooms,
batch_size,
maximum_number_of_rooms_to_fetch,
fully_loaded: false,
},
}
}
pub(super) fn new_with_growing_syncup(list: SlidingSyncList) -> Self {
let batch_size = list.batch_size;
let limit = list.limit;
let position = list
/// Create a new request generator configured for growing-mode.
pub(super) fn new_with_growing_full_sync(list: SlidingSyncList) -> Self {
let batch_size = list.full_sync_batch_size;
let maximum_number_of_rooms_to_fetch = list.full_sync_maximum_number_of_rooms_to_fetch;
        // If a range exists, let's consider it's been used to load existing rooms. So
// let's start from the end of the range. It can be useful when we resume a sync
// for example. Otherwise let's use the default value.
let number_of_fetched_rooms = list
.ranges
.read()
.unwrap()
.first()
.map(|(_start, end)| u32::try_from(*end).unwrap())
.map(|(_start, end)| u32::try_from(*end).unwrap().saturating_add(1))
.unwrap_or_default();
Self {
list,
ranges: Default::default(),
kind: GeneratorKind::GrowingFullSync { position, batch_size, limit, live: false },
ranges: Vec::new(),
kind: GeneratorKind::GrowingFullSync {
number_of_fetched_rooms,
batch_size,
maximum_number_of_rooms_to_fetch,
fully_loaded: false,
},
}
}
pub(super) fn new_live(list: SlidingSyncList) -> Self {
Self { list, ranges: Default::default(), kind: GeneratorKind::Live }
/// Create a new request generator configured for selective-mode.
pub(super) fn new_selective(list: SlidingSyncList) -> Self {
Self { list, ranges: Vec::new(), kind: GeneratorKind::Selective }
}
fn prefetch_request(
&mut self,
start: u32,
batch_size: u32,
limit: Option<u32>,
) -> v4::SyncRequestList {
let calculated_end = start + batch_size;
let mut end = match limit {
Some(limit) => min(limit, calculated_end),
_ => calculated_end,
};
end = match self.list.rooms_count() {
Some(total_room_count) => min(end, total_room_count - 1),
_ => end,
};
self.make_request_for_ranges(vec![(start.into(), end.into())])
}
#[instrument(skip(self), fields(name = self.list.name))]
fn make_request_for_ranges(&mut self, ranges: Vec<(UInt, UInt)>) -> v4::SyncRequestList {
/// Build a [`SyncRequestList`][v4::SyncRequestList].
#[instrument(skip(self), fields(name = self.list.name, ranges = ?&self.ranges))]
fn build_request(&self) -> v4::SyncRequestList {
let sort = self.list.sort.clone();
let required_state = self.list.required_state.clone();
let timeline_limit = **self.list.timeline_limit.read().unwrap();
let filters = self.list.filters.clone();
self.ranges = ranges
.iter()
.map(|(a, b)| {
(
usize::try_from(*a).expect("range is a valid u32"),
usize::try_from(*b).expect("range is a valid u32"),
)
})
.collect();
assign!(v4::SyncRequestList::default(), {
ranges: ranges,
ranges: self.ranges.clone(),
room_details: assign!(v4::RoomDetailsConfig::default(), {
required_state,
timeline_limit,
@@ -108,67 +161,152 @@ impl SlidingSyncListRequestGenerator {
})
}
// Handle the response from the server.
#[instrument(skip_all, fields(name = self.list.name, rooms_count, has_ops = !ops.is_empty()))]
pub(in super::super) fn handle_response(
&mut self,
rooms_count: u32,
maximum_number_of_rooms: u32,
ops: &Vec<v4::SyncOp>,
rooms: &Vec<OwnedRoomId>,
updated_rooms: &Vec<OwnedRoomId>,
) -> Result<bool, Error> {
let response = self.list.handle_response(rooms_count, ops, &self.ranges, rooms)?;
self.update_state(rooms_count.saturating_sub(1)); // index is 0 based, count is 1 based
let response =
self.list.handle_response(maximum_number_of_rooms, ops, &self.ranges, updated_rooms)?;
self.update_state(maximum_number_of_rooms);
Ok(response)
}
fn update_state(&mut self, max_index: u32) {
let Some((_start, range_end)) = self.ranges.first() else {
error!("Why don't we have any ranges?");
/// Update the state of the generator.
fn update_state(&mut self, maximum_number_of_rooms: u32) {
let Some(range_end) = self.ranges.first().map(|(_start, end)| u32::try_from(*end).unwrap()) else {
error!(name = self.list.name, "The request generator must have a range.");
return;
};
let end = if &(max_index as usize) < range_end { max_index } else { *range_end as u32 };
trace!(end, max_index, range_end, name = self.list.name, "updating state");
match &mut self.kind {
GeneratorKind::PagingFullSync { position, live, limit, .. }
| GeneratorKind::GrowingFullSync { position, live, limit, .. } => {
let max = limit.map(|limit| min(limit, max_index)).unwrap_or(max_index);
GeneratorKind::PagingFullSync {
number_of_fetched_rooms,
fully_loaded,
maximum_number_of_rooms_to_fetch,
..
}
| GeneratorKind::GrowingFullSync {
number_of_fetched_rooms,
fully_loaded,
maximum_number_of_rooms_to_fetch,
..
} => {
// Calculate the maximum bound for the range.
// At this step, the server has given us a maximum number of rooms for this
// list. That's our `range_maximum`.
let mut range_maximum = maximum_number_of_rooms;
trace!(end, max, name = self.list.name, "updating state");
// But maybe the user has defined a maximum number of rooms to fetch? In this
// case, let's take the minimum of the two.
if let Some(maximum_number_of_rooms_to_fetch) = maximum_number_of_rooms_to_fetch {
range_maximum = min(range_maximum, *maximum_number_of_rooms_to_fetch);
}
if end >= max {
// Switching to live mode.
// Finally, ranges are inclusive!
range_maximum = range_maximum.saturating_sub(1);
trace!(name = self.list.name, "going live");
// Now, we know what the maximum bound for the range is.
self.list.set_range(0, max);
*position = max;
*live = true;
// The current range hasn't reached its maximum, let's continue.
if range_end < range_maximum {
// Update the _list range_ to cover from 0 to `range_end`.
// The list range is different from the request generator (this) range.
self.list.set_range(0, range_end);
// Update the number of fetched rooms forward. Do not forget that ranges are
// inclusive, so let's add 1.
*number_of_fetched_rooms = range_end.saturating_add(1);
// The list is still not fully loaded.
*fully_loaded = false;
                    // Finally, let's update the list's state.
Observable::update_eq(&mut self.list.state.write().unwrap(), |state| {
*state = SlidingSyncState::Live;
*state = SlidingSyncState::PartiallyLoaded;
});
} else {
*position = end;
*live = false;
self.list.set_range(0, end);
}
                // Otherwise the current range has reached its maximum, so we switch to
                // `FullyLoaded` mode.
else {
// The range is covering the entire list, from 0 to its maximum.
self.list.set_range(0, range_maximum);
// The number of fetched rooms is set to the maximum too.
*number_of_fetched_rooms = range_maximum;
// And we update the `fully_loaded` marker.
*fully_loaded = true;
                    // Finally, let's update the list's state.
Observable::update_eq(&mut self.list.state.write().unwrap(), |state| {
*state = SlidingSyncState::CatchingUp;
*state = SlidingSyncState::FullyLoaded;
});
}
}
GeneratorKind::Live => {
GeneratorKind::Selective => {
// Selective mode always loads everything.
Observable::update_eq(&mut self.list.state.write().unwrap(), |state| {
*state = SlidingSyncState::Live;
*state = SlidingSyncState::FullyLoaded;
});
}
}
}
#[cfg(test)]
fn is_fully_loaded(&self) -> bool {
match self.kind {
GeneratorKind::PagingFullSync { fully_loaded, .. }
| GeneratorKind::GrowingFullSync { fully_loaded, .. } => fully_loaded,
GeneratorKind::Selective => true,
}
}
}
fn create_range(
start: u32,
desired_size: u32,
maximum_number_of_rooms_to_fetch: Option<u32>,
maximum_number_of_rooms: Option<u32>,
) -> Option<(UInt, UInt)> {
// Calculate the range.
// The `start` bound is given. Let's calculate the `end` bound.
// The `end`, by default, is `start` + `desired_size`.
let mut end = start + desired_size;
// But maybe the user has defined a maximum number of rooms to fetch? In this
// case, take the minimum of the two.
if let Some(maximum_number_of_rooms_to_fetch) = maximum_number_of_rooms_to_fetch {
end = min(end, maximum_number_of_rooms_to_fetch);
}
    // But there is more! The server can tell us what is the maximum number of rooms
    // fulfilling a particular list. For example, if the server says there are 42
    // rooms for a particular list, with a `start` of 40 and a `batch_size` of 20,
    // the range must be capped to `[40; 41]`; the range `[40; 59]` would be invalid
    // and could be rejected by the server.
if let Some(maximum_number_of_rooms) = maximum_number_of_rooms {
end = min(end, maximum_number_of_rooms);
}
// Finally, because the bounds of the range are inclusive, 1 is subtracted.
end = end.saturating_sub(1);
// Make sure `start` is smaller than `end`. It can happen if `start` is greater
// than `maximum_number_of_rooms_to_fetch` or `maximum_number_of_rooms`.
if start > end {
return None;
}
Some((start.into(), end.into()))
}
impl Iterator for SlidingSyncListRequestGenerator {
@@ -176,21 +314,357 @@ impl Iterator for SlidingSyncListRequestGenerator {
fn next(&mut self) -> Option<Self::Item> {
match self.kind {
GeneratorKind::PagingFullSync { live: true, .. }
| GeneratorKind::GrowingFullSync { live: true, .. }
| GeneratorKind::Live => {
let ranges = self.list.ranges.read().unwrap().clone();
// Cases where all rooms have been fully loaded.
GeneratorKind::PagingFullSync { fully_loaded: true, .. }
| GeneratorKind::GrowingFullSync { fully_loaded: true, .. }
| GeneratorKind::Selective => {
// Let's copy all the ranges from the parent `SlidingSyncList`, and build a
// request for them.
self.ranges = self.list.ranges.read().unwrap().clone();
Some(self.make_request_for_ranges(ranges))
// Here we go.
Some(self.build_request())
}
GeneratorKind::PagingFullSync { position, batch_size, limit, .. } => {
Some(self.prefetch_request(position, batch_size, limit))
GeneratorKind::PagingFullSync {
number_of_fetched_rooms,
batch_size,
maximum_number_of_rooms_to_fetch,
..
} => {
// In paging-mode, range starts at the number of fetched rooms. Since ranges are
// inclusive, and since the number of fetched rooms starts at 1,
// not at 0, there is no need to add 1 here.
let range_start = number_of_fetched_rooms;
let range_desired_size = batch_size;
// Create a new range, and use it as the current set of ranges.
self.ranges = vec![create_range(
range_start,
range_desired_size,
maximum_number_of_rooms_to_fetch,
self.list.maximum_number_of_rooms(),
)?];
// Here we go.
Some(self.build_request())
}
GeneratorKind::GrowingFullSync { position, batch_size, limit, .. } => {
Some(self.prefetch_request(0, position + batch_size, limit))
GeneratorKind::GrowingFullSync {
number_of_fetched_rooms,
batch_size,
maximum_number_of_rooms_to_fetch,
..
} => {
// In growing-mode, range always starts from 0. However, the end is growing by
// adding `batch_size` to the previous number of fetched rooms.
let range_start = 0;
let range_desired_size = number_of_fetched_rooms.saturating_add(batch_size);
self.ranges = vec![create_range(
range_start,
range_desired_size,
maximum_number_of_rooms_to_fetch,
self.list.maximum_number_of_rooms(),
)?];
// Here we go.
Some(self.build_request())
}
}
}
}
#[cfg(test)]
mod tests {
use ruma::uint;
use super::*;
#[test]
fn test_create_range_from() {
// From 0, we want 100 items.
assert_eq!(create_range(0, 100, None, None), Some((uint!(0), uint!(99))));
// From 100, we want 100 items.
assert_eq!(create_range(100, 100, None, None), Some((uint!(100), uint!(199))));
// From 0, we want 100 items, but there is a maximum number of rooms to fetch
// defined at 50.
assert_eq!(create_range(0, 100, Some(50), None), Some((uint!(0), uint!(49))));
// From 49, we want 100 items, but there is a maximum number of rooms to fetch
// defined at 50. There is 1 item to load.
assert_eq!(create_range(49, 100, Some(50), None), Some((uint!(49), uint!(49))));
// From 50, we want 100 items, but there is a maximum number of rooms to fetch
// defined at 50.
assert_eq!(create_range(50, 100, Some(50), None), None);
// From 0, we want 100 items, but there is a maximum number of rooms defined at
// 50.
assert_eq!(create_range(0, 100, None, Some(50)), Some((uint!(0), uint!(49))));
// From 49, we want 100 items, but there is a maximum number of rooms defined at
// 50. There is 1 item to load.
assert_eq!(create_range(49, 100, None, Some(50)), Some((uint!(49), uint!(49))));
// From 50, we want 100 items, but there is a maximum number of rooms defined at
// 50.
assert_eq!(create_range(50, 100, None, Some(50)), None);
// From 0, we want 100 items, but there is a maximum number of rooms to fetch
// defined at 75, and a maximum number of rooms defined at 50.
assert_eq!(create_range(0, 100, Some(75), Some(50)), Some((uint!(0), uint!(49))));
// From 0, we want 100 items, but there is a maximum number of rooms to fetch
// defined at 50, and a maximum number of rooms defined at 75.
assert_eq!(create_range(0, 100, Some(50), Some(75)), Some((uint!(0), uint!(49))));
}
macro_rules! assert_request_and_response {
(
list = $list:ident,
generator = $generator:ident,
maximum_number_of_rooms = $maximum_number_of_rooms:expr,
$(
next => {
ranges = $( [ $range_start:literal ; $range_end:literal ] ),+ ,
is_fully_loaded = $is_fully_loaded:expr,
list_state = $list_state:ident,
}
),*
$(,)*
) => {
// That's the initial state.
assert_eq!($list.state(), SlidingSyncState::NotLoaded);
$(
{
// Generate a new request.
let request = $generator.next().unwrap();
assert_eq!(request.ranges, [ $( (uint!( $range_start ), uint!( $range_end )) ),* ]);
// Fake a response.
let _ = $generator.handle_response($maximum_number_of_rooms, &vec![], &vec![]);
assert_eq!($generator.is_fully_loaded(), $is_fully_loaded);
assert_eq!($list.state(), SlidingSyncState::$list_state);
}
)*
};
}
#[test]
fn test_generator_paging_full_sync() {
let list = SlidingSyncList::builder()
.sync_mode(crate::SlidingSyncMode::PagingFullSync)
.name("testing")
.full_sync_batch_size(10)
.build()
.unwrap();
let mut generator = list.request_generator();
assert_request_and_response! {
list = list,
generator = generator,
maximum_number_of_rooms = 25,
next => {
ranges = [0; 9],
is_fully_loaded = false,
list_state = PartiallyLoaded,
},
next => {
ranges = [10; 19],
is_fully_loaded = false,
list_state = PartiallyLoaded,
},
// The maximum number of rooms is reached!
next => {
ranges = [20; 24],
is_fully_loaded = true,
list_state = FullyLoaded,
},
// Now it's fully loaded, so the same request must be produced everytime.
next => {
ranges = [0; 24], // the range starts at 0 now!
is_fully_loaded = true,
list_state = FullyLoaded,
},
next => {
ranges = [0; 24],
is_fully_loaded = true,
list_state = FullyLoaded,
},
};
}
#[test]
fn test_generator_paging_full_sync_with_a_maximum_number_of_rooms_to_fetch() {
let list = SlidingSyncList::builder()
.sync_mode(crate::SlidingSyncMode::PagingFullSync)
.name("testing")
.full_sync_batch_size(10)
.full_sync_maximum_number_of_rooms_to_fetch(22)
.build()
.unwrap();
let mut generator = list.request_generator();
assert_request_and_response! {
list = list,
generator = generator,
maximum_number_of_rooms = 25,
next => {
ranges = [0; 9],
is_fully_loaded = false,
list_state = PartiallyLoaded,
},
next => {
ranges = [10; 19],
is_fully_loaded = false,
list_state = PartiallyLoaded,
},
// The maximum number of rooms to fetch is reached!
next => {
ranges = [20; 21],
is_fully_loaded = true,
list_state = FullyLoaded,
},
// Now it's fully loaded, so the same request must be produced everytime.
next => {
ranges = [0; 21], // the range starts at 0 now!
is_fully_loaded = true,
list_state = FullyLoaded,
},
next => {
ranges = [0; 21],
is_fully_loaded = true,
list_state = FullyLoaded,
},
};
}
#[test]
fn test_generator_growing_full_sync() {
let list = SlidingSyncList::builder()
.sync_mode(crate::SlidingSyncMode::GrowingFullSync)
.name("testing")
.full_sync_batch_size(10)
.build()
.unwrap();
let mut generator = list.request_generator();
assert_request_and_response! {
list = list,
generator = generator,
maximum_number_of_rooms = 25,
next => {
ranges = [0; 9],
is_fully_loaded = false,
list_state = PartiallyLoaded,
},
next => {
ranges = [0; 19],
is_fully_loaded = false,
list_state = PartiallyLoaded,
},
// The maximum number of rooms is reached!
next => {
ranges = [0; 24],
is_fully_loaded = true,
list_state = FullyLoaded,
},
// Now it's fully loaded, so the same request must be produced everytime.
next => {
ranges = [0; 24],
is_fully_loaded = true,
list_state = FullyLoaded,
},
next => {
ranges = [0; 24],
is_fully_loaded = true,
list_state = FullyLoaded,
},
};
}
#[test]
fn test_generator_growing_full_sync_with_a_maximum_number_of_rooms_to_fetch() {
let list = SlidingSyncList::builder()
.sync_mode(crate::SlidingSyncMode::GrowingFullSync)
.name("testing")
.full_sync_batch_size(10)
.full_sync_maximum_number_of_rooms_to_fetch(22)
.build()
.unwrap();
let mut generator = list.request_generator();
assert_request_and_response! {
list = list,
generator = generator,
maximum_number_of_rooms = 25,
next => {
ranges = [0; 9],
is_fully_loaded = false,
list_state = PartiallyLoaded,
},
next => {
ranges = [0; 19],
is_fully_loaded = false,
list_state = PartiallyLoaded,
},
// The maximum number of rooms is reached!
next => {
ranges = [0; 21],
is_fully_loaded = true,
list_state = FullyLoaded,
},
// Now it's fully loaded, so the same request must be produced everytime.
next => {
ranges = [0; 21],
is_fully_loaded = true,
list_state = FullyLoaded,
},
next => {
ranges = [0; 21],
is_fully_loaded = true,
list_state = FullyLoaded,
},
};
}
#[test]
fn test_generator_selective() {
let list = SlidingSyncList::builder()
.sync_mode(crate::SlidingSyncMode::Selective)
.name("testing")
.ranges(vec![(0u32, 10), (42, 153)])
.build()
.unwrap();
let mut generator = list.request_generator();
assert_request_and_response! {
list = list,
generator = generator,
maximum_number_of_rooms = 25,
// The maximum number of rooms is reached directly!
next => {
ranges = [0; 10], [42; 153],
is_fully_loaded = true,
list_state = FullyLoaded,
},
// Now it's fully loaded, so the same request must be produced everytime.
next => {
ranges = [0; 10], [42; 153],
is_fully_loaded = true,
list_state = FullyLoaded,
},
next => {
ranges = [0; 10], [42; 153],
is_fully_loaded = true,
list_state = FullyLoaded,
}
};
}
}

View File

@@ -110,15 +110,15 @@
//! copy can be retrieved by calling `SlidingSync::list()`, providing the name
//! of the list. Next to the configuration settings (like name and
//! `timeline_limit`), the list provides the stateful
//! [`rooms_count`](SlidingSyncList::rooms_count),
//! [`maximum_number_of_rooms`](SlidingSyncList::maximum_number_of_rooms),
//! [`rooms_list`](SlidingSyncList::rooms_list) and
//! [`state`](SlidingSyncList::state):
//!
//! - `rooms_count` is the number of rooms _total_ there were found matching
//! the filters given.
//! - `rooms_list` is a vector of `rooms_count` [`RoomListEntry`]'s at the
//! current state. `RoomListEntry`'s only hold `the room_id` if given, the
//! [Rooms API](#rooms) holds the actual information about each room
//! - `maximum_number_of_rooms` is the number of rooms _total_ there were found
//! matching the filters given.
//! - `rooms_list` is a vector of `maximum_number_of_rooms` [`RoomListEntry`]'s
//! at the current state. `RoomListEntry`'s only hold `the room_id` if given,
//! the [Rooms API](#rooms) holds the actual information about each room
//! - `state` is a [`SlidingSyncMode`] signalling meta information about the
//! list and its stateful data — whether this is the state loaded from local
//! cache, whether the [full sync](#helper-lists) is in progress or whether
@@ -171,11 +171,11 @@
//!
//! ### Room List Entries
//!
//! As the room list of each list is a vec of the `rooms_count` len but a room
//! may only know of a subset of entries for sure at any given time, these
//! entries are wrapped in [`RoomListEntry`][]. This type, in close proximity to
//! the [specification][MSC], can be either `Empty`, `Filled` or `Invalidated`,
//! signaling the state of each entry position.
//! As the room list of each list is a vec of the `maximum_number_of_rooms` len
//! but a room may only know of a subset of entries for sure at any given time,
//! these entries are wrapped in [`RoomListEntry`][]. This type, in close
//! proximity to the [specification][MSC], can be either `Empty`, `Filled` or
//! `Invalidated`, signaling the state of each entry position.
//! - `Empty` we don't know what sits here at this position in the list.
//! - `Filled`: there is this `room_id` at this position.
//! - `Invalidated` in that sense means that we _knew_ what was here before, but
@@ -429,8 +429,9 @@
//! ## Caching
//!
//! All room data, for filled but also _invalidated_ rooms, including the entire
//! timeline events as well as all list `room_lists` and `rooms_count` are held
//! in memory (unless one `pop`s the list out).
//! timeline events as well as all list `room_lists` and
//! `maximum_number_of_rooms` are held in memory (unless one `pop`s the list
//! out).
//!
//! This is a purely in-memory cache layer though. If one wants Sliding Sync to
//! persist and load from cold (storage) cache, one needs to set its key with
@@ -511,8 +512,8 @@
//! .required_state(vec![
//! (StateEventType::RoomEncryption, "".to_owned())
//! ]) // only want to know if the room is encrypted
//! .batch_size(50) // grow the window by 50 items at a time
//! .limit(500) // only sync up the top 500 rooms
//! .full_sync_batch_size(50) // grow the window by 50 items at a time
//! .full_sync_maximum_number_of_rooms_to_fetch(500) // only sync up the top 500 rooms
//! .build()?;
//!
//! let active_list = SlidingSyncList::builder()
@@ -538,7 +539,7 @@
//!
//! let active_list = sliding_sync.list(&active_list_name).unwrap();
//! let list_state_stream = active_list.state_stream();
//! let list_count_stream = active_list.rooms_count_stream();
//! let list_count_stream = active_list.maximum_number_of_rooms_stream();
//! let list_stream = active_list.rooms_list_stream();
//!
//! tokio::spawn(async move {
@@ -585,7 +586,6 @@
//! # });
//! ```
//!
//!
//! [MSC]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575
//! [proxy]: https://github.com/matrix-org/sliding-sync
//! [ruma-types]: https://docs.rs/ruma/latest/ruma/api/client/sync/sync_events/v4/index.html
@@ -930,7 +930,7 @@ impl SlidingSync {
}
let update_summary = {
let mut rooms = Vec::new();
let mut updated_rooms = Vec::new();
let mut rooms_map = self.inner.rooms.write().unwrap();
for (room_id, mut room_data) in sliding_sync_response.rooms.into_iter() {
@@ -963,22 +963,26 @@ impl SlidingSync {
);
}
rooms.push(room_id);
updated_rooms.push(room_id);
}
let mut updated_lists = Vec::new();
for (name, updates) in sliding_sync_response.lists {
let Some(generator) = list_generators.get_mut(&name) else {
let Some(list_generator) = list_generators.get_mut(&name) else {
error!("Response for list `{name}` - unknown to us; skipping");
continue
};
let count: u32 =
let maximum_number_of_rooms: u32 =
updates.count.try_into().expect("the list total count convertible into u32");
if generator.handle_response(count, &updates.ops, &rooms)? {
if list_generator.handle_response(
maximum_number_of_rooms,
&updates.ops,
&updated_rooms,
)? {
updated_lists.push(name.clone());
}
}
@@ -988,7 +992,7 @@ impl SlidingSync {
self.update_to_device_since(to_device.next_batch);
}
UpdateSummary { lists: updated_lists, rooms }
UpdateSummary { lists: updated_lists, rooms: updated_rooms }
};
Ok(update_summary)
@@ -1292,7 +1296,7 @@ pub struct UpdateSummary {
#[cfg(test)]
mod test {
use assert_matches::assert_matches;
use ruma::room_id;
use ruma::{room_id, uint};
use serde_json::json;
use wiremock::MockServer;
@@ -1321,7 +1325,13 @@ mod test {
}))
.unwrap();
list.handle_response(10u32, &vec![full_window_update], &vec![(0, 9)], &vec![]).unwrap();
list.handle_response(
10u32,
&vec![full_window_update],
&vec![(uint!(0), uint!(9))],
&vec![],
)
.unwrap();
let a02 = room_id!("!A00002:matrix.example").to_owned();
let a05 = room_id!("!A00005:matrix.example").to_owned();
@@ -1343,7 +1353,13 @@ mod test {
}))
.unwrap();
list.handle_response(10u32, &vec![update], &vec![(0, 3), (8, 9)], &vec![]).unwrap();
list.handle_response(
10u32,
&vec![update],
&vec![(uint!(0), uint!(3)), (uint!(8), uint!(9))],
&vec![],
)
.unwrap();
assert_eq!(list.find_room_in_list(room_id!("!A00002:matrix.example")), Some(2));
assert_eq!(list.find_room_in_list(room_id!("!A00005:matrix.example")), None);

View File

@@ -9,7 +9,7 @@ use std::{
use eyeball::unique::Observable;
use eyeball_im::ObservableVector;
use im::Vector;
use imbl::Vector;
use matrix_sdk_base::deserialized_responses::SyncTimelineEvent;
use ruma::{
api::client::sync::sync_events::{v4, UnreadNotificationsCount},
@@ -355,7 +355,7 @@ impl From<&SlidingSyncRoom> for FrozenSlidingSyncRoom {
#[cfg(test)]
mod tests {
use im::vector;
use imbl::vector;
use matrix_sdk_base::deserialized_responses::TimelineEvent;
use ruma::{events::room::message::RoomMessageEventContent, RoomId};
use serde_json::json;

View File

@@ -21,11 +21,12 @@ pub async fn run_client(
.timeline_limit(10u32)
.sync_mode(config.full_sync_mode.into());
if let Some(size) = config.batch_size {
full_sync_view_builder = full_sync_view_builder.batch_size(size);
full_sync_view_builder = full_sync_view_builder.full_sync_batch_size(size);
}
if let Some(limit) = config.limit {
full_sync_view_builder = full_sync_view_builder.limit(limit);
full_sync_view_builder =
full_sync_view_builder.full_sync_maximum_number_of_rooms_to_fetch(limit);
}
if let Some(limit) = config.timeline_limit {
full_sync_view_builder = full_sync_view_builder.timeline_limit(limit);
@@ -66,7 +67,7 @@ pub async fn run_client(
let state = view.state();
ssync_state.set_view_state(state.clone());
if state == SlidingSyncState::Live {
if state == SlidingSyncState::FullyLoaded {
info!("Reached live sync");
break;
}

View File

@@ -135,7 +135,7 @@ impl SlidingSyncState {
}
pub fn total_rooms_count(&self) -> Option<u32> {
self.view.rooms_count()
self.view.maximum_number_of_rooms()
}
pub fn set_first_render_now(&mut self) {

View File

@@ -4,13 +4,13 @@ version = "0.1.0"
edition = "2021"
publish = false
[dependencies]
[dev-dependencies]
anyhow = { workspace = true }
assert_matches = "1.5.0"
eyeball = { workspace = true }
eyeball-im = { workspace = true }
futures = { version = "0.3.25" }
matrix-sdk-integration-testing = { path = "../matrix-sdk-integration-testing", features = ["helpers"] }
matrix-sdk = { path = "../../crates/matrix-sdk", features = ["experimental-sliding-sync", "testing"] }
tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }
futures = { version = "0.3.25" }
uuid = { version = "1.2.2" }
assert_matches = "1.5.0"

View File

File diff suppressed because it is too large Load Diff