Mirror of https://github.com/matrix-org/matrix-rust-sdk.git, synced 2026-05-14 11:05:32 -04:00
chore: rename restore_pos_from_database to share_pos
@@ -80,7 +80,7 @@ impl EncryptionSync {
         let mut builder = client
             .sliding_sync("encryption")
             .map_err(Error::SlidingSync)?
-            .restore_pos_from_database()
+            .share_pos()
             .with_to_device_extension(
                 assign!(v4::ToDeviceConfig::default(), { enabled: Some(true)}),
             )
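Note: for other callers of the sliding sync builder this change is a pure method rename, with the rest of the chain untouched. A minimal before/after sketch, assuming a `client` obtained as in the hunk above:

    // Before this commit:
    // let sync = client.sliding_sync("encryption")?.restore_pos_from_database().build().await?;
    // After this commit:
    let sync = client.sliding_sync("encryption")?.share_pos().build().await?;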
@@ -40,7 +40,7 @@ pub struct SlidingSyncBuilder {
     rooms: BTreeMap<OwnedRoomId, SlidingSyncRoom>,
     poll_timeout: Duration,
     network_timeout: Duration,
-    restore_pos_from_database: bool,
+    share_pos: bool,
 }
 
 impl SlidingSyncBuilder {
@@ -63,7 +63,7 @@ impl SlidingSyncBuilder {
             rooms: BTreeMap::new(),
             poll_timeout: Duration::from_secs(30),
             network_timeout: Duration::from_secs(30),
-            restore_pos_from_database: false,
+            share_pos: false,
         })
     }
 }
@@ -224,16 +224,16 @@ impl SlidingSyncBuilder {
         self
     }
 
-    /// Should the sliding sync instance restore its stream position from the
-    /// database?
+    /// Should the sliding sync instance share its sync position through
+    /// storage?
     ///
-    /// In general, sliding sync instances will cache the stream position (`pos`
-    /// field in the request) in internal fields. It can be useful, in
-    /// multi-process scenarios, to save it into the database so that one
+    /// In general, sliding sync instances will cache the sync position (`pos`
+    /// field in the request) in internal memory. It can be useful, in
+    /// multi-process scenarios, to save it into some shared storage so that one
     /// sliding sync instance running across two different processes can
-    /// continue with the same stream position it had before being stopped.
-    pub fn restore_pos_from_database(mut self) -> Self {
-        self.restore_pos_from_database = true;
+    /// continue with the same sync position it had before being stopped.
+    pub fn share_pos(mut self) -> Self {
+        self.share_pos = true;
         self
     }
 
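To make the multi-process scenario described in the new doc comment concrete, here is a minimal sketch, assuming two processes that each hold their own logged-in client; the "main-sync" identifier and the client_a/client_b names are illustrative only:

    // Process A: opt into sharing the sync position (`pos`) through storage.
    let sync_a = client_a.sliding_sync("main-sync")?.share_pos().build().await?;

    // Process B, started later with the same identifier: because `share_pos()` is
    // enabled, it restores the `pos` that process A last saved instead of starting
    // the sliding sync session from scratch.
    let sync_b = client_b.sliding_sync("main-sync")?.share_pos().build().await?;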
@@ -263,7 +263,7 @@ impl SlidingSyncBuilder {
             (None, None)
         };
 
-        let pos = if self.restore_pos_from_database { pos } else { None };
+        let pos = if self.share_pos { pos } else { None };
 
         let rooms = AsyncRwLock::new(self.rooms);
         let lists = AsyncRwLock::new(lists);
@@ -278,7 +278,7 @@ impl SlidingSyncBuilder {
 
             client,
             storage_key: self.storage_key,
-            restore_pos_from_database: self.restore_pos_from_database,
+            share_pos: self.share_pos,
 
             lists,
             rooms,
@@ -91,9 +91,9 @@ pub(super) struct SlidingSyncInner {
     /// The storage key to keep this cache at and load it from.
     storage_key: String,
 
-    /// Should this sliding sync instance try to restore its stream position
+    /// Should this sliding sync instance try to restore its sync position
     /// from the database?
-    restore_pos_from_database: bool,
+    share_pos: bool,
 
     /// Position markers.
     ///
@@ -469,7 +469,7 @@ impl SlidingSync {
         let to_device_enabled =
             self.inner.sticky.read().unwrap().data().extensions.to_device.enabled == Some(true);
 
-        let restored_fields = if self.inner.restore_pos_from_database || to_device_enabled {
+        let restored_fields = if self.inner.share_pos || to_device_enabled {
             let lists = self.inner.lists.read().await;
             restore_sliding_sync_state(&self.inner.client, &self.inner.storage_key, &lists).await?
         } else {
@@ -478,7 +478,7 @@ impl SlidingSync {
 
         // Update pos: either the one restored from the database, if any and the sliding
        // sync was configured so, or read it from the memory cache.
-        let pos = if self.inner.restore_pos_from_database {
+        let pos = if self.inner.share_pos {
             if let Some(fields) = &restored_fields {
                 // Override the memory one with the database one, for consistency.
                 if fields.pos != position_guard.pos {
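The two hunks above change how `pos` is bootstrapped: with `share_pos` enabled, the value restored from shared storage takes precedence over the in-memory one, as the comment describes. A loose sketch of that precedence only (the hunk is truncated, so `restored_fields` and `pos_in_memory` here are placeholders, not the exact code):

    // Sketch: with `share_pos`, prefer the position restored from storage, for
    // consistency across processes; otherwise keep the one cached in memory.
    let pos = if share_pos {
        restored_fields.and_then(|fields| fields.pos).or(pos_in_memory)
    } else {
        pos_in_memory
    };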
@@ -1738,8 +1738,7 @@ mod tests {
 
         let client = logged_in_client(Some(server.uri())).await;
 
-        let sliding_sync =
-            client.sliding_sync("elephant-sync")?.restore_pos_from_database().build().await?;
+        let sliding_sync = client.sliding_sync("elephant-sync")?.share_pos().build().await?;
 
         // `pos` is `None` to start with.
         {
@@ -1792,8 +1791,7 @@ mod tests {
 
         // Recreating a sliding sync with the same ID will reload it too.
         {
-            let sliding_sync =
-                client.sliding_sync("elephant-sync")?.restore_pos_from_database().build().await?;
+            let sliding_sync = client.sliding_sync("elephant-sync")?.share_pos().build().await?;
             assert_eq!(sliding_sync.inner.position.lock().await.pos.as_deref(), Some("42"));
 
             let (request, _, _, _) =
@@ -1815,8 +1813,7 @@ mod tests {
 
         // And new sliding syncs with the same ID won't find it either.
        {
-            let sliding_sync =
-                client.sliding_sync("elephant-sync")?.restore_pos_from_database().build().await?;
+            let sliding_sync = client.sliding_sync("elephant-sync")?.share_pos().build().await?;
             assert!(sliding_sync.inner.position.lock().await.pos.is_none());
 
             let (request, _, _, _) =