Mirror of https://github.com/matrix-org/matrix-rust-sdk.git, synced 2026-05-08 07:56:55 -04:00
chore: address new clippy recommendations
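All of the hunks below are the same mechanical clean-up: a nested `if` / `if let` is collapsed into a single `if` using a let chain (conditions and `let` bindings joined by `&&`), available on the Rust 2024 edition and suggested by newer clippy, most likely via the `collapsible_if` lint (one test hunk also drops a redundant trailing semicolon). A minimal sketch of the pattern, with illustrative names that are not from the SDK:

    // Before: two nesting levels; newer clippy flags this shape.
    fn log_positive_nested(value: Option<i32>) {
        if let Some(n) = value {
            if n > 0 {
                println!("positive: {n}");
            }
        }
    }

    // After: a single let chain; `n` bound by the `let` is usable in the
    // following condition and in the body, exactly as before.
    fn log_positive_chained(value: Option<i32>) {
        if let Some(n) = value
            && n > 0
        {
            println!("positive: {n}");
        }
    }

The chain evaluates left to right and short-circuits, so the rewrite preserves behaviour while removing one level of indentation.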
@@ -536,10 +536,9 @@ impl MatrixAuth {
         if let Some(save_session_callback) =
             self.client.inner.auth_ctx.save_session_callback.get()
+            && let Err(err) = save_session_callback(self.client.clone())
         {
-            if let Err(err) = save_session_callback(self.client.clone()) {
-                error!("when saving session after refresh: {err}");
-            }
+            error!("when saving session after refresh: {err}");
         }
 
         _ = self
 
@@ -193,14 +193,14 @@ impl CrossProcessRefreshLockGuard
         let new_hash = compute_session_hash(trusted_tokens);
         trace!("Trusted OAuth 2.0 tokens have hash {new_hash:?}; db had {:?}", self.db_hash);
 
-        if let Some(db_hash) = &self.db_hash {
-            if new_hash != *db_hash {
-                // That should never happen, unless we got into an impossible situation!
-                // In this case, we assume the value returned by the callback is always
-                // correct, so override that in the database too.
-                tracing::error!("error: DB and trusted disagree. Overriding in DB.");
-                self.save_in_database(&new_hash).await?;
-            }
+        if let Some(db_hash) = &self.db_hash
+            && new_hash != *db_hash
+        {
+            // That should never happen, unless we got into an impossible situation!
+            // In this case, we assume the value returned by the callback is always
+            // correct, so override that in the database too.
+            tracing::error!("error: DB and trusted disagree. Overriding in DB.");
+            self.save_in_database(&new_hash).await?;
         }
 
         self.save_in_memory(new_hash);
@@ -1045,22 +1045,20 @@ impl OAuth {
         // Enable the cross-process lock for refreshes, if needs be.
         self.deferred_enable_cross_process_refresh_lock().await;
 
-        if let Some(cross_process_manager) = self.ctx().cross_process_token_refresh_manager.get() {
-            if let Some(tokens) = self.client.session_tokens() {
-                let mut cross_process_guard = cross_process_manager.spin_lock().await?;
+        if let Some(cross_process_manager) = self.ctx().cross_process_token_refresh_manager.get()
+            && let Some(tokens) = self.client.session_tokens()
+        {
+            let mut cross_process_guard = cross_process_manager.spin_lock().await?;
 
-                if cross_process_guard.hash_mismatch {
-                    // At this point, we're finishing a login while another process had written
-                    // something in the database. It's likely the information in the database is
-                    // just outdated and wasn't properly updated, but display a warning, just in
-                    // case this happens frequently.
-                    warn!(
-                        "unexpected cross-process hash mismatch when finishing login (see comment)"
-                    );
-                }
+            if cross_process_guard.hash_mismatch {
+                // At this point, we're finishing a login while another process had written
+                // something in the database. It's likely the information in the database is
+                // just outdated and wasn't properly updated, but display a warning, just in
+                // case this happens frequently.
+                warn!("unexpected cross-process hash mismatch when finishing login (see comment)");
+            }
 
-                cross_process_guard.save_in_memory_and_db(&tokens).await?;
-            }
-        }
+            cross_process_guard.save_in_memory_and_db(&tokens).await?;
+        }
 
         Ok(())
@@ -1362,12 +1362,11 @@ impl Client {
     /// * `login_well_known` - The `well_known` field from a successful login
     ///   response.
     pub(crate) fn maybe_update_login_well_known(&self, login_well_known: Option<&DiscoveryInfo>) {
-        if self.inner.respect_login_well_known {
-            if let Some(well_known) = login_well_known {
-                if let Ok(homeserver) = Url::parse(&well_known.homeserver.base_url) {
-                    self.set_homeserver(homeserver);
-                }
-            }
+        if self.inner.respect_login_well_known
+            && let Some(well_known) = login_well_known
+            && let Ok(homeserver) = Url::parse(&well_known.homeserver.base_url)
+        {
+            self.set_homeserver(homeserver);
         }
     }
@@ -1576,13 +1575,12 @@ impl Client {
         }
 
         #[cfg(feature = "e2e-encryption")]
-        if self.inner.enable_share_history_on_invite {
-            if let Some(inviter) =
+        if self.inner.enable_share_history_on_invite
+            && let Some(inviter) =
                 pre_join_room_info.as_ref().and_then(|info| info.inviter.as_ref())
-            {
-                crate::room::shared_room_history::maybe_accept_key_bundle(&room, inviter.user_id())
-                    .await?;
-            }
-        }
+        {
+            crate::room::shared_room_history::maybe_accept_key_bundle(&room, inviter.user_id())
+                .await?;
+        }
 
         // Suppress "unused variable" and "unused field" lints
@@ -1726,13 +1724,13 @@ impl Client {
 
         let joined_room = Room::new(self.clone(), base_room);
 
-        if is_direct_room && !invite.is_empty() {
-            if let Err(error) =
+        if is_direct_room
+            && !invite.is_empty()
+            && let Err(error) =
                 self.account().mark_as_dm(joined_room.room_id(), invite.as_slice()).await
-            {
-                // FIXME: Retry in the background
-                error!("Failed to mark room as DM: {error}");
-            }
-        }
+        {
+            // FIXME: Retry in the background
+            error!("Failed to mark room as DM: {error}");
+        }
 
         Ok(joined_room)
@@ -454,21 +454,21 @@ impl Backups {
 
         let backup_keys = olm_machine.store().load_backup_keys().await?;
 
-        if let Some(decryption_key) = backup_keys.decryption_key {
-            if let Some(version) = backup_keys.backup_version {
-                let request =
-                    get_backup_keys_for_room::v3::Request::new(version.clone(), room_id.to_owned());
-                let response = self.client.send(request).await?;
+        if let Some(decryption_key) = backup_keys.decryption_key
+            && let Some(version) = backup_keys.backup_version
+        {
+            let request =
+                get_backup_keys_for_room::v3::Request::new(version.clone(), room_id.to_owned());
+            let response = self.client.send(request).await?;
 
-                // Transform response to standard format (map of room ID -> room key).
-                let response = get_backup_keys::v3::Response::new(BTreeMap::from([(
-                    room_id.to_owned(),
-                    RoomKeyBackup::new(response.sessions),
-                )]));
+            // Transform response to standard format (map of room ID -> room key).
+            let response = get_backup_keys::v3::Response::new(BTreeMap::from([(
+                room_id.to_owned(),
+                RoomKeyBackup::new(response.sessions),
+            )]));
 
-                self.handle_downloaded_room_keys(response, decryption_key, &version, olm_machine)
-                    .await?;
-            }
+            self.handle_downloaded_room_keys(response, decryption_key, &version, olm_machine)
+                .await?;
         }
 
         Ok(())
@@ -1830,10 +1830,10 @@ impl Encryption {
             // network requests
             this.update_verification_state().await;
 
-            if this.settings().auto_enable_cross_signing {
-                if let Err(e) = this.bootstrap_cross_signing_if_needed(auth_data).await {
-                    error!("Couldn't bootstrap cross signing {e:?}");
-                }
+            if this.settings().auto_enable_cross_signing
+                && let Err(e) = this.bootstrap_cross_signing_if_needed(auth_data).await
+            {
+                error!("Couldn't bootstrap cross signing {e:?}");
             }
 
             if let Err(e) = this.backups().setup_and_resume().await {
@@ -1852,10 +1852,10 @@ impl Encryption {
     pub async fn wait_for_e2ee_initialization_tasks(&self) {
         let task = self.client.inner.e2ee.tasks.lock().setup_e2ee.take();
 
-        if let Some(task) = task {
-            if let Err(err) = task.await {
-                warn!("Error when initializing backups: {err}");
-            }
+        if let Some(task) = task
+            && let Err(err) = task.await
+        {
+            warn!("Error when initializing backups: {err}");
         }
     }
 
@@ -307,19 +307,15 @@ impl Recovery {
         // Then we finally set the event to an empty JSON content.
         if let Ok(Some(default_event)) =
             self.client.encryption().secret_storage().fetch_default_key_id().await
+            && let Ok(default_event) = default_event.deserialize()
         {
-            if let Ok(default_event) = default_event.deserialize() {
-                let key_id = default_event.key_id;
-                let event_type = GlobalAccountDataEventType::SecretStorageKey(key_id);
+            let key_id = default_event.key_id;
+            let event_type = GlobalAccountDataEventType::SecretStorageKey(key_id);
 
-                self.client
-                    .account()
-                    .set_account_data_raw(
-                        event_type,
-                        Raw::new(&json!({})).expect("").cast_unchecked(),
-                    )
-                    .await?;
-            }
+            self.client
+                .account()
+                .set_account_data_raw(event_type, Raw::new(&json!({})).expect("").cast_unchecked())
+                .await?;
         }
 
         // Now let's "delete" the actual `m.secret.storage.default_key` event.
@@ -688,13 +684,13 @@ impl Recovery {
 
     #[instrument(skip_all)]
    pub(crate) async fn update_state_after_keys_query(&self, response: &get_keys::v3::Response) {
-        if let Some(user_id) = self.client.user_id() {
-            if response.master_keys.contains_key(user_id) {
-                // TODO: This is unnecessarily expensive, we could let the crypto crate notify
-                // us that our private keys got erased... But, the OlmMachine
-                // gets recreated and... You know the drill by now...
-                self.update_recovery_state_no_fail().await;
-            }
+        if let Some(user_id) = self.client.user_id()
+            && response.master_keys.contains_key(user_id)
+        {
+            // TODO: This is unnecessarily expensive, we could let the crypto crate notify
+            // us that our private keys got erased... But, the OlmMachine
+            // gets recreated and... You know the drill by now...
+            self.update_recovery_state_no_fail().await;
         }
     }
 }
@@ -170,15 +170,15 @@ impl BackupDownloadTask {
         room_id: OwnedRoomId,
         event: Raw<OriginalSyncRoomEncryptedEvent>,
     ) {
-        if let Ok(deserialized_event) = event.deserialize() {
-            if let EncryptedEventScheme::MegolmV1AesSha2(c) = deserialized_event.content.scheme {
-                let _ = self.sender.send(RoomKeyDownloadRequest {
-                    room_id,
-                    event_id: deserialized_event.event_id,
-                    event,
-                    megolm_session_id: c.session_id,
-                });
-            }
+        if let Ok(deserialized_event) = event.deserialize()
+            && let EncryptedEventScheme::MegolmV1AesSha2(c) = deserialized_event.content.scheme
+        {
+            let _ = self.sender.send(RoomKeyDownloadRequest {
+                room_id,
+                event_id: deserialized_event.event_id,
+                event,
+                megolm_session_id: c.session_id,
+            });
         }
     }
 
@@ -747,11 +747,9 @@ impl EventCache {
                 if let Some(index_operation) =
                     search::parse_timeline_event(&room_cache, &event, &redaction_rules)
                         .await
+                    && let Err(err) = search_index_guard.execute(index_operation, &room_id)
                 {
-                    if let Err(err) = search_index_guard.execute(index_operation, &room_id)
-                    {
-                        warn!("Failed to handle event for indexing: {err}")
-                    }
+                    warn!("Failed to handle event for indexing: {err}")
                 }
             }
         }
@@ -93,19 +93,16 @@ async fn handle_room_redaction(
     cache: &RoomEventCache,
     rules: &RedactionRules,
 ) -> Option<RoomIndexOperation> {
-    if let Some(redacted_event_id) = event.redacts(rules) {
-        if let Some(redacted_event) = cache.find_event(redacted_event_id).await {
-            if let Ok(AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::RoomMessage(
-                redacted_event,
-            ))) = redacted_event.raw().deserialize()
-            {
-                if let Some(redacted_event) = redacted_event.as_original() {
-                    return handle_possible_edit(redacted_event, cache)
-                        .await
-                        .or(Some(RoomIndexOperation::Remove(redacted_event.event_id.clone())));
-                }
-            }
-        }
+    if let Some(redacted_event_id) = event.redacts(rules)
+        && let Some(redacted_event) = cache.find_event(redacted_event_id).await
+        && let Ok(AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::RoomMessage(
+            redacted_event,
+        ))) = redacted_event.raw().deserialize()
+        && let Some(redacted_event) = redacted_event.as_original()
+    {
+        return handle_possible_edit(redacted_event, cache)
+            .await
+            .or(Some(RoomIndexOperation::Remove(redacted_event.event_id.clone())));
     }
     None
 }
@@ -931,23 +931,23 @@ fn filter_timeline_event(
             AnySyncTimelineEvent::State(state) => {
                 // … but we make an exception for knocked state events _if_ the current user
                 // can either accept or decline them.
-                if let AnySyncStateEvent::RoomMember(member) = state {
-                    if matches!(member.membership(), MembershipState::Knock) {
-                        let can_accept_or_decline_knocks = match power_levels {
-                            Some((own_user_id, room_power_levels)) => {
-                                room_power_levels.user_can_invite(own_user_id)
-                                    || room_power_levels.user_can_kick(own_user_id)
-                            }
-                            _ => false,
-                        };
-
-                        // The current user can act on the knock changes, so they should be
-                        // displayed
-                        if can_accept_or_decline_knocks {
-                            // We can only decide whether the user can accept or decline knocks if the
-                            // event isn't redacted.
-                            return matches!(member, SyncStateEvent::Original(_));
-                        }
-                    }
+                if let AnySyncStateEvent::RoomMember(member) = state
+                    && matches!(member.membership(), MembershipState::Knock)
+                {
+                    let can_accept_or_decline_knocks = match power_levels {
+                        Some((own_user_id, room_power_levels)) => {
+                            room_power_levels.user_can_invite(own_user_id)
+                                || room_power_levels.user_can_kick(own_user_id)
+                        }
+                        _ => false,
+                    };
+
+                    // The current user can act on the knock changes, so they should be
+                    // displayed
+                    if can_accept_or_decline_knocks {
+                        // We can only decide whether the user can accept or decline knocks if the
+                        // event isn't redacted.
+                        return matches!(member, SyncStateEvent::Original(_));
+                    }
                 }
 
@@ -276,10 +276,10 @@ impl Media {
     ) -> Result<()> {
         // Do a best-effort at reporting an expired MXC URI here; otherwise the server
         // may complain about it later.
-        if let Some(expire_date) = uri.expire_date {
-            if MilliSecondsSinceUnixEpoch::now() >= expire_date {
-                return Err(Error::Media(MediaError::ExpiredPreallocatedMxcUri));
-            }
+        if let Some(expire_date) = uri.expire_date
+            && MilliSecondsSinceUnixEpoch::now() >= expire_date
+        {
+            return Err(Error::Media(MediaError::ExpiredPreallocatedMxcUri));
         }
 
         let timeout = std::cmp::max(
@@ -426,12 +426,11 @@ impl Media {
         }
 
         // Read from the cache.
-        if use_cache {
-            if let Some(content) =
+        if use_cache
+            && let Some(content) =
                 self.client.media_store().lock().await?.get_media_content(request).await?
-            {
-                return Ok(content);
-            }
+        {
+            return Ok(content);
         }
 
         let request_config = self
@@ -142,11 +142,11 @@ impl Rules {
         match rule {
             AnyPushRuleRef::Override(r) | AnyPushRuleRef::Underride(r) => {
                 for condition in &r.conditions {
-                    if let PushCondition::EventMatch { key, pattern } = condition {
-                        if key == "room_id" {
-                            room_ids.insert(pattern.clone());
-                            break;
-                        }
+                    if let PushCondition::EventMatch { key, pattern } = condition
+                        && key == "room_id"
+                    {
+                        room_ids.insert(pattern.clone());
+                        break;
                     }
                 }
             }
@@ -173,19 +173,19 @@ impl Rules {
         #[allow(deprecated)]
         if let Some(rule) =
             self.ruleset.get(RuleKind::Override, PredefinedOverrideRuleId::ContainsDisplayName)
+            && rule.enabled()
+            && rule.triggers_notification()
         {
-            if rule.enabled() && rule.triggers_notification() {
-                return true;
-            }
+            return true;
         }
 
         #[allow(deprecated)]
         if let Some(rule) =
             self.ruleset.get(RuleKind::Content, PredefinedContentRuleId::ContainsUserName)
+            && rule.enabled()
+            && rule.triggers_notification()
         {
-            if rule.enabled() && rule.triggers_notification() {
-                return true;
-            }
+            return true;
         }
 
         false
@@ -389,11 +389,11 @@ impl PaginableRoom for Room {
                 // If the error was a 404, then the event wasn't found on the server;
                 // special case this to make it easy to react to
                 // such an error.
-                if let Some(error) = err.as_client_api_error() {
-                    if error.status_code == 404 {
-                        // Event not found
-                        return Err(PaginatorError::EventNotFound(event_id.to_owned()));
-                    }
+                if let Some(error) = err.as_client_api_error()
+                    && error.status_code == 404
+                {
+                    // Event not found
+                    return Err(PaginatorError::EventNotFound(event_id.to_owned()));
                 }
 
                 // Otherwise, just return a wrapped error.
@@ -125,10 +125,9 @@ impl Room {
                 // Ignore decline for other unrelated notification events.
                 if let Some(declined_event_id) =
                     event.as_original().map(|ev| ev.content.relates_to.event_id.clone())
+                    && declined_event_id == own_notification_event_id
                 {
-                    if declined_event_id == own_notification_event_id {
-                        let _ = sender.send(event.sender().to_owned());
-                    }
+                    let _ = sender.send(event.sender().to_owned());
                 }
             }
         });
@@ -858,7 +858,7 @@ mod tests {
                 (_, Some(m)) => {
                     assert_eq!(*m.membership(), new_state);
                 }
-            };
+            }
         }
 
         async fn bob_is_pinned(&self) -> bool {
@@ -708,10 +708,9 @@ impl Room {
         if let Ok(AnySyncTimelineEvent::MessageLike(AnySyncMessageLikeEvent::RoomEncrypted(
             SyncMessageLikeEvent::Original(_),
         ))) = event.deserialize_as::<AnySyncTimelineEvent>()
+            && let Ok(event) = self.decrypt_event(event.cast_ref_unchecked(), push_ctx).await
         {
-            if let Ok(event) = self.decrypt_event(event.cast_ref_unchecked(), push_ctx).await {
-                return event;
-            }
+            return event;
         }
 
         let mut event = TimelineEvent::from_plaintext(event.cast());
@@ -3589,12 +3588,12 @@ impl Room {
         let _response = self.client.send(request).await?;
 
         // If it was a DM, remove the room from the `m.direct` global account data.
-        if self.inner.direct_targets_length() != 0 {
-            if let Err(e) = self.set_is_direct(false).await {
-                // It is not important whether we managed to remove the room, it will not have
-                // any consequences, so just log the error.
-                warn!(room_id = ?self.room_id(), "failed to remove room from m.direct account data: {e}");
-            }
+        if self.inner.direct_targets_length() != 0
+            && let Err(e) = self.set_is_direct(false).await
+        {
+            // It is not important whether we managed to remove the room, it will not have
+            // any consequences, so just log the error.
+            warn!(room_id = ?self.room_id(), "failed to remove room from m.direct account data: {e}");
         }
 
         self.client.base_client().forget_room(self.inner.room_id()).await?;
@@ -383,10 +383,11 @@ fn ensure_server_names_is_not_empty(
 ) -> Vec<OwnedServerName> {
     let mut server_names = server_names;
 
-    if let Some((own_server, alias_server)) = own_server_name.zip(room_or_alias_id.server_name()) {
-        if server_names.is_empty() && own_server != alias_server {
-            server_names.push(alias_server.to_owned());
-        }
+    if let Some((own_server, alias_server)) = own_server_name.zip(room_or_alias_id.server_name())
+        && server_names.is_empty()
+        && own_server != alias_server
+    {
+        server_names.push(alias_server.to_owned());
     }
 
     server_names
@@ -702,39 +702,38 @@ impl QueueStorage {
         let mut removed_dependent_upload = false;
         let mut removed_dependent_event = false;
 
-        if let Some(thumbnail_txn) = &handles.upload_thumbnail_txn {
-            if store.remove_send_queue_request(&self.room_id, thumbnail_txn).await? {
-                // The thumbnail upload existed as a request: either it was pending (something
-                // else was being sent), or it was actively being sent.
-                trace!("could remove thumbnail request, removing 2 dependent requests now");
+        if let Some(thumbnail_txn) = &handles.upload_thumbnail_txn
+            && store.remove_send_queue_request(&self.room_id, thumbnail_txn).await?
+        {
+            // The thumbnail upload existed as a request: either it was pending (something
+            // else was being sent), or it was actively being sent.
+            trace!("could remove thumbnail request, removing 2 dependent requests now");
 
-                // 1. Try to abort sending using the being_sent info, in case it was active.
-                if let Some(info) = guard.being_sent.as_ref() {
-                    if info.transaction_id == *thumbnail_txn {
-                        // SAFETY: we knew it was Some(), two lines above.
-                        let info = guard.being_sent.take().unwrap();
-                        if info.cancel_upload() {
-                            trace!("aborted ongoing thumbnail upload");
-                        }
-                    }
-                }
+            // 1. Try to abort sending using the being_sent info, in case it was active.
+            if let Some(info) = guard.being_sent.as_ref()
+                && info.transaction_id == *thumbnail_txn
+            {
+                // SAFETY: we knew it was Some(), two lines above.
+                let info = guard.being_sent.take().unwrap();
+                if info.cancel_upload() {
+                    trace!("aborted ongoing thumbnail upload");
+                }
+            }
 
-                // 2. Remove the dependent requests.
-                removed_dependent_upload = store
-                    .remove_dependent_queued_request(&self.room_id, &upload_file_as_dependent)
-                    .await?;
+            // 2. Remove the dependent requests.
+            removed_dependent_upload = store
+                .remove_dependent_queued_request(&self.room_id, &upload_file_as_dependent)
+                .await?;
 
-                if !removed_dependent_upload {
-                    warn!("unable to find the dependent file upload request");
-                }
+            if !removed_dependent_upload {
+                warn!("unable to find the dependent file upload request");
+            }
 
-                removed_dependent_event = store
-                    .remove_dependent_queued_request(&self.room_id, &event_as_dependent)
-                    .await?;
+            removed_dependent_event =
+                store.remove_dependent_queued_request(&self.room_id, &event_as_dependent).await?;
 
-                if !removed_dependent_event {
-                    warn!("unable to find the dependent media event upload request");
-                }
-            }
+            if !removed_dependent_event {
+                warn!("unable to find the dependent media event upload request");
+            }
         }
 
@@ -751,13 +750,13 @@ impl QueueStorage {
             trace!("could remove file upload request, removing 1 dependent request");
 
             // 1. Try to abort sending using the being_sent info, in case it was active.
-            if let Some(info) = guard.being_sent.as_ref() {
-                if info.transaction_id == handles.upload_file_txn {
-                    // SAFETY: we knew it was Some(), two lines above.
-                    let info = guard.being_sent.take().unwrap();
-                    if info.cancel_upload() {
-                        trace!("aborted ongoing file upload");
-                    }
+            if let Some(info) = guard.being_sent.as_ref()
+                && info.transaction_id == handles.upload_file_txn
+            {
+                // SAFETY: we knew it was Some(), two lines above.
+                let info = guard.being_sent.take().unwrap();
+                if info.cancel_upload() {
+                    trace!("aborted ongoing file upload");
                 }
             }
 
@@ -890,22 +889,22 @@ impl QueueStorage {
         let new_serialized = SerializableEventContent::new(&any_content.clone())?;
 
         // If the request is active (being sent), send a dependent request.
-        if let Some(being_sent) = guard.being_sent.as_ref() {
-            if being_sent.transaction_id == *txn {
-                // Record a dependent request to edit, and exit.
-                store
-                    .save_dependent_queued_request(
-                        &self.room_id,
-                        txn,
-                        ChildTransactionId::new(),
-                        MilliSecondsSinceUnixEpoch::now(),
-                        DependentQueuedRequestKind::EditEvent { new_content: new_serialized },
-                    )
-                    .await?;
+        if let Some(being_sent) = guard.being_sent.as_ref()
+            && being_sent.transaction_id == *txn
+        {
+            // Record a dependent request to edit, and exit.
+            store
+                .save_dependent_queued_request(
+                    &self.room_id,
+                    txn,
+                    ChildTransactionId::new(),
+                    MilliSecondsSinceUnixEpoch::now(),
+                    DependentQueuedRequestKind::EditEvent { new_content: new_serialized },
+                )
+                .await?;
 
-                trace!("media event was being sent, pushed a dependent edit");
-                return Ok(Some(any_content));
-            }
+            trace!("media event was being sent, pushed a dependent edit");
+            return Ok(Some(any_content));
         }
 
         // The request is not active: edit the local echo.
@@ -179,11 +179,11 @@ pub(super) async fn restore_sliding_sync_state(
     if let Some(olm_machine) = &*_client.olm_machine().await {
         let instance_storage_key = format_storage_key_for_sliding_sync(storage_key);
 
-        if let Ok(Some(blob)) = olm_machine.store().get_custom_value(&instance_storage_key).await {
-            if let Ok(frozen_pos) = serde_json::from_slice::<FrozenSlidingSyncPos>(&blob) {
-                trace!("Successfully read the `Sliding Sync` pos from the crypto store cache");
-                restored_fields.pos = frozen_pos.pos;
-            }
+        if let Ok(Some(blob)) = olm_machine.store().get_custom_value(&instance_storage_key).await
+            && let Ok(frozen_pos) = serde_json::from_slice::<FrozenSlidingSyncPos>(&blob)
+        {
+            trace!("Successfully read the `Sliding Sync` pos from the crypto store cache");
+            restored_fields.pos = frozen_pos.pos;
         }
     }
 
@@ -334,10 +334,10 @@ impl Client {
             // If the last sync happened less than a second ago, sleep for a
             // while to not hammer out requests if the server doesn't respect
            // the sync timeout.
-            if let Some(t) = last_sync_time {
-                if now - *t <= Duration::from_secs(1) {
-                    Self::sleep().await;
-                }
+            if let Some(t) = last_sync_time
+                && now - *t <= Duration::from_secs(1)
+            {
+                Self::sleep().await;
             }
 
             *last_sync_time = Some(now);
@@ -365,10 +365,10 @@ impl MatrixMockServer {
             });
             let event: Raw<EncryptedToDeviceEvent> = serde_json::from_value(event).unwrap();
 
-            if let Ok(mut guard) = tx.lock() {
-                if let Some(tx) = guard.take() {
-                    let _ = tx.send(event);
-                }
+            if let Ok(mut guard) = tx.lock()
+                && let Some(tx) = guard.take()
+            {
+                let _ = tx.send(event);
             }
 
             ResponseTemplate::new(200).set_body_json(&*test_json::EMPTY)
@@ -136,28 +136,28 @@ async fn test_delete_devices() {
 
     let devices = &[device_id!("DEVICEID").to_owned()];
 
-    if let Err(e) = client.delete_devices(devices, None).await {
-        if let Some(info) = e.as_uiaa_response() {
-            let mut auth_parameters = BTreeMap::new();
+    if let Err(e) = client.delete_devices(devices, None).await
+        && let Some(info) = e.as_uiaa_response()
+    {
+        let mut auth_parameters = BTreeMap::new();
 
-            let identifier = json!({
-                "type": "m.id.user",
-                "user": "example",
-            });
-            auth_parameters.insert("identifier".to_owned(), identifier);
-            auth_parameters.insert("password".to_owned(), "wordpass".into());
+        let identifier = json!({
+            "type": "m.id.user",
+            "user": "example",
+        });
+        auth_parameters.insert("identifier".to_owned(), identifier);
+        auth_parameters.insert("password".to_owned(), "wordpass".into());
 
-            let auth_data = uiaa::AuthData::Password(assign!(
-                uiaa::Password::new(
-                    uiaa::UserIdentifier::UserIdOrLocalpart("example".to_owned()),
-                    "wordpass".to_owned(),
-                ), {
-                    session: info.session.clone(),
-                }
-            ));
+        let auth_data = uiaa::AuthData::Password(assign!(
+            uiaa::Password::new(
+                uiaa::UserIdentifier::UserIdOrLocalpart("example".to_owned()),
+                "wordpass".to_owned(),
+            ), {
+                session: info.session.clone(),
+            }
+        ));
 
-            client.delete_devices(devices, Some(auth_data)).await.unwrap();
-        }
+        client.delete_devices(devices, Some(auth_data)).await.unwrap();
     }
 }
 