chore(sdk): Address review feedback.

This commit is contained in:
Ivan Enderlin
2023-07-05 13:56:25 +02:00
parent 1ccc0c3df3
commit 9493c8a75d
4 changed files with 40 additions and 23 deletions

View File

@@ -1,10 +1,10 @@
//! Sliding Sync errors.
use thiserror::Error;
use tokio::task::JoinError;
/// Internal representation of errors in Sliding Sync.
#[derive(Error, Debug)]
#[cfg_attr(test, derive(PartialEq))]
#[non_exhaustive]
pub enum Error {
/// The response we've received from the server can't be parsed or doesn't
@@ -41,6 +41,11 @@ pub enum Error {
InvalidSlidingSyncIdentifier,
/// A task failed to execute to completion.
#[error("A task failed to execute to completion; task description: {0}")]
JoinError(String),
#[error("A task failed to execute to completion; task description: {task_description}")]
JoinError {
/// Task description.
task_description: String,
/// The original `JoinError`.
error: JoinError,
},
}

View File

@@ -326,53 +326,57 @@ fn create_range(
#[cfg(test)]
mod tests {
use std::ops::RangeInclusive;
use assert_matches::assert_matches;
use super::*;
#[test]
fn test_create_range_from() {
// From 0, we want 100 items.
assert_eq!(create_range(0, 100, None, None), Ok(0..=99));
assert_matches!(create_range(0, 100, None, None), Ok(range) if range == RangeInclusive::new(0, 99));
// From 100, we want 100 items.
assert_eq!(create_range(100, 100, None, None), Ok(100..=199));
assert_matches!(create_range(100, 100, None, None), Ok(range) if range == RangeInclusive::new(100, 199));
// From 0, we want 100 items, but there is a maximum number of rooms to fetch
// defined at 50.
assert_eq!(create_range(0, 100, Some(50), None), Ok(0..=49));
assert_matches!(create_range(0, 100, Some(50), None), Ok(range) if range == RangeInclusive::new(0, 49));
// From 49, we want 100 items, but there is a maximum number of rooms to fetch
// defined at 50. There is 1 item to load.
assert_eq!(create_range(49, 100, Some(50), None), Ok(49..=49));
assert_matches!(create_range(49, 100, Some(50), None), Ok(range) if range == RangeInclusive::new(49, 49));
// From 50, we want 100 items, but there is a maximum number of rooms to fetch
// defined at 50.
assert_eq!(
assert_matches!(
create_range(50, 100, Some(50), None),
Err(Error::InvalidRange { start: 50, end: 49 })
);
// From 0, we want 100 items, but there is a maximum number of rooms defined at
// 50.
assert_eq!(create_range(0, 100, None, Some(50)), Ok(0..=49));
assert_matches!(create_range(0, 100, None, Some(50)), Ok(range) if range == RangeInclusive::new(0, 49));
// From 49, we want 100 items, but there is a maximum number of rooms defined at
// 50. There is 1 item to load.
assert_eq!(create_range(49, 100, None, Some(50)), Ok(49..=49));
assert_matches!(create_range(49, 100, None, Some(50)), Ok(range) if range == RangeInclusive::new(49, 49));
// From 50, we want 100 items, but there is a maximum number of rooms defined at
// 50.
assert_eq!(
assert_matches!(
create_range(50, 100, None, Some(50)),
Err(Error::InvalidRange { start: 50, end: 49 })
);
// From 0, we want 100 items, but there is a maximum number of rooms to fetch
// defined at 75, and a maximum number of rooms defined at 50.
assert_eq!(create_range(0, 100, Some(75), Some(50)), Ok(0..=49));
assert_matches!(create_range(0, 100, Some(75), Some(50)), Ok(range) if range == RangeInclusive::new(0, 49));
// From 0, we want 100 items, but there is a maximum number of rooms to fetch
// defined at 50, and a maximum number of rooms defined at 75.
assert_eq!(create_range(0, 100, Some(50), Some(75)), Ok(0..=49));
assert_matches!(create_range(0, 100, Some(50), Some(75)), Ok(range) if range == RangeInclusive::new(0, 49));
}
#[test]

View File

@@ -485,16 +485,21 @@ impl SlidingSync {
if self.inner.sticky.read().unwrap().data().extensions.e2ee.enabled == Some(true) {
debug!("Sliding Sync is sending the request along with outgoing E2EE requests");
// If the sliding sync request fails faster than the e2ee requests get their
// responses, we want to fail as early as possible, so as to invalidate the
// sliding sync session as early as possible.
// Here, we need to run 2 things:
//
// Also, we don't want an interruption in the sliding sync request to abort
// processing of the e2ee response.
// 1. Send the sliding sync request and get a response,
// 2. Send the E2EE requests.
//
// For those reasons, start the e2ee requests in a background task. If this is
// aborted while everything is running, the same data might be sent later, which
// is fine.
// We don't want to use a `join` or `try_join` because we want to fail if and
// only if sending the sliding sync request fails. Failing to send the E2EE
// requests should just result in a log.
//
// We also want to give priority to the sliding sync request. E2EE requests are
// sent concurrently with the sliding sync request, but the priority is on
// waiting for a sliding sync response.
//
// If sending the sliding sync request fails, the sending of the E2EE requests
// must be aborted as soon as possible.
let client = self.inner.client.clone();
let e2ee_uploads = spawn(async move {
@@ -513,7 +518,10 @@ impl SlidingSync {
// `e2ee_uploads`. It did run concurrently, so it should not be blocking for too
// long. Otherwise — if `request` has failed — `e2ee_uploads` has
// been dropped, and has therefore been aborted.
e2ee_uploads.await.map_err(|_| Error::JoinError("e2ee_uploads".to_string()))?;
e2ee_uploads.await.map_err(|error| Error::JoinError {
task_description: "e2ee_uploads".to_owned(),
error,
})?;
response
} else {

View File

@@ -12,7 +12,7 @@ use tokio::task::{JoinError, JoinHandle};
pub(crate) struct AbortOnDrop<T>(JoinHandle<T>);
impl<T> AbortOnDrop<T> {
pub fn new(join_handle: JoinHandle<T>) -> Self {
fn new(join_handle: JoinHandle<T>) -> Self {
Self(join_handle)
}
}