diff --git a/Cargo.lock b/Cargo.lock
index 3dc41a695..e7b0476eb 100644
Binary files a/Cargo.lock and b/Cargo.lock differ
diff --git a/Cargo.toml b/Cargo.toml
index 716d26cc6..79efbf25d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -41,7 +41,7 @@
 tauri-specta = { version = "1.0.2" }
 swift-rs = { version = "1.0.5" }
-tokio = { version = "1.25.0" }
+tokio = { version = "1.28.2" }

 [patch.crates-io]
 # We use this patch so we can compile for the iOS simulator on M1
diff --git a/apps/desktop/src-tauri/src/main.rs b/apps/desktop/src-tauri/src/main.rs
index 11cc7be3d..83f70b933 100644
--- a/apps/desktop/src-tauri/src/main.rs
+++ b/apps/desktop/src-tauri/src/main.rs
@@ -3,7 +3,7 @@
 	windows_subsystem = "windows"
 )]

-use std::{path::PathBuf, time::Duration};
+use std::{path::PathBuf, sync::Arc, time::Duration};

 use sd_core::{custom_uri::create_custom_uri_endpoint, Node, NodeError};
@@ -28,6 +28,13 @@ async fn app_ready(app_handle: tauri::AppHandle) {
 	window.show().unwrap();
 }

+#[tauri::command(async)]
+#[specta::specta]
+async fn open_logs_dir(node: tauri::State<'_, Arc<Node>>) -> Result<(), ()> {
+	opener::open(node.data_dir.join("logs")).ok();
+	Ok(())
+}
+
 pub fn tauri_error_plugin<R: Runtime>(err: NodeError) -> TauriPlugin<R> {
 	tauri::plugin::Builder::new("spacedrive")
 		.js_init_script(format!(r#"window.__SD_ERROR__ = "{err}";"#))
@@ -55,6 +62,8 @@ async fn main() -> tauri::Result<()> {
 	#[cfg(debug_assertions)]
 	let data_dir = data_dir.join("dev");

+	let _guard = Node::init_logger(&data_dir);
+
 	let result = Node::new(data_dir).await;

 	let app = tauri::Builder::default();
@@ -76,7 +85,10 @@ async fn main() -> tauri::Result<()> {
 			(Some(node), app)
 		}
-		Err(err) => (None, app.plugin(tauri_error_plugin(err))),
+		Err(err) => {
+			tracing::error!("Error starting up the node: {err}");
+			(None, app.plugin(tauri_error_plugin(err)))
+		}
 	};

 	let app = app
@@ -130,6 +142,7 @@ async fn main() -> tauri::Result<()> {
 		.menu(menu::get_menu())
 		.invoke_handler(tauri_handlers![
 			app_ready,
+			open_logs_dir,
 			file::open_file_path,
 			file::get_file_path_open_with_apps,
 			file::open_file_path_with
diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx
index 1e4f26d5f..b94f3033d 100644
--- a/apps/desktop/src/App.tsx
+++ b/apps/desktop/src/App.tsx
@@ -17,7 +17,13 @@ import {
 } from '@sd/interface';
 import { getSpacedropState } from '@sd/interface/hooks/useSpacedropState';
 import '@sd/ui/style';
-import { appReady, getFilePathOpenWithApps, openFilePath, openFilePathWith } from './commands';
+import {
+	appReady,
+	getFilePathOpenWithApps,
+	openFilePath,
+	openFilePathWith,
+	openLogsDir
+} from './commands';

 // TODO: Bring this back once upstream is fixed up.
// const client = hooks.createClient({ @@ -71,6 +77,7 @@ const platform: Platform = { saveFilePickerDialog: () => dialog.save(), showDevtools: () => invoke('show_devtools'), openPath: (path) => shell.open(path), + openLogsDir, openFilePath, getFilePathOpenWithApps, openFilePathWith @@ -103,17 +110,27 @@ export default function App() { }; }, []); - if (startupError) { - return ; - } - return ( - + ); } + +// This is required because `ErrorPage` uses the OS which comes from `PlatformProvider` +function AppInner() { + if (startupError) { + return ( + + ); + } + + return ; +} diff --git a/apps/desktop/src/commands.ts b/apps/desktop/src/commands.ts index 37d4c4a2c..71e6dc417 100644 --- a/apps/desktop/src/commands.ts +++ b/apps/desktop/src/commands.ts @@ -14,6 +14,10 @@ export function appReady() { return invoke()("app_ready") } +export function openLogsDir() { + return invoke()("open_logs_dir") +} + export function openFilePath(library: string, id: number) { return invoke()("open_file_path", { library,id }) } diff --git a/apps/mobile/crates/core/src/lib.rs b/apps/mobile/crates/core/src/lib.rs index 7f2b76612..49631ecb4 100644 --- a/apps/mobile/crates/core/src/lib.rs +++ b/apps/mobile/crates/core/src/lib.rs @@ -67,6 +67,8 @@ pub fn handle_core_msg( match node { Some(node) => node.clone(), None => { + let _guard = Node::init_logger(&data_dir); + // TODO: probably don't unwrap let new_node = Node::new(data_dir).await.unwrap(); node.replace(new_node.clone()); diff --git a/apps/server/src/main.rs b/apps/server/src/main.rs index 678d5d6bd..17218f856 100644 --- a/apps/server/src/main.rs +++ b/apps/server/src/main.rs @@ -32,6 +32,8 @@ async fn main() { .map(|port| port.parse::().unwrap_or(8080)) .unwrap_or(8080); + let _guard = Node::init_logger(&data_dir); + let (node, router) = Node::new(data_dir).await.expect("Unable to create node"); let signal = utils::axum_shutdown_signal(node.clone()); diff --git a/core/Cargo.toml b/core/Cargo.toml index 10ef0385f..cea2c7cad 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -67,8 +67,10 @@ include_dir = { version = "0.7.2", features = ["glob"] } async-trait = "^0.1.57" image = "0.24.6" webp = "0.2.2" -tracing = "0.1.36" -tracing-subscriber = { version = "0.3.15", features = ["env-filter"] } +tracing = { git = "https://github.com/tokio-rs/tracing", rev = "29146260fb4615d271d2e899ad95a753bb42915e" } # To work with tracing-appender +tracing-subscriber = { git = "https://github.com/tokio-rs/tracing", rev = "29146260fb4615d271d2e899ad95a753bb42915e", features = [ + "env-filter", +] } async-stream = "0.3.3" once_cell = "1.15.0" ctor = "0.1.23" @@ -80,12 +82,13 @@ http-range = "0.1.5" mini-moka = "0.10.0" serde_with = "2.2.0" dashmap = { version = "5.4.0", features = ["serde"] } -notify = { version = "5.0.0", default-features = false, features = [ +notify = { version = "5.2.0", default-features = false, features = [ "macos_fsevent", ], optional = true } static_assertions = "1.1.0" serde-hashkey = "0.4.5" normpath = { version = "1.1.1", features = ["localization"] } +tracing-appender = { git = "https://github.com/tokio-rs/tracing", rev = "29146260fb4615d271d2e899ad95a753bb42915e" } # Unreleased changes for log deletion strum = { version = "0.24", features = ["derive"] } strum_macros = "0.24" @@ -95,4 +98,4 @@ version = "0.1.5" [dev-dependencies] tempfile = "^3.3.0" -tracing-test = "^0.2.3" +tracing-test = "^0.2.4" diff --git a/core/src/api/files.rs b/core/src/api/files.rs index 49a85dd62..7bbfda2b1 100644 --- a/core/src/api/files.rs +++ b/core/src/api/files.rs @@ 
-9,7 +9,7 @@ use crate::{
 	prisma::{location, object},
 };

-use chrono::{FixedOffset, Utc};
+use chrono::Utc;
 use rspc::{alpha::AlphaRouter, ErrorCode};
 use serde::Deserialize;
 use specta::Type;
@@ -108,9 +108,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 				.object()
 				.update(
 					object::id::equals(id),
-					vec![object::date_accessed::set(Some(
-						Utc::now().with_timezone(&FixedOffset::east_opt(0).unwrap()),
-					))],
+					vec![object::date_accessed::set(Some(Utc::now().into()))],
 				)
 				.exec()
 				.await?;
diff --git a/core/src/api/keys.rs b/core/src/api/keys.rs
index 91064339d..d57405a23 100644
--- a/core/src/api/keys.rs
+++ b/core/src/api/keys.rs
@@ -1,4 +1,5 @@
 use rspc::alpha::AlphaRouter;
+use rspc::ErrorCode;
 use sd_crypto::keys::keymanager::{StoredKey, StoredKeyType};
 use sd_crypto::primitives::SECRET_KEY_IDENTIFIER;
 use sd_crypto::types::{Algorithm, HashingAlgorithm, OnboardingConfig, SecretKeyString};
@@ -389,38 +390,45 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 				invalidate_query!(library, "keys.list");
 				invalidate_query!(library, "keys.listMounted");

-				Ok(TryInto::<u32>::try_into(updated_keys.len()).unwrap()) // We convert from `usize` (bigint type) to `u32` (number type) because rspc doesn't support bigints.
+				TryInto::<u32>::try_into(updated_keys.len()).map_err(|_| {
+					rspc::Error::new(ErrorCode::InternalServerError, "integer overflow".into())
+				}) // We convert from `usize` (bigint type) to `u32` (number type) because rspc doesn't support bigints.
 			})
 		})
-		.procedure("changeMasterPassword", {
-			R.with2(library())
-				.mutation(|(_, library), args: MasterPasswordChangeArgs| async move {
-					let verification_key = library
-						.key_manager
-						.change_master_password(
-							args.password,
-							args.algorithm,
-							args.hashing_algorithm,
-							library.id,
-						)
-						.await?;
+		.procedure(
+			"changeMasterPassword",
+			#[allow(clippy::unwrap_used)] // TODO: Jake is fixing this in a Crypto PR
+			{
+				R.with2(library()).mutation(
+					|(_, library), args: MasterPasswordChangeArgs| async move {
+						let verification_key = library
+							.key_manager
+							.change_master_password(
+								args.password,
+								args.algorithm,
+								args.hashing_algorithm,
+								library.id,
+							)
+							.await?;

-					invalidate_query!(library, "keys.getSecretKey");
+						invalidate_query!(library, "keys.getSecretKey");

-					// remove old root key if present
-					library
-						.db
-						.key()
-						.delete_many(vec![key::key_type::equals(
-							serde_json::to_string(&StoredKeyType::Root).unwrap(),
-						)])
-						.exec()
-						.await?;
+						// remove old root key if present
+						library
+							.db
+							.key()
+							.delete_many(vec![key::key_type::equals(
+								serde_json::to_string(&StoredKeyType::Root).unwrap(),
+							)])
+							.exec()
+							.await?;

-					// write the new verification key
-					write_storedkey_to_db(&library.db, &verification_key).await?;
+						// write the new verification key
+						write_storedkey_to_db(&library.db, &verification_key).await?;

-					Ok(())
-				})
-		})
+						Ok(())
+					},
+				)
+			},
+		)
 }
diff --git a/core/src/api/libraries.rs b/core/src/api/libraries.rs
index 2bd5ae0c3..21edbfc10 100644
--- a/core/src/api/libraries.rs
+++ b/core/src/api/libraries.rs
@@ -109,7 +109,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 				ctx.library_manager
 					.get_library(new_library.uuid)
 					.await
-					.unwrap(),
+					.expect("We just created the library. Where did it go?"),
 				"library.getStatistics"
 			);
diff --git a/core/src/api/nodes.rs b/core/src/api/nodes.rs
index 85a51a614..a94d1b551 100644
--- a/core/src/api/nodes.rs
+++ b/core/src/api/nodes.rs
@@ -1,6 +1,7 @@
-use rspc::alpha::AlphaRouter;
+use rspc::{alpha::AlphaRouter, ErrorCode};
 use serde::Deserialize;
 use specta::Type;
+use tracing::error;

 use crate::api::R;
@@ -20,9 +21,13 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 						config.name = args.name;
 					})
 					.await
-					.unwrap();
-
-				Ok(())
+					.map_err(|err| {
+						error!("Failed to write config: {}", err);
+						rspc::Error::new(
+							ErrorCode::InternalServerError,
+							"error updating config".into(),
+						)
+					})
 			})
 		})
 }
diff --git a/core/src/api/p2p.rs b/core/src/api/p2p.rs
index 203ece62b..13109460a 100644
--- a/core/src/api/p2p.rs
+++ b/core/src/api/p2p.rs
@@ -1,4 +1,4 @@
-use rspc::alpha::AlphaRouter;
+use rspc::{alpha::AlphaRouter, ErrorCode};
 use sd_p2p::PeerId;
 use serde::Deserialize;
 use specta::Type;
@@ -45,8 +45,18 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 			R.mutation(|ctx, args: SpacedropArgs| async move {
 				// TODO: Handle multiple files path and error if zero paths
 				ctx.p2p
-					.big_bad_spacedrop(args.peer_id, PathBuf::from(args.file_path.first().unwrap()))
-					.await;
+					.big_bad_spacedrop(
+						args.peer_id,
+						PathBuf::from(
+							args.file_path
+								.first()
+								.expect("https://linear.app/spacedriveapp/issue/ENG-625/spacedrop-multiple-files"),
+						),
+					)
+					.await
+					.map_err(|_| {
+						rspc::Error::new(ErrorCode::InternalServerError, "todo".to_string())
+					})
 			})
 		})
 		.procedure("acceptSpacedrop", {
diff --git a/core/src/api/tags.rs b/core/src/api/tags.rs
index 6922de7fa..1846593ca 100644
--- a/core/src/api/tags.rs
+++ b/core/src/api/tags.rs
@@ -1,4 +1,4 @@
-use rspc::alpha::AlphaRouter;
+use rspc::{alpha::AlphaRouter, ErrorCode};
 use serde::Deserialize;
 use specta::Type;
@@ -151,7 +151,10 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 					.select(tag::select!({ pub_id }))
 					.exec()
 					.await?
-					.unwrap();
+					.ok_or(rspc::Error::new(
+						ErrorCode::NotFound,
+						"Error finding tag in db".into(),
+					))?;

 				sync.write_ops(
 					db,
diff --git a/core/src/api/utils/invalidate.rs b/core/src/api/utils/invalidate.rs
index 38350445c..63df249c4 100644
--- a/core/src/api/utils/invalidate.rs
+++ b/core/src/api/utils/invalidate.rs
@@ -65,11 +65,13 @@ impl InvalidRequests {
 		}
 	}

-	#[allow(unused_variables)]
+	#[allow(unused_variables, clippy::panic)]
	pub(crate) fn validate(r: Arc<Router>) {
 		#[cfg(debug_assertions)]
 		{
-			let invalidate_requests = INVALIDATION_REQUESTS.lock().unwrap();
+			let invalidate_requests = INVALIDATION_REQUESTS
+				.lock()
+				.expect("Failed to lock the mutex for invalidation requests");

 			let queries = r.queries();
 			for req in &invalidate_requests.queries {
@@ -240,7 +242,15 @@ pub(crate) fn mount_invalidate() -> AlphaRouter<Ctx> {
 				if let Ok(event) = event {
 					if let CoreEvent::InvalidateOperation(op) = event {
 						// Newer data replaces older data in the buffer
-						buf.insert(to_key(&(op.key, &op.arg)).unwrap(), op);
+						match to_key(&(op.key, &op.arg)) {
+							Ok(key) => {
+								buf.insert(key, op);
+							},
+							Err(err) => {
+								warn!("Error deriving key for invalidate operation '{:?}': {:?}", op, err);
+							},
+						}
+					}
 				} else {
 					warn!("Shutting down invalidation manager thread due to the core event bus being dropped!");
diff --git a/core/src/job/mod.rs b/core/src/job/mod.rs
index 84a83da35..040e318cb 100644
--- a/core/src/job/mod.rs
+++ b/core/src/job/mod.rs
@@ -72,6 +72,10 @@ pub enum JobError {
 	MatchingSrcDest(PathBuf),
 	#[error("action would overwrite another file: {}", .0.display())]
 	WouldOverwrite(PathBuf),
+	#[error("item of type '{0}' with id '{1}' is missing from the db")]
+	MissingFromDb(&'static str, String),
+	#[error("the cas id is not set on the path data")]
+	MissingCasId,

 	// Not errors
 	#[error("step completed with errors")]
@@ -295,7 +299,10 @@ pub struct JobState<SJob: StatefulJob> {
 impl<SJob: StatefulJob> DynJob for Job<SJob> {
 	fn id(&self) -> Uuid {
 		// SAFETY: This method is used during queueing, so we still have a report
-		self.report().as_ref().unwrap().id
+		self.report()
+			.as_ref()
+			.expect("This method is used during queueing, so we still have a report")
+			.id
 	}

 	fn parent_id(&self) -> Option<Uuid> {
@@ -347,7 +354,9 @@
 			) => {
 				match step_result {
 					Err(JobError::EarlyFinish { .. }) => {
-						info!("{}", step_result.unwrap_err());
+						step_result.map_err(|err| {
+							warn!("{}", err);
+						}).ok();
 						break;
 					},
 					Err(JobError::StepCompletedWithErrors(errors_text)) => {
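Review note: the new `MissingFromDb` and `MissingCasId` variants replace `.unwrap()` on `Option`s coming back from Prisma queries. A self-contained sketch of the idea; the `ExampleJobError` enum and the stubbed query are illustrative stand-ins, not the PR's actual types:

```rust
use thiserror::Error;

#[derive(Error, Debug)]
enum ExampleJobError {
	#[error("item of type '{0}' with id '{1}' is missing from the db")]
	MissingFromDb(&'static str, String),
}

fn find_location_path(location_id: i32) -> Result<String, ExampleJobError> {
	// Stand-in for a Prisma `find_unique(...).exec().await?` returning Option<T>.
	let row: Option<String> = None;
	// `.ok_or(...)?` turns the absent row into a typed, loggable error
	// instead of panicking the whole job.
	row.ok_or(ExampleJobError::MissingFromDb(
		"location",
		format!("id={location_id}"),
	))
}
```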
diff --git a/core/src/lib.rs b/core/src/lib.rs
index 8ce04cbcf..3d1da1153 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -1,3 +1,5 @@
+#![warn(clippy::unwrap_used, clippy::panic)]
+
 use crate::{
 	api::{CoreEvent, Router},
 	job::JobManager,
@@ -9,11 +11,18 @@ use crate::{

 pub use sd_prisma::*;

-use std::{path::Path, sync::Arc};
+use std::{
+	path::{Path, PathBuf},
+	sync::Arc,
+};
 use thiserror::Error;
 use tokio::{fs, sync::broadcast};
 use tracing::{debug, error, info, warn};
-use tracing_subscriber::{prelude::*, EnvFilter};
+use tracing_appender::{
+	non_blocking::{NonBlocking, WorkerGuard},
+	rolling::{RollingFileAppender, Rotation},
+};
+use tracing_subscriber::{fmt, prelude::*, EnvFilter};

 pub mod api;
 pub mod custom_uri;
@@ -38,6 +47,7 @@ pub struct NodeContext {
 }

 pub struct Node {
+	pub data_dir: PathBuf,
	config: Arc<NodeConfigManager>,
	pub library_manager: Arc<LibraryManager>,
	location_manager: Arc<LocationManager>,
@@ -47,86 +57,16 @@
	// peer_request: tokio::sync::Mutex>,
 }

-#[cfg(not(target_os = "android"))]
-const CONSOLE_LOG_FILTER: tracing_subscriber::filter::LevelFilter = {
-	use tracing_subscriber::filter::LevelFilter;
-
-	match cfg!(debug_assertions) {
-		true => LevelFilter::DEBUG,
-		false => LevelFilter::INFO,
-	}
-};
-
 impl Node {
	pub async fn new(data_dir: impl AsRef<Path>) -> Result<(Arc<Node>, Arc<Router>), NodeError> {
 		let data_dir = data_dir.as_ref();

 		#[cfg(debug_assertions)]
-		let init_data = util::debug_initializer::InitConfig::load(data_dir).await;
+		let init_data = util::debug_initializer::InitConfig::load(data_dir).await?;

 		// This error is ignored because it's throwing on mobile despite the folder existing.
let _ = fs::create_dir_all(&data_dir).await; - // dbg!(get_object_kind_from_extension("png")); - - // let (non_blocking, _guard) = tracing_appender::non_blocking(rolling::daily( - // Path::new(&data_dir).join("logs"), - // "log", - // )); - // TODO: Make logs automatically delete after x time https://github.com/tokio-rs/tracing/pull/2169 - - let subscriber = tracing_subscriber::registry().with( - EnvFilter::from_default_env() - .add_directive("warn".parse().expect("Error invalid tracing directive!")) - .add_directive( - "sd_core=debug" - .parse() - .expect("Error invalid tracing directive!"), - ) - .add_directive( - "sd_core::location::manager=info" - .parse() - .expect("Error invalid tracing directive!"), - ) - .add_directive( - "sd_core_mobile=debug" - .parse() - .expect("Error invalid tracing directive!"), - ) - // .add_directive( - // "sd_p2p=debug" - // .parse() - // .expect("Error invalid tracing directive!"), - // ) - .add_directive( - "server=debug" - .parse() - .expect("Error invalid tracing directive!"), - ) - .add_directive( - "desktop=debug" - .parse() - .expect("Error invalid tracing directive!"), - ), - // .add_directive( - // "rspc=debug" - // .parse() - // .expect("Error invalid tracing directive!"), - // ), - ); - #[cfg(not(target_os = "android"))] - let subscriber = subscriber.with(tracing_subscriber::fmt::layer().with_filter(CONSOLE_LOG_FILTER)); - // #[cfg(target_os = "android")] - // let subscriber = subscriber.with(tracing_android::layer("com.spacedrive.app").unwrap()); // TODO: This is not working - subscriber - // .with( - // Layer::default() - // .with_writer(non_blocking) - // .with_ansi(false) - // .with_filter(LevelFilter::DEBUG), - // ) - .init(); - let event_bus = broadcast::channel(1024); let config = NodeConfigManager::new(data_dir.to_path_buf()) .await @@ -134,7 +74,7 @@ impl Node { let jobs = JobManager::new(); let location_manager = LocationManager::new(); - let (p2p, mut p2p_rx) = P2PManager::new(config.clone()).await; + let (p2p, mut p2p_rx) = P2PManager::new(config.clone()).await?; let library_manager = LibraryManager::new( data_dir.join("libraries"), @@ -150,11 +90,9 @@ impl Node { #[cfg(debug_assertions)] if let Some(init_data) = init_data { - init_data.apply(&library_manager).await; + init_data.apply(&library_manager).await?; } - debug!("Watching locations"); - tokio::spawn({ let library_manager = library_manager.clone(); @@ -168,8 +106,12 @@ impl Node { }; for op in operations { - println!("ingest lib id: {}", library.id); - library.sync.ingest_op(op).await.unwrap(); + library.sync.ingest_op(op).await.unwrap_or_else(|err| { + error!( + "error ingesting operation for library '{}': {err:?}", + library.id + ); + }); } } } @@ -177,6 +119,7 @@ impl Node { let router = api::mount(); let node = Node { + data_dir: data_dir.to_path_buf(), config, library_manager, location_manager, @@ -190,6 +133,73 @@ impl Node { Ok((Arc::new(node), router)) } + pub fn init_logger(data_dir: impl AsRef) -> WorkerGuard { + let log_filter = match cfg!(debug_assertions) { + true => tracing::Level::DEBUG, + false => tracing::Level::INFO, + }; + + let (logfile, guard) = NonBlocking::new( + RollingFileAppender::builder() + .filename_prefix("sd.log") + .rotation(Rotation::DAILY) + .max_log_files(4) + .build(data_dir.as_ref().join("logs")) + .expect("Error setting up log file!"), + ); + + let collector = tracing_subscriber::registry() + .with(fmt::Subscriber::new().with_ansi(false).with_writer(logfile)) + .with( + fmt::Subscriber::new() + 
.with_writer(std::io::stdout.with_max_level(log_filter)) + .with_filter( + EnvFilter::from_default_env() + .add_directive( + "warn".parse().expect("Error invalid tracing directive!"), + ) + .add_directive( + "sd_core=debug" + .parse() + .expect("Error invalid tracing directive!"), + ) + .add_directive( + "sd_core::location::manager=info" + .parse() + .expect("Error invalid tracing directive!"), + ) + .add_directive( + "sd_core_mobile=debug" + .parse() + .expect("Error invalid tracing directive!"), + ) + // .add_directive( + // "sd_p2p=debug" + // .parse() + // .expect("Error invalid tracing directive!"), + // ) + .add_directive( + "server=debug" + .parse() + .expect("Error invalid tracing directive!"), + ) + .add_directive( + "spacedrive=debug" + .parse() + .expect("Error invalid tracing directive!"), + ), + ), + ); + + tracing::collect::set_global_default(collector) + .map_err(|err| { + println!("Error initializing global logger: {:?}", err); + }) + .ok(); + + guard + } + pub async fn shutdown(&self) { info!("Spacedrive shutting down..."); self.jobs.pause().await; @@ -216,12 +226,17 @@ impl Node { /// Error type for Node related errors. #[derive(Error, Debug)] pub enum NodeError { - #[error("failed to initialize config")] + #[error("failed to initialize config: {0}")] FailedToInitializeConfig(util::migrator::MigratorError), - #[error("failed to initialize library manager")] + #[error("failed to initialize library manager: {0}")] FailedToInitializeLibraryManager(#[from] library::LibraryManagerError), #[error(transparent)] LocationManager(#[from] LocationManagerError), + #[error("failed to initialize p2p manager: {0}")] + P2PManager(#[from] sd_p2p::ManagerError), #[error("invalid platform integer")] InvalidPlatformInt(i32), + #[cfg(debug_assertions)] + #[error("Init config error: {0}")] + InitConfig(#[from] util::debug_initializer::InitConfigError), } diff --git a/core/src/library/manager.rs b/core/src/library/manager.rs index 9b14356b6..6683325ca 100644 --- a/core/src/library/manager.rs +++ b/core/src/library/manager.rs @@ -5,7 +5,7 @@ use crate::{ prisma::{node, PrismaClient}, sync::{SyncManager, SyncMessage}, util::{ - db::load_and_migrate, + db::{load_and_migrate, MigrationError}, error::{FileIOError, NonUtf8PathError}, migrator::MigratorError, seeder::{indexer_rules_seeder, SeederError}, @@ -62,6 +62,8 @@ pub enum LibraryManagerError { KeyManager(#[from] sd_crypto::Error), #[error("failed to run library migrations")] MigratorError(#[from] MigratorError), + #[error("error migrating the library: {0}")] + MigrationError(#[from] MigrationError), #[error("invalid library configuration: {0}")] InvalidConfig(String), #[error(transparent)] @@ -93,7 +95,7 @@ pub async fn seed_keymanager( .iter() .map(|key| { let key = key.clone(); - let uuid = uuid::Uuid::from_str(&key.uuid).unwrap(); + let uuid = uuid::Uuid::from_str(&key.uuid).expect("invalid key id in the DB"); if key.default { default = Some(uuid); @@ -119,8 +121,7 @@ pub async fn seed_keymanager( automount: key.automount, }) }) - .collect::, sd_crypto::Error>>() - .unwrap(); + .collect::, sd_crypto::Error>>()?; // insert all keys from the DB into the keymanager's keystore km.populate_keystore(stored_keys).await?; @@ -368,8 +369,7 @@ impl LibraryManager { LibraryManagerError::NonUtf8Path(NonUtf8PathError(db_path.into())) })? 
			))
-			.await
-			.unwrap(),
+			.await?,
		);

 		let node_config = node_context.config.get().await;
diff --git a/core/src/location/file_path_helper/mod.rs b/core/src/location/file_path_helper/mod.rs
index 8aa416879..3048919f1 100644
--- a/core/src/location/file_path_helper/mod.rs
+++ b/core/src/location/file_path_helper/mod.rs
@@ -99,6 +99,8 @@ pub struct FilePathMetadata {
 pub enum FilePathError {
	#[error("file path not found: <path='{}'>", .0.display())]
	NotFound(Box<Path>),
+	#[error("location '{0}' not found")]
+	LocationNotFound(i32),
	#[error("received an invalid sub path: <location_path='{}', sub_path='{}'>", .location_path.display(), .sub_path.display())]
 	InvalidSubPath {
		location_path: Box<Path>,
@@ -154,7 +156,7 @@ pub async fn create_file_path(
 		.select(location::select!({ id pub_id }))
 		.exec()
 		.await?
-		.unwrap();
+		.ok_or(FilePathError::LocationNotFound(location_id))?;

 	let params = {
 		use file_path::*;
@@ -291,7 +293,9 @@ pub async fn ensure_sub_path_is_in_location(
 	let mut sub_path = sub_path.as_ref();
 	if sub_path.starts_with("/") {
 		// SAFETY: we just checked that it starts with the separator
-		sub_path = sub_path.strip_prefix("/").unwrap();
+		sub_path = sub_path
+			.strip_prefix("/")
+			.expect("we just checked that it starts with the separator");
 	}
 	let location_path = location_path.as_ref();
@@ -361,7 +365,9 @@ pub async fn ensure_sub_path_is_directory(
 		Err(e) if e.kind() == io::ErrorKind::NotFound => {
 			if sub_path.starts_with("/") {
 				// SAFETY: we just checked that it starts with the separator
-				sub_path = sub_path.strip_prefix("/").unwrap();
+				sub_path = sub_path
+					.strip_prefix("/")
+					.expect("we just checked that it starts with the separator");
 			}

 			let location_path = location_path.as_ref();
diff --git a/core/src/location/indexer/walk.rs b/core/src/location/indexer/walk.rs
index 50b54c3d0..4941f7e76 100644
--- a/core/src/location/indexer/walk.rs
+++ b/core/src/location/indexer/walk.rs
@@ -615,7 +615,7 @@ mod tests {
 	use globset::{Glob, GlobSetBuilder};
 	use tempfile::{tempdir, TempDir};
 	use tokio::fs;
-	use tracing_test::traced_test;
+	// use tracing_test::traced_test;

 	impl PartialEq for WalkedEntry {
 		fn eq(&self, other: &Self) -> bool {
@@ -758,7 +758,7 @@ mod tests {
 	}

 	#[tokio::test]
-	#[traced_test]
+	// #[traced_test]
 	async fn test_only_photos() {
 		let root = prepare_location().await;
 		let root_path = root.path();
@@ -822,7 +822,7 @@ mod tests {
 	}

 	#[tokio::test]
-	#[traced_test]
+	// #[traced_test]
 	async fn test_git_repos() {
 		let root = prepare_location().await;
 		let root_path = root.path();
@@ -895,7 +895,7 @@ mod tests {
 	}

 	#[tokio::test]
-	#[traced_test]
+	// #[traced_test]
 	async fn git_repos_without_deps_or_build_dirs() {
 		let root = prepare_location().await;
 		let root_path = root.path();
diff --git a/core/src/location/manager/mod.rs b/core/src/location/manager/mod.rs
index 565efc663..393cf6e8c 100644
--- a/core/src/location/manager/mod.rs
+++ b/core/src/location/manager/mod.rs
@@ -103,6 +103,11 @@ pub enum LocationManagerError {
 	#[error("Job Manager error: (error: {0})")]
 	JobManager(#[from] JobManagerError),

+	#[error("invalid inode")]
+	InvalidInode,
+	#[error("invalid device")]
+	InvalidDevice,
+
 	#[error(transparent)]
 	FileIO(#[from] FileIOError),
 }
@@ -554,10 +559,10 @@ impl Drop for StopWatcherGuard<'_> {
 	fn drop(&mut self) {
 		if cfg!(feature = "location-watcher") {
 			// FIXME: change this Drop to async drop in the future
-			if let Err(e) = block_on(
-				self.manager
-					.reinit_watcher(self.location_id, self.library.take().unwrap()),
-			) {
+			if let Err(e) = block_on(self.manager.reinit_watcher(
+				self.location_id,
self.library.take().expect("library should be set"), + )) { error!("Failed to reinit watcher on stop watcher guard drop: {e}"); } } @@ -578,9 +583,9 @@ impl Drop for IgnoreEventsForPathGuard<'_> { // FIXME: change this Drop to async drop in the future if let Err(e) = block_on(self.manager.watcher_management_message( self.location_id, - self.library.take().unwrap(), + self.library.take().expect("library should be set"), WatcherManagementMessageAction::IgnoreEventsForPath { - path: self.path.take().unwrap(), + path: self.path.take().expect("path should be set"), ignore: false, }, )) { diff --git a/core/src/location/manager/watcher/utils.rs b/core/src/location/manager/watcher/utils.rs index 064120c47..283078b4a 100644 --- a/core/src/location/manager/watcher/utils.rs +++ b/core/src/location/manager/watcher/utils.rs @@ -685,13 +685,23 @@ pub(super) async fn extract_inode_and_device_from_path( .select(file_path::select!( {inode device} )) .exec() .await? - .map(|file_path| { - ( - u64::from_le_bytes(file_path.inode[0..8].try_into().unwrap()), - u64::from_le_bytes(file_path.device[0..8].try_into().unwrap()), - ) - }) - .ok_or_else(|| FilePathError::NotFound(path.into()).into()) + .map_or( + Err(FilePathError::NotFound(path.into()).into()), + |file_path| { + Ok(( + u64::from_le_bytes( + file_path.inode[0..8] + .try_into() + .map_err(|_| LocationManagerError::InvalidInode)?, + ), + u64::from_le_bytes( + file_path.device[0..8] + .try_into() + .map_err(|_| LocationManagerError::InvalidDevice)?, + ), + )) + }, + ) } pub(super) async fn extract_location_path( diff --git a/core/src/location/mod.rs b/core/src/location/mod.rs index dc361070a..23bef9885 100644 --- a/core/src/location/mod.rs +++ b/core/src/location/mod.rs @@ -156,7 +156,10 @@ impl LocationCreateArgs { if metadata.has_library(library.id) { return Err(LocationError::NeedRelink { // SAFETY: This unwrap is ok as we checked that we have this library_id - old_path: metadata.location_path(library.id).unwrap().to_path_buf(), + old_path: metadata + .location_path(library.id) + .expect("This unwrap is ok as we checked that we have this library_id") + .to_path_buf(), new_path: self.path, }); } @@ -281,7 +284,9 @@ impl LocationUpdateArgs { if let Some(mut metadata) = SpacedriveLocationMetadataFile::try_load(&location.path).await? 
{ - metadata.update(library.id, self.name.unwrap()).await?; + metadata + .update(library.id, self.name.expect("TODO")) + .await?; } } } diff --git a/core/src/node/mod.rs b/core/src/node/mod.rs index 04af1c7cd..af8967f6d 100644 --- a/core/src/node/mod.rs +++ b/core/src/node/mod.rs @@ -18,20 +18,16 @@ pub struct LibraryNode { pub last_seen: DateTime, } -impl From for LibraryNode { - fn from(data: node::Data) -> Self { - Self { - uuid: Uuid::from_slice(&data.pub_id).unwrap(), - name: data.name, - platform: Platform::try_from(data.platform).unwrap(), - last_seen: data.last_seen.into(), - } - } -} +impl TryFrom for LibraryNode { + type Error = String; -impl From> for LibraryNode { - fn from(data: Box) -> Self { - Self::from(*data) + fn try_from(data: node::Data) -> Result { + Ok(Self { + uuid: Uuid::from_slice(&data.pub_id).map_err(|_| "Invalid node pub_id")?, + name: data.name, + platform: Platform::try_from(data.platform).map_err(|_| "Invalid platform_id")?, + last_seen: data.last_seen.into(), + }) } } diff --git a/core/src/node/peer_request.rs b/core/src/node/peer_request.rs index 03762f2ab..83af4243c 100644 --- a/core/src/node/peer_request.rs +++ b/core/src/node/peer_request.rs @@ -1,4 +1,4 @@ -#![allow(dead_code, unused_variables)] // TODO: Reenable once this is working +#![allow(dead_code, unused_variables, clippy::panic, clippy::unwrap_used)] // TODO: Reenable once this is working use serde::{Deserialize, Serialize}; use specta::Type; diff --git a/core/src/object/file_identifier/file_identifier_job.rs b/core/src/object/file_identifier/file_identifier_job.rs index a5b3b18f8..43825cf79 100644 --- a/core/src/object/file_identifier/file_identifier_job.rs +++ b/core/src/object/file_identifier/file_identifier_job.rs @@ -149,7 +149,7 @@ impl StatefulJob for FileIdentifierJob { .select(file_path::select!({ id })) .exec() .await? - .unwrap(); // SAFETY: We already validated before that there are orphans `file_path`s + .expect("We already validated before that there are orphans `file_path`s"); // SAFETY: We already validated before that there are orphans `file_path`s data.cursor = first_path.id; diff --git a/core/src/object/file_identifier/mod.rs b/core/src/object/file_identifier/mod.rs index 3b4afb108..ab6618738 100644 --- a/core/src/object/file_identifier/mod.rs +++ b/core/src/object/file_identifier/mod.rs @@ -108,7 +108,8 @@ async fn identifier_job_step( .await .map(|params| { ( - Uuid::from_slice(&file_path.pub_id).unwrap(), + // SAFETY: This should never happen + Uuid::from_slice(&file_path.pub_id).expect("file_path.pub_id is invalid!"), (params, file_path), ) }) @@ -191,7 +192,7 @@ async fn identifier_job_step( let (crdt_op, db_op) = file_path_object_connect_ops( pub_id, // SAFETY: This pub_id is generated by the uuid lib, but we have to store bytes in sqlite - Uuid::from_slice(&object.pub_id).unwrap(), + Uuid::from_slice(&object.pub_id).expect("uuid bytes are invalid"), sync, db, ); diff --git a/core/src/object/file_identifier/shallow_file_identifier_job.rs b/core/src/object/file_identifier/shallow_file_identifier_job.rs index 4385407f5..2b36086fb 100644 --- a/core/src/object/file_identifier/shallow_file_identifier_job.rs +++ b/core/src/object/file_identifier/shallow_file_identifier_job.rs @@ -148,7 +148,7 @@ impl StatefulJob for ShallowFileIdentifierJob { .select(file_path::select!({ id })) .exec() .await? 
- .unwrap(); // SAFETY: We already validated before that there are orphans `file_path`s + .expect("We already validated before that there are orphans `file_path`s"); // SAFETY: We already validated before that there are orphans `file_path`s data.cursor = first_path.id; diff --git a/core/src/object/fs/encrypt.rs b/core/src/object/fs/encrypt.rs index ae72d6fb1..af30786ca 100644 --- a/core/src/object/fs/encrypt.rs +++ b/core/src/object/fs/encrypt.rs @@ -194,7 +194,12 @@ impl StatefulJob for FileEncryptorJob { .config() .data_directory() .join("thumbnails") - .join(info.path_data.cas_id.as_ref().unwrap()) + .join( + info.path_data + .cas_id + .as_ref() + .ok_or(JobError::MissingCasId)?, + ) .with_extension("wepb"); if tokio::fs::metadata(&pvm_path).await.is_ok() { diff --git a/core/src/object/preview/thumbnail/mod.rs b/core/src/object/preview/thumbnail/mod.rs index e92b4f1e1..9f94109f2 100644 --- a/core/src/object/preview/thumbnail/mod.rs +++ b/core/src/object/preview/thumbnail/mod.rs @@ -124,7 +124,11 @@ pub async fn generate_image_thumbnail>( let webp = block_in_place(|| -> Result, Box> { #[cfg(all(feature = "heif", target_os = "macos"))] let img = { - let ext = file_path.as_ref().extension().unwrap().to_ascii_lowercase(); + let ext = file_path + .as_ref() + .extension() + .unwrap_or_default() + .to_ascii_lowercase(); if HEIF_EXTENSIONS .iter() .any(|e| ext == std::ffi::OsStr::new(e)) diff --git a/core/src/object/validation/validator_job.rs b/core/src/object/validation/validator_job.rs index af1880e46..1579686d2 100644 --- a/core/src/object/validation/validator_job.rs +++ b/core/src/object/validation/validator_job.rs @@ -73,7 +73,10 @@ impl StatefulJob for ObjectValidatorJob { .find_unique(location::id::equals(state.init.location_id)) .exec() .await? 
-		.unwrap();
+		.ok_or(JobError::MissingFromDb(
+			"location",
+			format!("id={}", state.init.location_id),
+		))?;

 		state.data = Some(ObjectValidatorJobState {
 			root_path: location.path.into(),
diff --git a/core/src/p2p/p2p_manager.rs b/core/src/p2p/p2p_manager.rs
index c7aac3904..6d41ea53c 100644
--- a/core/src/p2p/p2p_manager.rs
+++ b/core/src/p2p/p2p_manager.rs
@@ -1,4 +1,7 @@
+#![allow(clippy::unwrap_used)] // TODO: Remove once this is fully stabilised
+
 use std::{
+	borrow::Cow,
 	collections::HashMap,
 	path::PathBuf,
 	sync::Arc,
@@ -8,7 +11,7 @@ use std::{
 use sd_p2p::{
 	spaceblock::{self, BlockSize, SpacedropRequest},
 	spacetime::SpaceTimeStream,
-	Event, Manager, MetadataManager, PeerId,
+	Event, Manager, ManagerError, MetadataManager, PeerId,
 };
 use sd_sync::CRDTOperation;
 use serde::Serialize;
@@ -58,7 +61,7 @@ pub struct P2PManager {
 impl P2PManager {
 	pub async fn new(
		node_config: Arc<NodeConfigManager>,
-	) -> (Arc<P2PManager>, broadcast::Receiver<(Uuid, Vec<u8>)>) {
+	) -> Result<(Arc<P2PManager>, broadcast::Receiver<(Uuid, Vec<u8>)>), ManagerError> {
 		let (config, keypair) = {
 			let config = node_config.get().await;
 			(Self::config_to_metadata(&config), config.keypair)
 		};
@@ -67,9 +70,7 @@
 		let metadata_manager = MetadataManager::new(config);

 		let (manager, mut stream) =
-			Manager::new(SPACEDRIVE_APP_ID, &keypair, metadata_manager.clone())
-				.await
-				.unwrap();
+			Manager::new(SPACEDRIVE_APP_ID, &keypair, metadata_manager.clone()).await?;

 		info!(
 			"Node '{}' is now online listening at addresses: {:?}",
@@ -226,41 +227,7 @@
 			}
 		});

-		// TODO(@Oscar): Remove this in the future once i'm done using it for testing
-		if std::env::var("SPACEDROP_DEMO").is_ok() {
-			// tokio::spawn({
-			// 	let this = this.clone();
-			// 	async move {
-			// 		tokio::time::sleep(std::time::Duration::from_secs(5)).await;
-			// 		let mut connected = this
-			// 			.manager
-			// 			.get_connected_peers()
-			// 			.await
-			// 			.unwrap()
-			// 			.into_iter();
-			// 		if let Some(peer_id) = connected.next() {
-			// 			info!("Starting Spacedrop to peer '{}'", peer_id);
-			// 			this.broadcast_sync_events(
-			// 				Uuid::from_str("e4372586-d028-48f8-8be6-b4ff781a7dc2").unwrap(),
-			// 				vec![CRDTOperation {
-			// 					node: Uuid::new_v4(),
-			// 					timestamp: NTP64(1),
-			// 					id: Uuid::new_v4(),
-			// 					typ: CRDTOperationType::Owned(OwnedOperation {
-			// 						model: "TODO".to_owned(),
-			// 						items: Vec::new(),
-			// 					}),
-			// 				}],
-			// 			)
-			// 			.await;
-			// 		} else {
-			// 			info!("No clients found so skipping Spacedrop demo!");
-			// 		}
-			// 	}
-			// });
-		}
-
-		(this, rx2)
+		Ok((this, rx2))
 	}

 	fn config_to_metadata(config: &NodeConfig) -> PeerMetadata {
@@ -296,7 +263,13 @@
 	#[allow(unused)] // TODO: Remove `allow(unused)` once integrated
	pub async fn broadcast_sync_events(&self, library_id: Uuid, event: Vec<CRDTOperation>) {
-		let mut buf = rmp_serde::to_vec_named(&event).unwrap(); // TODO: Error handling
+		let mut buf = match rmp_serde::to_vec_named(&event) {
+			Ok(buf) => buf,
+			Err(e) => {
+				error!("Failed to serialize sync event: {:?}", e);
+				return;
+			}
+		};
 		let mut head_buf = Header::Sync(library_id, buf.len() as u32).to_bytes(); // Max Sync payload is like 4GB
 		head_buf.append(&mut buf);
@@ -309,26 +282,31 @@
 		self.manager.broadcast(Header::Ping.to_bytes()).await;
 	}

-	pub async fn big_bad_spacedrop(&self, peer_id: PeerId, path: PathBuf) {
-		let mut stream = self.manager.stream(peer_id).await.unwrap(); // TODO: handle providing incorrect peer id
+	// TODO: Proper error handling
+	pub async fn big_bad_spacedrop(&self, peer_id: PeerId, path: PathBuf) -> Result<(), ()> {
+		let mut stream = self.manager.stream(peer_id).await.map_err(|_| ())?; // TODO: handle providing incorrect peer id

-		let file = File::open(&path).await.unwrap();
-		let metadata = file.metadata().await.unwrap();
+		let file = File::open(&path).await.map_err(|_| ())?;
+		let metadata = file.metadata().await.map_err(|_| ())?;

 		let header = Header::Spacedrop(SpacedropRequest {
-			name: path.file_name().unwrap().to_str().unwrap().to_string(), // TODO: Encode this as bytes instead
+			name: path
+				.file_name()
+				.map(|v| v.to_string_lossy())
+				.unwrap_or(Cow::Borrowed(""))
+				.to_string(),
 			size: metadata.len(),
 			block_size: BlockSize::from_size(metadata.len()), // TODO: This should be dynamic
 		});

-		stream.write_all(&header.to_bytes()).await.unwrap();
+		stream.write_all(&header.to_bytes()).await.map_err(|_| ())?;

 		debug!("Waiting for Spacedrop to be accepted from peer '{peer_id}'");
 		let mut buf = [0; 1];
 		// TODO: Add timeout so the connection is dropped if they never respond
-		stream.read_exact(&mut buf).await.unwrap();
+		stream.read_exact(&mut buf).await.map_err(|_| ())?;
 		if buf[0] != 1 {
 			debug!("Spacedrop was rejected from peer '{peer_id}'");
-			return;
+			return Ok(());
 		}

 		debug!("Starting Spacedrop to peer '{peer_id}'");
@@ -349,6 +327,8 @@
 			"Finished Spacedrop to peer '{peer_id}' after '{:?}'",
 			i.elapsed()
 		);
+
+		Ok(())
 	}

 	pub async fn shutdown(&self) {
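Review note: the protocol changes below give `Header::from_stream` and `SpacedropRequest::from_stream` typed errors instead of `Result<_, ()>`. For context, the Spacedrop request header on the wire is a little-endian `u16` name length, the UTF-8 name bytes, then a `u64` file size. A rough round-trip sketch using tokio's `io-util` extension traits; the function names are illustrative, only the field layout is taken from the diff:

```rust
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};

async fn write_request(
	stream: &mut (impl AsyncWrite + Unpin),
	name: &str,
	size: u64,
) -> std::io::Result<()> {
	stream.write_u16_le(name.len() as u16).await?; // name length prefix
	stream.write_all(name.as_bytes()).await?; // UTF-8 name bytes
	stream.write_u64_le(size).await?; // file size
	Ok(())
}

async fn read_request(
	stream: &mut (impl AsyncRead + Unpin),
) -> std::io::Result<(String, u64)> {
	let name_len = stream.read_u16_le().await?;
	let mut name = vec![0u8; name_len as usize];
	stream.read_exact(&mut name).await?;
	let name = String::from_utf8(name)
		.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
	let size = stream.read_u64_le().await?;
	Ok((name, size))
}
```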
diff --git a/core/src/p2p/protocol.rs b/core/src/p2p/protocol.rs
index ca9e99aa9..d5eb26a0a 100644
--- a/core/src/p2p/protocol.rs
+++ b/core/src/p2p/protocol.rs
@@ -1,7 +1,11 @@
+use thiserror::Error;
 use tokio::io::AsyncReadExt;
 use uuid::Uuid;

-use sd_p2p::{spaceblock::SpacedropRequest, spacetime::SpaceTimeStream};
+use sd_p2p::{
+	spaceblock::{SpacedropRequest, SpacedropRequestError},
+	spacetime::SpaceTimeStream,
+};

 /// TODO
 #[derive(Debug, PartialEq, Eq)]
@@ -11,31 +15,65 @@ pub enum Header {
 	Sync(Uuid, u32),
 }

+#[derive(Debug, Error)]
+pub enum SyncRequestError {
+	#[error("io error reading library id: {0}")]
+	LibraryIdIoError(std::io::Error),
+	#[error("io error decoding library id: {0}")]
+	ErrorDecodingLibraryId(uuid::Error),
+	#[error("io error reading sync payload len: {0}")]
+	PayloadLenIoError(std::io::Error),
+}
+
+#[derive(Debug, Error)]
+pub enum HeaderError {
+	#[error("io error reading discriminator: {0}")]
+	DiscriminatorIoError(std::io::Error),
+	#[error("invalid discriminator '{0}'")]
+	InvalidDiscriminator(u8),
+	#[error("error reading spacedrop request: {0}")]
+	SpacedropRequestError(#[from] SpacedropRequestError),
+	#[error("error reading sync request: {0}")]
+	SyncRequestError(#[from] SyncRequestError),
+	#[error("invalid request. Spacedrop requires a unicast stream!")]
+	SpacedropOverMulticastIsForbidden,
+}
+
 impl Header {
-	pub async fn from_stream(stream: &mut SpaceTimeStream) -> Result<Self, ()> {
-		let discriminator = stream.read_u8().await.map_err(|e| {
-			dbg!(e);
-		})?; // TODO: Error handling
+	pub async fn from_stream(stream: &mut SpaceTimeStream) -> Result<Self, HeaderError> {
+		let discriminator = stream
+			.read_u8()
+			.await
+			.map_err(HeaderError::DiscriminatorIoError)?;

 		match discriminator {
 			0 => match stream {
 				SpaceTimeStream::Unicast(stream) => Ok(Self::Spacedrop(
 					SpacedropRequest::from_stream(stream).await?,
 				)),
-				_ => todo!(),
+				_ => Err(HeaderError::SpacedropOverMulticastIsForbidden),
 			},
 			1 => Ok(Self::Ping),
 			2 => {
 				let mut uuid = [0u8; 16];
-				stream.read_exact(&mut uuid).await.map_err(|_| ())?; // TODO: Error handling
+				stream
+					.read_exact(&mut uuid)
+					.await
+					.map_err(SyncRequestError::LibraryIdIoError)?;

 				let mut len = [0; 4];
-				stream.read_exact(&mut len).await.map_err(|_| ())?; // TODO: Error handling
+				stream
+					.read_exact(&mut len)
+					.await
+					.map_err(SyncRequestError::PayloadLenIoError)?;
 				let len = u32::from_le_bytes(len);

-				Ok(Self::Sync(Uuid::from_slice(&uuid).unwrap(), len)) // TODO: Error handling
+				Ok(Self::Sync(
+					Uuid::from_slice(&uuid).map_err(SyncRequestError::ErrorDecodingLibraryId)?,
+					len,
+				))
 			}
-			_ => Err(()),
+			d => Err(HeaderError::InvalidDiscriminator(d)),
 		}
 	}
diff --git a/core/src/sync/manager.rs b/core/src/sync/manager.rs
index 885f4886d..7556cb0f9 100644
--- a/core/src/sync/manager.rs
+++ b/core/src/sync/manager.rs
@@ -1,3 +1,5 @@
+#![allow(clippy::unwrap_used, clippy::panic)] // TODO: Brendan remove this once you've got error handling here
+
 use crate::prisma::*;

 use std::{collections::HashMap, sync::Arc};
diff --git a/core/src/util/debug_initializer.rs b/core/src/util/debug_initializer.rs
index fc8a8fd80..591c79d8a 100644
--- a/core/src/util/debug_initializer.rs
+++ b/core/src/util/debug_initializer.rs
@@ -1,16 +1,22 @@
 // ! A system for loading a default set of data on startup. This is ONLY enabled in development builds.

 use std::{
+	io,
 	path::{Path, PathBuf},
 	time::Duration,
 };

 use crate::{
-	library::LibraryConfig,
-	location::{delete_location, scan_location, LocationCreateArgs},
+	job::JobManagerError,
+	library::{LibraryConfig, LibraryManagerError},
+	location::{
+		delete_location, scan_location, LocationCreateArgs, LocationError, LocationManagerError,
+	},
 	prisma::location,
 };
+use prisma_client_rust::QueryError;
 use serde::Deserialize;
+use thiserror::Error;
 use tokio::{
 	fs::{self, metadata},
 	time::sleep,
@@ -47,39 +53,56 @@ pub struct InitConfig {
 	path: PathBuf,
 }

+#[derive(Error, Debug)]
+pub enum InitConfigError {
+	#[error("error loading the init data: {0}")]
+	Io(#[from] io::Error),
+	#[error("error parsing the init data: {0}")]
+	Json(#[from] serde_json::Error),
+	#[error("job manager: {0}")]
+	JobManager(#[from] JobManagerError),
+	#[error("location manager: {0}")]
+	LocationManager(#[from] LocationManagerError),
+	#[error("library manager: {0}")]
+	LibraryManager(#[from] LibraryManagerError),
+	#[error("query error: {0}")]
+	QueryError(#[from] QueryError),
+	#[error("location error: {0}")]
+	LocationError(#[from] LocationError),
+}
+
 impl InitConfig {
-	pub async fn load(data_dir: &Path) -> Option<Self> {
-		let path = std::env::current_dir()
-			.unwrap()
+	pub async fn load(data_dir: &Path) -> Result<Option<Self>, InitConfigError> {
+		let path = std::env::current_dir()?
			.join(std::env::var("SD_INIT_DATA").unwrap_or("sd_init.json".to_string()));

 		if metadata(&path).await.is_ok() {
-			let config = fs::read_to_string(&path).await.unwrap();
-			let mut config: InitConfig = serde_json::from_str(&config).unwrap();
+			let config = fs::read_to_string(&path).await?;
+			let mut config: InitConfig = serde_json::from_str(&config)?;

 			config.path = path;

 			if config.reset_on_startup && data_dir.exists() {
 				warn!("previous 'SD_DATA_DIR' was removed on startup!");
-				fs::remove_dir_all(&data_dir).await.unwrap();
+				fs::remove_dir_all(&data_dir).await?;
 			}

-			return Some(config);
+			return Ok(Some(config));
 		}

-		None
+		Ok(None)
 	}

-	pub async fn apply(self, library_manager: &LibraryManager) {
+	pub async fn apply(self, library_manager: &LibraryManager) -> Result<(), InitConfigError> {
 		info!("Initializing app from file: {:?}", self.path);

 		for lib in self.libraries {
 			let name = lib.name.clone();
-			let handle = tokio::spawn(async move {
+			let _guard = AbortOnDrop(tokio::spawn(async move {
 				loop {
 					info!("Initializing library '{name}' from 'sd_init.json'...");
 					sleep(Duration::from_secs(1)).await;
 				}
-			});
+			}));

 			let library = match library_manager.get_library(lib.id).await {
 				Some(lib) => lib,
@@ -92,25 +115,27 @@
 							description: lib.description.unwrap_or("".to_string()),
 						},
 					)
-					.await
-					.unwrap();
+					.await?;

-					library_manager.get_library(library.uuid).await.unwrap()
+					match library_manager.get_library(library.uuid).await {
+						Some(lib) => lib,
+						None => {
+							warn!(
+								"Debug init error: library '{}' was not found after being created!",
+								library.config.name
+							);
+							return Ok(());
+						}
+					}
 				}
 			};

 			if lib.reset_locations_on_startup {
-				let locations = library
-					.db
-					.location()
-					.find_many(vec![])
-					.exec()
-					.await
-					.unwrap();
+				let locations = library.db.location().find_many(vec![]).exec().await?;

 				for location in locations {
 					warn!("deleting location: {:?}", location.path);
-					delete_location(&library, location.id).await.unwrap();
+					delete_location(&library, location.id).await?;
 				}
 			}
@@ -120,34 +145,47 @@
 				.location()
 				.find_first(vec![location::path::equals(loc.path.clone())])
 				.exec()
-				.await
-				.unwrap()
+				.await?
 			{
 				warn!("deleting location: {:?}", location.path);
-				delete_location(&library, location.id).await.unwrap();
+				delete_location(&library, location.id).await?;
 			}

 			let sd_file = PathBuf::from(&loc.path).join(".spacedrive");
 			if sd_file.exists() {
-				fs::remove_file(sd_file).await.unwrap();
+				fs::remove_file(sd_file).await?;
 			}

 			let location = LocationCreateArgs {
-				path: loc.path.into(),
+				path: loc.path.clone().into(),
 				dry_run: false,
 				indexer_rules_ids: Vec::new(),
 			}
 			.create(&library)
-			.await
-			.unwrap()
-			.unwrap();
-
-			scan_location(&library, location).await.unwrap();
+			.await?;
+			match location {
+				Some(location) => {
+					scan_location(&library, location).await?;
+				}
+				None => {
+					warn!(
+						"Debug init error: location '{}' was not found after being created!",
+						loc.path
+					);
+				}
+			}
 		}
-
-			handle.abort();
 		}

 		info!("Initialized app from file: {:?}", self.path);
+		Ok(())
	}
}
+
+pub struct AbortOnDrop<T>(tokio::task::JoinHandle<T>);
+
+impl<T> Drop for AbortOnDrop<T> {
+	fn drop(&mut self) {
+		self.0.abort();
+	}
+}
diff --git a/core/src/util/migrator.rs b/core/src/util/migrator.rs
index 3b249959f..cef8d2e4d 100644
--- a/core/src/util/migrator.rs
+++ b/core/src/util/migrator.rs
@@ -49,7 +49,7 @@ where
 	// }

	pub fn load(&self, path: &Path) -> Result<T, MigratorError> {
-		match path.try_exists().unwrap() {
+		match path.try_exists()? {
 			true => {
 				let mut file = File::options().read(true).write(true).open(path)?;
 				let mut cfg: BaseConfig = serde_json::from_reader(BufReader::new(&mut file))?;
@@ -89,10 +89,7 @@ where
 					other: match serde_json::to_value(content)? {
 						Value::Object(map) => map,
 						_ => {
-							panic!(
-								"Type '{}' as generic `Migrator::T` must be serializable to a Serde object!",
-								type_name::<T>()
-							);
+							return Err(MigratorError::InvalidType(type_name::<T>()));
 						}
 					},
 				};
@@ -113,6 +110,8 @@ pub enum MigratorError {
 		"the config file is for a newer version of the app. Please update to the latest version to load it!"
 	)]
 	YourAppIsOutdated,
+	#[error("Type '{0}' as generic `Migrator::T` must be serializable to a Serde object!")]
+	InvalidType(&'static str),
 	#[error("custom migration error: {0}")]
 	Custom(String),
 }
diff --git a/core/src/volume.rs b/core/src/volume.rs
index 748959018..07ddadee2 100644
--- a/core/src/volume.rs
+++ b/core/src/volume.rs
@@ -11,6 +11,7 @@ use sysinfo::{DiskExt, System, SystemExt};
 use thiserror::Error;

 #[derive(Serialize, Deserialize, Debug, Clone, Type)]
+#[allow(clippy::upper_case_acronyms)]
 pub enum DiskType {
 	SSD,
 	HDD,
diff --git a/crates/p2p/Cargo.toml b/crates/p2p/Cargo.toml
index 957c03cb9..0d0516905 100644
--- a/crates/p2p/Cargo.toml
+++ b/crates/p2p/Cargo.toml
@@ -20,17 +20,19 @@ tokio = { workspace = true, features = [
 	"io-util",
 	"fs",
 ] }
-libp2p = { version = "0.51.0", features = ["tokio", "quic", "serde"] }
+libp2p = { version = "0.51.3", features = ["tokio", "serde"] }
+libp2p-quic = { version = "0.7.0-alpha.3", features = ["tokio"] }
+if-watch = { version = "3.0.1", features = ["tokio"] } # Override the features of if-watch which is used by libp2p-quic
 mdns-sd = "0.6.1"
-thiserror = "1.0.39"
+thiserror = "1.0.40"
 tracing = "0.1.37"
-serde = { version = "1.0.152", features = ["derive"] }
+serde = { version = "1.0.163", features = ["derive"] }
 rmp-serde = "1.1.1"
 specta = { workspace = true }
 flume = "0.10.14"
-tokio-util = { version = "0.7.7", features = ["compat"] }
+tokio-util = { version = "0.7.8", features = ["compat"] }
 arc-swap = "1.6.0"

 [dev-dependencies]
 tokio = { workspace = true, features = ["rt-multi-thread"] }
-tracing-subscriber = { version = "0.3.16", features = ["env-filter"] }
+tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
diff --git a/crates/p2p/src/manager.rs b/crates/p2p/src/manager.rs
index 69b7dae98..e365caef9 100644
--- a/crates/p2p/src/manager.rs
+++ b/crates/p2p/src/manager.rs
@@ -7,7 +7,7 @@ use std::{
 	},
 };

-use libp2p::{core::muxing::StreamMuxerBox, quic, Swarm, Transport};
+use libp2p::{core::muxing::StreamMuxerBox, swarm::SwarmBuilder, Transport};
 use thiserror::Error;
 use tokio::sync::{mpsc, oneshot};
 use tracing::{debug, error, warn};
@@ -41,7 +41,7 @@ impl Manager {
 			.then_some(())
 			.ok_or(ManagerError::InvalidAppName)?;

-		let peer_id = PeerId(keypair.public().to_peer_id());
+		let peer_id = PeerId(keypair.peer_id());
 		let (event_stream_tx, event_stream_rx) = mpsc::channel(1024);

 		let (mdns, mdns_state) = Mdns::new(application_name, peer_id, metadata_manager)
@@ -60,13 +60,16 @@
 			event_stream_tx,
 		});

-		let mut swarm = Swarm::with_tokio_executor(
-			quic::GenTransport::<quic::tokio::Provider>::new(quic::Config::new(keypair.inner()))
-				.map(|(p, c), _| (p, StreamMuxerBox::new(c)))
-				.boxed(),
+		let mut swarm = SwarmBuilder::with_tokio_executor(
+			libp2p_quic::GenTransport::new(
+				libp2p_quic::Config::new(&keypair.inner()),
+			)
+			.map(|(p, c), _| (p, StreamMuxerBox::new(c)))
+			.boxed(),
 			SpaceTime::new(this.clone()),
-			keypair.public().to_peer_id(),
-
); + keypair.peer_id(), + ) + .build(); { let listener_id = swarm .listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse().expect("Error passing libp2p multiaddr. This value is hardcoded so this should be impossible.")) diff --git a/crates/p2p/src/manager_stream.rs b/crates/p2p/src/manager_stream.rs index 1eb3b48a1..58da7e7ad 100644 --- a/crates/p2p/src/manager_stream.rs +++ b/crates/p2p/src/manager_stream.rs @@ -12,7 +12,7 @@ use libp2p::{ futures::StreamExt, swarm::{ dial_opts::{DialOpts, PeerCondition}, - NetworkBehaviourAction, NotifyHandler, SwarmEvent, + NotifyHandler, SwarmEvent, ToSwarm, }, Swarm, }; @@ -114,7 +114,6 @@ where SwarmEvent::IncomingConnection { local_addr, .. } => debug!("incoming connection from '{}'", local_addr), SwarmEvent::IncomingConnectionError { local_addr, error, .. } => warn!("handshake error with incoming connection from '{}': {}", local_addr, error), SwarmEvent::OutgoingConnectionError { peer_id, error } => warn!("error establishing connection with '{:?}': {}", peer_id, error), - SwarmEvent::BannedPeer { peer_id, .. } => warn!("banned peer '{}' attempted to connection and was rejected", peer_id), SwarmEvent::NewListenAddr { address, .. } => { match quic_multiaddr_to_socketaddr(address) { Ok(addr) => { @@ -162,6 +161,8 @@ where } SwarmEvent::ListenerError { listener_id, error } => warn!("listener '{:?}' reported a non-fatal error: {}", listener_id, error), SwarmEvent::Dialing(_peer_id) => {}, + #[allow(deprecated)] + SwarmEvent::BannedPeer { .. } => {}, } } } @@ -202,26 +203,25 @@ where } } ManagerStreamAction::StartStream(peer_id, rx) => { - self.swarm.behaviour_mut().pending_events.push_back( - NetworkBehaviourAction::NotifyHandler { + self.swarm + .behaviour_mut() + .pending_events + .push_back(ToSwarm::NotifyHandler { peer_id: peer_id.0, handler: NotifyHandler::Any, event: OutboundRequest::Unicast(rx), - }, - ); + }); } ManagerStreamAction::BroadcastData(data) => { let connected_peers = self.swarm.connected_peers().copied().collect::>(); let behaviour = self.swarm.behaviour_mut(); debug!("Broadcasting message to '{:?}'", connected_peers); for peer_id in connected_peers { - behaviour - .pending_events - .push_back(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: OutboundRequest::Broadcast(data.clone()), - }); + behaviour.pending_events.push_back(ToSwarm::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: OutboundRequest::Broadcast(data.clone()), + }); } } ManagerStreamAction::Shutdown(tx) => { diff --git a/crates/p2p/src/spaceblock/mod.rs b/crates/p2p/src/spaceblock/mod.rs index 823f7dd29..60535b299 100644 --- a/crates/p2p/src/spaceblock/mod.rs +++ b/crates/p2p/src/spaceblock/mod.rs @@ -6,8 +6,10 @@ use std::{ marker::PhantomData, path::{Path, PathBuf}, + string::FromUtf8Error, }; +use thiserror::Error; use tokio::{ fs::File, io::{AsyncBufRead, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader}, @@ -42,22 +44,38 @@ pub struct SpacedropRequest { pub block_size: BlockSize, } +#[derive(Debug, Error)] +pub enum SpacedropRequestError { + #[error("io error reading name len: {0}")] + NameLenIoError(std::io::Error), + #[error("io error reading name: {0}")] + NameIoError(std::io::Error), + #[error("error utf-8 decoding name: {0}")] + NameFormatError(FromUtf8Error), + #[error("io error reading file size: {0}")] + SizeIoError(std::io::Error), +} + impl SpacedropRequest { - pub async fn from_stream(stream: &mut (impl AsyncRead + Unpin)) -> Result { - let mut name_len = [0; 2]; - stream.read_exact(&mut 
name_len).await.map_err(|_| ())?; // TODO: Error handling - let name_len = u16::from_le_bytes(name_len); - + pub async fn from_stream( + stream: &mut (impl AsyncRead + Unpin), + ) -> Result { + let name_len = stream + .read_u16_le() + .await + .map_err(SpacedropRequestError::NameLenIoError)?; let mut name = vec![0u8; name_len as usize]; - stream.read_exact(&mut name).await.map_err(|_| ())?; // TODO: Error handling - let name = String::from_utf8(name).map_err(|_| ())?; // TODO: Error handling + stream + .read_exact(&mut name) + .await + .map_err(SpacedropRequestError::NameIoError)?; + let name = String::from_utf8(name).map_err(SpacedropRequestError::NameFormatError)?; - let mut size = [0; 8]; - stream.read_exact(&mut size).await.map_err(|_| ())?; // TODO: Error handling - let size = u64::from_le_bytes(size); - - // TODO: If we change `BlockSize` and both clients are running a different version this will not match up and everything will explode - let block_size = BlockSize::from_size(size); // TODO: Error handling + let size = stream + .read_u64_le() + .await + .map_err(SpacedropRequestError::SizeIoError)?; + let block_size = BlockSize::from_size(size); // TODO: Get from stream: stream.read_u8().await.map_err(|_| ())?; // TODO: Error handling Ok(Self { name, diff --git a/crates/p2p/src/spacetime/behaviour.rs b/crates/p2p/src/spacetime/behaviour.rs index e6c303724..7a7556925 100644 --- a/crates/p2p/src/spacetime/behaviour.rs +++ b/crates/p2p/src/spacetime/behaviour.rs @@ -8,8 +8,8 @@ use libp2p::{ core::{ConnectedPoint, Endpoint}, swarm::{ derive_prelude::{ConnectionEstablished, ConnectionId, FromSwarm}, - ConnectionClosed, ConnectionDenied, ConnectionHandler, NetworkBehaviour, - NetworkBehaviourAction, PollParameters, THandler, THandlerInEvent, + ConnectionClosed, ConnectionDenied, ConnectionHandler, NetworkBehaviour, PollParameters, + THandler, THandlerInEvent, ToSwarm, }, Multiaddr, }; @@ -34,9 +34,8 @@ pub enum OutboundFailure {} /// This protocol sits under the application to abstract many complexities of 2 way connections and deals with authentication, chucking, etc. pub struct SpaceTime { pub(crate) manager: Arc>, - pub(crate) pending_events: VecDeque< - NetworkBehaviourAction<::OutEvent, THandlerInEvent>, - >, + pub(crate) pending_events: + VecDeque::OutEvent, THandlerInEvent>>, // For future me's sake, DON't try and refactor this to use shared state (for the nth time), it doesn't fit into libp2p's synchronous trait and polling model!!! 
// pub(crate) connected_peers: HashMap, } @@ -116,12 +115,11 @@ impl NetworkBehaviour for SpaceTime { { debug!("sending establishment request to peer '{}'", peer_id); if other_established == 0 { - self.pending_events - .push_back(NetworkBehaviourAction::GenerateEvent( - ManagerStreamAction::Event(Event::PeerConnected(ConnectedPeer { - peer_id, - })), - )); + self.pending_events.push_back(ToSwarm::GenerateEvent( + ManagerStreamAction::Event(Event::PeerConnected(ConnectedPeer { + peer_id, + })), + )); } } } @@ -133,10 +131,9 @@ impl NetworkBehaviour for SpaceTime { let peer_id = PeerId(peer_id); if remaining_established == 0 { debug!("Disconnected from peer '{}'", peer_id); - self.pending_events - .push_back(NetworkBehaviourAction::GenerateEvent( - ManagerStreamAction::Event(Event::PeerDisconnected(peer_id)), - )); + self.pending_events.push_back(ToSwarm::GenerateEvent( + ManagerStreamAction::Event(Event::PeerDisconnected(peer_id)), + )); } } FromSwarm::AddressChange(event) => { @@ -187,15 +184,14 @@ impl NetworkBehaviour for SpaceTime { _connection: ConnectionId, event: as ConnectionHandler>::OutEvent, ) { - self.pending_events - .push_back(NetworkBehaviourAction::GenerateEvent(event)); + self.pending_events.push_back(ToSwarm::GenerateEvent(event)); } fn poll( &mut self, _: &mut Context<'_>, _: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { if let Some(ev) = self.pending_events.pop_front() { return Poll::Ready(ev); } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { diff --git a/crates/p2p/src/utils/keypair.rs b/crates/p2p/src/utils/keypair.rs index 978b33f1f..62e4f6426 100644 --- a/crates/p2p/src/utils/keypair.rs +++ b/crates/p2p/src/utils/keypair.rs @@ -1,20 +1,22 @@ -use libp2p::identity::{ed25519, PublicKey}; +use libp2p::identity::ed25519::{self}; use serde::{Deserialize, Serialize}; #[derive(Debug, Clone)] -pub struct Keypair(libp2p::identity::Keypair); +pub struct Keypair(ed25519::Keypair); impl Keypair { pub fn generate() -> Self { - Self(libp2p::identity::Keypair::generate_ed25519()) + Self(ed25519::Keypair::generate()) } - pub fn public(&self) -> PublicKey { - self.0.public() + pub fn peer_id(&self) -> libp2p::PeerId { + let pk: libp2p::identity::PublicKey = self.0.public().into(); + + libp2p::PeerId::from_public_key(&pk) } - pub fn inner(&self) -> &libp2p::identity::Keypair { - &self.0 + pub fn inner(&self) -> libp2p::identity::Keypair { + self.0.clone().into() } } @@ -23,13 +25,7 @@ impl Serialize for Keypair { where S: serde::Serializer, { - match &self.0 { - libp2p::identity::Keypair::Ed25519(keypair) => { - serializer.serialize_bytes(&keypair.encode()) - } - #[allow(unreachable_patterns)] - _ => unreachable!(), - } + serializer.serialize_bytes(&self.0.to_bytes()) } } @@ -39,8 +35,9 @@ impl<'de> Deserialize<'de> for Keypair { D: serde::Deserializer<'de>, { let mut bytes = Vec::::deserialize(deserializer)?; - Ok(Self(libp2p::identity::Keypair::Ed25519( - ed25519::Keypair::decode(bytes.as_mut_slice()).map_err(serde::de::Error::custom)?, - ))) + Ok(Self( + ed25519::Keypair::try_from_bytes(bytes.as_mut_slice()) + .map_err(serde::de::Error::custom)?, + )) } } diff --git a/crates/p2p/src/utils/peer_id.rs b/crates/p2p/src/utils/peer_id.rs index 158aa67ef..e33b07b6d 100644 --- a/crates/p2p/src/utils/peer_id.rs +++ b/crates/p2p/src/utils/peer_id.rs @@ -10,6 +10,7 @@ pub struct PeerId( ); impl FromStr for PeerId { + #[allow(deprecated)] type Err = libp2p::core::ParseError; fn from_str(s: &str) -> Result { diff --git a/interface/ErrorFallback.tsx 
diff --git a/interface/ErrorFallback.tsx b/interface/ErrorFallback.tsx
index 3074eea78..a4afdb89c 100644
--- a/interface/ErrorFallback.tsx
+++ b/interface/ErrorFallback.tsx
@@ -1,11 +1,29 @@
 import { captureException } from '@sentry/browser';
 import { FallbackProps } from 'react-error-boundary';
+import { useRouteError } from 'react-router';
 import { useDebugState } from '@sd/client';
 import { Button } from '@sd/ui';
+import { useOperatingSystem } from './hooks';
+
+export function RouterErrorBoundary() {
+	const error = useRouteError();
+	return (
+		<ErrorPage
+			message={String(error)}
+			sendReportBtn={() => {
+				captureException(error);
+				location.reload();
+			}}
+			reloadBtn={() => {
+				location.reload();
+			}}
+		/>
+	);
+}
 
 export default ({ error, resetErrorBoundary }: FallbackProps) => (
 	<ErrorPage
 		message={error.message}
 		sendReportBtn={() => {
 			captureException(error);
 			resetErrorBoundary();
@@ -17,28 +35,34 @@ export default ({ error, resetErrorBoundary }: FallbackProps) => (
 export function ErrorPage({
 	reloadBtn,
 	sendReportBtn,
-	message
+	message,
+	submessage
 }: {
 	reloadBtn?: () => void;
 	sendReportBtn?: () => void;
 	message: string;
+	submessage?: string;
 }) {
 	const debug = useDebugState();
+	const os = useOperatingSystem();
+	const isMacOS = os === 'macOS';
+
+	if (!submessage && debug.enabled)
+		submessage = 'Check the console (CMD/CTRL + OPTION + i) for stack trace.';
 
 	return (
 		<div
 			data-tauri-drag-region
 			role="alert"
 			className="flex h-screen w-screen flex-col items-center justify-center rounded-lg border border-app-divider bg-app p-4"
 		>
 			<p className="m-3 text-sm font-bold text-ink-faint">APP CRASHED</p>
 			<h1 className="text-2xl font-bold text-ink">We're past the event horizon...</h1>
-			<pre className="m-2 text-ink">Error: {message}</pre>
-			{debug.enabled && (
-				<pre className="m-2 text-sm text-ink-dull">
-					Check the console (CMD/CTRL + OPTION + i) for stack trace.
-				</pre>
-			)}
+			<pre className="m-2 text-ink">{message}</pre>
+			{submessage && <pre className="m-2 text-sm text-ink-dull">{submessage}</pre>}
 			{reloadBtn && (
+			{isMacOS && (
+				<Button
+					onClick={() => {
+						platform?.openLogsDir?.();
+					}}
+					className="text-sm font-medium text-ink-faint"
+				>
+					Logs Folder
+					{node.data?.data_path + '/logs'}
+				</Button>
+			)}
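The new `submessage` prop lets callers supply their own secondary line, falling back to the debug-console hint when debug mode is on. A hypothetical usage sketch — `StartupError` and its submessage string are invented for illustration; only `ErrorPage` and its props come from the diff:

```tsx
import { ErrorPage } from '~/ErrorFallback';

// Hypothetical wrapper: renders a startup failure with a custom secondary line.
export function StartupError({ message }: { message: string }) {
	return (
		<ErrorPage
			message={message}
			submessage="Please report this error to the Spacedrive team."
			reloadBtn={() => location.reload()}
		/>
	);
}
```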
diff --git a/interface/app/index.tsx b/interface/app/index.tsx
index 807155c71..41991acad 100644
--- a/interface/app/index.tsx
+++ b/interface/app/index.tsx
@@ -1,6 +1,7 @@
 import { Navigate, Outlet, RouteObject } from 'react-router-dom';
 import { currentLibraryCache, useCachedLibraries, useInvalidateQuery } from '@sd/client';
 import { Dialogs } from '@sd/ui';
+import { RouterErrorBoundary } from '~/ErrorFallback';
 import { useKeybindHandler } from '~/hooks/useKeyboardHandler';
 import libraryRoutes from './$libraryId';
 import onboardingRoutes from './onboarding';
@@ -39,6 +40,7 @@ const Wrapper = () => {
 export const routes = [
 	{
 		element: <Wrapper />,
+		errorElement: <RouterErrorBoundary />,
 		children: [
 			{
 				index: true,
diff --git a/interface/package.json b/interface/package.json
index de6519dde..24782373d 100644
--- a/interface/package.json
+++ b/interface/package.json
@@ -19,6 +19,7 @@
 	"dependencies": {
 		"@fontsource/inter": "^4.5.13",
 		"@headlessui/react": "^1.7.3",
+		"@icons-pack/react-simple-icons": "^7.2.0",
 		"@radix-ui/react-progress": "^1.0.1",
 		"@radix-ui/react-slider": "^1.1.0",
 		"@radix-ui/react-toast": "^1.1.2",
diff --git a/interface/util/Platform.tsx b/interface/util/Platform.tsx
index 2af08672b..682017247 100644
--- a/interface/util/Platform.tsx
+++ b/interface/util/Platform.tsx
@@ -21,6 +21,7 @@ export type Platform = {
 	saveFilePickerDialog?(): Promise;
 	showDevtools?(): void;
 	openPath?(path: string): void;
+	openLogsDir?(): void;
 	// Opens a file path with a given ID
 	openFilePath?(library: string, id: number): any;
 	getFilePathOpenWithApps?(library: string, id: number): any;
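Because `openLogsDir` is declared optional on `Platform`, builds that cannot open a local folder (e.g. web) simply omit it, and callers guard with optional chaining as the error page does. A minimal illustrative sketch — `tryOpenLogs` is hypothetical, and the import path is assumed from usage inside the interface package; only the `Platform` type and its optional `openLogsDir?()` come from the diff:

```ts
import type { Platform } from '~/util/Platform';

// Works on any platform: desktop can provide openLogsDir, web simply omits it.
export function tryOpenLogs(platform: Platform): boolean {
	// Optional chaining makes this a safe no-op when the method is absent.
	platform.openLogsDir?.();
	return platform.openLogsDir !== undefined;
}
```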
diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts
index 16f98fd1f..e5c8e7ceb 100644
--- a/packages/client/src/core.ts
+++ b/packages/client/src/core.ts
@@ -77,7 +77,7 @@ export type Procedures = {
 		{ key: "locations.quickRescan", input: LibraryArgs, result: null } |
 		{ key: "locations.relink", input: LibraryArgs, result: null } |
 		{ key: "locations.update", input: LibraryArgs, result: null } |
-		{ key: "nodes.changeNodeName", input: ChangeNodeNameArgs, result: null } |
+		{ key: "nodes.changeNodeName", input: ChangeNodeNameArgs, result: NodeConfig } |
 		{ key: "p2p.acceptSpacedrop", input: [string, string | null], result: null } |
 		{ key: "p2p.spacedrop", input: SpacedropArgs, result: null } |
 		{ key: "tags.assign", input: LibraryArgs, result: null } |
@@ -96,10 +96,6 @@ export type FilePathSearchArgs = { take?: number | null; order?: FilePathSearchO
 export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; version: string | null; email: string | null; img_url: string | null }
 
-export type MasterPasswordChangeArgs = { password: Protected; algorithm: Algorithm; hashing_algorithm: HashingAlgorithm }
-
-export type Node = { id: number; pub_id: number[]; name: string; platform: number; version: string | null; last_seen: string; timezone: string | null; date_created: string }
-
 /**
  * NodeConfig is the configuration for a node. This is shared between all libraries and is stored in a JSON file on disk.
  */
@@ -130,19 +126,11 @@ export type LibraryConfigWrapped = { uuid: string; config: LibraryConfig }
  */
 export type Params = "Standard" | "Hardened" | "Paranoid"
 
-/**
- * `LocationUpdateArgs` is the argument received from the client using `rspc` to update a location.
- * It contains the id of the location to be updated, possibly a name to change the current location's name
- * and a vector of indexer rules ids to add or remove from the location.
- *
- * It is important to note that only the indexer rule ids in this vector will be used from now on.
- * Old rules that aren't in this vector will be purged.
- */
-export type LocationUpdateArgs = { id: number; name: string | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; indexer_rules_ids: number[] }
+export type Location = { id: number; pub_id: number[]; node_id: number; name: string; path: string; total_capacity: number | null; available_capacity: number | null; is_archived: boolean; generate_preview_media: boolean; sync_preview_media: boolean; hidden: boolean; date_created: string }
 
 export type SortOrder = "Asc" | "Desc"
 
-export type MediaData = { id: number; pixel_width: number | null; pixel_height: number | null; longitude: number | null; latitude: number | null; fps: number | null; capture_device_make: string | null; capture_device_model: string | null; capture_device_software: string | null; duration_seconds: number | null; codecs: string | null; streams: number | null }
+export type KeyAddArgs = { algorithm: Algorithm; hashing_algorithm: HashingAlgorithm; key: Protected; library_sync: boolean; automount: boolean }
 
 /**
  * Represents the operating system which the remote peer is running.
@@ -163,8 +151,6 @@ export type OnboardingConfig = { password: Protected; algorithm: Algorit
 export type FileDecryptorJobInit = { location_id: number; path_id: number; mount_associated_key: boolean; output_path: string | null; password: string | null; save_to_library: boolean | null }
 
-export type Statistics = { id: number; date_captured: string; total_object_count: number; library_db_size: string; total_bytes_used: string; total_bytes_capacity: string; total_unique_bytes: string; total_bytes_free: string; preview_media_bytes: string }
-
 export type TagCreateArgs = { name: string; color: string }
 
 export type LightScanArgs = { location_id: number; sub_path: string }
@@ -178,10 +164,20 @@ export type FileEraserJobInit = { location_id: number; path_id: number; passes:
  */
 export type Nonce = { XChaCha20Poly1305: number[] } | { Aes256Gcm: number[] }
 
-export type UnlockKeyManagerArgs = { password: Protected; secret_key: Protected }
+export type AutomountUpdateArgs = { uuid: string; status: boolean }
 
 export type NodeState = ({ id: string; name: string; p2p_port: number | null; p2p_email: string | null; p2p_img_url: string | null }) & { data_path: string }
 
+/**
+ * `LocationUpdateArgs` is the argument received from the client using `rspc` to update a location.
+ * It contains the id of the location to be updated, possibly a name to change the current location's name
+ * and a vector of indexer rules ids to add or remove from the location.
+ *
+ * It is important to note that only the indexer rule ids in this vector will be used from now on.
+ * Old rules that aren't in this vector will be purged.
+ */
+export type LocationUpdateArgs = { id: number; name: string | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; indexer_rules_ids: number[] }
+
 export type EditLibraryArgs = { id: string; name: string | null; description: string | null }
 
 export type SetNoteArgs = { id: number; note: string | null }
 
@@ -204,20 +200,29 @@ export type Salt = number[]
  */
 export type Category = "Recents" | "Favorites" | "Photos" | "Videos" | "Movies" | "Music" | "Documents" | "Downloads" | "Encrypted" | "Projects" | "Applications" | "Archives" | "Databases" | "Games" | "Books" | "Contacts" | "Trash"
 
+/**
+ * TODO: P2P event for the frontend
+ */
+export type P2PEvent = { type: "DiscoveredPeer"; peer_id: PeerId; metadata: PeerMetadata } | { type: "SpacedropRequest"; id: string; peer_id: PeerId; name: string }
+
 export type FileCopierJobInit = { source_location_id: number; source_path_id: number; target_location_id: number; target_path: string; target_file_name_suffix: string | null }
 
 export type DiskType = "SSD" | "HDD" | "Removable"
 
+export type RestoreBackupArgs = { password: Protected; secret_key: Protected; path: string }
+
 export type SetFavoriteArgs = { id: number; favorite: boolean }
 
 export type FilePathFilterArgs = { locationId?: number | null; search?: string; extension?: string | null; createdAt?: OptionalRange; path?: string | null; object?: ObjectFilterArgs | null }
 
 export type RuleKind = "AcceptFilesByGlob" | "RejectFilesByGlob" | "AcceptIfChildrenDirectoriesArePresent" | "RejectIfChildrenDirectoriesArePresent"
 
-export type Volume = { name: string; mount_point: string; total_capacity: string; available_capacity: string; is_removable: boolean; disk_type: DiskType | null; file_system: string | null; is_root_filesystem: boolean }
+export type MediaData = { id: number; pixel_width: number | null; pixel_height: number | null; longitude: number | null; latitude: number | null; fps: number | null; capture_device_make: string | null; capture_device_model: string | null; capture_device_software: string | null; duration_seconds: number | null; codecs: string | null; streams: number | null }
 
 export type FilePathSearchOrdering = { name: SortOrder } | { sizeInBytes: SortOrder } | { dateCreated: SortOrder } | { dateModified: SortOrder } | { dateIndexed: SortOrder } | { object: ObjectSearchOrdering }
 
+export type IndexerRule = { id: number; name: string; default: boolean; rules_per_kind: number[]; date_created: string; date_modified: string }
+
 export type BuildInfo = { version: string; commit: string }
 
 export type IdentifyUniqueFilesArgs = { id: number; path: string }
 
@@ -227,7 +232,7 @@
  */
 export type Algorithm = "XChaCha20Poly1305" | "Aes256Gcm"
 
-export type Location = { id: number; pub_id: number[]; node_id: number; name: string; path: string; total_capacity: number | null; available_capacity: number | null; is_archived: boolean; generate_preview_media: boolean; sync_preview_media: boolean; hidden: boolean; date_created: string }
+export type Tag = { id: number; pub_id: number[]; name: string | null; color: string | null; total_objects: number | null; redundancy_goal: number | null; date_created: string; date_modified: string }
 
 export type OwnedOperationItem = { id: any; data: OwnedOperationData }
 
@@ -235,21 +240,10 @@ export type ObjectSearchOrdering = { dateAccessed: SortOrder }
 
 export type CRDTOperationType = SharedOperation | RelationOperation | OwnedOperation
 
-/**
- * TODO: P2P event for the frontend
- */
-export type P2PEvent = { type: "DiscoveredPeer"; peer_id: PeerId; metadata: PeerMetadata } | { type: "SpacedropRequest"; id: string; peer_id: PeerId; name: string }
-
-export type RenameFileArgs = { location_id: number; file_name: string; new_file_name: string }
-
 export type MaybeNot<T> = T | { not: T }
 
 export type SpacedropArgs = { peer_id: PeerId; file_path: string[] }
 
-export type Object = { id: number; pub_id: number[]; kind: number; key_id: number | null; hidden: boolean; favorite: boolean; important: boolean; has_thumbnail: boolean; has_thumbstrip: boolean; has_video_preview: boolean; ipfs_id: string | null; note: string | null; date_created: string; date_accessed: string | null }
-
-export type FilePath = { id: number; pub_id: number[]; is_dir: boolean; cas_id: string | null; integrity_checksum: string | null; location_id: number; materialized_path: string; name: string; extension: string; size_in_bytes: string; inode: number[]; device: number[]; object_id: number | null; key_id: number | null; date_created: string; date_modified: string; date_indexed: string }
-
 export type JobReport = { id: string; name: string; action: string | null; data: number[] | null; metadata: any | null; is_background: boolean; errors_text: string[]; created_at: string | null; started_at: string | null; completed_at: string | null; parent_id: string | null; status: JobStatus; task_count: number; completed_task_count: number; message: string; estimated_completion: string }
 
 export type ObjectFilterArgs = { favorite?: boolean | null; hidden?: boolean | null; dateAccessed?: MaybeNot | null; kind?: number[]; tags?: number[] }
 
@@ -280,12 +274,8 @@ export type IndexerRuleCreateArgs = { name: string; dry_run: boolean; rules: ([R
 export type SharedOperationCreateData = { u: { [key: string]: any } } | "a"
 
-export type KeyAddArgs = { algorithm: Algorithm; hashing_algorithm: HashingAlgorithm; key: Protected; library_sync: boolean; automount: boolean }
-
 export type OptionalRange<T> = { from: T | null; to: T | null }
 
-export type IndexerRule = { id: number; name: string; default: boolean; rules_per_kind: number[]; date_created: string; date_modified: string }
-
 export type FileEncryptorJobInit = { location_id: number; path_id: number; key_uuid: string; algorithm: Algorithm; metadata: boolean; preview_media: boolean; output_path: string | null }
 
 /**
@@ -302,22 +292,26 @@ export type ExplorerItem = { type: "Path"; has_thumbnail: boolean; item: FilePat
  */
 export type LibraryArgs<T> = { library_id: string; arg: T }
 
-export type FileCutterJobInit = { source_location_id: number; source_path_id: number; target_location_id: number; target_path: string }
+export type UnlockKeyManagerArgs = { password: Protected; secret_key: Protected }
 
-export type Tag = { id: number; pub_id: number[]; name: string | null; color: string | null; total_objects: number | null; redundancy_goal: number | null; date_created: string; date_modified: string }
+export type FileCutterJobInit = { source_location_id: number; source_path_id: number; target_location_id: number; target_path: string }
 
 export type OwnedOperationData = { Create: { [key: string]: any } } | { CreateMany: { values: ([any, { [key: string]: any }])[]; skip_duplicates: boolean } } | { Update: { [key: string]: any } } | "Delete"
 
 export type SharedOperationData = SharedOperationCreateData | { field: string; value: any } | null
 
+export type Node = { id: number; pub_id: number[]; name: string; platform: number; version: string | null; last_seen: string; timezone: string | null; date_created: string }
+
+export type Volume = { name: string; mount_point: string; total_capacity: string; available_capacity: string; is_removable: boolean; disk_type: DiskType | null; file_system: string | null; is_root_filesystem: boolean }
+
 export type TagUpdateArgs = { id: number; name: string | null; color: string | null }
 
+export type MasterPasswordChangeArgs = { password: Protected; algorithm: Algorithm; hashing_algorithm: HashingAlgorithm }
+
 export type ObjectValidatorArgs = { id: number; path: string }
 
 export type TagAssignArgs = { object_id: number; tag_id: number; unassign: boolean }
 
-export type ChangeNodeNameArgs = { name: string }
-
 /**
  * This defines all available password hashing algorithms.
 */
@@ -336,11 +330,17 @@ export type LibraryConfig = { name: string; description: string }
 
 export type SearchData<T> = { cursor: number[] | null; items: T[] }
 
-export type AutomountUpdateArgs = { uuid: string; status: boolean }
+export type FilePath = { id: number; pub_id: number[]; is_dir: boolean; cas_id: string | null; integrity_checksum: string | null; location_id: number; materialized_path: string; name: string; extension: string; size_in_bytes: string; inode: number[]; device: number[]; object_id: number | null; key_id: number | null; date_created: string; date_modified: string; date_indexed: string }
+
+export type Statistics = { id: number; date_captured: string; total_object_count: number; library_db_size: string; total_bytes_used: string; total_bytes_capacity: string; total_unique_bytes: string; total_bytes_free: string; preview_media_bytes: string }
 
 export type Protected<T> = T
 
-export type RestoreBackupArgs = { password: Protected; secret_key: Protected; path: string }
+export type ChangeNodeNameArgs = { name: string }
+
+export type Object = { id: number; pub_id: number[]; kind: number; key_id: number | null; hidden: boolean; favorite: boolean; important: boolean; has_thumbnail: boolean; has_thumbstrip: boolean; has_video_preview: boolean; ipfs_id: string | null; note: string | null; date_created: string; date_accessed: string | null }
+
+export type RenameFileArgs = { location_id: number; file_name: string; new_file_name: string }
 
 export type RelationOperation = { relation_item: string; relation_group: string; relation: string; data: RelationOperationData }
 
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 92d9228c7..64b18b51f 100644
Binary files a/pnpm-lock.yaml and b/pnpm-lock.yaml differ
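One behavioural note from the `core.ts` bindings above: `nodes.changeNodeName` now returns the updated `NodeConfig` instead of `null`, so callers can use the response directly rather than re-querying node state. A hedged sketch of what that enables — `BridgeClient` and `renameNode` are hypothetical, and the type re-exports are assumed; only the procedure key and its input/result types come from the generated bindings:

```ts
import type { ChangeNodeNameArgs, NodeConfig } from '@sd/client';

// Hypothetical thin client wrapper; not the real @sd/client API surface.
type BridgeClient = {
	mutation(req: ['nodes.changeNodeName', ChangeNodeNameArgs]): Promise<NodeConfig>;
};

export async function renameNode(client: BridgeClient, name: string): Promise<NodeConfig> {
	// The updated config comes back directly, so no follow-up query is needed.
	return client.mutation(['nodes.changeNodeName', { name }]);
}
```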