mirror of https://github.com/spacedriveapp/spacedrive.git
synced 2026-05-06 22:33:34 -04:00
[ENG-546] Improve error handling (#802)
* log to disk
* remove some unwraps
* panicless
* some p2p error handling
* clippy moment
* Fix `<ErrorBoundary />`
* open logs button
* 39 to 0
* fix types
* update deps and comment out broken tests
* clippy
* more clippy
* upgrade rimraf - https://github.com/isaacs/rimraf/issues/259
* regen broken lockfile
* update `notify` and update `commands.ts`
* more clippy (pls work)
* allow deprecated for p2p (hopefully temporary)
* a
* upgrade deps for p2p
* do betterer
* do it correctly
* remove unused import
* improve core startup error + bc keypair

---------

Co-authored-by: Utku Bakir <74243531+utkubakir@users.noreply.github.com>
Co-authored-by: brxken128 <77554505+brxken128@users.noreply.github.com>
Cargo.lock (generated): file contents not shown.
@@ -41,7 +41,7 @@ tauri-specta = { version = "1.0.2" }
 swift-rs = { version = "1.0.5" }
-tokio = { version = "1.25.0" }
+tokio = { version = "1.28.2" }
 
 [patch.crates-io]
 # We use this patch so we can compile for the IOS simulator on M1
@@ -3,7 +3,7 @@
 	windows_subsystem = "windows"
 )]
 
-use std::{path::PathBuf, time::Duration};
+use std::{path::PathBuf, sync::Arc, time::Duration};
 
 use sd_core::{custom_uri::create_custom_uri_endpoint, Node, NodeError};
@@ -28,6 +28,13 @@ async fn app_ready(app_handle: tauri::AppHandle) {
 	window.show().unwrap();
 }
 
+#[tauri::command(async)]
+#[specta::specta]
+async fn open_logs_dir(node: tauri::State<'_, Arc<Node>>) -> Result<(), ()> {
+	opener::open(node.data_dir.join("logs")).ok();
+	Ok(())
+}
+
 pub fn tauri_error_plugin<R: Runtime>(err: NodeError) -> TauriPlugin<R> {
 	tauri::plugin::Builder::new("spacedrive")
 		.js_init_script(format!(r#"window.__SD_ERROR__ = "{err}";"#))
@@ -55,6 +62,8 @@ async fn main() -> tauri::Result<()> {
 	#[cfg(debug_assertions)]
 	let data_dir = data_dir.join("dev");
 
+	let _guard = Node::init_logger(&data_dir);
+
 	let result = Node::new(data_dir).await;
 
 	let app = tauri::Builder::default();
@@ -76,7 +85,10 @@ async fn main() -> tauri::Result<()> {
 
 			(Some(node), app)
 		}
-		Err(err) => (None, app.plugin(tauri_error_plugin(err))),
+		Err(err) => {
+			tracing::error!("Error starting up the node: {err}");
+			(None, app.plugin(tauri_error_plugin(err)))
+		}
 	};
 
 	let app = app
@@ -130,6 +142,7 @@ async fn main() -> tauri::Result<()> {
 		.menu(menu::get_menu())
 		.invoke_handler(tauri_handlers![
 			app_ready,
+			open_logs_dir,
 			file::open_file_path,
 			file::get_file_path_open_with_apps,
 			file::open_file_path_with
@@ -17,7 +17,13 @@ import {
 } from '@sd/interface';
 import { getSpacedropState } from '@sd/interface/hooks/useSpacedropState';
 import '@sd/ui/style';
-import { appReady, getFilePathOpenWithApps, openFilePath, openFilePathWith } from './commands';
+import {
+	appReady,
+	getFilePathOpenWithApps,
+	openFilePath,
+	openFilePathWith,
+	openLogsDir
+} from './commands';
 
 // TODO: Bring this back once upstream is fixed up.
 // const client = hooks.createClient({
@@ -71,6 +77,7 @@ const platform: Platform = {
 	saveFilePickerDialog: () => dialog.save(),
 	showDevtools: () => invoke('show_devtools'),
 	openPath: (path) => shell.open(path),
+	openLogsDir,
 	openFilePath,
 	getFilePathOpenWithApps,
 	openFilePathWith
@@ -103,17 +110,27 @@ export default function App() {
 		};
 	}, []);
 
-	if (startupError) {
-		return <ErrorPage message={startupError} />;
-	}
-
 	return (
 		<RspcProvider queryClient={queryClient}>
			<PlatformProvider platform={platform}>
				<QueryClientProvider client={queryClient}>
-					<SpacedriveInterface router={router} />
+					<AppInner />
				</QueryClientProvider>
			</PlatformProvider>
		</RspcProvider>
	);
 }
+
+// This is required because `ErrorPage` uses the OS which comes from `PlatformProvider`
+function AppInner() {
+	if (startupError) {
+		return (
+			<ErrorPage
+				message={startupError}
+				submessage="Error occurred starting up the Spacedrive core"
+			/>
+		);
+	}
+
+	return <SpacedriveInterface router={router} />;
+}
@@ -14,6 +14,10 @@ export function appReady() {
 	return invoke()<null>("app_ready")
 }
 
+export function openLogsDir() {
+	return invoke()<null>("open_logs_dir")
+}
+
 export function openFilePath(library: string, id: number) {
 	return invoke()<OpenFilePathResult>("open_file_path", { library,id })
 }
@@ -67,6 +67,8 @@ pub fn handle_core_msg(
 			match node {
 				Some(node) => node.clone(),
 				None => {
+					let _guard = Node::init_logger(&data_dir);
+
 					// TODO: probably don't unwrap
 					let new_node = Node::new(data_dir).await.unwrap();
 					node.replace(new_node.clone());
@@ -32,6 +32,8 @@ async fn main() {
 		.map(|port| port.parse::<u16>().unwrap_or(8080))
 		.unwrap_or(8080);
 
+	let _guard = Node::init_logger(&data_dir);
+
 	let (node, router) = Node::new(data_dir).await.expect("Unable to create node");
 	let signal = utils::axum_shutdown_signal(node.clone());
@@ -67,8 +67,10 @@ include_dir = { version = "0.7.2", features = ["glob"] }
 async-trait = "^0.1.57"
 image = "0.24.6"
 webp = "0.2.2"
-tracing = "0.1.36"
-tracing-subscriber = { version = "0.3.15", features = ["env-filter"] }
+tracing = { git = "https://github.com/tokio-rs/tracing", rev = "29146260fb4615d271d2e899ad95a753bb42915e" } # To work with tracing-appender
+tracing-subscriber = { git = "https://github.com/tokio-rs/tracing", rev = "29146260fb4615d271d2e899ad95a753bb42915e", features = [
+	"env-filter",
+] }
 async-stream = "0.3.3"
 once_cell = "1.15.0"
 ctor = "0.1.23"
@@ -80,12 +82,13 @@ http-range = "0.1.5"
 mini-moka = "0.10.0"
 serde_with = "2.2.0"
 dashmap = { version = "5.4.0", features = ["serde"] }
-notify = { version = "5.0.0", default-features = false, features = [
+notify = { version = "5.2.0", default-features = false, features = [
 	"macos_fsevent",
 ], optional = true }
 static_assertions = "1.1.0"
 serde-hashkey = "0.4.5"
 normpath = { version = "1.1.1", features = ["localization"] }
+tracing-appender = { git = "https://github.com/tokio-rs/tracing", rev = "29146260fb4615d271d2e899ad95a753bb42915e" } # Unreleased changes for log deletion
 strum = { version = "0.24", features = ["derive"] }
 strum_macros = "0.24"
@@ -95,4 +98,4 @@ version = "0.1.5"
 
 [dev-dependencies]
 tempfile = "^3.3.0"
-tracing-test = "^0.2.3"
+tracing-test = "^0.2.4"
@@ -9,7 +9,7 @@ use crate::{
 	prisma::{location, object},
 };
 
-use chrono::{FixedOffset, Utc};
+use chrono::Utc;
 use rspc::{alpha::AlphaRouter, ErrorCode};
 use serde::Deserialize;
 use specta::Type;
@@ -108,9 +108,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 					.object()
 					.update(
 						object::id::equals(id),
-						vec![object::date_accessed::set(Some(
-							Utc::now().with_timezone(&FixedOffset::east_opt(0).unwrap()),
-						))],
+						vec![object::date_accessed::set(Some(Utc::now().into()))],
 					)
 					.exec()
 					.await?;
@@ -1,4 +1,5 @@
 use rspc::alpha::AlphaRouter;
+use rspc::ErrorCode;
 use sd_crypto::keys::keymanager::{StoredKey, StoredKeyType};
 use sd_crypto::primitives::SECRET_KEY_IDENTIFIER;
 use sd_crypto::types::{Algorithm, HashingAlgorithm, OnboardingConfig, SecretKeyString};
@@ -389,38 +390,45 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 					invalidate_query!(library, "keys.list");
 					invalidate_query!(library, "keys.listMounted");
 
-					Ok(TryInto::<u32>::try_into(updated_keys.len()).unwrap()) // We convert from `usize` (bigint type) to `u32` (number type) because rspc doesn't support bigints.
+					TryInto::<u32>::try_into(updated_keys.len()).map_err(|_| {
+						rspc::Error::new(ErrorCode::InternalServerError, "integer overflow".into())
+					}) // We convert from `usize` (bigint type) to `u32` (number type) because rspc doesn't support bigints.
 				})
 		})
-		.procedure("changeMasterPassword", {
-			R.with2(library())
-				.mutation(|(_, library), args: MasterPasswordChangeArgs| async move {
-					let verification_key = library
-						.key_manager
-						.change_master_password(
-							args.password,
-							args.algorithm,
-							args.hashing_algorithm,
-							library.id,
-						)
-						.await?;
+		.procedure(
+			"changeMasterPassword",
+			#[allow(clippy::unwrap_used)] // TODO: Jake is fixing this in a Crypto PR
+			{
+				R.with2(library()).mutation(
+					|(_, library), args: MasterPasswordChangeArgs| async move {
+						let verification_key = library
+							.key_manager
+							.change_master_password(
+								args.password,
+								args.algorithm,
+								args.hashing_algorithm,
+								library.id,
+							)
+							.await?;
 
-					invalidate_query!(library, "keys.getSecretKey");
+						invalidate_query!(library, "keys.getSecretKey");
 
-					// remove old root key if present
-					library
-						.db
-						.key()
-						.delete_many(vec![key::key_type::equals(
-							serde_json::to_string(&StoredKeyType::Root).unwrap(),
-						)])
-						.exec()
-						.await?;
+						// remove old root key if present
+						library
+							.db
+							.key()
+							.delete_many(vec![key::key_type::equals(
+								serde_json::to_string(&StoredKeyType::Root).unwrap(),
+							)])
+							.exec()
+							.await?;
 
-					// write the new verification key
-					write_storedkey_to_db(&library.db, &verification_key).await?;
+						// write the new verification key
+						write_storedkey_to_db(&library.db, &verification_key).await?;
 
-					Ok(())
-				})
-		})
+						Ok(())
+					},
+				)
+			},
+		)
 }
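Aside: the `try_into().map_err(...)` change above is the whole fix in miniature, converting a potential overflow panic into a typed error. A standalone, compilable sketch (the error type here is a hypothetical stand-in for `rspc::Error`; `try_into` comes from the Rust 2021 prelude):

#[derive(Debug)]
struct ApiError(String);

fn count_as_u32(updated_keys: &[String]) -> Result<u32, ApiError> {
	// `len()` is a usize; converting to u32 can overflow on 64-bit targets,
	// so surface that as an error instead of unwrapping.
	updated_keys
		.len()
		.try_into()
		.map_err(|_| ApiError("integer overflow".into()))
}

fn main() {
	assert_eq!(count_as_u32(&["a".into(), "b".into()]).unwrap(), 2);
}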
@@ -109,7 +109,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 				ctx.library_manager
 					.get_library(new_library.uuid)
 					.await
-					.unwrap(),
+					.expect("We just created the library. Where do it be?"),
 				"library.getStatistics"
 			);
@@ -1,6 +1,7 @@
-use rspc::alpha::AlphaRouter;
+use rspc::{alpha::AlphaRouter, ErrorCode};
 use serde::Deserialize;
 use specta::Type;
+use tracing::error;
 
 use crate::api::R;
@@ -20,9 +21,13 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 					config.name = args.name;
 				})
 				.await
-				.unwrap();
-
-			Ok(())
+				.map_err(|err| {
+					error!("Failed to write config: {}", err);
+					rspc::Error::new(
+						ErrorCode::InternalServerError,
+						"error updating config".into(),
+					)
+				})
 		})
 	})
 }
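Aside: the `.map_err` above logs the detailed failure server-side and returns a deliberately vague message to the client. A self-contained sketch of this log-then-wrap pattern, with a hypothetical writer and error type standing in for the node config machinery and `eprintln!` standing in for `tracing::error!`:

#[derive(Debug)]
enum ApiError {
	InternalServerError(String),
}

fn write_config(fail: bool) -> Result<(), std::io::Error> {
	// Hypothetical config writer; fails on demand for the example.
	if fail {
		Err(std::io::Error::new(std::io::ErrorKind::Other, "disk full"))
	} else {
		Ok(())
	}
}

fn update_config() -> Result<(), ApiError> {
	write_config(true).map_err(|err| {
		// Log the detailed error where operators can see it...
		eprintln!("Failed to write config: {err}");
		// ...but hand the caller a generic, non-leaky message.
		ApiError::InternalServerError("error updating config".into())
	})
}

fn main() {
	assert!(update_config().is_err());
}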
@@ -1,4 +1,4 @@
-use rspc::alpha::AlphaRouter;
+use rspc::{alpha::AlphaRouter, ErrorCode};
 use sd_p2p::PeerId;
 use serde::Deserialize;
 use specta::Type;
@@ -45,8 +45,18 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 			R.mutation(|ctx, args: SpacedropArgs| async move {
 				// TODO: Handle multiple files path and error if zero paths
 				ctx.p2p
-					.big_bad_spacedrop(args.peer_id, PathBuf::from(args.file_path.first().unwrap()))
-					.await;
+					.big_bad_spacedrop(
+						args.peer_id,
+						PathBuf::from(
+							args.file_path
+								.first()
+								.expect("https://linear.app/spacedriveapp/issue/ENG-625/spacedrop-multiple-files"),
+						),
+					)
+					.await
+					.map_err(|_| {
+						rspc::Error::new(ErrorCode::InternalServerError, "todo".to_string())
+					})
 			})
 		})
 		.procedure("acceptSpacedrop", {
@@ -1,4 +1,4 @@
-use rspc::alpha::AlphaRouter;
+use rspc::{alpha::AlphaRouter, ErrorCode};
 use serde::Deserialize;
 use specta::Type;
@@ -151,7 +151,10 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 				.select(tag::select!({ pub_id }))
 				.exec()
 				.await?
-				.unwrap();
+				.ok_or(rspc::Error::new(
+					ErrorCode::NotFound,
+					"Error finding tag in db".into(),
+				))?;
 
 			sync.write_ops(
 				db,
@@ -65,11 +65,13 @@ impl InvalidRequests {
 	}
 }
 
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::panic)]
 pub(crate) fn validate(r: Arc<Router>) {
 	#[cfg(debug_assertions)]
 	{
-		let invalidate_requests = INVALIDATION_REQUESTS.lock().unwrap();
+		let invalidate_requests = INVALIDATION_REQUESTS
+			.lock()
+			.expect("Failed to lock the mutex for invalidation requests");
 
 		let queries = r.queries();
 		for req in &invalidate_requests.queries {
@@ -240,7 +242,15 @@ pub(crate) fn mount_invalidate() -> AlphaRouter<Ctx> {
 			if let Ok(event) = event {
 				if let CoreEvent::InvalidateOperation(op) = event {
 					// Newer data replaces older data in the buffer
-					buf.insert(to_key(&(op.key, &op.arg)).unwrap(), op);
+					match to_key(&(op.key, &op.arg)) {
+						Ok(key) => {
+							buf.insert(key, op);
+						},
+						Err(err) => {
+							warn!("Error deriving key for invalidate operation '{:?}': {:?}", op, err);
+						},
+					}
 				}
 			} else {
 				warn!("Shutting down invalidation manager thread due to the core event bus being droppped!");
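Aside: matching on the `Result` keeps one malformed event from panicking the whole invalidation loop; bad events are logged and skipped. A minimal sketch, with a hypothetical `to_key` and `eprintln!` standing in for `tracing::warn!`:

use std::collections::HashMap;

fn to_key(arg: &str) -> Result<String, String> {
	// Hypothetical key derivation; fails for unserializable input.
	if arg.is_empty() {
		Err("empty arg".into())
	} else {
		Ok(format!("key:{arg}"))
	}
}

fn main() {
	let ops = ["a", "", "b"];
	let mut buf: HashMap<String, &str> = HashMap::new();
	for op in ops {
		// Newer data replaces older data in the buffer; errors no longer
		// abort the loop, they are reported and the event is dropped.
		match to_key(op) {
			Ok(key) => {
				buf.insert(key, op);
			}
			Err(err) => {
				eprintln!("Error deriving key for operation '{op:?}': {err:?}");
			}
		}
	}
	assert_eq!(buf.len(), 2);
}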
@@ -72,6 +72,10 @@ pub enum JobError {
 	MatchingSrcDest(PathBuf),
 	#[error("action would overwrite another file: {}", .0.display())]
 	WouldOverwrite(PathBuf),
+	#[error("item of type '{0}' with id '{1}' is missing from the db")]
+	MissingFromDb(&'static str, String),
+	#[error("the cas id is not set on the path data")]
+	MissingCasId,
 
 	// Not errors
 	#[error("step completed with errors")]
@@ -295,7 +299,10 @@ pub struct JobState<Job: StatefulJob> {
 impl<SJob: StatefulJob> DynJob for Job<SJob> {
 	fn id(&self) -> Uuid {
-		// SAFETY: This method is using during queueing, so we still have a report
-		self.report().as_ref().unwrap().id
+		self.report()
+			.as_ref()
+			.expect("This method is using during queueing, so we still have a report")
+			.id
 	}
 
 	fn parent_id(&self) -> Option<Uuid> {
@@ -347,7 +354,9 @@ impl<SJob: StatefulJob> DynJob for Job<SJob> {
 				) => {
 					match step_result {
 						Err(JobError::EarlyFinish { .. }) => {
-							info!("{}", step_result.unwrap_err());
+							step_result.map_err(|err| {
+								warn!("{}", err);
+							}).ok();
 							break;
 						},
 						Err(JobError::StepCompletedWithErrors(errors_text)) => {
core/src/lib.rs (177 lines changed)
@@ -1,3 +1,5 @@
+#![warn(clippy::unwrap_used, clippy::panic)]
+
 use crate::{
 	api::{CoreEvent, Router},
 	job::JobManager,
@@ -9,11 +11,18 @@ use crate::{
 
 pub use sd_prisma::*;
 
-use std::{path::Path, sync::Arc};
+use std::{
+	path::{Path, PathBuf},
+	sync::Arc,
+};
 use thiserror::Error;
 use tokio::{fs, sync::broadcast};
 use tracing::{debug, error, info, warn};
-use tracing_subscriber::{prelude::*, EnvFilter};
+use tracing_appender::{
+	non_blocking::{NonBlocking, WorkerGuard},
+	rolling::{RollingFileAppender, Rotation},
+};
+use tracing_subscriber::{fmt, prelude::*, EnvFilter};
 
 pub mod api;
 pub mod custom_uri;
@@ -38,6 +47,7 @@ pub struct NodeContext {
 }
 
 pub struct Node {
+	pub data_dir: PathBuf,
 	config: Arc<NodeConfigManager>,
 	pub library_manager: Arc<LibraryManager>,
 	location_manager: Arc<LocationManager>,
@@ -47,86 +57,16 @@ pub struct Node {
 	// peer_request: tokio::sync::Mutex<Option<PeerRequest>>,
 }
 
-#[cfg(not(target_os = "android"))]
-const CONSOLE_LOG_FILTER: tracing_subscriber::filter::LevelFilter = {
-	use tracing_subscriber::filter::LevelFilter;
-
-	match cfg!(debug_assertions) {
-		true => LevelFilter::DEBUG,
-		false => LevelFilter::INFO,
-	}
-};
-
 impl Node {
 	pub async fn new(data_dir: impl AsRef<Path>) -> Result<(Arc<Node>, Arc<Router>), NodeError> {
 		let data_dir = data_dir.as_ref();
 
 		#[cfg(debug_assertions)]
-		let init_data = util::debug_initializer::InitConfig::load(data_dir).await;
+		let init_data = util::debug_initializer::InitConfig::load(data_dir).await?;
 
 		// This error is ignored because it's throwing on mobile despite the folder existing.
 		let _ = fs::create_dir_all(&data_dir).await;
 
-		// dbg!(get_object_kind_from_extension("png"));
-
-		// let (non_blocking, _guard) = tracing_appender::non_blocking(rolling::daily(
-		// 	Path::new(&data_dir).join("logs"),
-		// 	"log",
-		// ));
-		// TODO: Make logs automatically delete after x time https://github.com/tokio-rs/tracing/pull/2169
-
-		let subscriber = tracing_subscriber::registry().with(
-			EnvFilter::from_default_env()
-				.add_directive("warn".parse().expect("Error invalid tracing directive!"))
-				.add_directive(
-					"sd_core=debug"
-						.parse()
-						.expect("Error invalid tracing directive!"),
-				)
-				.add_directive(
-					"sd_core::location::manager=info"
-						.parse()
-						.expect("Error invalid tracing directive!"),
-				)
-				.add_directive(
-					"sd_core_mobile=debug"
-						.parse()
-						.expect("Error invalid tracing directive!"),
-				)
-				// .add_directive(
-				// 	"sd_p2p=debug"
-				// 		.parse()
-				// 		.expect("Error invalid tracing directive!"),
-				// )
-				.add_directive(
-					"server=debug"
-						.parse()
-						.expect("Error invalid tracing directive!"),
-				)
-				.add_directive(
-					"desktop=debug"
-						.parse()
-						.expect("Error invalid tracing directive!"),
-				),
-				// .add_directive(
-				// 	"rspc=debug"
-				// 		.parse()
-				// 		.expect("Error invalid tracing directive!"),
-				// ),
-		);
-		#[cfg(not(target_os = "android"))]
-		let subscriber = subscriber.with(tracing_subscriber::fmt::layer().with_filter(CONSOLE_LOG_FILTER));
-		// #[cfg(target_os = "android")]
-		// let subscriber = subscriber.with(tracing_android::layer("com.spacedrive.app").unwrap()); // TODO: This is not working
-		subscriber
-			// .with(
-			// 	Layer::default()
-			// 		.with_writer(non_blocking)
-			// 		.with_ansi(false)
-			// 		.with_filter(LevelFilter::DEBUG),
-			// )
-			.init();
-
 		let event_bus = broadcast::channel(1024);
 		let config = NodeConfigManager::new(data_dir.to_path_buf())
 			.await
@@ -134,7 +74,7 @@ impl Node {
 
 		let jobs = JobManager::new();
 		let location_manager = LocationManager::new();
-		let (p2p, mut p2p_rx) = P2PManager::new(config.clone()).await;
+		let (p2p, mut p2p_rx) = P2PManager::new(config.clone()).await?;
 
 		let library_manager = LibraryManager::new(
 			data_dir.join("libraries"),
@@ -150,11 +90,9 @@ impl Node {
 
 		#[cfg(debug_assertions)]
 		if let Some(init_data) = init_data {
-			init_data.apply(&library_manager).await;
+			init_data.apply(&library_manager).await?;
 		}
 
-		debug!("Watching locations");
-
 		tokio::spawn({
 			let library_manager = library_manager.clone();
@@ -168,8 +106,12 @@ impl Node {
 					};
 
 					for op in operations {
 						println!("ingest lib id: {}", library.id);
-						library.sync.ingest_op(op).await.unwrap();
+						library.sync.ingest_op(op).await.unwrap_or_else(|err| {
+							error!(
+								"error ingesting operation for library '{}': {err:?}",
+								library.id
+							);
+						});
 					}
 				}
 			}
@@ -177,6 +119,7 @@ impl Node {
 
 		let router = api::mount();
 		let node = Node {
+			data_dir: data_dir.to_path_buf(),
 			config,
 			library_manager,
 			location_manager,
@@ -190,6 +133,73 @@ impl Node {
 		Ok((Arc::new(node), router))
 	}
 
+	pub fn init_logger(data_dir: impl AsRef<Path>) -> WorkerGuard {
+		let log_filter = match cfg!(debug_assertions) {
+			true => tracing::Level::DEBUG,
+			false => tracing::Level::INFO,
+		};
+
+		let (logfile, guard) = NonBlocking::new(
+			RollingFileAppender::builder()
+				.filename_prefix("sd.log")
+				.rotation(Rotation::DAILY)
+				.max_log_files(4)
+				.build(data_dir.as_ref().join("logs"))
+				.expect("Error setting up log file!"),
+		);
+
+		let collector = tracing_subscriber::registry()
+			.with(fmt::Subscriber::new().with_ansi(false).with_writer(logfile))
+			.with(
+				fmt::Subscriber::new()
+					.with_writer(std::io::stdout.with_max_level(log_filter))
+					.with_filter(
+						EnvFilter::from_default_env()
+							.add_directive(
+								"warn".parse().expect("Error invalid tracing directive!"),
+							)
+							.add_directive(
+								"sd_core=debug"
+									.parse()
+									.expect("Error invalid tracing directive!"),
+							)
+							.add_directive(
+								"sd_core::location::manager=info"
+									.parse()
+									.expect("Error invalid tracing directive!"),
+							)
+							.add_directive(
+								"sd_core_mobile=debug"
+									.parse()
+									.expect("Error invalid tracing directive!"),
+							)
+							// .add_directive(
+							// 	"sd_p2p=debug"
+							// 		.parse()
+							// 		.expect("Error invalid tracing directive!"),
+							// )
+							.add_directive(
+								"server=debug"
+									.parse()
+									.expect("Error invalid tracing directive!"),
+							)
+							.add_directive(
+								"spacedrive=debug"
+									.parse()
+									.expect("Error invalid tracing directive!"),
+							),
+					),
+			);
+
+		tracing::collect::set_global_default(collector)
+			.map_err(|err| {
+				println!("Error initializing global logger: {:?}", err);
+			})
+			.ok();
+
+		guard
+	}
+
 	pub async fn shutdown(&self) {
 		info!("Spacedrive shutting down...");
 		self.jobs.pause().await;
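Aside: the heart of `init_logger` above is a daily-rotating file appender behind a non-blocking writer; the returned `WorkerGuard` must be kept alive by the caller (`let _guard = ...`) or buffered lines are lost on exit. A trimmed sketch of the same setup, written against the released `tracing-subscriber` layer API rather than the pinned git revision's `fmt::Subscriber` / `tracing::collect` naming (`max_log_files` was unreleased at the time of this commit, which is why the git pin exists; directive set reduced):

use tracing_appender::{
	non_blocking::{NonBlocking, WorkerGuard},
	rolling::{RollingFileAppender, Rotation},
};
use tracing_subscriber::{fmt, prelude::*, EnvFilter};

fn init_logger(data_dir: &std::path::Path) -> WorkerGuard {
	// Rotate daily, keep at most 4 files in <data_dir>/logs.
	let (logfile, guard) = NonBlocking::new(
		RollingFileAppender::builder()
			.filename_prefix("sd.log")
			.rotation(Rotation::DAILY)
			.max_log_files(4)
			.build(data_dir.join("logs"))
			.expect("Error setting up log file!"),
	);

	tracing_subscriber::registry()
		// Plain-text (no ANSI escapes) output into the rolling file.
		.with(fmt::layer().with_ansi(false).with_writer(logfile))
		// Console output, filtered by RUST_LOG-style directives.
		.with(fmt::layer().with_filter(EnvFilter::from_default_env()))
		.init();

	// Dropping the guard flushes and stops the background writer thread.
	guard
}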
@@ -216,12 +226,17 @@ impl Node {
 /// Error type for Node related errors.
 #[derive(Error, Debug)]
 pub enum NodeError {
-	#[error("failed to initialize config")]
+	#[error("failed to initialize config: {0}")]
 	FailedToInitializeConfig(util::migrator::MigratorError),
-	#[error("failed to initialize library manager")]
+	#[error("failed to initialize library manager: {0}")]
 	FailedToInitializeLibraryManager(#[from] library::LibraryManagerError),
+	#[error(transparent)]
+	LocationManager(#[from] LocationManagerError),
+	#[error("failed to initialize p2p manager: {0}")]
+	P2PManager(#[from] sd_p2p::ManagerError),
 	#[error("invalid platform integer")]
 	InvalidPlatformInt(i32),
+	#[cfg(debug_assertions)]
+	#[error("Init config error: {0}")]
+	InitConfig(#[from] util::debug_initializer::InitConfigError),
 }
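Aside: the `NodeError` changes lean on two `thiserror` features, `{0}` interpolation (so the wrapped error shows up in the message) and `#[from]` (so `?` converts automatically). A minimal compilable sketch using std error types:

use thiserror::Error;

#[derive(Error, Debug)]
enum NodeError {
	// `{0}` interpolates the wrapped error into the display message.
	#[error("failed to initialize config: {0}")]
	Config(std::io::Error),
	// `#[from]` generates `impl From<ParseIntError> for NodeError`,
	// so `?` converts the source error without an explicit map_err.
	#[error("invalid platform integer: {0}")]
	InvalidPlatformInt(#[from] std::num::ParseIntError),
}

fn parse_platform(s: &str) -> Result<i32, NodeError> {
	Ok(s.parse::<i32>()?) // ParseIntError -> NodeError via #[from]
}

fn main() {
	let err = parse_platform("not-a-number").unwrap_err();
	println!("{err}"); // "invalid platform integer: invalid digit found in string"
}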
@@ -5,7 +5,7 @@ use crate::{
 	prisma::{node, PrismaClient},
 	sync::{SyncManager, SyncMessage},
 	util::{
-		db::load_and_migrate,
+		db::{load_and_migrate, MigrationError},
 		error::{FileIOError, NonUtf8PathError},
 		migrator::MigratorError,
 		seeder::{indexer_rules_seeder, SeederError},
@@ -62,6 +62,8 @@ pub enum LibraryManagerError {
 	KeyManager(#[from] sd_crypto::Error),
 	#[error("failed to run library migrations")]
 	MigratorError(#[from] MigratorError),
+	#[error("error migrating the library: {0}")]
+	MigrationError(#[from] MigrationError),
 	#[error("invalid library configuration: {0}")]
 	InvalidConfig(String),
 	#[error(transparent)]
@@ -93,7 +95,7 @@ pub async fn seed_keymanager(
 		.iter()
 		.map(|key| {
 			let key = key.clone();
-			let uuid = uuid::Uuid::from_str(&key.uuid).unwrap();
+			let uuid = uuid::Uuid::from_str(&key.uuid).expect("invalid key id in the DB");
 
 			if key.default {
 				default = Some(uuid);
@@ -119,8 +121,7 @@ pub async fn seed_keymanager(
 				automount: key.automount,
 			})
 		})
-		.collect::<Result<Vec<StoredKey>, sd_crypto::Error>>()
-		.unwrap();
+		.collect::<Result<Vec<StoredKey>, sd_crypto::Error>>()?;
 
 	// insert all keys from the DB into the keymanager's keystore
 	km.populate_keystore(stored_keys).await?;
@@ -368,8 +369,7 @@ impl LibraryManager {
 					LibraryManagerError::NonUtf8Path(NonUtf8PathError(db_path.into()))
 				})?
 			))
-			.await
-			.unwrap(),
+			.await?,
 		);
 
 		let node_config = node_context.config.get().await;
@@ -99,6 +99,8 @@ pub struct FilePathMetadata {
 pub enum FilePathError {
 	#[error("file Path not found: <path='{}'>", .0.display())]
 	NotFound(Box<Path>),
+	#[error("location '{0}' not found")]
+	LocationNotFound(i32),
 	#[error("received an invalid sub path: <location_path='{}', sub_path='{}'>", .location_path.display(), .sub_path.display())]
 	InvalidSubPath {
 		location_path: Box<Path>,
@@ -154,7 +156,7 @@ pub async fn create_file_path(
 		.select(location::select!({ id pub_id }))
 		.exec()
 		.await?
-		.unwrap();
+		.ok_or(FilePathError::LocationNotFound(location_id))?;
 
 	let params = {
 		use file_path::*;
@@ -291,7 +293,9 @@ pub async fn ensure_sub_path_is_in_location(
 	let mut sub_path = sub_path.as_ref();
 	if sub_path.starts_with("/") {
-		// SAFETY: we just checked that it starts with the separator
-		sub_path = sub_path.strip_prefix("/").unwrap();
+		sub_path = sub_path
+			.strip_prefix("/")
+			.expect("we just checked that it starts with the separator");
 	}
 	let location_path = location_path.as_ref();
@@ -361,7 +365,9 @@ pub async fn ensure_sub_path_is_directory(
 		Err(e) if e.kind() == io::ErrorKind::NotFound => {
 			if sub_path.starts_with("/") {
-				// SAFETY: we just checked that it starts with the separator
-				sub_path = sub_path.strip_prefix("/").unwrap();
+				sub_path = sub_path
+					.strip_prefix("/")
+					.expect("we just checked that it starts with the separator");
 			}
 
 			let location_path = location_path.as_ref();
@@ -615,7 +615,7 @@ mod tests {
 	use globset::{Glob, GlobSetBuilder};
 	use tempfile::{tempdir, TempDir};
 	use tokio::fs;
-	use tracing_test::traced_test;
+	// use tracing_test::traced_test;
 
 	impl PartialEq for WalkedEntry {
 		fn eq(&self, other: &Self) -> bool {
@@ -758,7 +758,7 @@ mod tests {
 	}
 
 	#[tokio::test]
-	#[traced_test]
+	// #[traced_test]
 	async fn test_only_photos() {
 		let root = prepare_location().await;
 		let root_path = root.path();
@@ -822,7 +822,7 @@ mod tests {
 	}
 
 	#[tokio::test]
-	#[traced_test]
+	// #[traced_test]
 	async fn test_git_repos() {
 		let root = prepare_location().await;
 		let root_path = root.path();
@@ -895,7 +895,7 @@ mod tests {
 	}
 
 	#[tokio::test]
-	#[traced_test]
+	// #[traced_test]
 	async fn git_repos_without_deps_or_build_dirs() {
 		let root = prepare_location().await;
 		let root_path = root.path();
@@ -103,6 +103,11 @@ pub enum LocationManagerError {
 	#[error("Job Manager error: (error: {0})")]
 	JobManager(#[from] JobManagerError),
 
+	#[error("invalid inode")]
+	InvalidInode,
+	#[error("invalid device")]
+	InvalidDevice,
+
 	#[error(transparent)]
 	FileIO(#[from] FileIOError),
 }
@@ -554,10 +559,10 @@ impl Drop for StopWatcherGuard<'_> {
 	fn drop(&mut self) {
 		if cfg!(feature = "location-watcher") {
 			// FIXME: change this Drop to async drop in the future
-			if let Err(e) = block_on(
-				self.manager
-					.reinit_watcher(self.location_id, self.library.take().unwrap()),
-			) {
+			if let Err(e) = block_on(self.manager.reinit_watcher(
+				self.location_id,
+				self.library.take().expect("library should be set"),
+			)) {
 				error!("Failed to reinit watcher on stop watcher guard drop: {e}");
 			}
 		}
@@ -578,9 +583,9 @@ impl Drop for IgnoreEventsForPathGuard<'_> {
 		// FIXME: change this Drop to async drop in the future
 		if let Err(e) = block_on(self.manager.watcher_management_message(
 			self.location_id,
-			self.library.take().unwrap(),
+			self.library.take().expect("library should be set"),
 			WatcherManagementMessageAction::IgnoreEventsForPath {
-				path: self.path.take().unwrap(),
+				path: self.path.take().expect("path should be set"),
 				ignore: false,
 			},
 		)) {
@@ -685,13 +685,23 @@ pub(super) async fn extract_inode_and_device_from_path(
 		.select(file_path::select!( {inode device} ))
 		.exec()
 		.await?
-		.map(|file_path| {
-			(
-				u64::from_le_bytes(file_path.inode[0..8].try_into().unwrap()),
-				u64::from_le_bytes(file_path.device[0..8].try_into().unwrap()),
-			)
-		})
-		.ok_or_else(|| FilePathError::NotFound(path.into()).into())
+		.map_or(
+			Err(FilePathError::NotFound(path.into()).into()),
+			|file_path| {
+				Ok((
+					u64::from_le_bytes(
+						file_path.inode[0..8]
+							.try_into()
+							.map_err(|_| LocationManagerError::InvalidInode)?,
+					),
+					u64::from_le_bytes(
+						file_path.device[0..8]
+							.try_into()
+							.map_err(|_| LocationManagerError::InvalidDevice)?,
+					),
+				))
+			},
+		)
 }
 
 pub(super) async fn extract_location_path(
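Aside: the rewrite above turns two slice-to-array `unwrap`s into typed errors before decoding little-endian bytes. The core conversion in isolation (the error enum here is a reduced hypothetical; `try_into` is from the 2021 prelude):

#[derive(Debug)]
enum ExtractError {
	InvalidInode,
}

fn inode_from_db_bytes(bytes: &[u8]) -> Result<u64, ExtractError> {
	// `try_into` fails unless the slice is exactly 8 bytes, e.g. on a
	// corrupted or truncated DB row; map that to a typed error instead
	// of panicking. `get(0..8)` also guards against short slices.
	let arr: [u8; 8] = bytes
		.get(0..8)
		.ok_or(ExtractError::InvalidInode)?
		.try_into()
		.map_err(|_| ExtractError::InvalidInode)?;
	Ok(u64::from_le_bytes(arr))
}

fn main() {
	assert_eq!(inode_from_db_bytes(&[1, 0, 0, 0, 0, 0, 0, 0]).unwrap(), 1);
	assert!(inode_from_db_bytes(&[1, 2, 3]).is_err());
}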
@@ -156,7 +156,10 @@ impl LocationCreateArgs {
 			if metadata.has_library(library.id) {
 				return Err(LocationError::NeedRelink {
-					// SAFETY: This unwrap is ok as we checked that we have this library_id
-					old_path: metadata.location_path(library.id).unwrap().to_path_buf(),
+					old_path: metadata
+						.location_path(library.id)
+						.expect("This unwrap is ok as we checked that we have this library_id")
+						.to_path_buf(),
 					new_path: self.path,
 				});
 			}
@@ -281,7 +284,9 @@ impl LocationUpdateArgs {
 			if let Some(mut metadata) =
 				SpacedriveLocationMetadataFile::try_load(&location.path).await?
 			{
-				metadata.update(library.id, self.name.unwrap()).await?;
+				metadata
+					.update(library.id, self.name.expect("TODO"))
+					.await?;
 			}
 		}
 	}
@@ -18,20 +18,16 @@ pub struct LibraryNode {
 	pub last_seen: DateTime<Utc>,
 }
 
-impl From<node::Data> for LibraryNode {
-	fn from(data: node::Data) -> Self {
-		Self {
-			uuid: Uuid::from_slice(&data.pub_id).unwrap(),
-			name: data.name,
-			platform: Platform::try_from(data.platform).unwrap(),
-			last_seen: data.last_seen.into(),
-		}
-	}
-}
-
-impl From<Box<node::Data>> for LibraryNode {
-	fn from(data: Box<node::Data>) -> Self {
-		Self::from(*data)
-	}
-}
+impl TryFrom<node::Data> for LibraryNode {
+	type Error = String;
+
+	fn try_from(data: node::Data) -> Result<Self, Self::Error> {
+		Ok(Self {
+			uuid: Uuid::from_slice(&data.pub_id).map_err(|_| "Invalid node pub_id")?,
+			name: data.name,
+			platform: Platform::try_from(data.platform).map_err(|_| "Invalid platform_id")?,
+			last_seen: data.last_seen.into(),
+		})
+	}
+}
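Aside: the `From` to `TryFrom` migration above is the general remedy for constructors with fallible field conversions. A reduced sketch with a hypothetical record type standing in for `node::Data` (Rust 2021: `TryFrom`/`TryInto` are in the prelude):

struct Record {
	pub_id: Vec<u8>,
}

struct NodeInfo {
	uuid: [u8; 16],
}

impl TryFrom<Record> for NodeInfo {
	type Error = String;

	fn try_from(data: Record) -> Result<Self, Self::Error> {
		Ok(NodeInfo {
			// Was effectively `.unwrap()`; a malformed pub_id is now a
			// recoverable error instead of a panic.
			uuid: data
				.pub_id
				.as_slice()
				.try_into()
				.map_err(|_| "Invalid node pub_id".to_string())?,
		})
	}
}

fn main() {
	assert!(NodeInfo::try_from(Record { pub_id: vec![0; 16] }).is_ok());
	assert!(NodeInfo::try_from(Record { pub_id: vec![0; 3] }).is_err());
}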
@@ -1,4 +1,4 @@
-#![allow(dead_code, unused_variables)] // TODO: Reenable once this is working
+#![allow(dead_code, unused_variables, clippy::panic, clippy::unwrap_used)] // TODO: Reenable once this is working
 
 use serde::{Deserialize, Serialize};
 use specta::Type;
@@ -149,7 +149,7 @@ impl StatefulJob for FileIdentifierJob {
 			.select(file_path::select!({ id }))
 			.exec()
 			.await?
-			.unwrap(); // SAFETY: We already validated before that there are orphans `file_path`s
+			.expect("We already validated before that there are orphans `file_path`s"); // SAFETY: We already validated before that there are orphans `file_path`s
 
 		data.cursor = first_path.id;
@@ -108,7 +108,8 @@ async fn identifier_job_step(
 		.await
 		.map(|params| {
 			(
-				Uuid::from_slice(&file_path.pub_id).unwrap(),
+				// SAFETY: This should never happen
+				Uuid::from_slice(&file_path.pub_id).expect("file_path.pub_id is invalid!"),
 				(params, file_path),
 			)
 		})
@@ -191,7 +192,7 @@ async fn identifier_job_step(
 			let (crdt_op, db_op) = file_path_object_connect_ops(
 				pub_id,
 				// SAFETY: This pub_id is generated by the uuid lib, but we have to store bytes in sqlite
-				Uuid::from_slice(&object.pub_id).unwrap(),
+				Uuid::from_slice(&object.pub_id).expect("uuid bytes are invalid"),
 				sync,
 				db,
 			);
@@ -148,7 +148,7 @@ impl StatefulJob for ShallowFileIdentifierJob {
 			.select(file_path::select!({ id }))
 			.exec()
 			.await?
-			.unwrap(); // SAFETY: We already validated before that there are orphans `file_path`s
+			.expect("We already validated before that there are orphans `file_path`s"); // SAFETY: We already validated before that there are orphans `file_path`s
 
 		data.cursor = first_path.id;
@@ -194,7 +194,12 @@ impl StatefulJob for FileEncryptorJob {
 					.config()
 					.data_directory()
 					.join("thumbnails")
-					.join(info.path_data.cas_id.as_ref().unwrap())
+					.join(
+						info.path_data
+							.cas_id
+							.as_ref()
+							.ok_or(JobError::MissingCasId)?,
+					)
 					.with_extension("wepb");
 
 				if tokio::fs::metadata(&pvm_path).await.is_ok() {
@@ -124,7 +124,11 @@ pub async fn generate_image_thumbnail<P: AsRef<Path>>(
 	let webp = block_in_place(|| -> Result<Vec<u8>, Box<dyn Error>> {
 		#[cfg(all(feature = "heif", target_os = "macos"))]
 		let img = {
-			let ext = file_path.as_ref().extension().unwrap().to_ascii_lowercase();
+			let ext = file_path
+				.as_ref()
+				.extension()
+				.unwrap_or_default()
+				.to_ascii_lowercase();
 			if HEIF_EXTENSIONS
 				.iter()
 				.any(|e| ext == std::ffi::OsStr::new(e))
@@ -73,7 +73,10 @@ impl StatefulJob for ObjectValidatorJob {
 			.find_unique(location::id::equals(state.init.location_id))
 			.exec()
 			.await?
-			.unwrap();
+			.ok_or(JobError::MissingFromDb(
+				"location",
+				format!("id={}", state.init.location_id),
+			))?;
 
 		state.data = Some(ObjectValidatorJobState {
 			root_path: location.path.into(),
@@ -1,4 +1,7 @@
+#![allow(clippy::unwrap_used)] // TODO: Remove once this is fully stablised
+
 use std::{
+	borrow::Cow,
 	collections::HashMap,
 	path::PathBuf,
 	sync::Arc,
@@ -8,7 +11,7 @@ use std::{
 use sd_p2p::{
 	spaceblock::{self, BlockSize, SpacedropRequest},
 	spacetime::SpaceTimeStream,
-	Event, Manager, MetadataManager, PeerId,
+	Event, Manager, ManagerError, MetadataManager, PeerId,
 };
 use sd_sync::CRDTOperation;
 use serde::Serialize;
@@ -58,7 +61,7 @@ pub struct P2PManager {
 impl P2PManager {
 	pub async fn new(
 		node_config: Arc<NodeConfigManager>,
-	) -> (Arc<Self>, broadcast::Receiver<(Uuid, Vec<CRDTOperation>)>) {
+	) -> Result<(Arc<Self>, broadcast::Receiver<(Uuid, Vec<CRDTOperation>)>), ManagerError> {
 		let (config, keypair) = {
 			let config = node_config.get().await;
 			(Self::config_to_metadata(&config), config.keypair)
@@ -67,9 +70,7 @@ impl P2PManager {
 		let metadata_manager = MetadataManager::new(config);
 
 		let (manager, mut stream) =
-			Manager::new(SPACEDRIVE_APP_ID, &keypair, metadata_manager.clone())
-				.await
-				.unwrap();
+			Manager::new(SPACEDRIVE_APP_ID, &keypair, metadata_manager.clone()).await?;
 
 		info!(
 			"Node '{}' is now online listening at addresses: {:?}",
@@ -226,41 +227,7 @@ impl P2PManager {
 			}
 		});
 
-		// TODO(@Oscar): Remove this in the future once i'm done using it for testing
-		if std::env::var("SPACEDROP_DEMO").is_ok() {
-			// tokio::spawn({
-			// 	let this = this.clone();
-			// 	async move {
-			// 		tokio::time::sleep(std::time::Duration::from_secs(5)).await;
-			// 		let mut connected = this
-			// 			.manager
-			// 			.get_connected_peers()
-			// 			.await
-			// 			.unwrap()
-			// 			.into_iter();
-			// 		if let Some(peer_id) = connected.next() {
-			// 			info!("Starting Spacedrop to peer '{}'", peer_id);
-			// 			this.broadcast_sync_events(
-			// 				Uuid::from_str("e4372586-d028-48f8-8be6-b4ff781a7dc2").unwrap(),
-			// 				vec![CRDTOperation {
-			// 					node: Uuid::new_v4(),
-			// 					timestamp: NTP64(1),
-			// 					id: Uuid::new_v4(),
-			// 					typ: CRDTOperationType::Owned(OwnedOperation {
-			// 						model: "TODO".to_owned(),
-			// 						items: Vec::new(),
-			// 					}),
-			// 				}],
-			// 			)
-			// 			.await;
-			// 		} else {
-			// 			info!("No clients found so skipping Spacedrop demo!");
-			// 		}
-			// 	}
-			// });
-		}
-
-		(this, rx2)
+		Ok((this, rx2))
 	}
 
 	fn config_to_metadata(config: &NodeConfig) -> PeerMetadata {
@@ -296,7 +263,13 @@ impl P2PManager {
 
 	#[allow(unused)] // TODO: Remove `allow(unused)` once integrated
 	pub async fn broadcast_sync_events(&self, library_id: Uuid, event: Vec<CRDTOperation>) {
-		let mut buf = rmp_serde::to_vec_named(&event).unwrap(); // TODO: Error handling
+		let mut buf = match rmp_serde::to_vec_named(&event) {
+			Ok(buf) => buf,
+			Err(e) => {
+				error!("Failed to serialize sync event: {:?}", e);
+				return;
+			}
+		};
 		let mut head_buf = Header::Sync(library_id, buf.len() as u32).to_bytes(); // Max Sync payload is like 4GB
 		head_buf.append(&mut buf);
@@ -309,26 +282,31 @@ impl P2PManager {
 		self.manager.broadcast(Header::Ping.to_bytes()).await;
 	}
 
-	pub async fn big_bad_spacedrop(&self, peer_id: PeerId, path: PathBuf) {
-		let mut stream = self.manager.stream(peer_id).await.unwrap(); // TODO: handle providing incorrect peer id
+	// TODO: Proper error handling
+	pub async fn big_bad_spacedrop(&self, peer_id: PeerId, path: PathBuf) -> Result<(), ()> {
+		let mut stream = self.manager.stream(peer_id).await.map_err(|_| ())?; // TODO: handle providing incorrect peer id
 
-		let file = File::open(&path).await.unwrap();
-		let metadata = file.metadata().await.unwrap();
+		let file = File::open(&path).await.map_err(|_| ())?;
+		let metadata = file.metadata().await.map_err(|_| ())?;
 
 		let header = Header::Spacedrop(SpacedropRequest {
-			name: path.file_name().unwrap().to_str().unwrap().to_string(), // TODO: Encode this as bytes instead
+			name: path
+				.file_name()
+				.map(|v| v.to_string_lossy())
+				.unwrap_or(Cow::Borrowed(""))
+				.to_string(),
 			size: metadata.len(),
 			block_size: BlockSize::from_size(metadata.len()), // TODO: This should be dynamic
 		});
-		stream.write_all(&header.to_bytes()).await.unwrap();
+		stream.write_all(&header.to_bytes()).await.map_err(|_| ())?;
 
 		debug!("Waiting for Spacedrop to be accepted from peer '{peer_id}'");
 		let mut buf = [0; 1];
 		// TODO: Add timeout so the connection is dropped if they never response
-		stream.read_exact(&mut buf).await.unwrap();
+		stream.read_exact(&mut buf).await.map_err(|_| ())?;
 		if buf[0] != 1 {
 			debug!("Spacedrop was rejected from peer '{peer_id}'");
-			return;
+			return Ok(());
 		}
 
 		debug!("Starting Spacedrop to peer '{peer_id}'");
@@ -349,6 +327,8 @@ impl P2PManager {
 			"Finished Spacedrop to peer '{peer_id}' after '{:?}",
 			i.elapsed()
 		);
+
+		Ok(())
 	}
 
 	pub async fn shutdown(&self) {
@@ -1,7 +1,11 @@
+use thiserror::Error;
 use tokio::io::AsyncReadExt;
 use uuid::Uuid;
 
-use sd_p2p::{spaceblock::SpacedropRequest, spacetime::SpaceTimeStream};
+use sd_p2p::{
+	spaceblock::{SpacedropRequest, SpacedropRequestError},
+	spacetime::SpaceTimeStream,
+};
 
 /// TODO
 #[derive(Debug, PartialEq, Eq)]
@@ -11,31 +15,65 @@ pub enum Header {
 	Sync(Uuid, u32),
 }
 
+#[derive(Debug, Error)]
+pub enum SyncRequestError {
+	#[error("io error reading library id: {0}")]
+	LibraryIdIoError(std::io::Error),
+	#[error("io error decoding library id: {0}")]
+	ErrorDecodingLibraryId(uuid::Error),
+	#[error("io error reading sync payload len: {0}")]
+	PayloadLenIoError(std::io::Error),
+}
+
+#[derive(Debug, Error)]
+pub enum HeaderError {
+	#[error("io error reading discriminator: {0}")]
+	DiscriminatorIoError(std::io::Error),
+	#[error("invalid discriminator '{0}'")]
+	InvalidDiscriminator(u8),
+	#[error("error reading spacedrop request: {0}")]
+	SpacedropRequestError(#[from] SpacedropRequestError),
+	#[error("error reading sync request: {0}")]
+	SyncRequestError(#[from] SyncRequestError),
+	#[error("invalid request. Spacedrop requires a unicast stream!")]
+	SpacedropOverMulticastIsForbidden,
+}
+
 impl Header {
-	pub async fn from_stream(stream: &mut SpaceTimeStream) -> Result<Self, ()> {
-		let discriminator = stream.read_u8().await.map_err(|e| {
-			dbg!(e);
-		})?; // TODO: Error handling
+	pub async fn from_stream(stream: &mut SpaceTimeStream) -> Result<Self, HeaderError> {
+		let discriminator = stream
+			.read_u8()
+			.await
+			.map_err(HeaderError::DiscriminatorIoError)?;
 
 		match discriminator {
 			0 => match stream {
 				SpaceTimeStream::Unicast(stream) => Ok(Self::Spacedrop(
 					SpacedropRequest::from_stream(stream).await?,
 				)),
-				_ => todo!(),
+				_ => Err(HeaderError::SpacedropOverMulticastIsForbidden),
 			},
 			1 => Ok(Self::Ping),
 			2 => {
 				let mut uuid = [0u8; 16];
-				stream.read_exact(&mut uuid).await.map_err(|_| ())?; // TODO: Error handling
+				stream
+					.read_exact(&mut uuid)
+					.await
+					.map_err(SyncRequestError::LibraryIdIoError)?;
 
 				let mut len = [0; 4];
-				stream.read_exact(&mut len).await.map_err(|_| ())?; // TODO: Error handling
+				stream
+					.read_exact(&mut len)
+					.await
+					.map_err(SyncRequestError::PayloadLenIoError)?;
 				let len = u32::from_le_bytes(len);
 
-				Ok(Self::Sync(Uuid::from_slice(&uuid).unwrap(), len)) // TODO: Error handling
+				Ok(Self::Sync(
+					Uuid::from_slice(&uuid).map_err(SyncRequestError::ErrorDecodingLibraryId)?,
+					len,
+				))
 			}
-			_ => Err(()),
+			d => Err(HeaderError::InvalidDiscriminator(d)),
 		}
 	}
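Aside: `Header::from_stream` now names every failure instead of returning `Err(())` or hitting `todo!()`. The same discriminator-dispatch shape, reduced to a synchronous byte-slice parser so it stands alone (all names here are hypothetical simplifications):

#[derive(Debug)]
enum Header {
	Ping,
	Sync(u32),
}

#[derive(Debug)]
enum HeaderError {
	MissingDiscriminator,
	TruncatedSyncLen,
	InvalidDiscriminator(u8),
}

fn header_from_bytes(buf: &[u8]) -> Result<Header, HeaderError> {
	// First byte tags the message type, mirroring `stream.read_u8()`.
	let (&discriminator, rest) = buf.split_first().ok_or(HeaderError::MissingDiscriminator)?;
	match discriminator {
		1 => Ok(Header::Ping),
		2 => {
			// Payload length: 4 little-endian bytes, as in the Sync variant.
			let len: [u8; 4] = rest
				.get(0..4)
				.and_then(|s| s.try_into().ok())
				.ok_or(HeaderError::TruncatedSyncLen)?;
			Ok(Header::Sync(u32::from_le_bytes(len)))
		}
		// Unknown tag: report it instead of `todo!()` or `Err(())`.
		d => Err(HeaderError::InvalidDiscriminator(d)),
	}
}

fn main() {
	assert!(matches!(header_from_bytes(&[1]), Ok(Header::Ping)));
	assert!(matches!(
		header_from_bytes(&[9]),
		Err(HeaderError::InvalidDiscriminator(9))
	));
}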
@@ -1,3 +1,5 @@
+#![allow(clippy::unwrap_used, clippy::panic)] // TODO: Brendan remove this once you've got error handling here
+
 use crate::prisma::*;
 
 use std::{collections::HashMap, sync::Arc};
@@ -1,16 +1,22 @@
 //! A system for loading a default set of data on startup. This is ONLY enabled in development builds.
 
 use std::{
+	io,
 	path::{Path, PathBuf},
 	time::Duration,
 };
 
 use crate::{
-	library::LibraryConfig,
-	location::{delete_location, scan_location, LocationCreateArgs},
+	job::JobManagerError,
+	library::{LibraryConfig, LibraryManagerError},
+	location::{
+		delete_location, scan_location, LocationCreateArgs, LocationError, LocationManagerError,
+	},
 	prisma::location,
 };
+use prisma_client_rust::QueryError;
 use serde::Deserialize;
+use thiserror::Error;
 use tokio::{
	fs::{self, metadata},
	time::sleep,
@@ -47,39 +53,56 @@ pub struct InitConfig {
 	path: PathBuf,
 }
 
+#[derive(Error, Debug)]
+pub enum InitConfigError {
+	#[error("error loading the init data: {0}")]
+	Io(#[from] io::Error),
+	#[error("error parsing the init data: {0}")]
+	Json(#[from] serde_json::Error),
+	#[error("job manager: {0}")]
+	JobManager(#[from] JobManagerError),
+	#[error("location manager: {0}")]
+	LocationManager(#[from] LocationManagerError),
+	#[error("library manager: {0}")]
+	LibraryManager(#[from] LibraryManagerError),
+	#[error("query error: {0}")]
+	QueryError(#[from] QueryError),
+	#[error("location error: {0}")]
+	LocationError(#[from] LocationError),
+}
+
 impl InitConfig {
-	pub async fn load(data_dir: &Path) -> Option<Self> {
-		let path = std::env::current_dir()
-			.unwrap()
+	pub async fn load(data_dir: &Path) -> Result<Option<Self>, InitConfigError> {
+		let path = std::env::current_dir()?
 			.join(std::env::var("SD_INIT_DATA").unwrap_or("sd_init.json".to_string()));
 
 		if metadata(&path).await.is_ok() {
-			let config = fs::read_to_string(&path).await.unwrap();
-			let mut config: InitConfig = serde_json::from_str(&config).unwrap();
+			let config = fs::read_to_string(&path).await?;
+			let mut config: InitConfig = serde_json::from_str(&config)?;
 			config.path = path;
 
 			if config.reset_on_startup && data_dir.exists() {
 				warn!("previous 'SD_DATA_DIR' was removed on startup!");
-				fs::remove_dir_all(&data_dir).await.unwrap();
+				fs::remove_dir_all(&data_dir).await?;
 			}
 
-			return Some(config);
+			return Ok(Some(config));
 		}
 
-		None
+		Ok(None)
 	}
 
-	pub async fn apply(self, library_manager: &LibraryManager) {
+	pub async fn apply(self, library_manager: &LibraryManager) -> Result<(), InitConfigError> {
 		info!("Initializing app from file: {:?}", self.path);
 
 		for lib in self.libraries {
 			let name = lib.name.clone();
-			let handle = tokio::spawn(async move {
+			let _guard = AbortOnDrop(tokio::spawn(async move {
 				loop {
 					info!("Initializing library '{name}' from 'sd_init.json'...");
 					sleep(Duration::from_secs(1)).await;
 				}
-			});
+			}));
 
 			let library = match library_manager.get_library(lib.id).await {
 				Some(lib) => lib,
@@ -92,25 +115,27 @@ impl InitConfig {
 							description: lib.description.unwrap_or("".to_string()),
 						},
 					)
-					.await
-					.unwrap();
+					.await?;
 
-					library_manager.get_library(library.uuid).await.unwrap()
+					match library_manager.get_library(library.uuid).await {
+						Some(lib) => lib,
+						None => {
+							warn!(
+								"Debug init error: library '{}' was not found after being created!",
+								library.config.name
+							);
+							return Ok(());
+						}
+					}
 				}
 			};
 
 			if lib.reset_locations_on_startup {
-				let locations = library
-					.db
-					.location()
-					.find_many(vec![])
-					.exec()
-					.await
-					.unwrap();
+				let locations = library.db.location().find_many(vec![]).exec().await?;
 
 				for location in locations {
 					warn!("deleting location: {:?}", location.path);
-					delete_location(&library, location.id).await.unwrap();
+					delete_location(&library, location.id).await?;
 				}
 			}
|
||||
@@ -120,34 +145,47 @@ impl InitConfig {
|
||||
.location()
|
||||
.find_first(vec![location::path::equals(loc.path.clone())])
|
||||
.exec()
|
||||
.await
|
||||
.unwrap()
|
||||
.await?
|
||||
{
|
||||
warn!("deleting location: {:?}", location.path);
|
||||
delete_location(&library, location.id).await.unwrap();
|
||||
delete_location(&library, location.id).await?;
|
||||
}
|
||||
|
||||
let sd_file = PathBuf::from(&loc.path).join(".spacedrive");
|
||||
if sd_file.exists() {
|
||||
fs::remove_file(sd_file).await.unwrap();
|
||||
fs::remove_file(sd_file).await?;
|
||||
}
|
||||
|
||||
let location = LocationCreateArgs {
|
||||
path: loc.path.into(),
|
||||
path: loc.path.clone().into(),
|
||||
dry_run: false,
|
||||
indexer_rules_ids: Vec::new(),
|
||||
}
|
||||
.create(&library)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
scan_location(&library, location).await.unwrap();
|
||||
.await?;
|
||||
match location {
|
||||
Some(location) => {
|
||||
scan_location(&library, location).await?;
|
||||
}
|
||||
None => {
|
||||
warn!(
|
||||
"Debug init error: location '{}' was not found after being created!",
|
||||
loc.path
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
handle.abort();
|
||||
}
|
||||
|
||||
info!("Initialized app from file: {:?}", self.path);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct AbortOnDrop<T>(tokio::task::JoinHandle<T>);
|
||||
|
||||
impl<T> Drop for AbortOnDrop<T> {
|
||||
fn drop(&mut self) {
|
||||
self.0.abort();
|
||||
}
|
||||
}
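Aside: `AbortOnDrop` ties the spawned progress-logging task to the enclosing scope, so the early `return Ok(())` paths added to `apply` can no longer leak it (the old explicit `handle.abort()` was skipped on early exits). A runnable usage sketch, assuming tokio with the rt, time and macros features:

use std::time::Duration;

pub struct AbortOnDrop<T>(tokio::task::JoinHandle<T>);

impl<T> Drop for AbortOnDrop<T> {
	fn drop(&mut self) {
		// Cancel the task when the guard leaves scope, even on `?`/early return.
		self.0.abort();
	}
}

#[tokio::main]
async fn main() {
	{
		let _guard = AbortOnDrop(tokio::spawn(async {
			loop {
				println!("still working...");
				tokio::time::sleep(Duration::from_millis(50)).await;
			}
		}));
		tokio::time::sleep(Duration::from_millis(120)).await;
		// `_guard` dropped here: the logging loop is aborted.
	}
	tokio::time::sleep(Duration::from_millis(100)).await; // no further output
}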
@@ -49,7 +49,7 @@ where
 	// }
 
 	pub fn load(&self, path: &Path) -> Result<T, MigratorError> {
-		match path.try_exists().unwrap() {
+		match path.try_exists()? {
 			true => {
 				let mut file = File::options().read(true).write(true).open(path)?;
 				let mut cfg: BaseConfig = serde_json::from_reader(BufReader::new(&mut file))?;
@@ -89,10 +89,7 @@ where
 			other: match serde_json::to_value(content)? {
 				Value::Object(map) => map,
 				_ => {
-					panic!(
-						"Type '{}' as generic `Migrator::T` must be serialiable to a Serde object!",
-						type_name::<T>()
-					);
+					return Err(MigratorError::InvalidType(type_name::<T>()));
 				}
 			},
 		};
@@ -113,6 +110,8 @@ pub enum MigratorError {
 		"the config file is for a newer version of the app. Please update to the latest version to load it!"
 	)]
 	YourAppIsOutdated,
+	#[error("Type '{0}' as generic `Migrator::T` must be serialiable to a Serde object!")]
+	InvalidType(&'static str),
 	#[error("custom migration error: {0}")]
 	Custom(String),
 }
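Aside: replacing `panic!` with the new `MigratorError::InvalidType` variant turns a crash into a value the caller can handle. The pattern in isolation, with a reduced error enum and `serde_json` (already a dependency in this diff):

use std::any::type_name;

#[derive(Debug)]
enum MigratorError {
	InvalidType(&'static str),
}

fn as_object(
	value: serde_json::Value,
) -> Result<serde_json::Map<String, serde_json::Value>, MigratorError> {
	match value {
		serde_json::Value::Object(map) => Ok(map),
		// Was a `panic!`; now the caller decides how to react.
		_ => Err(MigratorError::InvalidType(type_name::<serde_json::Value>())),
	}
}

fn main() {
	assert!(as_object(serde_json::json!({ "a": 1 })).is_ok());
	assert!(as_object(serde_json::json!(42)).is_err());
}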
@@ -11,6 +11,7 @@ use sysinfo::{DiskExt, System, SystemExt};
 use thiserror::Error;
 
 #[derive(Serialize, Deserialize, Debug, Clone, Type)]
+#[allow(clippy::upper_case_acronyms)]
 pub enum DiskType {
 	SSD,
 	HDD,
@@ -20,17 +20,19 @@ tokio = { workspace = true, features = [
 	"io-util",
 	"fs",
 ] }
-libp2p = { version = "0.51.0", features = ["tokio", "quic", "serde"] }
+libp2p = { version = "0.51.3", features = ["tokio", "serde"] }
+libp2p-quic = { version = "0.7.0-alpha.3", features = ["tokio"] }
+if-watch = { version = "3.0.1", features = ["tokio"] } # Override the features of if-watch which is used by libp2p-quic
 mdns-sd = "0.6.1"
-thiserror = "1.0.39"
+thiserror = "1.0.40"
 tracing = "0.1.37"
-serde = { version = "1.0.152", features = ["derive"] }
+serde = { version = "1.0.163", features = ["derive"] }
 rmp-serde = "1.1.1"
 specta = { workspace = true }
 flume = "0.10.14"
-tokio-util = { version = "0.7.7", features = ["compat"] }
+tokio-util = { version = "0.7.8", features = ["compat"] }
 arc-swap = "1.6.0"
 
 [dev-dependencies]
 tokio = { workspace = true, features = ["rt-multi-thread"] }
-tracing-subscriber = { version = "0.3.16", features = ["env-filter"] }
+tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
@@ -7,7 +7,7 @@ use std::{
 	},
 };
 
-use libp2p::{core::muxing::StreamMuxerBox, quic, Swarm, Transport};
+use libp2p::{core::muxing::StreamMuxerBox, swarm::SwarmBuilder, Transport};
 use thiserror::Error;
 use tokio::sync::{mpsc, oneshot};
 use tracing::{debug, error, warn};
@@ -41,7 +41,7 @@ impl<TMetadata: Metadata> Manager<TMetadata> {
 			.then_some(())
 			.ok_or(ManagerError::InvalidAppName)?;
 
-		let peer_id = PeerId(keypair.public().to_peer_id());
+		let peer_id = PeerId(keypair.peer_id());
 		let (event_stream_tx, event_stream_rx) = mpsc::channel(1024);
 
 		let (mdns, mdns_state) = Mdns::new(application_name, peer_id, metadata_manager)
@@ -60,13 +60,16 @@ impl<TMetadata: Metadata> Manager<TMetadata> {
 			event_stream_tx,
 		});
 
-		let mut swarm = Swarm::with_tokio_executor(
-			quic::GenTransport::<quic::tokio::Provider>::new(quic::Config::new(keypair.inner()))
-				.map(|(p, c), _| (p, StreamMuxerBox::new(c)))
-				.boxed(),
+		let mut swarm = SwarmBuilder::with_tokio_executor(
+			libp2p_quic::GenTransport::<libp2p_quic::tokio::Provider>::new(
+				libp2p_quic::Config::new(&keypair.inner()),
+			)
+			.map(|(p, c), _| (p, StreamMuxerBox::new(c)))
+			.boxed(),
 			SpaceTime::new(this.clone()),
-			keypair.public().to_peer_id(),
-		);
+			keypair.peer_id(),
+		)
+		.build();
 		{
 			let listener_id = swarm
 				.listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse().expect("Error passing libp2p multiaddr. This value is hardcoded so this should be impossible."))
@@ -12,7 +12,7 @@ use libp2p::{
 	futures::StreamExt,
 	swarm::{
 		dial_opts::{DialOpts, PeerCondition},
-		NetworkBehaviourAction, NotifyHandler, SwarmEvent,
+		NotifyHandler, SwarmEvent, ToSwarm,
 	},
 	Swarm,
 };
@@ -114,7 +114,6 @@ where
 			SwarmEvent::IncomingConnection { local_addr, .. } => debug!("incoming connection from '{}'", local_addr),
 			SwarmEvent::IncomingConnectionError { local_addr, error, .. } => warn!("handshake error with incoming connection from '{}': {}", local_addr, error),
 			SwarmEvent::OutgoingConnectionError { peer_id, error } => warn!("error establishing connection with '{:?}': {}", peer_id, error),
-			SwarmEvent::BannedPeer { peer_id, .. } => warn!("banned peer '{}' attempted to connection and was rejected", peer_id),
 			SwarmEvent::NewListenAddr { address, .. } => {
 				match quic_multiaddr_to_socketaddr(address) {
 					Ok(addr) => {
@@ -162,6 +161,8 @@ where
 			}
 			SwarmEvent::ListenerError { listener_id, error } => warn!("listener '{:?}' reported a non-fatal error: {}", listener_id, error),
 			SwarmEvent::Dialing(_peer_id) => {},
+			#[allow(deprecated)]
+			SwarmEvent::BannedPeer { .. } => {},
 		}
 	}
 }
@@ -202,26 +203,25 @@ where
}
}
ManagerStreamAction::StartStream(peer_id, rx) => {
self.swarm.behaviour_mut().pending_events.push_back(
NetworkBehaviourAction::NotifyHandler {
self.swarm
.behaviour_mut()
.pending_events
.push_back(ToSwarm::NotifyHandler {
peer_id: peer_id.0,
handler: NotifyHandler::Any,
event: OutboundRequest::Unicast(rx),
},
);
});
}
ManagerStreamAction::BroadcastData(data) => {
let connected_peers = self.swarm.connected_peers().copied().collect::<Vec<_>>();
let behaviour = self.swarm.behaviour_mut();
debug!("Broadcasting message to '{:?}'", connected_peers);
for peer_id in connected_peers {
behaviour
.pending_events
.push_back(NetworkBehaviourAction::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event: OutboundRequest::Broadcast(data.clone()),
});
behaviour.pending_events.push_back(ToSwarm::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event: OutboundRequest::Broadcast(data.clone()),
});
}
}
ManagerStreamAction::Shutdown(tx) => {

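Note: `NetworkBehaviourAction` was renamed to `ToSwarm` in libp2p 0.51 with no structural change, which is why every substitution in this hunk is mechanical. A code base straddling both names could even bridge the rename with a re-export alias (a sketch, not something this diff does):

// Keep pre-rename call sites compiling while the migration lands.
use libp2p::swarm::ToSwarm as NetworkBehaviourAction;
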
@@ -6,8 +6,10 @@
use std::{
marker::PhantomData,
path::{Path, PathBuf},
string::FromUtf8Error,
};

use thiserror::Error;
use tokio::{
fs::File,
io::{AsyncBufRead, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader},
@@ -42,22 +44,38 @@ pub struct SpacedropRequest {
pub block_size: BlockSize,
}

#[derive(Debug, Error)]
pub enum SpacedropRequestError {
#[error("io error reading name len: {0}")]
NameLenIoError(std::io::Error),
#[error("io error reading name: {0}")]
NameIoError(std::io::Error),
#[error("error utf-8 decoding name: {0}")]
NameFormatError(FromUtf8Error),
#[error("io error reading file size: {0}")]
SizeIoError(std::io::Error),
}

impl SpacedropRequest {
pub async fn from_stream(stream: &mut (impl AsyncRead + Unpin)) -> Result<Self, ()> {
let mut name_len = [0; 2];
stream.read_exact(&mut name_len).await.map_err(|_| ())?; // TODO: Error handling
let name_len = u16::from_le_bytes(name_len);

pub async fn from_stream(
stream: &mut (impl AsyncRead + Unpin),
) -> Result<Self, SpacedropRequestError> {
let name_len = stream
.read_u16_le()
.await
.map_err(SpacedropRequestError::NameLenIoError)?;
let mut name = vec![0u8; name_len as usize];
stream.read_exact(&mut name).await.map_err(|_| ())?; // TODO: Error handling
let name = String::from_utf8(name).map_err(|_| ())?; // TODO: Error handling
stream
.read_exact(&mut name)
.await
.map_err(SpacedropRequestError::NameIoError)?;
let name = String::from_utf8(name).map_err(SpacedropRequestError::NameFormatError)?;

let mut size = [0; 8];
stream.read_exact(&mut size).await.map_err(|_| ())?; // TODO: Error handling
let size = u64::from_le_bytes(size);

// TODO: If we change `BlockSize` and both clients are running a different version this will not match up and everything will explode
let block_size = BlockSize::from_size(size); // TODO: Error handling
let size = stream
.read_u64_le()
.await
.map_err(SpacedropRequestError::SizeIoError)?;
let block_size = BlockSize::from_size(size); // TODO: Get from stream: stream.read_u8().await.map_err(|_| ())?; // TODO: Error handling

Ok(Self {
name,

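A hypothetical write-side counterpart (not part of this diff) makes the wire layout read above explicit: a little-endian u16 name length, the UTF-8 name bytes, then a little-endian u64 file size:

use tokio::io::{AsyncWrite, AsyncWriteExt};

// Mirror of `from_stream`: serialize a request using the same field order
// and endianness that the reader expects.
async fn write_request(
	stream: &mut (impl AsyncWrite + Unpin),
	name: &str,
	size: u64,
) -> Result<(), std::io::Error> {
	stream.write_u16_le(name.len() as u16).await?;
	stream.write_all(name.as_bytes()).await?;
	stream.write_u64_le(size).await?;
	Ok(())
}
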
@@ -8,8 +8,8 @@ use libp2p::{
core::{ConnectedPoint, Endpoint},
swarm::{
derive_prelude::{ConnectionEstablished, ConnectionId, FromSwarm},
ConnectionClosed, ConnectionDenied, ConnectionHandler, NetworkBehaviour,
NetworkBehaviourAction, PollParameters, THandler, THandlerInEvent,
ConnectionClosed, ConnectionDenied, ConnectionHandler, NetworkBehaviour, PollParameters,
THandler, THandlerInEvent, ToSwarm,
},
Multiaddr,
};
@@ -34,9 +34,8 @@ pub enum OutboundFailure {}
/// This protocol sits under the application to abstract many complexities of two-way connections and deals with authentication, chunking, etc.
pub struct SpaceTime<TMetadata: Metadata> {
pub(crate) manager: Arc<Manager<TMetadata>>,
pub(crate) pending_events: VecDeque<
NetworkBehaviourAction<<Self as NetworkBehaviour>::OutEvent, THandlerInEvent<Self>>,
>,
pub(crate) pending_events:
VecDeque<ToSwarm<<Self as NetworkBehaviour>::OutEvent, THandlerInEvent<Self>>>,
// For future me's sake, DON'T try and refactor this to use shared state (for the nth time); it doesn't fit into libp2p's synchronous trait and polling model!
// pub(crate) connected_peers: HashMap<PeerId, ConnectedPeer>,
}
@@ -116,12 +115,11 @@ impl<TMetadata: Metadata> NetworkBehaviour for SpaceTime<TMetadata> {
{
debug!("sending establishment request to peer '{}'", peer_id);
if other_established == 0 {
self.pending_events
.push_back(NetworkBehaviourAction::GenerateEvent(
ManagerStreamAction::Event(Event::PeerConnected(ConnectedPeer {
peer_id,
})),
));
self.pending_events.push_back(ToSwarm::GenerateEvent(
ManagerStreamAction::Event(Event::PeerConnected(ConnectedPeer {
peer_id,
})),
));
}
}
}
@@ -133,10 +131,9 @@ impl<TMetadata: Metadata> NetworkBehaviour for SpaceTime<TMetadata> {
let peer_id = PeerId(peer_id);
if remaining_established == 0 {
debug!("Disconnected from peer '{}'", peer_id);
self.pending_events
.push_back(NetworkBehaviourAction::GenerateEvent(
ManagerStreamAction::Event(Event::PeerDisconnected(peer_id)),
));
self.pending_events.push_back(ToSwarm::GenerateEvent(
ManagerStreamAction::Event(Event::PeerDisconnected(peer_id)),
));
}
}
FromSwarm::AddressChange(event) => {
@@ -187,15 +184,14 @@ impl<TMetadata: Metadata> NetworkBehaviour for SpaceTime<TMetadata> {
_connection: ConnectionId,
event: <SpaceTimeConnection<TMetadata> as ConnectionHandler>::OutEvent,
) {
self.pending_events
.push_back(NetworkBehaviourAction::GenerateEvent(event));
self.pending_events.push_back(ToSwarm::GenerateEvent(event));
}

fn poll(
&mut self,
_: &mut Context<'_>,
_: &mut impl PollParameters,
) -> Poll<NetworkBehaviourAction<Self::OutEvent, THandlerInEvent<Self>>> {
) -> Poll<ToSwarm<Self::OutEvent, THandlerInEvent<Self>>> {
if let Some(ev) = self.pending_events.pop_front() {
return Poll::Ready(ev);
} else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD {

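The truncated `poll` body above implements a drain-then-shrink queue: pop pending events first, and release the queue's spare capacity once it empties past a threshold so a burst of events does not pin memory. A standalone sketch of the same pattern (the threshold value here is illustrative):

use std::{collections::VecDeque, task::Poll};

const EMPTY_QUEUE_SHRINK_THRESHOLD: usize = 100; // illustrative value

// Pop a queued event if one exists; otherwise shrink an over-allocated
// queue and report Pending.
fn drain_pending<T>(pending_events: &mut VecDeque<T>) -> Poll<T> {
	if let Some(ev) = pending_events.pop_front() {
		return Poll::Ready(ev);
	} else if pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD {
		pending_events.shrink_to_fit();
	}
	Poll::Pending
}
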
@@ -1,20 +1,22 @@
use libp2p::identity::{ed25519, PublicKey};
use libp2p::identity::ed25519::{self};
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone)]
pub struct Keypair(libp2p::identity::Keypair);
pub struct Keypair(ed25519::Keypair);

impl Keypair {
pub fn generate() -> Self {
Self(libp2p::identity::Keypair::generate_ed25519())
Self(ed25519::Keypair::generate())
}

pub fn public(&self) -> PublicKey {
self.0.public()
pub fn peer_id(&self) -> libp2p::PeerId {
let pk: libp2p::identity::PublicKey = self.0.public().into();

libp2p::PeerId::from_public_key(&pk)
}

pub fn inner(&self) -> &libp2p::identity::Keypair {
&self.0
pub fn inner(&self) -> libp2p::identity::Keypair {
self.0.clone().into()
}
}

@@ -23,13 +25,7 @@ impl Serialize for Keypair {
where
S: serde::Serializer,
{
match &self.0 {
libp2p::identity::Keypair::Ed25519(keypair) => {
serializer.serialize_bytes(&keypair.encode())
}
#[allow(unreachable_patterns)]
_ => unreachable!(),
}
serializer.serialize_bytes(&self.0.to_bytes())
}
}

@@ -39,8 +35,9 @@ impl<'de> Deserialize<'de> for Keypair {
D: serde::Deserializer<'de>,
{
let mut bytes = Vec::<u8>::deserialize(deserializer)?;
Ok(Self(libp2p::identity::Keypair::Ed25519(
ed25519::Keypair::decode(bytes.as_mut_slice()).map_err(serde::de::Error::custom)?,
)))
Ok(Self(
ed25519::Keypair::try_from_bytes(bytes.as_mut_slice())
.map_err(serde::de::Error::custom)?,
))
}
}

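Since serialization now goes through the raw 64-byte ed25519 encoding, a quick round-trip sketch shows the API surface the Serialize/Deserialize impls above rely on (assuming libp2p's `identity::ed25519` module as used in the diff):

use libp2p::identity::ed25519;

// Generate, dump to bytes, and rebuild via the same to_bytes/try_from_bytes
// pair used for (de)serialization above.
fn round_trip() -> ed25519::Keypair {
	let keypair = ed25519::Keypair::generate();
	let mut bytes = keypair.to_bytes().to_vec();
	ed25519::Keypair::try_from_bytes(bytes.as_mut_slice())
		.expect("bytes we just encoded are a valid keypair")
}
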
@@ -10,6 +10,7 @@ pub struct PeerId(
);

impl FromStr for PeerId {
#[allow(deprecated)]
type Err = libp2p::core::ParseError;

fn from_str(s: &str) -> Result<Self, Self::Err> {

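A usage sketch of the parsing path this impl exposes, assuming the wrapper delegates to libp2p's own base58 `PeerId` parser (the `#[allow(deprecated)]` is needed because `libp2p::core::ParseError` is deprecated in libp2p 0.51, as the hunk above shows):

use std::str::FromStr;

// Parse a base58-encoded peer id string into libp2p's PeerId.
#[allow(deprecated)]
fn parse_peer_id(s: &str) -> Result<libp2p::PeerId, libp2p::core::ParseError> {
	libp2p::PeerId::from_str(s)
}
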
@@ -1,11 +1,29 @@
import { captureException } from '@sentry/browser';
import { FallbackProps } from 'react-error-boundary';
import { useRouteError } from 'react-router';
import { useDebugState } from '@sd/client';
import { Button } from '@sd/ui';
import { useOperatingSystem } from './hooks';

export function RouterErrorBoundary() {
const error = useRouteError();
return (
<ErrorPage
message={(error as any).toString()}
sendReportBtn={() => {
captureException(error);
location.reload();
}}
reloadBtn={() => {
location.reload();
}}
/>
);
}

export default ({ error, resetErrorBoundary }: FallbackProps) => (
<ErrorPage
message={error.message}
message={`Error: ${error.message}`}
sendReportBtn={() => {
captureException(error);
resetErrorBoundary();
@@ -17,28 +35,34 @@ export default ({ error, resetErrorBoundary }: FallbackProps) => (
export function ErrorPage({
reloadBtn,
sendReportBtn,
message
message,
submessage
}: {
reloadBtn?: () => void;
sendReportBtn?: () => void;
message: string;
submessage?: string;
}) {
const debug = useDebugState();
const os = useOperatingSystem();
const isMacOS = os === 'macOS';

if (!submessage && debug.enabled)
submessage = 'Check the console (CMD/CTRL + OPTION + i) for stack trace.';

return (
<div
data-tauri-drag-region
role="alert"
className="flex h-screen w-screen flex-col items-center justify-center rounded-lg border border-app-divider bg-app p-4"
className={
'flex h-screen w-screen flex-col items-center justify-center border border-app-divider bg-app p-4' +
(isMacOS ? ' rounded-lg' : '')
}
>
<p className="m-3 text-sm font-bold text-ink-faint">APP CRASHED</p>
<h1 className="text-2xl font-bold text-ink">We're past the event horizon...</h1>
<pre className="m-2 text-ink">Error: {message}</pre>
{debug.enabled && (
<pre className="m-2 text-sm text-ink-dull">
Check the console (CMD/CTRL + OPTION + i) for stack trace.
</pre>
)}
<pre className="m-2 text-ink">{message}</pre>
{submessage && <pre className="m-2 text-sm text-ink-dull">{submessage}</pre>}
<div className="flex flex-row space-x-2 text-ink">
{reloadBtn && (
<Button variant="accent" className="mt-2" onClick={reloadBtn}>

@@ -1,10 +1,8 @@
import { useBridgeQuery, useLibraryQuery } from '@sd/client';
import { CodeBlock } from '~/components/Codeblock';
import { usePlatform } from '~/util/Platform';

// TODO: Bring this back with a button in the sidebar near settings at the bottom
export const Component = () => {
const platform = usePlatform();
const { data: nodeState } = useBridgeQuery(['nodeState']);
const { data: libraryState } = useBridgeQuery(['library.list']);
const { data: jobs } = useLibraryQuery(['jobs.getRunning']);

@@ -73,6 +73,17 @@ export const Component = () => {
</b>
<span className="select-text">{node.data?.data_path}</span>
</div>
<div
onClick={() => {
platform?.openLogsDir?.();
}}
className="text-sm font-medium text-ink-faint"
>
<b className="mr-2 inline truncate">
<Database className="mr-1 mt-[-2px] inline h-4 w-4" /> Logs Folder
</b>
<span className="select-text">{node.data?.data_path + '/logs'}</span>
</div>
</div>
</div>
</Card>

@@ -1,6 +1,7 @@
import { Navigate, Outlet, RouteObject } from 'react-router-dom';
import { currentLibraryCache, useCachedLibraries, useInvalidateQuery } from '@sd/client';
import { Dialogs } from '@sd/ui';
import { RouterErrorBoundary } from '~/ErrorFallback';
import { useKeybindHandler } from '~/hooks/useKeyboardHandler';
import libraryRoutes from './$libraryId';
import onboardingRoutes from './onboarding';
@@ -39,6 +40,7 @@ const Wrapper = () => {
export const routes = [
{
element: <Wrapper />,
errorElement: <RouterErrorBoundary />,
children: [
{
index: true,

@@ -19,6 +19,7 @@
"dependencies": {
"@fontsource/inter": "^4.5.13",
"@headlessui/react": "^1.7.3",
"@icons-pack/react-simple-icons": "^7.2.0",
"@radix-ui/react-progress": "^1.0.1",
"@radix-ui/react-slider": "^1.1.0",
"@radix-ui/react-toast": "^1.1.2",

@@ -21,6 +21,7 @@ export type Platform = {
saveFilePickerDialog?(): Promise<string | null>;
showDevtools?(): void;
openPath?(path: string): void;
openLogsDir?(): void;
// Opens a file path with a given ID
openFilePath?(library: string, id: number): any;
getFilePathOpenWithApps?(library: string, id: number): any;

@@ -77,7 +77,7 @@ export type Procedures = {
{ key: "locations.quickRescan", input: LibraryArgs<LightScanArgs>, result: null } |
{ key: "locations.relink", input: LibraryArgs<string>, result: null } |
{ key: "locations.update", input: LibraryArgs<LocationUpdateArgs>, result: null } |
{ key: "nodes.changeNodeName", input: ChangeNodeNameArgs, result: null } |
{ key: "nodes.changeNodeName", input: ChangeNodeNameArgs, result: NodeConfig } |
{ key: "p2p.acceptSpacedrop", input: [string, string | null], result: null } |
{ key: "p2p.spacedrop", input: SpacedropArgs, result: null } |
{ key: "tags.assign", input: LibraryArgs<TagAssignArgs>, result: null } |
@@ -96,10 +96,6 @@ export type FilePathSearchArgs = { take?: number | null; order?: FilePathSearchO

export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; version: string | null; email: string | null; img_url: string | null }

export type MasterPasswordChangeArgs = { password: Protected<string>; algorithm: Algorithm; hashing_algorithm: HashingAlgorithm }

export type Node = { id: number; pub_id: number[]; name: string; platform: number; version: string | null; last_seen: string; timezone: string | null; date_created: string }

/**
* NodeConfig is the configuration for a node. This is shared between all libraries and is stored in a JSON file on disk.
*/

@@ -130,19 +126,11 @@ export type LibraryConfigWrapped = { uuid: string; config: LibraryConfig }
*/
export type Params = "Standard" | "Hardened" | "Paranoid"

/**
* `LocationUpdateArgs` is the argument received from the client using `rspc` to update a location.
* It contains the id of the location to be updated, possibly a name to change the current location's name
* and a vector of indexer rules ids to add or remove from the location.
*
* It is important to note that only the indexer rule ids in this vector will be used from now on.
* Old rules that aren't in this vector will be purged.
*/
export type LocationUpdateArgs = { id: number; name: string | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; indexer_rules_ids: number[] }
export type Location = { id: number; pub_id: number[]; node_id: number; name: string; path: string; total_capacity: number | null; available_capacity: number | null; is_archived: boolean; generate_preview_media: boolean; sync_preview_media: boolean; hidden: boolean; date_created: string }

export type SortOrder = "Asc" | "Desc"

export type MediaData = { id: number; pixel_width: number | null; pixel_height: number | null; longitude: number | null; latitude: number | null; fps: number | null; capture_device_make: string | null; capture_device_model: string | null; capture_device_software: string | null; duration_seconds: number | null; codecs: string | null; streams: number | null }
export type KeyAddArgs = { algorithm: Algorithm; hashing_algorithm: HashingAlgorithm; key: Protected<string>; library_sync: boolean; automount: boolean }

/**
* Represents the operating system which the remote peer is running.

@@ -163,8 +151,6 @@ export type OnboardingConfig = { password: Protected<string>; algorithm: Algorit

export type FileDecryptorJobInit = { location_id: number; path_id: number; mount_associated_key: boolean; output_path: string | null; password: string | null; save_to_library: boolean | null }

export type Statistics = { id: number; date_captured: string; total_object_count: number; library_db_size: string; total_bytes_used: string; total_bytes_capacity: string; total_unique_bytes: string; total_bytes_free: string; preview_media_bytes: string }

export type TagCreateArgs = { name: string; color: string }

export type LightScanArgs = { location_id: number; sub_path: string }

@@ -178,10 +164,20 @@ export type FileEraserJobInit = { location_id: number; path_id: number; passes:
*/
export type Nonce = { XChaCha20Poly1305: number[] } | { Aes256Gcm: number[] }

export type UnlockKeyManagerArgs = { password: Protected<string>; secret_key: Protected<string> }
export type AutomountUpdateArgs = { uuid: string; status: boolean }

export type NodeState = ({ id: string; name: string; p2p_port: number | null; p2p_email: string | null; p2p_img_url: string | null }) & { data_path: string }

/**
* `LocationUpdateArgs` is the argument received from the client using `rspc` to update a location.
* It contains the id of the location to be updated, possibly a name to change the current location's name
* and a vector of indexer rules ids to add or remove from the location.
*
* It is important to note that only the indexer rule ids in this vector will be used from now on.
* Old rules that aren't in this vector will be purged.
*/
export type LocationUpdateArgs = { id: number; name: string | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; indexer_rules_ids: number[] }

export type EditLibraryArgs = { id: string; name: string | null; description: string | null }

export type SetNoteArgs = { id: number; note: string | null }

@@ -204,20 +200,29 @@ export type Salt = number[]
*/
export type Category = "Recents" | "Favorites" | "Photos" | "Videos" | "Movies" | "Music" | "Documents" | "Downloads" | "Encrypted" | "Projects" | "Applications" | "Archives" | "Databases" | "Games" | "Books" | "Contacts" | "Trash"

/**
* TODO: P2P event for the frontend
*/
export type P2PEvent = { type: "DiscoveredPeer"; peer_id: PeerId; metadata: PeerMetadata } | { type: "SpacedropRequest"; id: string; peer_id: PeerId; name: string }

export type FileCopierJobInit = { source_location_id: number; source_path_id: number; target_location_id: number; target_path: string; target_file_name_suffix: string | null }

export type DiskType = "SSD" | "HDD" | "Removable"

export type RestoreBackupArgs = { password: Protected<string>; secret_key: Protected<string>; path: string }

export type SetFavoriteArgs = { id: number; favorite: boolean }

export type FilePathFilterArgs = { locationId?: number | null; search?: string; extension?: string | null; createdAt?: OptionalRange<string>; path?: string | null; object?: ObjectFilterArgs | null }

export type RuleKind = "AcceptFilesByGlob" | "RejectFilesByGlob" | "AcceptIfChildrenDirectoriesArePresent" | "RejectIfChildrenDirectoriesArePresent"

export type Volume = { name: string; mount_point: string; total_capacity: string; available_capacity: string; is_removable: boolean; disk_type: DiskType | null; file_system: string | null; is_root_filesystem: boolean }
export type MediaData = { id: number; pixel_width: number | null; pixel_height: number | null; longitude: number | null; latitude: number | null; fps: number | null; capture_device_make: string | null; capture_device_model: string | null; capture_device_software: string | null; duration_seconds: number | null; codecs: string | null; streams: number | null }

export type FilePathSearchOrdering = { name: SortOrder } | { sizeInBytes: SortOrder } | { dateCreated: SortOrder } | { dateModified: SortOrder } | { dateIndexed: SortOrder } | { object: ObjectSearchOrdering }

export type IndexerRule = { id: number; name: string; default: boolean; rules_per_kind: number[]; date_created: string; date_modified: string }

export type BuildInfo = { version: string; commit: string }

export type IdentifyUniqueFilesArgs = { id: number; path: string }

@@ -227,7 +232,7 @@ export type IdentifyUniqueFilesArgs = { id: number; path: string }
*/
export type Algorithm = "XChaCha20Poly1305" | "Aes256Gcm"

export type Location = { id: number; pub_id: number[]; node_id: number; name: string; path: string; total_capacity: number | null; available_capacity: number | null; is_archived: boolean; generate_preview_media: boolean; sync_preview_media: boolean; hidden: boolean; date_created: string }
export type Tag = { id: number; pub_id: number[]; name: string | null; color: string | null; total_objects: number | null; redundancy_goal: number | null; date_created: string; date_modified: string }

export type OwnedOperationItem = { id: any; data: OwnedOperationData }

@@ -235,21 +240,10 @@ export type ObjectSearchOrdering = { dateAccessed: SortOrder }

export type CRDTOperationType = SharedOperation | RelationOperation | OwnedOperation

/**
* TODO: P2P event for the frontend
*/
export type P2PEvent = { type: "DiscoveredPeer"; peer_id: PeerId; metadata: PeerMetadata } | { type: "SpacedropRequest"; id: string; peer_id: PeerId; name: string }

export type RenameFileArgs = { location_id: number; file_name: string; new_file_name: string }

export type MaybeNot<T> = T | { not: T }

export type SpacedropArgs = { peer_id: PeerId; file_path: string[] }

export type Object = { id: number; pub_id: number[]; kind: number; key_id: number | null; hidden: boolean; favorite: boolean; important: boolean; has_thumbnail: boolean; has_thumbstrip: boolean; has_video_preview: boolean; ipfs_id: string | null; note: string | null; date_created: string; date_accessed: string | null }

export type FilePath = { id: number; pub_id: number[]; is_dir: boolean; cas_id: string | null; integrity_checksum: string | null; location_id: number; materialized_path: string; name: string; extension: string; size_in_bytes: string; inode: number[]; device: number[]; object_id: number | null; key_id: number | null; date_created: string; date_modified: string; date_indexed: string }

export type JobReport = { id: string; name: string; action: string | null; data: number[] | null; metadata: any | null; is_background: boolean; errors_text: string[]; created_at: string | null; started_at: string | null; completed_at: string | null; parent_id: string | null; status: JobStatus; task_count: number; completed_task_count: number; message: string; estimated_completion: string }

export type ObjectFilterArgs = { favorite?: boolean | null; hidden?: boolean | null; dateAccessed?: MaybeNot<string | null> | null; kind?: number[]; tags?: number[] }

@@ -280,12 +274,8 @@ export type IndexerRuleCreateArgs = { name: string; dry_run: boolean; rules: ([R

export type SharedOperationCreateData = { u: { [key: string]: any } } | "a"

export type KeyAddArgs = { algorithm: Algorithm; hashing_algorithm: HashingAlgorithm; key: Protected<string>; library_sync: boolean; automount: boolean }

export type OptionalRange<T> = { from: T | null; to: T | null }

export type IndexerRule = { id: number; name: string; default: boolean; rules_per_kind: number[]; date_created: string; date_modified: string }

export type FileEncryptorJobInit = { location_id: number; path_id: number; key_uuid: string; algorithm: Algorithm; metadata: boolean; preview_media: boolean; output_path: string | null }

/**

@@ -302,22 +292,26 @@ export type ExplorerItem = { type: "Path"; has_thumbnail: boolean; item: FilePat
*/
export type LibraryArgs<T> = { library_id: string; arg: T }

export type FileCutterJobInit = { source_location_id: number; source_path_id: number; target_location_id: number; target_path: string }
export type UnlockKeyManagerArgs = { password: Protected<string>; secret_key: Protected<string> }

export type Tag = { id: number; pub_id: number[]; name: string | null; color: string | null; total_objects: number | null; redundancy_goal: number | null; date_created: string; date_modified: string }
export type FileCutterJobInit = { source_location_id: number; source_path_id: number; target_location_id: number; target_path: string }

export type OwnedOperationData = { Create: { [key: string]: any } } | { CreateMany: { values: ([any, { [key: string]: any }])[]; skip_duplicates: boolean } } | { Update: { [key: string]: any } } | "Delete"

export type SharedOperationData = SharedOperationCreateData | { field: string; value: any } | null

export type Node = { id: number; pub_id: number[]; name: string; platform: number; version: string | null; last_seen: string; timezone: string | null; date_created: string }

export type Volume = { name: string; mount_point: string; total_capacity: string; available_capacity: string; is_removable: boolean; disk_type: DiskType | null; file_system: string | null; is_root_filesystem: boolean }

export type TagUpdateArgs = { id: number; name: string | null; color: string | null }

export type MasterPasswordChangeArgs = { password: Protected<string>; algorithm: Algorithm; hashing_algorithm: HashingAlgorithm }

export type ObjectValidatorArgs = { id: number; path: string }

export type TagAssignArgs = { object_id: number; tag_id: number; unassign: boolean }

export type ChangeNodeNameArgs = { name: string }

/**
* This defines all available password hashing algorithms.
*/

@@ -336,11 +330,17 @@ export type LibraryConfig = { name: string; description: string }

export type SearchData<T> = { cursor: number[] | null; items: T[] }

export type AutomountUpdateArgs = { uuid: string; status: boolean }
export type FilePath = { id: number; pub_id: number[]; is_dir: boolean; cas_id: string | null; integrity_checksum: string | null; location_id: number; materialized_path: string; name: string; extension: string; size_in_bytes: string; inode: number[]; device: number[]; object_id: number | null; key_id: number | null; date_created: string; date_modified: string; date_indexed: string }

export type Statistics = { id: number; date_captured: string; total_object_count: number; library_db_size: string; total_bytes_used: string; total_bytes_capacity: string; total_unique_bytes: string; total_bytes_free: string; preview_media_bytes: string }

export type Protected<T> = T

export type RestoreBackupArgs = { password: Protected<string>; secret_key: Protected<string>; path: string }
export type ChangeNodeNameArgs = { name: string }

export type Object = { id: number; pub_id: number[]; kind: number; key_id: number | null; hidden: boolean; favorite: boolean; important: boolean; has_thumbnail: boolean; has_thumbstrip: boolean; has_video_preview: boolean; ipfs_id: string | null; note: string | null; date_created: string; date_accessed: string | null }

export type RenameFileArgs = { location_id: number; file_name: string; new_file_name: string }

export type RelationOperation = { relation_item: string; relation_group: string; relation: string; data: RelationOperationData }

BIN
pnpm-lock.yaml
generated
Binary file not shown.