Mirror of https://github.com/spacedriveapp/spacedrive.git (synced 2026-04-27 18:08:06 -04:00)

Merge remote-tracking branch 'origin/main' into eng-1828-migration-to-new-cloud-api-system

Changed files:
BIN  Cargo.lock (generated; binary file not shown)
27   Cargo.toml
@@ -13,9 +13,9 @@ members = [
resolver = "2"

[workspace.package]
edition = "2021"
license = "AGPL-3.0-only"
repository = "https://github.com/spacedriveapp/spacedrive"
edition = "2021"
license = "AGPL-3.0-only"
repository = "https://github.com/spacedriveapp/spacedrive"
rust-version = "1.80"

[workspace.dependencies]
@@ -28,14 +28,11 @@ async-stream = "0.3.5"
async-trait = "0.1.80"
axum = "0.6.20" # Update blocked by hyper
base64 = "0.22.1"
base91 = "0.1.0"
blake3 = "1.5.3" # Update blocked by custom patch below
blake3 = "1.5"
chrono = "0.4.38"
directories = "5.0"
ed25519-dalek = "2.1.1"
ed25519-dalek = "2.1"
futures = "0.3.30"
futures-concurrency = "7.6"
gix-ignore = "0.11.2"
globset = "0.4.14"
http = "0.2" # Update blocked by axum
hyper = "0.14" # Update blocked due to API breaking changes
@@ -46,18 +43,16 @@ libc = "0.2"
normpath = "1.2"
once_cell = "1.19"
pin-project-lite = "0.2.14"
rand = "0.8.5"
rand = "0.9.0-alpha.2"
regex = "1.10"
reqwest = "0.11" # Update blocked by hyper
reqwest = { version = "0.11", default-features = false } # Update blocked by hyper
rmp = "0.8.14"
rmp-serde = "1.3.0"
rmp-serde = "1.3"
rmpv = { version = "1.3", features = ["with-serde"] }
rspc = "0.1.4" # Update blocked by custom patch below
serde = "1.0"
serde_json = "1.0"
specta = "=2.0.0-rc.20"
specta-typescript = "=0.0.7"
static_assertions = "1.1"
strum = "0.26"
strum_macros = "0.26"
tempfile = "3.10"
@@ -78,12 +73,6 @@ features = ["migrations", "specta", "sqlite", "sqlite-create-many"]
git = "https://github.com/brendonovich/prisma-client-rust"
rev = "4f9ef9d38c"

[workspace.dependencies.prisma-client-rust-cli]
default-features = false
features = ["migrations", "specta", "sqlite", "sqlite-create-many"]
git = "https://github.com/brendonovich/prisma-client-rust"
rev = "4f9ef9d38c"

[workspace.dependencies.prisma-client-rust-sdk]
default-features = false
features = ["sqlite"]
@@ -7,7 +7,7 @@ license.workspace = true
repository.workspace = true

[target.'cfg(target_os = "macos")'.dependencies]
swift-rs = { version = "1.0.6", features = ["serde"] }
swift-rs = { version = "1.0", features = ["serde"] }

[target.'cfg(target_os = "macos")'.build-dependencies]
swift-rs = { version = "1.0.6", features = ["build"] }
swift-rs = { version = "1.0", features = ["build"] }

@@ -7,9 +7,8 @@ license.workspace = true
repository.workspace = true

[dependencies]
libc = { workspace = true }
normpath = { workspace = true }
thiserror = { workspace = true }
libc = { workspace = true }
normpath = { workspace = true }

[target.'cfg(target_os = "windows")'.dependencies.windows]
features = ["Win32_Foundation", "Win32_System_Com", "Win32_UI_Shell"]
@@ -17,7 +17,6 @@ sd-prisma = { path = "../../../crates/prisma" }

# Workspace dependencies
axum = { workspace = true, features = ["headers", "query"] }
directories = { workspace = true }
futures = { workspace = true }
http = { workspace = true }
hyper = { workspace = true }
@@ -27,7 +26,6 @@ rspc = { workspace = true, features = ["tauri", "tracing"] }
serde = { workspace = true }
serde_json = { workspace = true }
specta = { workspace = true }
specta-typescript = { workspace = true }
strum = { workspace = true, features = ["derive"] }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["sync"] }
@@ -37,14 +35,16 @@ uuid = { workspace = true, features = ["serde"] }
# Specific Desktop dependencies
# WARNING: Do NOT enable default features, as that vendors dbus (see below)
opener = { version = "0.7.1", features = ["reveal"], default-features = false }
tauri = { version = "=2.0.0-rc.2", features = ["linux-libxdo", "macos-private-api", "unstable"] }
specta-typescript = "=0.0.7"
tauri-plugin-dialog = "=2.0.0-rc.0"
tauri-plugin-os = "=2.0.0-rc.0"
tauri-plugin-shell = "=2.0.0-rc.0"
tauri-plugin-updater = "=2.0.0-rc.0"
tauri-plugin-deep-link = "=2.0.0-rc.0"
tauri-runtime = { version = "=2.0.0-rc.2" }
tauri-utils = { version = "=2.0.0-rc.2" }

[dependencies.tauri]
features = ["linux-libxdo", "macos-private-api", "native-tls-vendored", "unstable"]
version = "=2.0.0-rc.2"

[dependencies.tauri-specta]
features = ["derive", "typescript"]
@@ -58,7 +58,7 @@ sd-desktop-linux = { path = "../crates/linux" }
# Specific Desktop dependencies
# WARNING: dbus must NOT be vendored, as that breaks the app on Linux,X11,Nvidia
dbus = { version = "0.9.7", features = ["stdfd"] }
# https://github.com/tauri-apps/tauri/blob/tauri-v2.0.0-rc.2/core/tauri/Cargo.toml
# https://github.com/tauri-apps/tauri/blob/tauri-v2.0.0-rc.2/core/tauri/Cargo.toml#L86
webkit2gtk = { version = "=2.0.1", features = ["v2_38"] }

[target.'cfg(target_os = "macos")'.dependencies]
@@ -17,7 +17,7 @@ use tauri_plugins::{sd_error_plugin, sd_server_plugin};
use tauri_specta::{collect_events, Builder};
use tokio::task::block_in_place;
use tokio::time::sleep;
use tracing::error;
use tracing::{debug, error};

mod file;
mod menu;
@@ -179,7 +179,7 @@ pub enum DragAndDropEvent {
#[derive(Debug, Clone, Serialize, Deserialize, specta::Type, tauri_specta::Event)]
#[serde(rename_all = "camelCase")]
pub struct DeepLinkEvent {
    data: String,
    data: String,
}

#[tokio::main]
@@ -226,15 +226,6 @@ async fn main() -> tauri::Result<()> {
    // We need the app handle to determine the data directory now.
    // This means all the setup code has to be within `setup`, however it doesn't support async so we `block_on`.

    app.listen("deep-link://new-url", move |event| {
        let deep_link_event = DeepLinkEvent {
            data: event.payload().to_string(),
        };
        println!("Deep link event: {:#?}", deep_link_event);

        app.emit("deeplink", deep_link_event).unwrap();
    });

    block_in_place(|| {
        block_on(async move {
            builder.mount_events(app);
@@ -254,6 +245,16 @@ async fn main() -> tauri::Result<()> {
            Err(err) => (None, Err(NodeError::Logger(err))),
        };

        let handle = app.handle().clone();
        app.listen("deep-link://new-url", move |event| {
            let deep_link_event = DeepLinkEvent {
                data: event.payload().to_string(),
            };
            debug!(?deep_link_event, "Deep link event;",);

            handle.emit("deeplink", deep_link_event).unwrap();
        });

        let handle = app.handle();
        let (node, router) = match result {
            Ok(r) => r,
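For context on the relocated deep-link handler above: the listener is now registered after logging is set up, and it captures a cloned `AppHandle` so the `'static` closure can re-emit the payload to the frontend via `debug!` and `emit` instead of `println!`. A minimal sketch of that pattern, assuming Tauri 2's `Listener`/`Emitter` traits and `App::handle()`; the function name is illustrative, not part of Spacedrive:

```rust
use serde::{Deserialize, Serialize};
use tauri::{App, Emitter, Listener};

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct DeepLinkEvent {
    data: String,
}

// Registers the deep-link listener once the app handle is available and
// re-emits the payload to the webview under the "deeplink" event name.
fn register_deep_link_forwarder(app: &App) {
    // Clone the handle so the 'static closure can emit without borrowing `app`.
    let handle = app.handle().clone();
    app.listen("deep-link://new-url", move |event| {
        let deep_link_event = DeepLinkEvent {
            data: event.payload().to_string(),
        };
        // Forward the event to the frontend; log instead of panicking on failure.
        if let Err(e) = handle.emit("deeplink", deep_link_event) {
            eprintln!("failed to forward deep link: {e}");
        }
    });
}
```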
@@ -15,7 +15,7 @@ use axum::{
};
use http::Method;
use hyper::server::{accept::Accept, conn::AddrIncoming};
use rand::{distributions::Alphanumeric, Rng};
use rand::{distr::Alphanumeric, Rng};
use sd_core::{custom_uri, Node, NodeError};
use serde::Deserialize;
use tauri::{async_runtime::block_on, plugin::TauriPlugin, RunEvent, Runtime};
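The import change above follows the workspace bump from rand 0.8.5 to 0.9.0-alpha.2, where the `distributions` module was renamed to `distr`. A small sketch of equivalent usage under the new module name, assuming `Rng::sample_iter` keeps its 0.8 shape in the 0.9 line; the helper function is hypothetical:

```rust
use rand::{distr::Alphanumeric, Rng};

// Generates a random alphanumeric token of `len` characters from any RNG.
// With rand 0.8 the only difference is the import path:
// `rand::distributions::Alphanumeric`.
fn random_token<R: Rng>(rng: &mut R, len: usize) -> String {
    rng.sample_iter(Alphanumeric)
        .take(len)
        .map(char::from)
        .collect()
}
```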
@@ -11,7 +11,7 @@ rust-version = "1.64"
|
||||
# Android can use dynamic linking since all FFI is done via JNI
|
||||
crate-type = ["cdylib"]
|
||||
|
||||
[dependencies]
|
||||
[target.'cfg(target_os = "android")'.dependencies]
|
||||
# Spacedrive Sub-crates
|
||||
sd-mobile-core = { path = "../../core" }
|
||||
|
||||
|
||||
@@ -13,6 +13,6 @@ rust-version = "1.64"
|
||||
# which are only available when linking against the app's ObjC
|
||||
crate-type = ["staticlib"]
|
||||
|
||||
[dependencies]
|
||||
[target.'cfg(target_os = "ios")'.dependencies]
|
||||
# Spacedrive Sub-crates
|
||||
sd-mobile-core = { path = "../../core" }
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
[package]
|
||||
name = "sd-p2p-relay"
|
||||
version = "0.0.1"
|
||||
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
publish = false
|
||||
repository.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Workspace dependencies
|
||||
libp2p = { version = "0.53.2", features = [
|
||||
"autonat",
|
||||
"macros",
|
||||
"quic",
|
||||
"relay",
|
||||
"tokio"
|
||||
] } # Update blocked due to custom patch
|
||||
reqwest = { workspace = true, features = ["json", "native-tls-vendored"] }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true, features = ["env-filter"] }
|
||||
uuid = { workspace = true, features = ["serde", "v4"] }
|
||||
|
||||
# Specific P2P Relay dependencies
|
||||
hex = "0.4.3"
|
||||
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
# A temporary script to deploy the p2p-relay to the server for testing
|
||||
|
||||
set -e
|
||||
|
||||
SERVER=""
|
||||
TARGET_DIR=$(cargo metadata | jq -r .target_directory)
|
||||
cargo zigbuild --target aarch64-unknown-linux-musl --release
|
||||
|
||||
scp "$TARGET_DIR/aarch64-unknown-linux-musl/release/sd-p2p-relay" ec2-user@$SERVER:/home/ec2-user/sd-p2p-relay
|
||||
|
||||
# ssh ec2-user@$SERVER
|
||||
# ./sd-p2p-relay init
|
||||
# Enter the `P2P_SECRET` secret env var from Vercel
|
||||
# ./sd-p2p-relay
|
||||
@@ -1,75 +0,0 @@
|
||||
use std::{borrow::Cow, path::Path};
|
||||
|
||||
use libp2p::identity::Keypair;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Config {
|
||||
// Unique ID of this relay server.
|
||||
pub id: Uuid,
|
||||
// URL of the cloud API.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
api_url: Option<String>,
|
||||
// Secret used for authenticating with cloud backend.
|
||||
pub p2p_secret: String,
|
||||
// Port to listen on.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
port: Option<u16>,
|
||||
// Private/public keypair to use for the relay.
|
||||
#[serde(with = "keypair")]
|
||||
pub keypair: Keypair,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn init(
|
||||
path: impl AsRef<Path>,
|
||||
p2p_secret: String,
|
||||
) -> Result<Self, Box<dyn std::error::Error>> {
|
||||
let config = Self {
|
||||
id: Uuid::new_v4(),
|
||||
api_url: None,
|
||||
p2p_secret,
|
||||
port: None,
|
||||
keypair: Keypair::generate_ed25519(),
|
||||
};
|
||||
std::fs::write(path, serde_json::to_string_pretty(&config)?)?;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn load(path: impl AsRef<Path>) -> Result<Self, Box<dyn std::error::Error>> {
|
||||
let config = std::fs::read_to_string(path)?;
|
||||
Ok(serde_json::from_str(&config)?)
|
||||
}
|
||||
|
||||
pub fn api_url(&self) -> Cow<'_, str> {
|
||||
match self.api_url {
|
||||
Some(ref url) => Cow::Borrowed(url),
|
||||
None => Cow::Borrowed("https://api.spacedrive.com"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn port(&self) -> u16 {
|
||||
self.port.unwrap_or(7373) // TODO: Should we use HTTPS port to avoid strict internet filters???
|
||||
}
|
||||
}
|
||||
|
||||
mod keypair {
|
||||
use libp2p::identity::Keypair;
|
||||
use serde::{de::Error, Deserialize, Deserializer, Serializer};
|
||||
|
||||
pub fn deserialize<'de, D>(deserializer: D) -> Result<Keypair, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let s = String::deserialize(deserializer)?;
|
||||
let bytes = hex::decode(s).map_err(D::Error::custom)?;
|
||||
Keypair::from_protobuf_encoding(bytes.as_slice()).map_err(D::Error::custom)
|
||||
}
|
||||
|
||||
pub fn serialize<S: Serializer>(v: &Keypair, serializer: S) -> Result<S::Ok, S::Error> {
|
||||
serializer.serialize_str(&hex::encode(
|
||||
v.to_protobuf_encoding().expect("invalid keypair type"),
|
||||
))
|
||||
}
|
||||
}
|
||||
@@ -1,217 +0,0 @@
|
||||
use std::{
|
||||
io::{stdin, stdout, Write},
|
||||
net::{Ipv4Addr, Ipv6Addr, SocketAddr},
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
use libp2p::{
|
||||
autonat,
|
||||
futures::StreamExt,
|
||||
relay,
|
||||
swarm::{NetworkBehaviour, SwarmEvent},
|
||||
};
|
||||
use reqwest::header::{self, HeaderMap, HeaderValue};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::{error, info, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::utils::socketaddr_to_quic_multiaddr;
|
||||
|
||||
mod config;
|
||||
mod utils;
|
||||
|
||||
// TODO: Authentication with the Spacedrive Cloud
|
||||
// TODO: Rate-limit data usage by Spacedrive account.
|
||||
// TODO: Expose libp2p metrics like - https://github.com/mxinden/rust-libp2p-server/blob/master/src/behaviour.rs
|
||||
|
||||
#[derive(NetworkBehaviour)]
|
||||
pub struct Behaviour {
|
||||
relay: relay::Behaviour,
|
||||
autonat: autonat::Behaviour,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct RelayServerEntry {
|
||||
id: Uuid,
|
||||
// TODO: Try and drop this field cause it's libp2p specific
|
||||
peer_id: String,
|
||||
addrs: Vec<SocketAddr>,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
tracing_subscriber::fmt()
|
||||
// .with_env_filter(EnvFilter::from_default_env()) // TODO: ???
|
||||
.init();
|
||||
|
||||
let config_path =
|
||||
PathBuf::from(std::env::var("CONFIG_PATH").unwrap_or("./config.json".to_string()));
|
||||
|
||||
let mut args = std::env::args();
|
||||
args.next(); // Skip binary name
|
||||
if args.next().as_deref() == Some("init") {
|
||||
println!("Initializing config at '{config_path:?}'...");
|
||||
|
||||
if config_path.exists() {
|
||||
panic!("Config already exists at path '{config_path:?}'. Please delete it first!");
|
||||
// TODO: Error handling
|
||||
}
|
||||
|
||||
print!("Please enter the p2p secret: ");
|
||||
let mut p2p_secret = String::new();
|
||||
let _ = stdout().flush();
|
||||
stdin()
|
||||
.read_line(&mut p2p_secret)
|
||||
.expect("Did not enter a correct string");
|
||||
|
||||
config::Config::init(&config_path, p2p_secret.replace('\n', "")).unwrap(); // TODO: Error handling
|
||||
println!("\nSuccessfully initialized config at '{config_path:?}'!");
|
||||
return;
|
||||
}
|
||||
|
||||
if !config_path.exists() {
|
||||
panic!("Unable to find config at path '{config_path:?}'. Please create it!"); // TODO: Error handling
|
||||
}
|
||||
let config = config::Config::load(&config_path).unwrap(); // TODO: Error handling
|
||||
|
||||
info!("Starting...");
|
||||
|
||||
let public_ipv4: Ipv4Addr = reqwest::get("https://api.ipify.org")
|
||||
.await
|
||||
.unwrap() // TODO: Error handling
|
||||
.text()
|
||||
.await
|
||||
.unwrap() // TODO: Error handling
|
||||
.parse()
|
||||
.unwrap(); // TODO: Error handling
|
||||
|
||||
let public_ipv6: Option<Ipv6Addr> = match reqwest::get("https://api6.ipify.org").await {
|
||||
Ok(v) => Some(
|
||||
v.text()
|
||||
.await
|
||||
.unwrap() // TODO: Error handling
|
||||
.parse()
|
||||
.unwrap(), // TODO: Error handling
|
||||
),
|
||||
Err(_) => {
|
||||
warn!("Error getting public IPv6 address. Skipping IPv6 configuration.");
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
info!("Determined public addresses of the current relay to be: '{public_ipv4}' and '{public_ipv6:?}'");
|
||||
|
||||
let (first_advertisement_tx, mut first_advertisement_rx) = tokio::sync::mpsc::channel(1);
|
||||
tokio::spawn({
|
||||
let config = config.clone();
|
||||
async move {
|
||||
let client = reqwest::Client::new();
|
||||
|
||||
let mut first_advertisement_tx = Some(first_advertisement_tx);
|
||||
loop {
|
||||
let result = client
|
||||
.post(format!("{}/api/p2p/relays", config.api_url()))
|
||||
.headers({
|
||||
let mut map = HeaderMap::new();
|
||||
map.insert(
|
||||
header::AUTHORIZATION,
|
||||
HeaderValue::from_str(&format!("Bearer {}", config.p2p_secret))
|
||||
.unwrap(),
|
||||
);
|
||||
map
|
||||
})
|
||||
.json(&RelayServerEntry {
|
||||
id: config.id,
|
||||
peer_id: config.keypair.public().to_peer_id().to_base58(),
|
||||
addrs: {
|
||||
let mut ips: Vec<SocketAddr> =
|
||||
vec![SocketAddr::from((public_ipv4, config.port()))];
|
||||
if let Some(ip) = public_ipv6 {
|
||||
ips.push(SocketAddr::from((ip, config.port())));
|
||||
}
|
||||
ips
|
||||
},
|
||||
})
|
||||
.send()
|
||||
.await;
|
||||
|
||||
let mut is_ok = result.is_ok();
|
||||
match result {
|
||||
Ok(result) => {
|
||||
if result.status() != 200 {
|
||||
error!(
|
||||
"Failed to register relay server with cloud status {}: {:?}",
|
||||
result.status(),
|
||||
result.text().await
|
||||
);
|
||||
is_ok = false;
|
||||
} else {
|
||||
info!(
|
||||
"Successfully registered '{}' as relay server with cloud",
|
||||
config.id
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(e) => error!("Failed to register relay server with cloud: {e}"),
|
||||
}
|
||||
|
||||
if let Some(tx) = first_advertisement_tx.take() {
|
||||
tx.send(is_ok).await.ok();
|
||||
}
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_secs(9 * 60)).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if !first_advertisement_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("Advertisement task died during startup!")
|
||||
{
|
||||
panic!(
|
||||
"Failed to register relay server with cloud. Please check your config and try again."
|
||||
); // TODO: Error handling
|
||||
}
|
||||
|
||||
// TODO: Setup logging to filesystem with auto-rotation
|
||||
|
||||
let peer_id = config.keypair.public().to_peer_id();
|
||||
|
||||
let mut swarm = libp2p::SwarmBuilder::with_existing_identity(config.keypair.clone())
|
||||
.with_tokio()
|
||||
.with_quic()
|
||||
.with_behaviour(|key| Behaviour {
|
||||
relay: relay::Behaviour::new(key.public().to_peer_id(), Default::default()), // TODO: Proper config
|
||||
autonat: autonat::Behaviour::new(key.public().to_peer_id(), Default::default()), // TODO: Proper config
|
||||
})
|
||||
.unwrap() // TODO: Error handling
|
||||
.build();
|
||||
|
||||
swarm
|
||||
.listen_on(socketaddr_to_quic_multiaddr(&SocketAddr::from((
|
||||
Ipv6Addr::UNSPECIFIED,
|
||||
config.port(),
|
||||
))))
|
||||
.unwrap(); // TODO: Error handling
|
||||
swarm
|
||||
.listen_on(socketaddr_to_quic_multiaddr(&SocketAddr::from((
|
||||
Ipv4Addr::UNSPECIFIED,
|
||||
config.port(),
|
||||
))))
|
||||
.unwrap(); // TODO: Error handling
|
||||
|
||||
info!("Started Relay as PeerId '{peer_id}'");
|
||||
|
||||
loop {
|
||||
match swarm.next().await.expect("Infinite Stream.") {
|
||||
// SwarmEvent::Behaviour(event) => {
|
||||
// println!("{event:?}")
|
||||
// }
|
||||
SwarmEvent::NewListenAddr { address, .. } => {
|
||||
info!("Listening on {address:?}");
|
||||
}
|
||||
event => println!("{event:?}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
//! This file contains some fairly meaningless glue code for integrating with libp2p.

use std::net::SocketAddr;

use libp2p::{multiaddr::Protocol, Multiaddr};

#[must_use]
pub(crate) fn socketaddr_to_quic_multiaddr(m: &SocketAddr) -> Multiaddr {
    let mut addr = Multiaddr::empty();
    match m {
        SocketAddr::V4(ip) => addr.push(Protocol::Ip4(*ip.ip())),
        SocketAddr::V6(ip) => addr.push(Protocol::Ip6(*ip.ip())),
    }
    addr.push(Protocol::Udp(m.port()));
    addr.push(Protocol::QuicV1);
    addr
}
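The removed helper maps a plain `SocketAddr` to the `/ip4|ip6/…/udp/…/quic-v1` multiaddr form that libp2p's QUIC transport expects from `Swarm::listen_on`. A self-contained usage sketch, assuming libp2p 0.53's `Multiaddr` text rendering:

```rust
use std::net::{Ipv4Addr, SocketAddr};

use libp2p::{multiaddr::Protocol, Multiaddr};

// Same shape as the removed helper: IP protocol component, then UDP port,
// then the QUIC v1 marker.
fn socketaddr_to_quic_multiaddr(m: &SocketAddr) -> Multiaddr {
    let mut addr = Multiaddr::empty();
    match m {
        SocketAddr::V4(ip) => addr.push(Protocol::Ip4(*ip.ip())),
        SocketAddr::V6(ip) => addr.push(Protocol::Ip6(*ip.ip())),
    }
    addr.push(Protocol::Udp(m.port()));
    addr.push(Protocol::QuicV1);
    addr
}

fn main() {
    let addr = SocketAddr::from((Ipv4Addr::new(1, 2, 3, 4), 7373));
    // "1.2.3.4:7373" becomes "/ip4/1.2.3.4/udp/7373/quic-v1".
    println!("{}", socketaddr_to_quic_multiaddr(&addr));
}
```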
@@ -17,7 +17,6 @@ sd-core = { path = "../../core", features = ["ffmpeg", "heif"] }

# Workspace dependencies
axum = { workspace = true, features = ["headers"] }
base64 = { workspace = true }
http = { workspace = true }
rspc = { workspace = true, features = ["axum"] }
tempfile = { workspace = true }
@@ -1,12 +1,13 @@
|
||||
[package]
|
||||
authors = ["Spacedrive Technology Inc <support@spacedrive.com>"]
|
||||
description = "Virtual distributed filesystem engine that powers Spacedrive."
|
||||
edition = { workspace = true }
|
||||
license = { workspace = true }
|
||||
name = "sd-core"
|
||||
repository = { workspace = true }
|
||||
rust-version = "1.78"
|
||||
version = "0.4.1"
|
||||
name = "sd-core"
|
||||
version = "0.4.1"
|
||||
|
||||
authors = ["Spacedrive Technology Inc <support@spacedrive.com>"]
|
||||
description = "Virtual distributed filesystem engine that powers Spacedrive."
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version = "1.78"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
@@ -14,12 +15,11 @@ default = []
|
||||
mobile = []
|
||||
# This feature controls whether the Spacedrive Core contains functionality which requires FFmpeg.
|
||||
ai = ["dep:sd-ai"]
|
||||
ffmpeg = ["dep:sd-ffmpeg", "sd-core-heavy-lifting/ffmpeg", "sd-media-metadata/ffmpeg"]
|
||||
ffmpeg = ["sd-core-heavy-lifting/ffmpeg", "sd-media-metadata/ffmpeg"]
|
||||
heif = ["sd-images/heif"]
|
||||
|
||||
[dependencies]
|
||||
# Inner Core Sub-crates
|
||||
sd-cloud-schema = { workspace = true }
|
||||
sd-core-cloud-services = { path = "./crates/cloud-services" }
|
||||
sd-core-file-path-helper = { path = "./crates/file-path-helper" }
|
||||
sd-core-heavy-lifting = { path = "./crates/heavy-lifting" }
|
||||
@@ -31,7 +31,6 @@ sd-core-sync = { path = "./crates/sync" }
|
||||
sd-actors = { path = "../crates/actors" }
|
||||
sd-ai = { path = "../crates/ai", optional = true }
|
||||
sd-cloud-api = { path = "../crates/cloud-api" }
|
||||
sd-ffmpeg = { path = "../crates/ffmpeg", optional = true }
|
||||
sd-file-ext = { path = "../crates/file-ext" }
|
||||
sd-images = { path = "../crates/images", features = ["rspc", "serde", "specta"] }
|
||||
sd-media-metadata = { path = "../crates/media-metadata" }
|
||||
@@ -50,13 +49,10 @@ async-stream = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
axum = { workspace = true, features = ["ws"] }
|
||||
base64 = { workspace = true }
|
||||
base91 = { workspace = true }
|
||||
blake3 = { workspace = true }
|
||||
chrono = { workspace = true, features = ["serde"] }
|
||||
directories = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
futures-concurrency = { workspace = true }
|
||||
gix-ignore = { workspace = true }
|
||||
hyper = { workspace = true, features = ["client", "http1", "server"] }
|
||||
image = { workspace = true }
|
||||
itertools = { workspace = true }
|
||||
@@ -67,14 +63,13 @@ pin-project-lite = { workspace = true }
|
||||
prisma-client-rust = { workspace = true, features = ["rspc"] }
|
||||
regex = { workspace = true }
|
||||
reqwest = { workspace = true, features = ["json", "native-tls-vendored"] }
|
||||
rmp = { workspace = true }
|
||||
rmp-serde = { workspace = true }
|
||||
rmpv = { workspace = true }
|
||||
rspc = { workspace = true, features = ["alpha", "axum", "chrono", "tracing", "unstable", "uuid"] }
|
||||
sd-cloud-schema = { workspace = true }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
specta = { workspace = true }
|
||||
static_assertions = { workspace = true }
|
||||
strum = { workspace = true, features = ["derive"] }
|
||||
strum_macros = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
@@ -83,13 +78,14 @@ tokio-stream = { workspace = true, features = ["fs"] }
|
||||
tokio-util = { workspace = true, features = ["io"] }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true, features = ["env-filter"] }
|
||||
uuid = { workspace = true, features = ["serde", "std", "v4", "v7"] }
|
||||
webp = { workspace = true }
|
||||
|
||||
uuid = { workspace = true, features = ["serde", "v4", "v7"] }
|
||||
|
||||
# Specific Core dependencies
|
||||
async-recursion = "1.1"
|
||||
base91 = "0.1.0"
|
||||
bytes = "1.6"
|
||||
ctor = "0.2.8"
|
||||
directories = "5.0"
|
||||
flate2 = "1.0"
|
||||
hostname = "0.4.0"
|
||||
http-body = "0.4.6" # Update blocked by http
|
||||
@@ -126,13 +122,13 @@ version = "0.9.103"
|
||||
# Platform-specific dependencies
|
||||
[target.'cfg(target_os = "macos")'.dependencies]
|
||||
plist = "1.6"
|
||||
trash = "5.1.0"
|
||||
trash = "5.1"
|
||||
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
trash = "5.1.0"
|
||||
trash = "5.1"
|
||||
|
||||
[target.'cfg(target_os = "windows")'.dependencies]
|
||||
trash = "5.1.0"
|
||||
trash = "5.1"
|
||||
|
||||
[target.'cfg(target_os = "ios")'.dependencies]
|
||||
icrate = { version = "0.1.2", features = [
|
||||
@@ -147,7 +143,6 @@ tracing-android = "0.2.0"
|
||||
|
||||
[dev-dependencies]
|
||||
# Workspace dependencies
|
||||
globset = { workspace = true }
|
||||
tracing-test = { workspace = true }
|
||||
|
||||
# Specific Core dependencies
|
||||
|
||||
@@ -27,5 +27,7 @@ thiserror = { workspace = true }
tokio = { workspace = true, features = ["fs"] }
tracing = { workspace = true }

[target.'cfg(windows)'.dependencies]
winapi-util = "0.1.8"
# Specific File Path Helper dependencies
[target.'cfg(target_os = "windows")'.dependencies.windows]
features = ["Win32_Security", "Win32_Storage_FileSystem"]
version = "0.58"

@@ -102,6 +102,43 @@ pub fn path_is_hidden(path: impl AsRef<Path>, metadata: &Metadata) -> bool {
    false
}

#[cfg(target_family = "windows")]
fn get_inode_windows(path: &str) -> Result<u64, std::io::Error> {
    use std::{ffi::OsStr, os::windows::ffi::OsStrExt, ptr::null_mut};
    use windows::{
        Win32::Foundation::HANDLE,
        Win32::Storage::FileSystem::{
            CreateFileW, GetFileInformationByHandle, BY_HANDLE_FILE_INFORMATION,
            FILE_ATTRIBUTE_NORMAL, FILE_FLAG_BACKUP_SEMANTICS, FILE_SHARE_READ, OPEN_EXISTING,
        },
    };

    let handle = unsafe {
        CreateFileW(
            PCWSTR::from(&HSTRING::from(path)),
            0,
            FILE_SHARE_READ,
            null_mut(),
            OPEN_EXISTING,
            FILE_ATTRIBUTE_NORMAL | FILE_FLAG_BACKUP_SEMANTICS,
            HANDLE(0),
        )
    };

    if handle.is_invalid() {
        return Err(std::io::Error::last_os_error());
    }

    let mut file_info = BY_HANDLE_FILE_INFORMATION::default();
    let result = unsafe { GetFileInformationByHandle(handle, &mut file_info) };

    if result.as_bool() {
        Ok(file_info.nFileIndexLow as u64 | ((file_info.nFileIndexHigh as u64) << 32))
    } else {
        Err(std::io::Error::last_os_error())
    }
}

impl FilePathMetadata {
    pub fn from_path(path: impl AsRef<Path>, metadata: &Metadata) -> Result<Self, FilePathError> {
        let path = path.as_ref();
@@ -113,15 +150,9 @@ impl FilePathMetadata {

        #[cfg(target_family = "windows")]
        {
            use winapi_util::{file::information, Handle};

            let info = tokio::task::block_in_place(|| {
                Handle::from_path_any(path)
                    .and_then(|ref handle| information(handle))
                    .map_err(|e| FileIOError::from((path, e)))
            tokio::task::block_in_place(|| {
                get_inode_windows(path).map_err(|e| FileIOError::from((path, e)))
            })?;

            info.file_index()
        }
    };
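The new `get_inode_windows` helper above replaces `winapi-util`'s `file_index()` with a direct `GetFileInformationByHandle` call and reassembles the 64-bit, volume-relative file index from the two 32-bit halves that `BY_HANDLE_FILE_INFORMATION` exposes. A quick, dependency-free check of that bit arithmetic; the helper name is illustrative only:

```rust
// Combine the nFileIndexHigh/nFileIndexLow pair the same way the diff does:
// low occupies the lower 32 bits, high the upper 32 bits.
fn combine_file_index(high: u32, low: u32) -> u64 {
    (low as u64) | ((high as u64) << 32)
}

fn main() {
    assert_eq!(combine_file_index(0, 42), 42);
    assert_eq!(combine_file_index(1, 0), 1 << 32);
    assert_eq!(
        combine_file_index(0xDEAD_BEEF, 0x0123_4567),
        0xDEAD_BEEF_0123_4567
    );
}
```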
@@ -48,7 +48,6 @@ rspc = { workspace = true }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
specta = { workspace = true }
|
||||
static_assertions = { workspace = true }
|
||||
strum = { workspace = true, features = ["derive", "phf"] }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = ["fs", "parking_lot", "sync"] }
|
||||
@@ -57,6 +56,9 @@ tracing = { workspace = true }
|
||||
uuid = { workspace = true, features = ["serde", "v4"] }
|
||||
webp = { workspace = true }
|
||||
|
||||
# Specific Heavy Lifting dependencies
|
||||
static_assertions = "1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = { workspace = true }
|
||||
tracing-test = { workspace = true }
|
||||
|
||||
@@ -15,7 +15,6 @@ sd-utils = { path = "../../../crates/utils" }
|
||||
# Workspace dependencies
|
||||
chrono = { workspace = true }
|
||||
futures-concurrency = { workspace = true }
|
||||
gix-ignore = { workspace = true, features = ["serde"] }
|
||||
globset = { workspace = true, features = ["serde1"] }
|
||||
once_cell = { workspace = true }
|
||||
prisma-client-rust = { workspace = true }
|
||||
@@ -28,5 +27,8 @@ tokio = { workspace = true, features = ["fs"] }
|
||||
tracing = { workspace = true }
|
||||
uuid = { workspace = true, features = ["serde", "v4"] }
|
||||
|
||||
# Specific Indexer Rules dependencies
|
||||
gix-ignore = { version = "0.11.2", features = ["serde"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = { workspace = true }
|
||||
|
||||
@@ -23,7 +23,6 @@ rmp-serde = { workspace = true }
|
||||
rmpv = { workspace = true }
|
||||
rspc = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
// TODO: Ensure this file has normalised caching setup before reenabling
|
||||
|
||||
// use crate::library::Category;
|
||||
|
||||
// use std::{collections::BTreeMap, str::FromStr};
|
||||
|
||||
// use rspc::{alpha::AlphaRouter, ErrorCode};
|
||||
// use strum::VariantNames;
|
||||
|
||||
// use super::{utils::library, Ctx, R};
|
||||
|
||||
// pub(crate) fn mount() -> AlphaRouter<Ctx> {
|
||||
// R.router().procedure("list", {
|
||||
// R.with2(library()).query(|(_, library), _: ()| async move {
|
||||
// let (categories, queries): (Vec<_>, Vec<_>) = Category::VARIANTS
|
||||
// .iter()
|
||||
// .map(|category| {
|
||||
// let category = Category::from_str(category)
|
||||
// .expect("it's alright this category string exists");
|
||||
// (
|
||||
// category,
|
||||
// library.db.object().count(vec![category.to_where_param()]),
|
||||
// )
|
||||
// })
|
||||
// .unzip();
|
||||
|
||||
// Ok(categories
|
||||
// .into_iter()
|
||||
// .zip(
|
||||
// library
|
||||
// .db
|
||||
// ._batch(queries)
|
||||
// .await?
|
||||
// .into_iter()
|
||||
// // TODO(@Oscar): rspc bigint support
|
||||
// .map(|count| {
|
||||
// i32::try_from(count).map_err(|_| {
|
||||
// rspc::Error::new(
|
||||
// ErrorCode::InternalServerError,
|
||||
// "category item count overflowed 'i32'!".into(),
|
||||
// )
|
||||
// })
|
||||
// })
|
||||
// .collect::<Result<Vec<_>, _>>()?,
|
||||
// )
|
||||
// .collect::<BTreeMap<_, _>>())
|
||||
// })
|
||||
// })
|
||||
// }
|
||||
@@ -1,506 +0,0 @@
|
||||
// TODO: Ensure this file has normalised caching setup before reenabling
|
||||
|
||||
// use keyring::Entry;
|
||||
// use rspc::alpha::AlphaRouter;
|
||||
|
||||
// use serde_json::Value;
|
||||
// use tracing::{debug, error};
|
||||
// use sd_crypto::keys::keymanager::{StoredKey, StoredKeyType};
|
||||
// use sd_crypto::primitives::SECRET_KEY_IDENTIFIER;
|
||||
// use sd_crypto::types::{Algorithm, HashingAlgorithm, OnboardingConfig, SecretKeyString};
|
||||
// use sd_crypto::{Error, Protected};
|
||||
// use serde::Deserialize;
|
||||
// use specta::Type;
|
||||
// use std::{path::PathBuf, str::FromStr};
|
||||
// use tokio::fs::File;
|
||||
// use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
// use uuid::Uuid;
|
||||
|
||||
// use crate::util::db::write_storedkey_to_db;
|
||||
// use crate::{invalidate_query, prisma::key};
|
||||
|
||||
// use super::utils::library;
|
||||
// use super::{Ctx, R};
|
||||
|
||||
// #[derive(Type, Deserialize)]
|
||||
// pub struct KeyAddArgs {
|
||||
// algorithm: Algorithm,
|
||||
// hashing_algorithm: HashingAlgorithm,
|
||||
// key: Protected<String>,
|
||||
// library_sync: bool,
|
||||
// automount: bool,
|
||||
// }
|
||||
|
||||
// #[derive(Type, Deserialize)]
|
||||
// pub struct UnlockKeyManagerArgs {
|
||||
// password: Protected<String>,
|
||||
// secret_key: Protected<String>,
|
||||
// }
|
||||
|
||||
// #[derive(Type, Deserialize)]
|
||||
// pub struct RestoreBackupArgs {
|
||||
// password: Protected<String>,
|
||||
// secret_key: Protected<String>,
|
||||
// path: PathBuf,
|
||||
// }
|
||||
|
||||
// #[derive(Type, Deserialize)]
|
||||
// pub struct MasterPasswordChangeArgs {
|
||||
// password: Protected<String>,
|
||||
// algorithm: Algorithm,
|
||||
// hashing_algorithm: HashingAlgorithm,
|
||||
// }
|
||||
|
||||
// #[derive(Type, Deserialize)]
|
||||
// pub struct AutomountUpdateArgs {
|
||||
// uuid: Uuid,
|
||||
// status: bool,
|
||||
// }
|
||||
|
||||
// pub(crate) fn mount() -> AlphaRouter<Ctx> {
|
||||
// R.router()
|
||||
// .procedure("set", {
|
||||
// R.mutation(|_, key: String| async move {
|
||||
// let username = whoami::username();
|
||||
// let entry = match Entry::new("spacedrive-auth-service", username.as_str()) {
|
||||
// Ok(entry) => entry,
|
||||
// Err(e) => {
|
||||
// error!("Error creating entry: {}", e);
|
||||
// return Err(rspc::Error::new(
|
||||
// rspc::ErrorCode::InternalServerError,
|
||||
// "Error creating entry".to_string(),
|
||||
// ));
|
||||
// }
|
||||
// };
|
||||
|
||||
// match entry.set_password(key.as_str()) {
|
||||
// Ok(_) => (),
|
||||
// Err(e) => {
|
||||
// error!("Error setting key: {}", e);
|
||||
// return Err(rspc::Error::new(
|
||||
// rspc::ErrorCode::InternalServerError,
|
||||
// "Error setting key".to_string(),
|
||||
// ));
|
||||
// }
|
||||
// }
|
||||
|
||||
// debug!(
|
||||
// "Key set successfully: key={key}, service={service}",
|
||||
// key = key,
|
||||
// service = "spacedrive-auth-service",
|
||||
// );
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("get", {
|
||||
// R.query(|_, _: ()| async move {
|
||||
// let username = whoami::username();
|
||||
// let entry = match Entry::new("spacedrive-auth-service", username.as_str()) {
|
||||
// Ok(entry) => entry,
|
||||
// Err(e) => {
|
||||
// error!("Error creating entry: {}", e);
|
||||
// return Err(rspc::Error::new(
|
||||
// rspc::ErrorCode::InternalServerError,
|
||||
// "Error creating entry".to_string(),
|
||||
// ));
|
||||
// }
|
||||
// };
|
||||
|
||||
// let data = match entry.get_password() {
|
||||
// Ok(key) => key,
|
||||
// Err(e) => {
|
||||
// error!("Error retrieving key: {}. Does the key exist yet?", e);
|
||||
// return Ok("".to_string());
|
||||
// }
|
||||
// };
|
||||
|
||||
// debug!(
|
||||
// "Key retrieved successfully: service={service}, data={_data}",
|
||||
// _data = data,
|
||||
// service = "spacedrive-auth-service",
|
||||
// );
|
||||
|
||||
// Ok(data)
|
||||
// })
|
||||
// })
|
||||
// }
|
||||
|
||||
//NOTE(@Rocky43007): OLD PROCEDURES -> MAY BE USEFUL FOR REFERENCE AND COULD BE USED IN THE FUTURE
|
||||
// .procedure("list", {
|
||||
// R.with2(library())
|
||||
// .query(|(_, library), _: ()| async move { Ok(library.key_manager.dump_keystore()) })
|
||||
// })
|
||||
// // do not unlock the key manager until this route returns true
|
||||
// .procedure("isUnlocked", {
|
||||
// R.with2(library()).query(|(_, library), _: ()| async move {
|
||||
// Ok(library.key_manager.is_unlocked().await)
|
||||
// })
|
||||
// })
|
||||
// .procedure("isSetup", {
|
||||
// R.with2(library()).query(|(_, library), _: ()| async move {
|
||||
// Ok(!library.db.key().find_many(vec![]).exec().await?.is_empty())
|
||||
// })
|
||||
// })
|
||||
// .procedure("setup", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), config: OnboardingConfig| async move {
|
||||
// let root_key = library.key_manager.onboarding(config, library.id).await?;
|
||||
// write_storedkey_to_db(&library.db, &root_key).await?;
|
||||
// library
|
||||
// .key_manager
|
||||
// .populate_keystore(vec![root_key])
|
||||
// .await?;
|
||||
|
||||
// invalidate_query!(library, "keys.isSetup");
|
||||
// invalidate_query!(library, "keys.isUnlocked");
|
||||
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// // this is so we can show the key as mounted in the UI
|
||||
// .procedure("listMounted", {
|
||||
// R.with2(library()).query(|(_, library), _: ()| async move {
|
||||
// Ok(library.key_manager.get_mounted_uuids())
|
||||
// })
|
||||
// })
|
||||
// .procedure("getKey", {
|
||||
// R.with2(library())
|
||||
// .query(|(_, library), key_uuid: Uuid| async move {
|
||||
// Ok(library
|
||||
// .key_manager
|
||||
// .get_key(key_uuid)
|
||||
// .await?
|
||||
// .expose()
|
||||
// .clone())
|
||||
// })
|
||||
// })
|
||||
// .procedure("mount", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), key_uuid: Uuid| async move {
|
||||
// library.key_manager.mount(key_uuid).await?;
|
||||
// // we also need to dispatch jobs that automatically decrypt preview media and metadata here
|
||||
// invalidate_query!(library, "keys.listMounted");
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("getSecretKey", {
|
||||
// R.with2(library()).query(|(_, library), _: ()| async move {
|
||||
// if library
|
||||
// .key_manager
|
||||
// .keyring_contains_valid_secret_key(library.id)
|
||||
// .await
|
||||
// .is_ok()
|
||||
// {
|
||||
// Ok(Some(
|
||||
// library
|
||||
// .key_manager
|
||||
// .keyring_retrieve(library.id, SECRET_KEY_IDENTIFIER.to_string())
|
||||
// .await?
|
||||
// .expose()
|
||||
// .clone(),
|
||||
// ))
|
||||
// } else {
|
||||
// Ok(None)
|
||||
// }
|
||||
// })
|
||||
// })
|
||||
// .procedure("unmount", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), key_uuid: Uuid| async move {
|
||||
// library.key_manager.unmount(key_uuid)?;
|
||||
// // we also need to delete all in-memory decrypted data associated with this key
|
||||
// invalidate_query!(library, "keys.listMounted");
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("clearMasterPassword", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), _: ()| async move {
|
||||
// // This technically clears the root key, but it means the same thing to the frontend
|
||||
// library.key_manager.clear_root_key().await?;
|
||||
|
||||
// invalidate_query!(library, "keys.isUnlocked");
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("syncKeyToLibrary", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), key_uuid: Uuid| async move {
|
||||
// let key = library.key_manager.sync_to_database(key_uuid).await?;
|
||||
|
||||
// // does not check that the key doesn't exist before writing
|
||||
// write_storedkey_to_db(&library.db, &key).await?;
|
||||
|
||||
// invalidate_query!(library, "keys.list");
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("updateAutomountStatus", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), args: AutomountUpdateArgs| async move {
|
||||
// if !library.key_manager.is_memory_only(args.uuid).await? {
|
||||
// library
|
||||
// .key_manager
|
||||
// .change_automount_status(args.uuid, args.status)
|
||||
// .await?;
|
||||
|
||||
// library
|
||||
// .db
|
||||
// .key()
|
||||
// .update(
|
||||
// key::uuid::equals(args.uuid.to_string()),
|
||||
// vec![key::automount::set(args.status)],
|
||||
// )
|
||||
// .exec()
|
||||
// .await?;
|
||||
|
||||
// invalidate_query!(library, "keys.list");
|
||||
// }
|
||||
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("deleteFromLibrary", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), key_uuid: Uuid| async move {
|
||||
// if !library.key_manager.is_memory_only(key_uuid).await? {
|
||||
// library
|
||||
// .db
|
||||
// .key()
|
||||
// .delete(key::uuid::equals(key_uuid.to_string()))
|
||||
// .exec()
|
||||
// .await?;
|
||||
// }
|
||||
|
||||
// library.key_manager.remove_key(key_uuid).await?;
|
||||
|
||||
// // we also need to delete all in-memory decrypted data associated with this key
|
||||
// invalidate_query!(library, "keys.list");
|
||||
// invalidate_query!(library, "keys.listMounted");
|
||||
// invalidate_query!(library, "keys.getDefault");
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("unlockKeyManager", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), args: UnlockKeyManagerArgs| async move {
|
||||
// let secret_key =
|
||||
// (!args.secret_key.expose().is_empty()).then_some(args.secret_key);
|
||||
|
||||
// library
|
||||
// .key_manager
|
||||
// .unlock(
|
||||
// args.password,
|
||||
// secret_key.map(SecretKeyString),
|
||||
// library.id,
|
||||
// || invalidate_query!(library, "keys.isKeyManagerUnlocking"),
|
||||
// )
|
||||
// .await?;
|
||||
|
||||
// invalidate_query!(library, "keys.isUnlocked");
|
||||
|
||||
// let automount = library
|
||||
// .db
|
||||
// .key()
|
||||
// .find_many(vec![key::automount::equals(true)])
|
||||
// .exec()
|
||||
// .await?;
|
||||
|
||||
// for key in automount {
|
||||
// library
|
||||
// .key_manager
|
||||
// .mount(Uuid::from_str(&key.uuid).map_err(|_| Error::Serialization)?)
|
||||
// .await?;
|
||||
|
||||
// invalidate_query!(library, "keys.listMounted");
|
||||
// }
|
||||
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("setDefault", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), key_uuid: Uuid| async move {
|
||||
// library.key_manager.set_default(key_uuid).await?;
|
||||
|
||||
// library
|
||||
// .db
|
||||
// .key()
|
||||
// .update_many(
|
||||
// vec![key::default::equals(true)],
|
||||
// vec![key::default::set(false)],
|
||||
// )
|
||||
// .exec()
|
||||
// .await?;
|
||||
|
||||
// library
|
||||
// .db
|
||||
// .key()
|
||||
// .update(
|
||||
// key::uuid::equals(key_uuid.to_string()),
|
||||
// vec![key::default::set(true)],
|
||||
// )
|
||||
// .exec()
|
||||
// .await?;
|
||||
|
||||
// invalidate_query!(library, "keys.getDefault");
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("getDefault", {
|
||||
// R.with2(library()).query(|(_, library), _: ()| async move {
|
||||
// library.key_manager.get_default().await.ok()
|
||||
// })
|
||||
// })
|
||||
// .procedure("isKeyManagerUnlocking", {
|
||||
// R.with2(library()).query(|(_, library), _: ()| async move {
|
||||
// library.key_manager.is_unlocking().await.ok()
|
||||
// })
|
||||
// })
|
||||
// .procedure("unmountAll", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), _: ()| async move {
|
||||
// library.key_manager.empty_keymount();
|
||||
// invalidate_query!(library, "keys.listMounted");
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("add", {
|
||||
// // this also mounts the key
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), args: KeyAddArgs| async move {
|
||||
// // register the key with the keymanager
|
||||
// let uuid = library
|
||||
// .key_manager
|
||||
// .add_to_keystore(
|
||||
// args.key,
|
||||
// args.algorithm,
|
||||
// args.hashing_algorithm,
|
||||
// !args.library_sync,
|
||||
// args.automount,
|
||||
// None,
|
||||
// )
|
||||
// .await?;
|
||||
|
||||
// if args.library_sync {
|
||||
// write_storedkey_to_db(
|
||||
// &library.db,
|
||||
// &library.key_manager.access_keystore(uuid).await?,
|
||||
// )
|
||||
// .await?;
|
||||
|
||||
// if args.automount {
|
||||
// library
|
||||
// .db
|
||||
// .key()
|
||||
// .update(
|
||||
// key::uuid::equals(uuid.to_string()),
|
||||
// vec![key::automount::set(true)],
|
||||
// )
|
||||
// .exec()
|
||||
// .await?;
|
||||
// }
|
||||
// }
|
||||
|
||||
// library.key_manager.mount(uuid).await?;
|
||||
|
||||
// invalidate_query!(library, "keys.list");
|
||||
// invalidate_query!(library, "keys.listMounted");
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("backupKeystore", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), path: PathBuf| async move {
|
||||
// // dump all stored keys that are in the key manager (maybe these should be taken from prisma as this will include even "non-sync with library" keys)
|
||||
// let mut stored_keys = library.key_manager.dump_keystore();
|
||||
|
||||
// // include the verification key at the time of backup
|
||||
// stored_keys.push(library.key_manager.get_verification_key().await?);
|
||||
|
||||
// // exclude all memory-only keys
|
||||
// stored_keys.retain(|k| !k.memory_only);
|
||||
|
||||
// let mut output_file = File::create(path).await.map_err(Error::Io)?;
|
||||
// output_file
|
||||
// .write_all(
|
||||
// &serde_json::to_vec(&stored_keys).map_err(|_| Error::Serialization)?,
|
||||
// )
|
||||
// .await
|
||||
// .map_err(Error::Io)?;
|
||||
// Ok(())
|
||||
// })
|
||||
// })
|
||||
// .procedure("restoreKeystore", {
|
||||
// R.with2(library())
|
||||
// .mutation(|(_, library), args: RestoreBackupArgs| async move {
|
||||
// let mut input_file = File::open(args.path).await.map_err(Error::Io)?;
|
||||
|
||||
// let mut backup = Vec::new();
|
||||
|
||||
// input_file
|
||||
// .read_to_end(&mut backup)
|
||||
// .await
|
||||
// .map_err(Error::Io)?;
|
||||
|
||||
// let stored_keys: Vec<StoredKey> =
|
||||
// serde_json::from_slice(&backup).map_err(|_| Error::Serialization)?;
|
||||
|
||||
// let updated_keys = library
|
||||
// .key_manager
|
||||
// .import_keystore_backup(
|
||||
// args.password,
|
||||
// SecretKeyString(args.secret_key),
|
||||
// &stored_keys,
|
||||
// )
|
||||
// .await?;
|
||||
|
||||
// for key in &updated_keys {
|
||||
// write_storedkey_to_db(&library.db, key).await?;
|
||||
// }
|
||||
|
||||
// invalidate_query!(library, "keys.list");
|
||||
// invalidate_query!(library, "keys.listMounted");
|
||||
|
||||
// TryInto::<u32>::try_into(updated_keys.len()).map_err(|_| {
|
||||
// rspc::Error::new(ErrorCode::InternalServerError, "integer overflow".into())
|
||||
// }) // We convert from `usize` (bigint type) to `u32` (number type) because rspc doesn't support bigints.
|
||||
// })
|
||||
// })
|
||||
// .procedure(
|
||||
// "changeMasterPassword",
|
||||
// #[allow(clippy::unwrap_used)] // TODO: Jake is fixing this in a Crypto PR
|
||||
// {
|
||||
// R.with2(library()).mutation(
|
||||
// |(_, library), args: MasterPasswordChangeArgs| async move {
|
||||
// let verification_key = library
|
||||
// .key_manager
|
||||
// .change_master_password(
|
||||
// args.password,
|
||||
// args.algorithm,
|
||||
// args.hashing_algorithm,
|
||||
// library.id,
|
||||
// )
|
||||
// .await?;
|
||||
|
||||
// invalidate_query!(library, "keys.getSecretKey");
|
||||
|
||||
// // remove old root key if present
|
||||
// library
|
||||
// .db
|
||||
// .key()
|
||||
// .delete_many(vec![key::key_type::equals(
|
||||
// serde_json::to_string(&StoredKeyType::Root).unwrap(),
|
||||
// )])
|
||||
// .exec()
|
||||
// .await?;
|
||||
|
||||
// // write the new verification key
|
||||
// write_storedkey_to_db(&library.db, &verification_key).await?;
|
||||
|
||||
// Ok(())
|
||||
// },
|
||||
// )
|
||||
// },
|
||||
// )
|
||||
// }
|
||||
@@ -16,19 +16,17 @@ use sd_prisma::prisma::file_path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use itertools::Itertools;
|
||||
use rspc::{alpha::Rspc, Config};
|
||||
use rspc::{alpha::Rspc, Config, ErrorCode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
use tracing::warn;
|
||||
use uuid::Uuid;
|
||||
|
||||
mod backups;
|
||||
mod cloud;
|
||||
// mod categories;
|
||||
mod ephemeral_files;
|
||||
mod files;
|
||||
mod jobs;
|
||||
// #[cfg(not(any(target_os = "ios", target_os = "android")))]
|
||||
// mod keys;
|
||||
mod labels;
|
||||
mod libraries;
|
||||
pub mod locations;
|
||||
@@ -67,6 +65,23 @@ pub enum CoreEvent {
|
||||
InvalidateOperation(InvalidateOperationEvent),
|
||||
}
|
||||
|
||||
/// All of the feature flags provided by the core itself. The frontend has it's own set of feature flags!
|
||||
///
|
||||
/// If you want a variant of this to show up on the frontend it must be added to `backendFeatures` in `useFeatureFlag.tsx`
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Type)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum BackendFeature {}
|
||||
|
||||
// impl BackendFeature {
|
||||
// pub fn restore(&self, node: &Node) {
|
||||
// match self {
|
||||
// BackendFeature::CloudSync => {
|
||||
// node.cloud_sync_flag.store(true, Ordering::Relaxed);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
/// A version of [`NodeConfig`] that is safe to share with the frontend
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Type)]
|
||||
pub struct SanitizedNodeConfig {
|
||||
@@ -76,6 +91,7 @@ pub struct SanitizedNodeConfig {
|
||||
pub name: String,
|
||||
pub identity: RemoteIdentity,
|
||||
pub p2p: NodeConfigP2P,
|
||||
pub features: Vec<BackendFeature>,
|
||||
pub preferences: NodePreferences,
|
||||
}
|
||||
|
||||
@@ -86,6 +102,7 @@ impl From<NodeConfig> for SanitizedNodeConfig {
|
||||
name: value.name,
|
||||
identity: value.identity.to_remote_identity(),
|
||||
p2p: value.p2p,
|
||||
features: value.features,
|
||||
preferences: value.preferences,
|
||||
}
|
||||
}
|
||||
@@ -137,6 +154,40 @@ pub(crate) fn mount() -> Arc<Router> {
            })
        })
    })
    .procedure("toggleFeatureFlag", {
        R.mutation(|node, feature: BackendFeature| async move {
            let config = node.config.get().await;

            let enabled = if config.features.iter().contains(&feature) {
                node.config
                    .write(|cfg| {
                        cfg.features.retain(|f| *f != feature);
                    })
                    .await
                    .map(|_| false)
            } else {
                node.config
                    .write(|cfg| {
                        cfg.features.push(feature.clone());
                    })
                    .await
                    .map(|_| true)
            }
            .map_err(|e| rspc::Error::new(ErrorCode::InternalServerError, e.to_string()))?;

            warn!("Feature {:?} is now {}", feature, enabled);

            // match feature {
            //     BackendFeature::CloudSync => {
            //         node.cloud_sync_flag.store(enabled, Ordering::Relaxed);
            //     }
            // }

            invalidate_query!(node; node, "nodeState");

            Ok(())
        })
    })
    .merge("api.", web_api::mount())
    .merge("cloud.", cloud::mount())
    .merge("search.", search::mount())
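The `toggleFeatureFlag` mutation above reduces to "remove the flag if present, otherwise add it, and report the new state". The same pattern in isolation; the helper is hypothetical, not part of the Spacedrive API:

```rust
// Toggle `item` in `items`: returns true if it was just enabled (added),
// false if it was just disabled (removed).
fn toggle<T: PartialEq>(items: &mut Vec<T>, item: T) -> bool {
    if items.contains(&item) {
        items.retain(|f| *f != item);
        false
    } else {
        items.push(item);
        true
    }
}

fn main() {
    let mut features = vec!["a"];
    assert!(!toggle(&mut features, "a")); // was present, now disabled
    assert!(toggle(&mut features, "a")); // was absent, now enabled
}
```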
@@ -144,7 +195,6 @@ pub(crate) fn mount() -> Arc<Router> {
|
||||
.merge("volumes.", volumes::mount())
|
||||
.merge("tags.", tags::mount())
|
||||
.merge("labels.", labels::mount())
|
||||
// .merge("categories.", categories::mount())
|
||||
.merge("locations.", locations::mount())
|
||||
.merge("ephemeralFiles.", ephemeral_files::mount())
|
||||
.merge("files.", files::mount())
|
||||
@@ -168,9 +218,6 @@ pub(crate) fn mount() -> Arc<Router> {
|
||||
);
|
||||
});
|
||||
|
||||
// #[cfg(not(any(target_os = "ios", target_os = "android")))]
|
||||
// let r = r.merge("keys.", keys::mount());
|
||||
|
||||
let r = r
|
||||
.build(
|
||||
#[allow(clippy::let_and_return)]
|
||||
|
||||
@@ -31,8 +31,6 @@ pub async fn declare_actors(
|
||||
let ingest_notify = Arc::new(Notify::new());
|
||||
let state = State::default();
|
||||
|
||||
let autorun = node.cloud_sync_flag.load(atomic::Ordering::Relaxed);
|
||||
|
||||
// actors
|
||||
// .declare(
|
||||
// "Cloud Sync Sender",
|
||||
|
||||
@@ -17,7 +17,7 @@ use volume::save_storage_statistics;
|
||||
use std::{
|
||||
fmt,
|
||||
path::{Path, PathBuf},
|
||||
sync::{atomic::AtomicBool, Arc},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
@@ -67,7 +67,6 @@ pub struct Node {
|
||||
pub p2p: Arc<p2p::P2PManager>,
|
||||
pub event_bus: (broadcast::Sender<CoreEvent>, broadcast::Receiver<CoreEvent>),
|
||||
pub notifications: Notifications,
|
||||
pub cloud_sync_flag: Arc<AtomicBool>,
|
||||
pub http: reqwest::Client,
|
||||
pub task_system: TaskSystem<sd_core_heavy_lifting::Error>,
|
||||
pub job_system: JobSystem<NodeContext, JobContext<NodeContext>>,
|
||||
@@ -138,9 +137,6 @@ impl Node {
|
||||
config,
|
||||
event_bus,
|
||||
libraries,
|
||||
cloud_sync_flag: Arc::new(AtomicBool::new(
|
||||
cfg!(target_os = "ios") || cfg!(target_os = "android"),
|
||||
)),
|
||||
http: reqwest::Client::new(),
|
||||
cloud_services: Arc::new(
|
||||
CloudServices::new(&get_cloud_api_address, cloud_services_domain_name).await?,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use crate::{
|
||||
api::notifications::Notification,
|
||||
api::{notifications::Notification, BackendFeature},
|
||||
/*object::media::old_thumbnail::preferences::ThumbnailerPreferences,*/
|
||||
util::version_manager::{Kind, ManagedVersion, VersionManager, VersionManagerError},
|
||||
};
|
||||
@@ -131,6 +131,9 @@ pub struct NodeConfig {
|
||||
/// P2P config
|
||||
#[serde(default)]
|
||||
pub p2p: NodeConfigP2P,
|
||||
/// Feature flags enabled on the node
|
||||
#[serde(default)]
|
||||
pub features: Vec<BackendFeature>,
|
||||
/// The aggregation of many different preferences for the node
|
||||
pub preferences: NodePreferences,
|
||||
// Operating System of the node
|
||||
@@ -199,6 +202,7 @@ impl ManagedVersion<NodeConfigVersion> for NodeConfig {
|
||||
identity: Identity::default(),
|
||||
p2p: NodeConfigP2P::default(),
|
||||
version: Self::LATEST_VERSION,
|
||||
features: vec![],
|
||||
notifications: vec![],
|
||||
preferences: NodePreferences::default(),
|
||||
os,
|
||||
|
||||
@@ -1,159 +0,0 @@
// use crate::{
// invalidate_query,
// job::{
// JobError, JobInitData, JobReportUpdate, JobResult, JobState, StatefulJob, WorkerContext,
// },
// library::Library,
// location::{file_path_helper:: location::id::Type},
// util::error::FileIOError,
// };

// use sd_crypto::{crypto::Decryptor, header::file::FileHeader, Protected};

// use serde::{Deserialize, Serialize};
// use specta::Type;
// use tokio::fs::File;

// use super::{get_location_path_from_location_id, get_many_files_datas, FileData, BYTES_EXT};
// pub struct FileDecryptorJob;

// // decrypt could have an option to restore metadata (and another specific option for file name? - would turn "output file" into "output path" in the UI)
// #[derive(Serialize, Deserialize, Debug, Type, Hash)]
// pub struct FileDecryptorJobInit {
// pub location_id: location::id::Type,
// pub file_path_ids: Vec<file_path::id::Type>,
// pub mount_associated_key: bool,
// pub password: Option<String>, // if this is set, we can assume the user chose password decryption
// pub save_to_library: Option<bool>,
// }

// impl JobInitData for FileDecryptorJobInit {
// type Job = FileDecryptorJob;
// }

// #[async_trait::async_trait]
// impl StatefulJob for FileDecryptorJob {
// type Init = FileDecryptorJobInit;
// type Data = ();
// type Step = FileData;

// const NAME: &'static str = "file_decryptor";

// fn new() -> Self {
// Self {}
// }

// async fn init(&self, ctx: WorkerContext, state: &mut JobState<Self>) -> Result<(), JobError> {
// let Library { db, .. } = &*ctx.library;

// state.steps = get_many_files_datas(
// db,
// get_location_path_from_location_id(db, state.init.location_id).await?,
// &state.init.file_path_ids,
// )
// .await?
// .into();

// ctx.progress(vec![JobReportUpdate::TaskCount(state.steps.len())]);

// Ok(())
// }

// async fn execute_step(
// &self,
// ctx: WorkerContext,
// state: &mut JobState<Self>,
// ) -> Result<(), JobError> {
// let step = &state.steps[0];
// let key_manager = &ctx.library.key_manager;

// // handle overwriting checks, and making sure there's enough available space
// let output_path = {
// let mut path = step.full_path.clone();
// let extension = path.extension().map_or("decrypted", |ext| {
// if ext == BYTES_EXT {
// ""
// } else {
// "decrypted"
// }
// });
// path.set_extension(extension);
// path
// };

// let mut reader = File::open(&step.full_path)
// .await
// .map_err(|e| FileIOError::from((&step.full_path, e)))?;
// let mut writer = File::create(&output_path)
// .await
// .map_err(|e| FileIOError::from((output_path, e)))?;

// let (header, aad) = FileHeader::from_reader(&mut reader).await?;

// let master_key = if let Some(password) = state.init.password.clone() {
// if let Some(save_to_library) = state.init.save_to_library {
// // we can do this first, as `find_key_index` requires a successful decryption (just like `decrypt_master_key`)
// let password_bytes = Protected::new(password.as_bytes().to_vec());

// if save_to_library {
// let index = header.find_key_index(password_bytes.clone()).await?;

// // inherit the encryption algorithm from the keyslot
// key_manager
// .add_to_keystore(
// Protected::new(password),
// header.algorithm,
// header.keyslots[index].hashing_algorithm,
// false,
// false,
// Some(header.keyslots[index].salt),
// )
// .await?;
// }

// header.decrypt_master_key(password_bytes).await?
// } else {
// return Err(JobError::JobDataNotFound(String::from(
// "Password decryption selected, but save to library boolean was not included",
// )));
// }
// } else {
// if state.init.mount_associated_key {
// for key in key_manager.dump_keystore().iter().filter(|x| {
// header
// .keyslots
// .iter()
// .any(|k| k.content_salt == x.content_salt)
// }) {
// key_manager.mount(key.uuid).await.ok();
// }
// }

// let keys = key_manager.enumerate_hashed_keys();

// header.decrypt_master_key_from_prehashed(keys).await?
// };

// let decryptor = Decryptor::new(master_key, header.nonce, header.algorithm)?;

// decryptor
// .decrypt_streams(&mut reader, &mut writer, &aad)
// .await?;

// // need to decrypt preview media/metadata, and maybe add an option in the UI so the user can choose to restore these values
// // for now this can't easily be implemented, as we don't know what the new object id for the file will be (we know the old one, but it may differ)

// ctx.progress(vec![JobReportUpdate::CompletedTaskCount(
// state.step_number + 1,
// )]);

// Ok(())
// }

// async fn finalize(&self, ctx: WorkerContext, state: &mut JobState<Self>) -> JobResult {
// invalidate_query!(ctx.library, "search.paths");

// // mark job as successful
// Ok(Some(serde_json::to_value(&state.init)?))
// }
// }
@@ -1,261 +0,0 @@
// use crate::{
// invalidate_query,
// job::*,
// library::Library,
// location::{file_path_helper:: location::id::Type},
// util::error::{FileIOError, NonUtf8PathError},
// };

// use sd_crypto::{
// crypto::Encryptor,
// header::{file::FileHeader, keyslot::Keyslot},
// primitives::{LATEST_FILE_HEADER, LATEST_KEYSLOT, LATEST_METADATA, LATEST_PREVIEW_MEDIA},
// types::{Algorithm, Key},
// };

// use chrono::FixedOffset;
// use serde::{Deserialize, Serialize};
// use specta::Type;
// use tokio::{
// fs::{self, File},
// io,
// };
// use tracing::{error, warn};
// use uuid::Uuid;

// use super::{
// error::FileSystemJobsError, get_location_path_from_location_id, get_many_files_datas, FileData,
// BYTES_EXT,
// };

// pub struct FileEncryptorJob;

// #[derive(Serialize, Deserialize, Type, Hash)]
// pub struct FileEncryptorJobInit {
// pub location_id: location::id::Type,
// pub file_path_ids: Vec<file_path::id::Type>,
// pub key_uuid: Uuid,
// pub algorithm: Algorithm,
// pub metadata: bool,
// pub preview_media: bool,
// }

// #[derive(Serialize, Deserialize)]
// pub struct Metadata {
// pub file_path_id: file_path::id::Type,
// pub name: String,
// pub hidden: bool,
// pub favorite: bool,
// pub important: bool,
// pub note: Option<String>,
// pub date_created: chrono::DateTime<FixedOffset>,
// }

// impl JobInitData for FileEncryptorJobInit {
// type Job = FileEncryptorJob;
// }

// #[async_trait::async_trait]
// impl StatefulJob for FileEncryptorJob {
// type Init = FileEncryptorJobInit;
// type Data = ();
// type Step = FileData;

// const NAME: &'static str = "file_encryptor";

// fn new() -> Self {
// Self {}
// }

// async fn init(&self, ctx: WorkerContext, state: &mut JobState<Self>) -> Result<(), JobError> {
// let Library { db, .. } = &*ctx.library;

// state.steps = get_many_files_datas(
// db,
// get_location_path_from_location_id(db, state.init.location_id).await?,
// &state.init.file_path_ids,
// )
// .await?
// .into();

// ctx.progress(vec![JobReportUpdate::TaskCount(state.steps.len())]);

// Ok(())
// }

// async fn execute_step(
// &self,
// ctx: WorkerContext,
// state: &mut JobState<Self>,
// ) -> Result<(), JobError> {
// let step = &state.steps[0];

// let Library { key_manager, .. } = &*ctx.library;

// if !step.file_path.is_dir {
// // handle overwriting checks, and making sure there's enough available space

// let user_key = key_manager
// .access_keymount(state.init.key_uuid)
// .await?
// .hashed_key;

// let user_key_details = key_manager.access_keystore(state.init.key_uuid).await?;

// let output_path = {
// let mut path = step.full_path.clone();
// let extension = path.extension().map_or_else(
// || Ok("bytes".to_string()),
// |extension| {
// Ok::<String, JobError>(format!(
// "{}{BYTES_EXT}",
// extension.to_str().ok_or(FileSystemJobsError::FilePath(
// NonUtf8PathError(step.full_path.clone().into_boxed_path()).into()
// ))?
// ))
// },
// )?;

// path.set_extension(extension);
// path
// };

// let _guard = ctx
// .library
// .location_manager()
// .temporary_ignore_events_for_path(
// state.init.location_id,
// ctx.library.clone(),
// &output_path,
// )
// .await
// .map_or_else(
// |e| {
// error!(
// "Failed to make location manager ignore the path {}; Error: {e:#?}",
// output_path.display()
// );
// None
// },
// Some,
// );

// let mut reader = File::open(&step.full_path)
// .await
// .map_err(|e| FileIOError::from((&step.full_path, e)))?;
// let mut writer = File::create(&output_path)
// .await
// .map_err(|e| FileIOError::from((output_path, e)))?;

// let master_key = Key::generate();

// let mut header = FileHeader::new(
// LATEST_FILE_HEADER,
// state.init.algorithm,
// vec![
// Keyslot::new(
// LATEST_KEYSLOT,
// state.init.algorithm,
// user_key_details.hashing_algorithm,
// user_key_details.content_salt,
// user_key,
// master_key.clone(),
// )
// .await?,
// ],
// )?;

// if state.init.metadata || state.init.preview_media {
// // if any are requested, we can make the query as it'll be used at least once
// if let Some(ref object) = step.file_path.object {
// if state.init.metadata {
// let metadata = Metadata {
// file_path_id: step.file_path.id,
// name: step.file_path.materialized_path.clone(),
// hidden: object.hidden,
// favorite: object.favorite,
// important: object.important,
// note: object.note.clone(),
// date_created: object.date_created,
// };

// header
// .add_metadata(
// LATEST_METADATA,
// state.init.algorithm,
// master_key.clone(),
// &metadata,
// )
// .await?;
// }

// // if state.init.preview_media
// // && (object.has_thumbnail
// // || object.has_video_preview || object.has_thumbstrip)

// // may not be the best - preview media (thumbnail) isn't guaranteed to be webp
// let thumbnail_path = ctx
// .library
// .config()
// .data_directory()
// .join("thumbnails")
// .join(
// step.file_path
// .cas_id
// .as_ref()
// .ok_or(JobError::MissingCasId)?,
// )
// .with_extension("webp");

// match fs::read(&thumbnail_path).await {
// Ok(thumbnail_bytes) => {
// header
// .add_preview_media(
// LATEST_PREVIEW_MEDIA,
// state.init.algorithm,
// master_key.clone(),
// &thumbnail_bytes,
// )
// .await?;
// }
// Err(e) if e.kind() == io::ErrorKind::NotFound => {
// // If the file just doesn't exist, then we don't care
// }
// Err(e) => {
// return Err(FileIOError::from((thumbnail_path, e)).into());
// }
// }
// } else {
// // should use container encryption if it's a directory
// warn!("skipping metadata/preview media inclusion, no associated object found")
// }
// }

// header.write(&mut writer).await?;

// let encryptor = Encryptor::new(master_key, header.nonce, header.algorithm)?;

// encryptor
// .encrypt_streams(&mut reader, &mut writer, &header.generate_aad())
// .await?;
// } else {
// warn!(
// "encryption is skipping {}/{} as it isn't a file",
// step.file_path.materialized_path, step.file_path.name
// )
// }

// ctx.progress(vec![JobReportUpdate::CompletedTaskCount(
// state.step_number + 1,
// )]);

// Ok(())
// }

// async fn finalize(&self, ctx: WorkerContext, state: &mut JobState<Self>) -> JobResult {
// invalidate_query!(ctx.library, "search.paths");

// // mark job as successful
// Ok(Some(serde_json::to_value(&state.init)?))
// }
// }
@@ -32,17 +32,16 @@ reqwest = { workspace = true, features = ["native-tls-vendored", "st
rmp-serde = { workspace = true }
rmpv = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["fs"] }
tokio-stream = { workspace = true }
tracing = { workspace = true }
uuid = { workspace = true, features = ["serde", "v4"] }

# Specific AI dependencies
# Note: half and ndarray version must be the same as used in ort
half = { version = "2.1", features = ['num-traits'] }
ndarray = "0.15"
url = '2.5.0'
url = '2.5'

# Microsoft does not provide a release for osx-gpu. See: https://github.com/microsoft/onnxruntime/releases
# "gpu" means CUDA or TensorRT EP. Thus, the ort crate cannot download them at build time.
@@ -11,9 +11,7 @@ repository.workspace = true
sd-p2p = { path = "../p2p" }

# Workspace dependencies
base64 = { workspace = true }
reqwest = { workspace = true, features = ["native-tls-vendored"] }
rmpv = { workspace = true }
rspc = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
@@ -20,6 +20,7 @@ rust-version.workspace = true
async-stream = { workspace = true }
blake3 = { workspace = true }
futures = { workspace = true }
rand = { workspace = true }
serde = { workspace = true, features = ["derive"] }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["io-util", "macros", "rt-multi-thread", "sync"] }
@@ -28,20 +29,17 @@ tokio = { workspace = true, features = ["io-util", "macros", "rt-multi-th
aead = { version = "0.6.0-rc.0", default-features = false, features = ["stream"] }
chacha20poly1305 = "0.11.0-pre.1"
cmov = "0.3.1"
# Some deps use this same version, so we just use it too
generic-array = { version = "=0.14.7", features = ["serde", "zeroize"] }
hex = "0.4.3"
rand = "0.9.0-alpha.2"
rand_chacha = "0.9.0-alpha.2"
rand_core = "0.9.0-alpha.2"
serde-big-array = "0.5.1"
serdect = "0.3.0-pre.0"
typenum = "1.17.0"
zeroize = { version = "1.7.0", features = ["aarch64", "derive"] }
generic-array = { version = "=0.14.7", features = ["serde", "zeroize"] } # Update blocked by aead
hex = "0.4.3"
rand_chacha = "0.9.0-alpha.2"
rand_core = "0.9.0-alpha.2"
serdect = "0.3.0-pre.0"
typenum = "1.17"
zeroize = { version = "1.7", features = ["aarch64", "derive"] }

[dev-dependencies]
paste = "1.0.14"
tempfile = "3.10.1"
paste = "1.0"
tempfile = "3.10"

[[example]]
name = "secure_erase"
@@ -8,6 +8,7 @@ license.workspace = true
repository.workspace = true

[dependencies]
# Workspace dependencies
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
specta = { workspace = true }
@@ -25,5 +25,5 @@ bincode = { version = "=2.0.0-rc.3", features = ["alloc", "derive"], optional =
# Disable defaults for libheif* to avoid bindgen and use pre-compiled headers
libheif-rs = { version = "1.0", default-features = false, optional = true }
libheif-sys = { version = "2.1", default-features = false, optional = true }
pdfium-render = { version = "0.8.15", features = ["image", "sync", "thread_safe"] }
pdfium-render = { version = "0.8.24", features = ["image", "sync", "thread_safe"] }
resvg = "0.43.0"
@@ -9,10 +9,7 @@ use crate::{
};
use image::DynamicImage;
use once_cell::sync::Lazy;
use pdfium_render::{
color::PdfColor,
prelude::{PdfPageRenderRotation, PdfRenderConfig, Pdfium},
};
use pdfium_render::prelude::{PdfColor, PdfPageRenderRotation, PdfRenderConfig, Pdfium};
use tracing::error;

// This path must be relative to the running binary
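The retained "// This path must be relative to the running binary" comment concerns how the Pdfium dynamic library is located at runtime. A small illustration of the general pattern with `once_cell::sync::Lazy` (hypothetical code, not the actual Spacedrive lookup):

use once_cell::sync::Lazy;
use std::{env, path::PathBuf};

// Hypothetical: resolve the directory next to the running binary once, on first use.
static BINARY_DIR: Lazy<PathBuf> = Lazy::new(|| {
    env::current_exe()
        .ok()
        .and_then(|exe| exe.parent().map(PathBuf::from))
        .unwrap_or_else(|| PathBuf::from("."))
});

fn main() {
    // A bundled library would then be looked up under this directory.
    println!("{}", BINARY_DIR.display());
}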
@@ -20,7 +20,6 @@ sd-utils = { path = "../utils" }
# Workspace dependencies
chrono = { workspace = true, features = ["serde"] }
image = { workspace = true }
rand = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
specta = { workspace = true, features = ["chrono"] }
@@ -17,47 +17,33 @@ specta = []

[dependencies]
# Workspace dependencies
base64 = { workspace = true }
base91 = { workspace = true }
ed25519-dalek = { workspace = true }
futures = { workspace = true }
pin-project-lite = { workspace = true }
reqwest = { workspace = true }
rmp-serde = { workspace = true }
serde = { workspace = true, features = ["derive"] }
specta = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["fs", "io-util", "macros", "sync", "time"] }
tokio-stream = { workspace = true, features = ["sync"] }
tokio-util = { workspace = true, features = ["compat"] }
tracing = { workspace = true }
uuid = { workspace = true, features = ["serde"] }
base64 = { workspace = true }
ed25519-dalek = { workspace = true }
futures = { workspace = true }
rmp-serde = { workspace = true }
serde = { workspace = true, features = ["derive"] }
specta = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["fs", "io-util", "macros", "sync", "time"] }
tokio-util = { workspace = true, features = ["compat"] }
tracing = { workspace = true }
uuid = { workspace = true, features = ["serde"] }

# Specific P2P dependencies
dns-lookup = "2.0"
flume = "=0.11.0" # Must match version used by `mdns-sd`
futures-core = "0.3.30"
dns-lookup = "2.0"
flume = "=0.11.0" # Must match version used by `mdns-sd`
hash_map_diff = "0.2.0"
if-watch = { version = "=3.2.0", features = ["tokio"] } # Override features used by libp2p-quic
libp2p = { version = "=0.53.2", features = [
"autonat",
"dcutr",
"macros",
"noise",
"quic",
"relay",
"serde",
"tokio",
"yamux"
] } # Update blocked due to custom patch
libp2p-stream = "=0.1.0-alpha" # Update blocked due to custom patch
mdns-sd = "0.11.1"
rand_core = "0.6.4"
sha256 = "1.5.0"
stable-vec = "0.4.1"
streamunordered = "0.5.3"
sync_wrapper = "1.0"
zeroize = { version = "1.8", features = ["derive"] }
if-watch = { version = "=3.2.0", features = ["tokio"] } # Override features used by libp2p-quic
libp2p-stream = "=0.1.0-alpha" # Update blocked due to custom patch
mdns-sd = "0.11.1"
rand_core = "0.6.4"
stable-vec = "0.4.1"
sync_wrapper = "1.0"
zeroize = { version = "1.8", features = ["derive"] }

[dependencies.libp2p]
features = ["autonat", "dcutr", "macros", "noise", "quic", "relay", "serde", "tokio", "yamux"]
version = "=0.53.2" # Update blocked due to custom patch

[dev-dependencies]
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
@@ -9,7 +9,6 @@ repository.workspace = true

[dependencies]
# Spacedrive Sub-crates
sd-p2p = { path = "../../" }
sd-p2p-proto = { path = "../proto" }

# Workspace dependencies
@@ -15,4 +15,3 @@ sd-p2p-proto = { path = "../proto" }
# Workspace dependencies
thiserror = { workspace = true }
tokio = { workspace = true, features = ["io-util"] }
uuid = { workspace = true, features = ["v4"] }
@@ -10,5 +10,9 @@ repository.workspace = true
# Spacedrive Sub-crates
sd-sync-generator = { path = "../sync-generator" }

# Workspace dependencies
prisma-client-rust-cli = { workspace = true }
# Specific prisma-cli dependencies
[dependencies.prisma-client-rust-cli]
default-features = false
features = ["migrations", "specta", "sqlite", "sqlite-create-many"]
git = "https://github.com/brendonovich/prisma-client-rust"
rev = "4f9ef9d38c"
@@ -13,5 +13,4 @@ prisma-client-rust = { workspace = true }
rmp-serde = { workspace = true }
rmpv = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
uuid = { workspace = true }
@@ -7,9 +7,10 @@ license.workspace = true
repository.workspace = true

[dependencies]
# Workspace dependencies
prisma-client-rust-sdk = { workspace = true }
serde = { workspace = true, features = ["derive"] }
thiserror = { workspace = true }

# Specific Sync Generator dependencies
nom = "7.1.3"
nom = "7.1"
12
packages/client/src/core.ts
generated
@@ -132,7 +132,8 @@ export type Procedures = {
{ key: "tags.assign", input: LibraryArgs<{ targets: Target[]; tag_id: number; unassign: boolean }>, result: null } |
{ key: "tags.create", input: LibraryArgs<TagCreateArgs>, result: Tag } |
{ key: "tags.delete", input: LibraryArgs<number>, result: null } |
{ key: "tags.update", input: LibraryArgs<TagUpdateArgs>, result: null },
{ key: "tags.update", input: LibraryArgs<TagUpdateArgs>, result: null } |
{ key: "toggleFeatureFlag", input: BackendFeature, result: null },
subscriptions:
{ key: "invalidation.listen", input: never, result: InvalidateOperationEvent[] } |
{ key: "jobs.newFilePathIdentified", input: LibraryArgs<null>, result: number[] } |
@@ -158,6 +159,13 @@ export type Args = { search?: string | null; filters?: string | null; name?: str

export type AudioProps = { delay: number; padding: number; sample_rate: number | null; sample_format: string | null; bit_per_sample: number | null; channel_layout: string | null }

/**
 * All of the feature flags provided by the core itself. The frontend has its own set of feature flags!
 *
 * If you want a variant of this to show up on the frontend it must be added to `backendFeatures` in `useFeatureFlag.tsx`
 */
export type BackendFeature = "cloudSync"

export type Backup = ({ id: string; timestamp: string; library_id: string; library_name: string }) & { path: string }

export type BuildInfo = { version: string; commit: string }
@@ -521,7 +529,7 @@ id: string;
/**
 * name is the display name of the current node. This is set by the user and is shown in the UI. // TODO: Length validation so it can fit in DNS record
 */
name: string; identity: RemoteIdentity; p2p: NodeConfigP2P; preferences: NodePreferences }) & { data_path: string; device_model: string | null; is_in_docker: boolean }
name: string; identity: RemoteIdentity; p2p: NodeConfigP2P; features: BackendFeature[]; preferences: NodePreferences }) & { data_path: string; device_model: string | null; is_in_docker: boolean }

export type NonCriticalError = { indexer: NonCriticalIndexerError } | { file_identifier: NonCriticalFileIdentifierError } | { media_processor: NonCriticalMediaProcessorError }
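The new `features: BackendFeature[]` field and the `"cloudSync"` string literal are the TypeScript reflection of the `Vec<BackendFeature>` added to `NodeConfig` above. One plausible way such a Rust enum would serialize to that exact camelCase string (a hypothetical sketch, not the real definition in the core crate):

use serde::{Deserialize, Serialize};

// Hypothetical stand-in for the core's BackendFeature enum; `rename_all = "camelCase"`
// is what would turn the variant name into the "cloudSync" literal seen in core.ts.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum BackendFeature {
    CloudSync,
}

fn main() {
    assert_eq!(
        serde_json::to_string(&BackendFeature::CloudSync).unwrap(),
        r#""cloudSync""#
    );
}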