From 936a0f65d2bfc3e9602abccfa92a91255a3ef5df Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 19 Jul 2024 16:43:38 +0300 Subject: [PATCH 001/218] New Config Structure --- Cargo.lock | Bin 302364 -> 302565 bytes core/Cargo.toml | 2 +- core/src/node/config.rs | 71 +++++++++++++++++++++++++++++++--------- 3 files changed, 57 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cb8296eff15c2db496decaf4e22eda5371d93b5c..b9d4e69ae726f6d4f7e97c28a5feb06044c45bb3 100644 GIT binary patch delta 120 zcmbQUOX%rtp$&Gr(+fM86CILGEt67`j7^e~%}fmvjg8EW4U>Q<#UjnX!pJf?+0Zn_ z$RyF+z#=u(*wEC}GSwgns5;FkDcRV_AUP>, + // Operating System of the node -> "linux", "macos", "windows", "android", "ios" + os: String, version: NodeConfigVersion, } @@ -182,35 +184,27 @@ pub enum NodeConfigVersion { V1 = 1, V2 = 2, V3 = 3, + V4 = 4, } impl ManagedVersion for NodeConfig { - const LATEST_VERSION: NodeConfigVersion = NodeConfigVersion::V3; + const LATEST_VERSION: NodeConfigVersion = NodeConfigVersion::V4; const KIND: Kind = Kind::Json("version"); type MigrationError = NodeConfigError; fn from_latest_version() -> Option { - let mut name = match hostname::get() { - // SAFETY: This is just for display purposes so it doesn't matter if it's lossy - Ok(hostname) => hostname.to_string_lossy().into_owned(), - Err(e) => { - error!( - ?e, - "Falling back to default node name as an error occurred getting your systems hostname;", - ); - - "my-spacedrive".into() - } - }; - name.truncate(250); + let mut name = generate_device_name(); + name.truncate(255); #[cfg(feature = "ai")] let image_labeler_version = Some(sd_ai::old_image_labeler::DEFAULT_MODEL_VERSION.to_string()); #[cfg(not(feature = "ai"))] let image_labeler_version = None; + let os = std::env::consts::OS; + Some(Self { - id: Uuid::new_v4(), + id: Uuid::now_v7(), name, identity: Identity::default(), p2p: NodeConfigP2P::default(), @@ -221,6 +215,7 @@ impl ManagedVersion for 
NodeConfig { sd_api_origin: None, preferences: NodePreferences::default(), image_labeler_version, + os: os.to_string(), }) } } @@ -313,6 +308,33 @@ impl NodeConfig { .map_err(|e| FileIOError::from((path, e)))?; } + (NodeConfigVersion::V3, NodeConfigVersion::V4) => { + let mut config: Map = + serde_json::from_slice(&fs::read(path).await.map_err(|e| { + FileIOError::from(( + path, + e, + "Failed to read node config file for migration", + )) + })?) + .map_err(VersionManagerError::SerdeJson)?; + + config.remove("id"); + config.insert(String::from("id"), json!(Uuid::now_v7())); + + config.remove("name"); + config.insert(String::from("name"), json!(generate_device_name())); + + config.insert(String::from("os"), json!(std::env::consts::OS)); + + let a = + serde_json::to_vec(&config).map_err(VersionManagerError::SerdeJson)?; + + fs::write(path, a) + .await + .map_err(|e| FileIOError::from((path, e)))?; + } + _ => { error!(current_version = ?current, "Node config version is not handled;"); return Err(VersionManagerError::UnexpectedMigration { @@ -436,3 +458,22 @@ pub enum NodeConfigError { #[error(transparent)] FileIO(#[from] FileIOError), } + +fn generate_device_name() -> String { + #[cfg(target_os = "android")] + let name = "Android Spacedrive Device".into(); + #[cfg(not(target_os = "android"))] + let name = match hostname::get() { + Ok(hostname) => hostname.to_string_lossy().into_owned(), + Err(e) => { + error!( + ?e, + "Falling back to default node name as an error occurred getting your systems hostname;", + ); + + "my-spacedrive".into() + } + }; + + name +} From f1fa1c0069886f2576ba2490e5a748e401c43b4f Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 19 Jul 2024 16:44:40 +0300 Subject: [PATCH 002/218] Start of fixing Android Device Naming in node --- .../modules/sd-core/android/crate/src/lib.rs | 44 +++++++++++++++++++ .../java/com/spacedrive/core/SDCoreModule.kt | 11 +++++ 2 files changed, 55 insertions(+) diff 
--git a/apps/mobile/modules/sd-core/android/crate/src/lib.rs b/apps/mobile/modules/sd-core/android/crate/src/lib.rs index 88f507cc0..d90f1df96 100644 --- a/apps/mobile/modules/sd-core/android/crate/src/lib.rs +++ b/apps/mobile/modules/sd-core/android/crate/src/lib.rs @@ -110,3 +110,47 @@ pub extern "system" fn Java_com_spacedrive_core_SDCoreModule_handleCoreMsg( ); } } + +#[no_mangle] +pub extern "system" fn Java_com_spacedrive_core_SDCoreModule_getDeviceName( + env: JNIEnv, + class: JClass, + callback: JObject, +) { + let jvm = env.get_java_vm().unwrap(); + let mut env = jvm.attach_current_thread().unwrap(); + let callback = env.new_global_ref(callback).unwrap(); + + let result = panic::catch_unwind(|| { + let device_name = { + let mut env = jvm.attach_current_thread().unwrap(); + let name = env + .call_method(&class, "getDeviceName", "()Ljava/lang/String;", &[]) + .unwrap() + .l() + .unwrap(); + + env.get_string((&name).into()).unwrap().into() + }; + + let jvm = env.get_java_vm().unwrap(); + let mut env = jvm.attach_current_thread().unwrap(); + let s = env + .new_string(device_name) + .expect("Couldn't create java string!"); + env.call_method( + &callback, + "resolve", + "(Ljava/lang/String;)V", + &[(&s).into()], + ) + .unwrap(); + }); + + if let Err(err) = result { + error!( + "Error in Java_com_spacedrive_core_SDCoreModule_getDeviceName: {:?}", + err + ); + } +} diff --git a/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt b/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt index 8b7cc069b..8973c313d 100644 --- a/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt +++ b/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt @@ -3,6 +3,7 @@ package com.spacedrive.core import expo.modules.kotlin.Promise import expo.modules.kotlin.modules.Module import expo.modules.kotlin.modules.ModuleDefinition +import 
android.provider.Settings class SDCoreModule : Module() { private var registeredWithRust = false @@ -36,6 +37,12 @@ class SDCoreModule : Module() { } } + public fun getDeviceName(): String { + return Settings.Secure.getString(appContext.reactContext?.contentResolver, "device_name") + ?: "Android Spacedrive Device" + } + + override fun definition() = ModuleDefinition { Name("SDCore") @@ -57,5 +64,9 @@ class SDCoreModule : Module() { AsyncFunction("sd_core_msg") { query: String, promise: Promise -> this@SDCoreModule.handleCoreMsg(query, SDCorePromise(promise)) } + + Function("getDeviceName") { + getDeviceName() + } } } From 774c356f8f64b7e43fb0796981e6ad58634bb8d9 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Tue, 23 Jul 2024 12:51:13 +0300 Subject: [PATCH 003/218] Tracing error for mobile core --- .../modules/sd-core/android/crate/src/lib.rs | 48 +------------------ 1 file changed, 2 insertions(+), 46 deletions(-) diff --git a/apps/mobile/modules/sd-core/android/crate/src/lib.rs b/apps/mobile/modules/sd-core/android/crate/src/lib.rs index d90f1df96..bc8fd2998 100644 --- a/apps/mobile/modules/sd-core/android/crate/src/lib.rs +++ b/apps/mobile/modules/sd-core/android/crate/src/lib.rs @@ -37,7 +37,7 @@ pub extern "system" fn Java_com_spacedrive_core_SDCoreModule_registerCoreEventLi if let Err(err) = result { // TODO: Send rspc error or something here so we can show this in the UI. // TODO: Maybe reinitialise the core cause it could be in an invalid state? 
- println!( + error!( "Error in Java_com_spacedrive_core_SDCoreModule_registerCoreEventListener: {err:?}" ); } @@ -109,48 +109,4 @@ pub extern "system" fn Java_com_spacedrive_core_SDCoreModule_handleCoreMsg( err ); } -} - -#[no_mangle] -pub extern "system" fn Java_com_spacedrive_core_SDCoreModule_getDeviceName( - env: JNIEnv, - class: JClass, - callback: JObject, -) { - let jvm = env.get_java_vm().unwrap(); - let mut env = jvm.attach_current_thread().unwrap(); - let callback = env.new_global_ref(callback).unwrap(); - - let result = panic::catch_unwind(|| { - let device_name = { - let mut env = jvm.attach_current_thread().unwrap(); - let name = env - .call_method(&class, "getDeviceName", "()Ljava/lang/String;", &[]) - .unwrap() - .l() - .unwrap(); - - env.get_string((&name).into()).unwrap().into() - }; - - let jvm = env.get_java_vm().unwrap(); - let mut env = jvm.attach_current_thread().unwrap(); - let s = env - .new_string(device_name) - .expect("Couldn't create java string!"); - env.call_method( - &callback, - "resolve", - "(Ljava/lang/String;)V", - &[(&s).into()], - ) - .unwrap(); - }); - - if let Err(err) = result { - error!( - "Error in Java_com_spacedrive_core_SDCoreModule_getDeviceName: {:?}", - err - ); - } -} +} \ No newline at end of file From 2d713e982700af5d7888727ba71be3388c2fa921 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Tue, 23 Jul 2024 12:51:32 +0300 Subject: [PATCH 004/218] Remove kotlin code for Android Name We're going for a different solution. 
--- .../src/main/java/com/spacedrive/core/SDCoreModule.kt | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt b/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt index 8973c313d..053d1a173 100644 --- a/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt +++ b/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt @@ -37,12 +37,6 @@ class SDCoreModule : Module() { } } - public fun getDeviceName(): String { - return Settings.Secure.getString(appContext.reactContext?.contentResolver, "device_name") - ?: "Android Spacedrive Device" - } - - override fun definition() = ModuleDefinition { Name("SDCore") @@ -64,9 +58,5 @@ class SDCoreModule : Module() { AsyncFunction("sd_core_msg") { query: String, promise: Promise -> this@SDCoreModule.handleCoreMsg(query, SDCorePromise(promise)) } - - Function("getDeviceName") { - getDeviceName() - } } } From 3db7ae77128a04521290ffc7db22724cef844be3 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Tue, 23 Jul 2024 13:41:30 +0300 Subject: [PATCH 005/218] Change service subdomain url from `app` to `api` --- apps/mobile/src/screens/settings/info/Debug.tsx | 4 ++-- apps/p2p-relay/src/config.rs | 2 +- apps/server/src/main.rs | 2 +- core/src/env.rs | 2 +- interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx | 4 ++-- packages/client/src/hooks/usePlausible.tsx | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/apps/mobile/src/screens/settings/info/Debug.tsx b/apps/mobile/src/screens/settings/info/Debug.tsx index 994aae7c0..8d6600ed8 100644 --- a/apps/mobile/src/screens/settings/info/Debug.tsx +++ b/apps/mobile/src/screens/settings/info/Debug.tsx @@ -43,9 +43,9 @@ const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => { */} diff --git 
a/interface/app/$libraryId/settings/client/account/LoginRegister.tsx b/interface/app/$libraryId/settings/client/account/LoginRegister.tsx new file mode 100644 index 000000000..5efc4f16b --- /dev/null +++ b/interface/app/$libraryId/settings/client/account/LoginRegister.tsx @@ -0,0 +1,257 @@ +import { zodResolver } from '@hookform/resolvers/zod'; +import { useZodForm } from '@sd/client'; +import { Button, Card, Divider, Form, Input, Tooltip, z } from '@sd/ui'; +import { motion } from 'framer-motion'; +import { useState } from 'react'; + +import { GoogleLogo, Icon } from '@phosphor-icons/react'; +import { Apple, Github } from '@sd/assets/svgs/brands'; +import clsx from 'clsx'; +import { Controller, useForm } from 'react-hook-form'; + +const Tabs = ['Login', 'Register'] as const; +const LoginSchema = z.object({ + email: z.string().email(), + password: z.string().min(6), +}) +const RegisterSchema = z.object({ + email: z.string().email(), + password: z.string().min(6), + confirmPassword: z.string().min(6), +}).refine(data => data.password === data.confirmPassword, { + message: 'Passwords do not match', + path: ['confirmPassword'] +}) +type RegisterData = z.infer + +type SocialLogin = { + name: "Github" | "Google" | "Apple"; + icon: Icon; +} + +const SocialLogins: SocialLogin[] = [ + {name: 'Github', icon: Github}, + {name: 'Google', icon: GoogleLogo}, + {name: 'Apple', icon: Apple}, +] + +const LoginRegister = () => { + + const [activeTab, setActiveTab] = useState<'Login' | 'Register'>('Login'); + + const socialLoginHandlers = (name: SocialLogin['name']) => { + return { + 'Github': () => { + console.log('Github login'); + }, + 'Google': () => { + console.log('Google login'); + }, + 'Apple': () => { + console.log('Apple login'); + } + }[name](); + } + + return ( + +
+ {Tabs.map((text) => ( +
{ + setActiveTab(text) + }} className={clsx("relative flex-1 border-b border-app-line p-2.5 text-center", + text === 'Login' ? 'rounded-tl-md' : 'rounded-tr-md', + )}> +

{text}

+ {text === activeTab && ( + + )} +
+ ))} +
+
+ {activeTab === 'Login' ? : } +
+ +

OR

+ +
+
+ {SocialLogins.map((social) => ( + +
socialLoginHandlers(social.name)} key={social.name} className='rounded-full border border-app-line bg-app-input p-3'> + +
+
+ ))} +
+
+
+ ) +} + +const Register = () => { + + // useZodForm seems to be out-dated or needs + //fixing as it does not support the schema using zod.refine + const form = useForm( + { + resolver: zodResolver(RegisterSchema), + defaultValues: { + email: '', + password: '', + confirmPassword: '', + } + }) + return ( +
{ + // handle login submission + return console.log(data); + })} + form={form} + > +
+ ( + + )} + /> + {form.formState.errors.email && ( +

{form.formState.errors.email.message}

+ )} + ( + + )} + /> + {form.formState.errors.password && ( +

{form.formState.errors.password.message}

+ )} + ( + + )} + /> + {form.formState.errors.confirmPassword && ( +

{form.formState.errors.confirmPassword.message}

+ )} + +
+
+ ) +} + +const Login = () => { + const form = useZodForm( + { + schema: LoginSchema, + defaultValues: { + email: '', + password: '', + } + }) + return ( +
{ + // handle login submission + console.log(data); + })} + form={form} + > +
+ ( + + )} + /> + {form.formState.errors.email && ( +

{form.formState.errors.email.message}

+ )} + ( + + )} + /> + {form.formState.errors.password && ( +

{form.formState.errors.password.message}

+ )} + +
+
+ ) +} + +export default LoginRegister; diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx new file mode 100644 index 000000000..e44289dfa --- /dev/null +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -0,0 +1,31 @@ +import { Envelope } from "@phosphor-icons/react"; +import { Card } from '@sd/ui'; +import { TruncatedText } from "~/components"; +import { AuthRequiredOverlay } from "~/components/AuthRequiredOverlay"; + + +const Profile = ({ email, authStore }: { email?: string; authStore: { status: string } }) => { + const emailName = authStore.status === 'loggedIn' ? email?.split('@')[0] : 'guest user'; + return ( + + +
+

+ Welcome {emailName}, +

+
+ +
+ +
+ + {authStore.status === 'loggedIn' ? email : 'guestuser@outlook.com'} + +
+
+
+
+ ); +}; + +export default Profile; diff --git a/interface/app/$libraryId/settings/client/account.tsx b/interface/app/$libraryId/settings/client/account/index.tsx similarity index 73% rename from interface/app/$libraryId/settings/client/account.tsx rename to interface/app/$libraryId/settings/client/account/index.tsx index 78be76179..d02002e83 100644 --- a/interface/app/$libraryId/settings/client/account.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -1,12 +1,11 @@ -import { Envelope, User } from '@phosphor-icons/react'; -import { useEffect, useState } from 'react'; import { auth, useBridgeMutation, useBridgeQuery, useFeatureFlag } from '@sd/client'; -import { Button, Card, Input, toast } from '@sd/ui'; -import { TruncatedText } from '~/components'; -import { AuthRequiredOverlay } from '~/components/AuthRequiredOverlay'; +import { Button, Input, toast } from '@sd/ui'; +import { useEffect, useState } from 'react'; import { useLocale } from '~/hooks'; -import { Heading } from '../Layout'; +import { Heading } from '../../Layout'; +import LoginRegister from './LoginRegister'; +import Profile from './Profile'; export const Component = () => { const { t } = useLocale(); @@ -30,41 +29,17 @@ export const Component = () => { description={t('spacedrive_cloud_description')} />
- + {authStore.status === 'notLoggedIn' ? ( + + ) : ( + + )}
{useFeatureFlag('hostedLocations') && } ); }; -const Profile = ({ email, authStore }: { email?: string; authStore: { status: string } }) => { - const emailName = authStore.status === 'loggedIn' ? email?.split('@')[0] : 'guest user'; - return ( - - -
- -
-

- Welcome {emailName}, -

-
- -
- -
- - {authStore.status === 'loggedIn' ? email : 'guestuser@outlook.com'} - -
-
-
- ); -}; - function HostedLocationsPlayground() { const locations = useBridgeQuery(['cloud.locations.list'], { retry: false }); From d67cbec7df9c62db53731635fe8e01ef470df408 Mon Sep 17 00:00:00 2001 From: ameer2468 <33054370+ameer2468@users.noreply.github.com> Date: Wed, 24 Jul 2024 18:51:33 +0300 Subject: [PATCH 008/218] clean up --- .../settings/client/account/Login.tsx | 79 ++++++ .../settings/client/account/LoginRegister.tsx | 257 ------------------ .../settings/client/account/Register.tsx | 105 +++++++ .../settings/client/account/Tabs.tsx | 89 ++++++ .../settings/client/account/index.tsx | 4 +- 5 files changed, 275 insertions(+), 259 deletions(-) create mode 100644 interface/app/$libraryId/settings/client/account/Login.tsx delete mode 100644 interface/app/$libraryId/settings/client/account/LoginRegister.tsx create mode 100644 interface/app/$libraryId/settings/client/account/Register.tsx create mode 100644 interface/app/$libraryId/settings/client/account/Tabs.tsx diff --git a/interface/app/$libraryId/settings/client/account/Login.tsx b/interface/app/$libraryId/settings/client/account/Login.tsx new file mode 100644 index 000000000..416f0a58b --- /dev/null +++ b/interface/app/$libraryId/settings/client/account/Login.tsx @@ -0,0 +1,79 @@ +import { useZodForm } from '@sd/client'; +import { Button, Form, Input, z } from '@sd/ui'; + +import { Controller } from 'react-hook-form'; + + +const LoginSchema = z.object({ + email: z.string().email(), + password: z.string().min(6), +}) + +const Login = () => { + const form = useZodForm( + { + schema: LoginSchema, + defaultValues: { + email: '', + password: '', + } + }) + return ( +
{ + // handle login submission + console.log(data); + })} + form={form} + > +
+ ( + + )} + /> + {form.formState.errors.email && ( +

{form.formState.errors.email.message}

+ )} + ( + + )} + /> + {form.formState.errors.password && ( +

{form.formState.errors.password.message}

+ )} + +
+
+ ) +} + +export default Login; \ No newline at end of file diff --git a/interface/app/$libraryId/settings/client/account/LoginRegister.tsx b/interface/app/$libraryId/settings/client/account/LoginRegister.tsx deleted file mode 100644 index 5efc4f16b..000000000 --- a/interface/app/$libraryId/settings/client/account/LoginRegister.tsx +++ /dev/null @@ -1,257 +0,0 @@ -import { zodResolver } from '@hookform/resolvers/zod'; -import { useZodForm } from '@sd/client'; -import { Button, Card, Divider, Form, Input, Tooltip, z } from '@sd/ui'; -import { motion } from 'framer-motion'; -import { useState } from 'react'; - -import { GoogleLogo, Icon } from '@phosphor-icons/react'; -import { Apple, Github } from '@sd/assets/svgs/brands'; -import clsx from 'clsx'; -import { Controller, useForm } from 'react-hook-form'; - -const Tabs = ['Login', 'Register'] as const; -const LoginSchema = z.object({ - email: z.string().email(), - password: z.string().min(6), -}) -const RegisterSchema = z.object({ - email: z.string().email(), - password: z.string().min(6), - confirmPassword: z.string().min(6), -}).refine(data => data.password === data.confirmPassword, { - message: 'Passwords do not match', - path: ['confirmPassword'] -}) -type RegisterData = z.infer - -type SocialLogin = { - name: "Github" | "Google" | "Apple"; - icon: Icon; -} - -const SocialLogins: SocialLogin[] = [ - {name: 'Github', icon: Github}, - {name: 'Google', icon: GoogleLogo}, - {name: 'Apple', icon: Apple}, -] - -const LoginRegister = () => { - - const [activeTab, setActiveTab] = useState<'Login' | 'Register'>('Login'); - - const socialLoginHandlers = (name: SocialLogin['name']) => { - return { - 'Github': () => { - console.log('Github login'); - }, - 'Google': () => { - console.log('Google login'); - }, - 'Apple': () => { - console.log('Apple login'); - } - }[name](); - } - - return ( - -
- {Tabs.map((text) => ( -
{ - setActiveTab(text) - }} className={clsx("relative flex-1 border-b border-app-line p-2.5 text-center", - text === 'Login' ? 'rounded-tl-md' : 'rounded-tr-md', - )}> -

{text}

- {text === activeTab && ( - - )} -
- ))} -
-
- {activeTab === 'Login' ? : } -
- -

OR

- -
-
- {SocialLogins.map((social) => ( - -
socialLoginHandlers(social.name)} key={social.name} className='rounded-full border border-app-line bg-app-input p-3'> - -
-
- ))} -
-
-
- ) -} - -const Register = () => { - - // useZodForm seems to be out-dated or needs - //fixing as it does not support the schema using zod.refine - const form = useForm( - { - resolver: zodResolver(RegisterSchema), - defaultValues: { - email: '', - password: '', - confirmPassword: '', - } - }) - return ( -
{ - // handle login submission - return console.log(data); - })} - form={form} - > -
- ( - - )} - /> - {form.formState.errors.email && ( -

{form.formState.errors.email.message}

- )} - ( - - )} - /> - {form.formState.errors.password && ( -

{form.formState.errors.password.message}

- )} - ( - - )} - /> - {form.formState.errors.confirmPassword && ( -

{form.formState.errors.confirmPassword.message}

- )} - -
-
- ) -} - -const Login = () => { - const form = useZodForm( - { - schema: LoginSchema, - defaultValues: { - email: '', - password: '', - } - }) - return ( -
{ - // handle login submission - console.log(data); - })} - form={form} - > -
- ( - - )} - /> - {form.formState.errors.email && ( -

{form.formState.errors.email.message}

- )} - ( - - )} - /> - {form.formState.errors.password && ( -

{form.formState.errors.password.message}

- )} - -
-
- ) -} - -export default LoginRegister; diff --git a/interface/app/$libraryId/settings/client/account/Register.tsx b/interface/app/$libraryId/settings/client/account/Register.tsx new file mode 100644 index 000000000..eba8497f5 --- /dev/null +++ b/interface/app/$libraryId/settings/client/account/Register.tsx @@ -0,0 +1,105 @@ +import { zodResolver } from '@hookform/resolvers/zod'; +import { Button, Form, Input, z } from '@sd/ui'; + +import { Controller, useForm } from 'react-hook-form'; + + +const RegisterSchema = z.object({ + email: z.string().email(), + password: z.string().min(6), + confirmPassword: z.string().min(6), +}).refine(data => data.password === data.confirmPassword, { + message: 'Passwords do not match', + path: ['confirmPassword'] +}) +type RegisterType = z.infer + +const Register = () => { + + // useZodForm seems to be out-dated or needs + //fixing as it does not support the schema using zod.refine + const form = useForm( + { + resolver: zodResolver(RegisterSchema), + defaultValues: { + email: '', + password: '', + confirmPassword: '', + } + }) + return ( +
{ + // handle login submission + return console.log(data); + })} + form={form} + > +
+ ( + + )} + /> + {form.formState.errors.email && ( +

{form.formState.errors.email.message}

+ )} + ( + + )} + /> + {form.formState.errors.password && ( +

{form.formState.errors.password.message}

+ )} + ( + + )} + /> + {form.formState.errors.confirmPassword && ( +

{form.formState.errors.confirmPassword.message}

+ )} + +
+
+ ) +} + +export default Register; diff --git a/interface/app/$libraryId/settings/client/account/Tabs.tsx b/interface/app/$libraryId/settings/client/account/Tabs.tsx new file mode 100644 index 000000000..dc7cc3e8e --- /dev/null +++ b/interface/app/$libraryId/settings/client/account/Tabs.tsx @@ -0,0 +1,89 @@ +import { Card, Divider, Tooltip } from '@sd/ui'; +import { motion } from 'framer-motion'; +import { useState } from 'react'; + +import { GoogleLogo, Icon } from '@phosphor-icons/react'; +import { Apple, Github } from '@sd/assets/svgs/brands'; +import clsx from 'clsx'; +import Login from './Login'; +import Register from './Register'; + +const AccountTabs = ['Login', 'Register'] as const; + +type SocialLogin = { + name: "Github" | "Google" | "Apple"; + icon: Icon; +} + +const SocialLogins: SocialLogin[] = [ + {name: 'Github', icon: Github}, + {name: 'Google', icon: GoogleLogo}, + {name: 'Apple', icon: Apple}, +] + +const Tabs = () => { + + const [activeTab, setActiveTab] = useState<'Login' | 'Register'>('Login'); + + const socialLoginHandlers = (name: SocialLogin['name']) => { + return { + 'Github': () => { + console.log('Github login'); + }, + 'Google': () => { + console.log('Google login'); + }, + 'Apple': () => { + console.log('Apple login'); + } + }[name](); + } + + return ( + +
+ {AccountTabs.map((text) => ( +
{ + setActiveTab(text) + }} className={clsx("relative flex-1 border-b border-app-line p-2.5 text-center", + text === 'Login' ? 'rounded-tl-md' : 'rounded-tr-md', + )}> +

{text}

+ {text === activeTab && ( + + )} +
+ ))} +
+
+ {activeTab === 'Login' ? : } +
+ +

OR

+ +
+
+ {SocialLogins.map((social) => ( + +
socialLoginHandlers(social.name)} key={social.name} className='rounded-full border border-app-line bg-app-input p-3'> + +
+
+ ))} +
+
+
+ ) +} + + +export default Tabs; diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index d02002e83..adc6c2762 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -4,8 +4,8 @@ import { useEffect, useState } from 'react'; import { useLocale } from '~/hooks'; import { Heading } from '../../Layout'; -import LoginRegister from './LoginRegister'; import Profile from './Profile'; +import Tabs from './Tabs'; export const Component = () => { const { t } = useLocale(); @@ -30,7 +30,7 @@ export const Component = () => { />
{authStore.status === 'notLoggedIn' ? ( - + ) : ( )} From 9cc85372ae33e70e60e211a081a2c7384eda75e4 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Thu, 25 Jul 2024 00:17:33 +0300 Subject: [PATCH 009/218] Add Supertoken login page --- apps/desktop/package.json | 3 +- .../settings/client/account/Login.tsx | 133 +++++++--- .../settings/client/account/Register.tsx | 240 ++++++++++++------ .../client/account/handlers/cookieHandler.ts | 106 ++++++++ .../client/account/handlers/windowHandler.ts | 88 +++++++ interface/index.tsx | 22 ++ interface/package.json | 1 + pnpm-lock.yaml | Bin 1036427 -> 1037545 bytes 8 files changed, 474 insertions(+), 119 deletions(-) create mode 100644 interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts create mode 100644 interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts diff --git a/apps/desktop/package.json b/apps/desktop/package.json index e354d3acd..d818718a6 100644 --- a/apps/desktop/package.json +++ b/apps/desktop/package.json @@ -29,7 +29,8 @@ "react": "^18.2.0", "react-dom": "^18.2.0", "react-router-dom": "=6.20.1", - "sonner": "^1.0.3" + "sonner": "^1.0.3", + "supertokens-web-js": "^0.13.0" }, "devDependencies": { "@sd/config": "workspace:*", diff --git a/interface/app/$libraryId/settings/client/account/Login.tsx b/interface/app/$libraryId/settings/client/account/Login.tsx index 416f0a58b..c6580f307 100644 --- a/interface/app/$libraryId/settings/client/account/Login.tsx +++ b/interface/app/$libraryId/settings/client/account/Login.tsx @@ -1,33 +1,80 @@ -import { useZodForm } from '@sd/client'; -import { Button, Form, Input, z } from '@sd/ui'; - +import { Eye, EyeClosed } from '@phosphor-icons/react'; +import { useState } from 'react'; import { Controller } from 'react-hook-form'; +import { signIn, signUp } from 'supertokens-web-js/recipe/emailpassword'; +import { useZodForm } from '@sd/client'; +import { Button, Form, Input, toast, z } 
from '@sd/ui'; +async function signInClicked(email: string, password: string) { + try { + const response = await signIn({ + formFields: [ + { + id: 'email', + value: email + }, + { + id: 'password', + value: password + } + ] + }); + console.log('[signInClicked] response', response); + + if (response.status === 'FIELD_ERROR') { + response.formFields.forEach((formField) => { + if (formField.id === 'email') { + // Email validation failed (for example incorrect email syntax). + toast.error(formField.error); + } + }); + } else if (response.status === 'WRONG_CREDENTIALS_ERROR') { + toast.error('Email & password combination is incorrect.'); + } else if (response.status === 'SIGN_IN_NOT_ALLOWED') { + // the reason string is a user friendly message + // about what went wrong. It can also contain a support code which users + // can tell you so you know why their sign in was not allowed. + toast.error(response.reason); + } else { + // sign in successful. The session tokens are automatically handled by + // the frontend SDK. + console.log('Sign in successful'); + } + } catch (err: any) { + if (err.isSuperTokensGeneralError === true) { + // this may be a custom error message sent from the API by you. + toast.error(err.message); + } else { + toast.error('Oops! Something went wrong.'); + } + } +} const LoginSchema = z.object({ email: z.string().email(), - password: z.string().min(6), -}) + password: z.string().min(6) +}); const Login = () => { - const form = useZodForm( - { - schema: LoginSchema, - defaultValues: { - email: '', - password: '', - } - }) - return ( -
{ + const [showPassword, setShowPassword] = useState(false); + const form = useZodForm({ + schema: LoginSchema, + defaultValues: { + email: '', + password: '' + } + }); + return ( + { // handle login submission console.log(data); - })} + await signInClicked(data.email, data.password); + })} form={form} - > -
- +
+ ( @@ -47,33 +94,47 @@ const Login = () => { control={form.control} name="password" render={({ field }) => ( - +
+ { + const pastedText = e.clipboardData.getData('text'); + field.onChange(pastedText); + }} + /> + +
)} /> {form.formState.errors.password && (

{form.formState.errors.password.message}

)} - -
- - ) -} +
+ + ); +}; -export default Login; \ No newline at end of file +export default Login; diff --git a/interface/app/$libraryId/settings/client/account/Register.tsx b/interface/app/$libraryId/settings/client/account/Register.tsx index eba8497f5..0551446d0 100644 --- a/interface/app/$libraryId/settings/client/account/Register.tsx +++ b/interface/app/$libraryId/settings/client/account/Register.tsx @@ -1,105 +1,181 @@ import { zodResolver } from '@hookform/resolvers/zod'; -import { Button, Form, Input, z } from '@sd/ui'; - +import { Eye, EyeClosed } from '@phosphor-icons/react'; +import { useState } from 'react'; import { Controller, useForm } from 'react-hook-form'; +import { signUp } from 'supertokens-web-js/recipe/emailpassword'; +import { Button, Form, Input, toast, z } from '@sd/ui'; +const RegisterSchema = z + .object({ + email: z.string().email(), + password: z.string().min(6), + confirmPassword: z.string().min(6) + }) + .refine((data) => data.password === data.confirmPassword, { + message: 'Passwords do not match', + path: ['confirmPassword'] + }); +type RegisterType = z.infer; -const RegisterSchema = z.object({ - email: z.string().email(), - password: z.string().min(6), - confirmPassword: z.string().min(6), -}).refine(data => data.password === data.confirmPassword, { - message: 'Passwords do not match', - path: ['confirmPassword'] -}) -type RegisterType = z.infer +async function signUpClicked(email: string, password: string) { + try { + const response = await signUp({ + formFields: [ + { + id: 'email', + value: email + }, + { + id: 'password', + value: password + } + ] + }); + + if (response.status === 'FIELD_ERROR') { + // one of the input formFields failed validaiton + response.formFields.forEach((formField) => { + if (formField.id === 'email') { + // Email validation failed (for example incorrect email syntax), + // or the email is not unique. + toast.error(formField.error); + } else if (formField.id === 'password') { + // Password validation failed. 
+ // Maybe it didn't match the password strength + toast.error(formField.error); + } + }); + } else if (response.status === 'SIGN_UP_NOT_ALLOWED') { + // the reason string is a user friendly message + // about what went wrong. It can also contain a support code which users + // can tell you so you know why their sign up was not allowed. + toast.error(response.reason); + } else { + // sign up successful. The session tokens are automatically handled by + // the frontend SDK. + window.location.href = '/homepage'; + } + } catch (err: any) { + if (err.isSuperTokensGeneralError === true) { + // this may be a custom error message sent from the API by you. + toast.error(err.message); + } else { + toast.error('Oops! Something went wrong.'); + } + } +} const Register = () => { - + const [showPassword, setShowPassword] = useState(false); // useZodForm seems to be out-dated or needs //fixing as it does not support the schema using zod.refine - const form = useForm( - { + const form = useForm({ resolver: zodResolver(RegisterSchema), defaultValues: { email: '', password: '', - confirmPassword: '', + confirmPassword: '' } - }) + }); return (
{ - // handle login submission - return console.log(data); + onSubmit={form.handleSubmit(async (data) => { + // handle login submission + console.log(data); + await signUpClicked(data.email, data.password); })} - form={form} + form={form} > -
- ( - - )} - /> - {form.formState.errors.email && ( -

{form.formState.errors.email.message}

- )} - ( - - )} - /> - {form.formState.errors.password && ( -

{form.formState.errors.password.message}

- )} - ( - - )} +
+ ( + + )} /> - {form.formState.errors.confirmPassword && ( -

{form.formState.errors.confirmPassword.message}

- )} + {form.formState.errors.email && ( +

{form.formState.errors.email.message}

+ )} + ( +
+ { + const pastedText = e.clipboardData.getData('text'); + field.onChange(pastedText); + }} + /> + +
+ )} + /> + {form.formState.errors.password && ( +

{form.formState.errors.password.message}

+ )} + ( +
+ + +
+ )} + /> + {form.formState.errors.confirmPassword && ( +

+ {form.formState.errors.confirmPassword.message} +

+ )} + type="submit" + className="mx-auto mt-2 w-full" + variant="accent" + onClick={form.handleSubmit(async (data) => { + console.log(data); + await signUpClicked(data.email, data.password); + })} + disabled={form.formState.isSubmitting} + > + Submit +
- ) -} + ); +}; export default Register; diff --git a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts new file mode 100644 index 000000000..2de7bb498 --- /dev/null +++ b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts @@ -0,0 +1,106 @@ +import { CookieHandlerInterface } from "supertokens-website/utils/cookieHandler/types"; + +const frontendCookiesKey = "frontendCookies"; + +function getCookiesFromStorage(): string { + const cookiesFromStorage = window.localStorage.getItem(frontendCookiesKey); + + if (cookiesFromStorage === null) { + window.localStorage.setItem(frontendCookiesKey, "[]"); + return ""; + } + + /** + * Because we store cookies in local storage, we need to manually check + * for expiry before returning all cookies + */ + const cookieArrayInStorage: string[] = JSON.parse(cookiesFromStorage); + const cookieArrayToReturn: string[] = []; + + for (let cookieIndex = 0; cookieIndex < cookieArrayInStorage.length; cookieIndex++) { + const currentCookieString = cookieArrayInStorage[cookieIndex]; + const parts = currentCookieString?.split(";") ?? []; + let expirationString: string = ""; + + for (let partIndex = 0; partIndex < parts.length; partIndex++) { + const currentPart = parts[partIndex]; + + if (currentPart?.toLocaleLowerCase().includes("expires=")) { + expirationString = currentPart; + break; + } + } + + if (expirationString !== "") { + const expirationValueString = expirationString.split("=")[1]; + const expirationDate = expirationValueString ? 
new Date(expirationValueString) : null; + const currentTimeInMillis = Date.now(); + + // if the cookie has expired, we skip it + if (expirationDate && expirationDate.getTime() < currentTimeInMillis) { + continue; + } + } + + if (currentCookieString !== undefined) { + cookieArrayToReturn.push(currentCookieString); + } + } + + /** + * After processing and removing expired cookies we need to update the cookies + * in storage so we dont have to process the expired ones again + */ + window.localStorage.setItem(frontendCookiesKey, JSON.stringify(cookieArrayToReturn)); + + return cookieArrayToReturn.join("; "); +} + +function setCookieToStorage(cookieString: string) { + const cookieName = cookieString.split(";")[0]?.split("=")[0]; + const cookiesFromStorage = window.localStorage.getItem(frontendCookiesKey); + let cookiesArray: string[] = []; + + if (cookiesFromStorage !== null) { + const cookiesArrayFromStorage: string[] = JSON.parse(cookiesFromStorage); + cookiesArray = cookiesArrayFromStorage; + } + + let cookieIndex = -1; + + for (let i = 0; i < cookiesArray.length; i++) { + const currentCookie = cookiesArray[i]; + + if (currentCookie?.indexOf(`${cookieName}=`) !== -1) { + cookieIndex = i; + break; + } + } + + /** + * If a cookie with the same name already exists (index != -1) then we + * need to remove the old value and replace it with the new one. 
+ * + * If it does not exist then simply add the new cookie + */ + if (cookieIndex !== -1) { + cookiesArray[cookieIndex] = cookieString; + } else { + cookiesArray.push(cookieString); + } + + window.localStorage.setItem(frontendCookiesKey, JSON.stringify(cookiesArray)); +} + +export default function getCookieHandler(original: CookieHandlerInterface): CookieHandlerInterface { + return { + ...original, + getCookie: async function () { + const cookies = getCookiesFromStorage(); + return cookies; + }, + setCookie: async function (cookieString: string) { + setCookieToStorage(cookieString); + }, + }; +} diff --git a/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts b/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts new file mode 100644 index 000000000..c5f095733 --- /dev/null +++ b/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts @@ -0,0 +1,88 @@ +import { WindowHandlerInterface } from "supertokens-website/utils/windowHandler/types"; + +/** + * This example app uses HashRouter from react-router-dom. The SuperTokens SDK relies on + * some window properties like location hash, query params etc. Because HashRouter places + * everything other than the website base in the location hash, we need to add custom + * handling for some of the properties of the Window API + */ +export default function getWindowHandler(original: WindowHandlerInterface): WindowHandlerInterface { + return { + ...original, + location: { + ...original.location, + getSearch: function () { + const currentURL = window.location.href; + const firstQuestionMarkIndex = currentURL.indexOf("?"); + + if (firstQuestionMarkIndex !== -1) { + // Return the query string from the url + let queryString = currentURL.substring(firstQuestionMarkIndex); + + // Remove any hash + if (queryString.includes("#")) { + queryString = queryString.split("#")[0] ?? 
""; + } + + return queryString; + } + + return ""; + }, + getHash: function () { + // Location hash always starts with a #, when returning we prepend it + let locationHash = window.location.hash; + + if (locationHash === "") { + return "#"; + } + + if (locationHash.startsWith("#")) { + // Remove the starting pound symbol + locationHash = locationHash.substring(1); + } + + if (!locationHash.includes("#")) { + // The remaining string did not have any "#" character + return "#"; + } + + const locationSplit = locationHash.split("#"); + + if (locationSplit.length < 2) { + // The string contains a "#" but is followed by nothing + return "#"; + } + + return "#" + locationSplit[1]; + }, + getOrigin: function () { + return "http://localhost:8001"; + }, + getHostName: function () { + return "localhost"; + }, + getPathName: function () { + let locationHash = window.location.hash; + + if (locationHash === "") { + return ""; + } + + if (locationHash.startsWith("#")) { + // Remove the starting pound symbol + locationHash = locationHash.substring(1); + } + + locationHash = locationHash.split("?")[0] ?? ""; + + if (locationHash.includes("#")) { + // Remove location hash + locationHash = locationHash.split("#")[0] ?? 
""; + } + + return locationHash; + }, + }, + }; +} diff --git a/interface/index.tsx b/interface/index.tsx index 1d8799098..626b03b0a 100644 --- a/interface/index.tsx +++ b/interface/index.tsx @@ -5,6 +5,10 @@ import relativeTime from 'dayjs/plugin/relativeTime'; import { PropsWithChildren, Suspense } from 'react'; import { I18nextProvider } from 'react-i18next'; import { RouterProvider, RouterProviderProps } from 'react-router-dom'; +import SuperTokens from 'supertokens-web-js'; +import EmailPassword from 'supertokens-web-js/recipe/emailpassword'; +import Session from 'supertokens-web-js/recipe/session'; +import ThirdParty from 'supertokens-web-js/recipe/thirdparty'; import { InteropProviderReact, P2PContextProvider, @@ -15,6 +19,8 @@ import { import { toast, TooltipProvider } from '@sd/ui'; import { createRoutes } from './app'; +import getCookieHandler from './app/$libraryId/settings/client/account/handlers/cookieHandler'; +import getWindowHandler from './app/$libraryId/settings/client/account/handlers/windowHandler'; import { SpacedropProvider } from './app/$libraryId/Spacedrop'; import i18n from './app/I18n'; import { Devtools } from './components/Devtools'; @@ -42,6 +48,22 @@ import('@sentry/browser').then(({ init, Integrations }) => { }); }); +SuperTokens.init({ + // enableDebugLogs: true, + appInfo: { + apiDomain: 'http://localhost:9000', + apiBasePath: '/api/auth', + appName: 'Spacedrive Auth Service' + }, + cookieHandler: getCookieHandler, + windowHandler: getWindowHandler, + recipeList: [ + Session.init({ tokenTransferMethod: 'header' }), + EmailPassword.init(), + ThirdParty.init() + ] +}); + export type Router = RouterProviderProps['router']; export function SpacedriveRouterProvider(props: { diff --git a/interface/package.json b/interface/package.json index 133278847..92aa3ff99 100644 --- a/interface/package.json +++ b/interface/package.json @@ -65,6 +65,7 @@ "rooks": "^7.14.1", "solid-js": "^1.8.8", "solid-refresh": "^0.6.3", + "supertokens-web-js": 
"^0.13.0", "use-count-up": "^3.0.1", "use-debounce": "^9.0.4", "use-resize-observer": "^9.1.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 4ead2ad2cd9a3bd074152feb81e0647943a29ef7..010d7ed969165d6bc46cade0132e96cd97696e78 100644 GIT binary patch delta 902 zcmaKrO>EL&7{@cmK15_&G)qk8(yJ1nrIZaaGN>C{yM9o{`0^qz6lhC%OM!LF=wdu@ z@UjG#9;e>CId>p2#+xRd3}=r!Eiv)xZFWxGlA(v+>3#CN&-4HPpJy)@k6$mo`8;_8 zK^iSh)SFNdYYn#{0=L|KGIFb%cs^}IJs95o{toFrmLD9FQ$ssb|EwK7t@WT!7j`=f zyF(UNdJDV5{kL{^GY=Z>Dl90b@O8|UDhFz-c?gnH5*69poLs>IS#8Ou6y&Jpic>Wp zrRq3~Z3WiW^2=1JlJsu|;YNa^v>e0da7F+Ep2?QD3L6F9U}c+QsI+c$PDq=@h{dSI zSnq@!|8{r!?_P4l4L3zymx^KpH4lE+?(FC&9V2ChrHZ14GeE)VsEgHVT8nQQ{=|A) zuO^~E-)0M2hLkNpd?l8ydW(5U6>Jye4Sq@Es~ND)SLL`Dwp)5R9DZcC574v` zITI*kJtBxMHq0-F<1;%H3g_dX+VZbpB-GSE0nJiAkQ2Nx42dFSiVss(52rVek+QsVPQ$))*fDpDE1yPTTE_ zuL4Lfo2Es>pPmPGqK>k2L{r|V)ueod+0LZ+TFXV?gp99}e#3ye!O~c6rBwD2JQJ*A zCDmVcQAutsCDS5Zrt8Z~RW%w8caGF?i>nrPi)+0j)$ty4oF^iJnL==4)_gJS7&W~I zPJ3r>-(hjRx3}-S`7t=yJ`ZXwfIR%4&Sw9UVXj kM*h~&>`yvpP58k%7C7G~gkT=)1P1iI{CV$HapG9{3Hq2e=Kufz delta 122 zcmaF4(!P74{f3hQljlx~o&2GgXLG%vV14u53GH_$Faj~t_PZ09Pi>n1W)Y9@^uHn; z^6f<~EI`Z(#B4y!zP-qWV~y5yk4%1^wnC1!Le92AuC_w%ZG}854BLUafS7mtn?-zF SN5N{Q6>@NIm&@cooecmmD>HTg From df56d7725e1964a9121d9a15c62babf704eefbea Mon Sep 17 00:00:00 2001 From: ameer2468 <33054370+ameer2468@users.noreply.github.com> Date: Thu, 25 Jul 2024 13:07:19 +0300 Subject: [PATCH 010/218] Make show password a component --- .../settings/client/account/Login.tsx | 19 ++++++------- .../settings/client/account/Register.tsx | 26 +++++++----------- .../settings/client/account/ShowPassword.tsx | 27 +++++++++++++++++++ 3 files changed, 45 insertions(+), 27 deletions(-) create mode 100644 interface/app/$libraryId/settings/client/account/ShowPassword.tsx diff --git a/interface/app/$libraryId/settings/client/account/Login.tsx b/interface/app/$libraryId/settings/client/account/Login.tsx index c6580f307..d6c51571e 100644 --- 
a/interface/app/$libraryId/settings/client/account/Login.tsx +++ b/interface/app/$libraryId/settings/client/account/Login.tsx @@ -1,9 +1,9 @@ -import { Eye, EyeClosed } from '@phosphor-icons/react'; -import { useState } from 'react'; -import { Controller } from 'react-hook-form'; -import { signIn, signUp } from 'supertokens-web-js/recipe/emailpassword'; import { useZodForm } from '@sd/client'; import { Button, Form, Input, toast, z } from '@sd/ui'; +import { useState } from 'react'; +import { Controller } from 'react-hook-form'; +import { signIn } from 'supertokens-web-js/recipe/emailpassword'; +import ShowPassword from './ShowPassword'; async function signInClicked(email: string, password: string) { try { @@ -107,13 +107,10 @@ const Login = () => { field.onChange(pastedText); }} /> - +
)} /> diff --git a/interface/app/$libraryId/settings/client/account/Register.tsx b/interface/app/$libraryId/settings/client/account/Register.tsx index 0551446d0..18ea5e3c7 100644 --- a/interface/app/$libraryId/settings/client/account/Register.tsx +++ b/interface/app/$libraryId/settings/client/account/Register.tsx @@ -1,9 +1,9 @@ import { zodResolver } from '@hookform/resolvers/zod'; -import { Eye, EyeClosed } from '@phosphor-icons/react'; +import { Button, Form, Input, toast, z } from '@sd/ui'; import { useState } from 'react'; import { Controller, useForm } from 'react-hook-form'; import { signUp } from 'supertokens-web-js/recipe/emailpassword'; -import { Button, Form, Input, toast, z } from '@sd/ui'; +import ShowPassword from './ShowPassword'; const RegisterSchema = z .object({ @@ -120,13 +120,10 @@ const Register = () => { field.onChange(pastedText); }} /> - +
)} /> @@ -146,13 +143,10 @@ const Register = () => { disabled={form.formState.isSubmitting} type={showPassword ? 'text' : 'password'} /> - + )} /> diff --git a/interface/app/$libraryId/settings/client/account/ShowPassword.tsx b/interface/app/$libraryId/settings/client/account/ShowPassword.tsx new file mode 100644 index 000000000..d3e846da0 --- /dev/null +++ b/interface/app/$libraryId/settings/client/account/ShowPassword.tsx @@ -0,0 +1,27 @@ +import { Eye, EyeClosed } from '@phosphor-icons/react'; +import { Button, Tooltip } from '@sd/ui'; + +interface Props { + showPassword: boolean; + setShowPassword: (value: boolean) => void; +} + +const ShowPassword = ({ showPassword, setShowPassword }: Props) => { + return ( + + + + ); +}; + +export default ShowPassword; From 81787e86fd02d5db3253e2e169740a9b75340c19 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Thu, 25 Jul 2024 16:39:25 +0300 Subject: [PATCH 011/218] WIP 3rd Party Logins --- .../settings/client/account/Tabs.tsx | 181 +++++++++++++----- 1 file changed, 130 insertions(+), 51 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/Tabs.tsx b/interface/app/$libraryId/settings/client/account/Tabs.tsx index dc7cc3e8e..a8a8a1fef 100644 --- a/interface/app/$libraryId/settings/client/account/Tabs.tsx +++ b/interface/app/$libraryId/settings/client/account/Tabs.tsx @@ -1,89 +1,168 @@ -import { Card, Divider, Tooltip } from '@sd/ui'; -import { motion } from 'framer-motion'; -import { useState } from 'react'; - import { GoogleLogo, Icon } from '@phosphor-icons/react'; import { Apple, Github } from '@sd/assets/svgs/brands'; import clsx from 'clsx'; +import { motion } from 'framer-motion'; +import { useState } from 'react'; +import { Button, Card, Divider, toast, Tooltip } from '@sd/ui'; + import Login from './Login'; import Register from './Register'; +import { getAuthorisationURLWithQueryParamsAndSetState } from 
'supertokens-web-js/recipe/thirdparty'; const AccountTabs = ['Login', 'Register'] as const; type SocialLogin = { - name: "Github" | "Google" | "Apple"; + name: 'Github' | 'Google' | 'Apple'; icon: Icon; -} +}; const SocialLogins: SocialLogin[] = [ - {name: 'Github', icon: Github}, - {name: 'Google', icon: GoogleLogo}, - {name: 'Apple', icon: Apple}, -] + { name: 'Github', icon: Github }, + { name: 'Google', icon: GoogleLogo }, + { name: 'Apple', icon: Apple } +]; const Tabs = () => { - const [activeTab, setActiveTab] = useState<'Login' | 'Register'>('Login'); + // Currently opens in App. const socialLoginHandlers = (name: SocialLogin['name']) => { return { - 'Github': () => { - console.log('Github login'); + Github: async () => { + try { + const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ + thirdPartyId: "github", + + // This is where Github should redirect the user back after login or error. + frontendRedirectURI: "http://localhost:9000/api/auth/callback/github", + }); + + // we redirect the user to Github for auth. + window.location.assign(authUrl); + } catch (err: any) { + if (err.isSuperTokensGeneralError === true) { + // this may be a custom error message sent from the API by you. + toast.error(err.message); + } else { + toast.error("Oops! Something went wrong."); + } + } }, - 'Google': () => { - console.log('Google login'); + Google: async () => { + try { + const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ + thirdPartyId: "google", + + // This is where Google should redirect the user back after login or error. + // This URL goes on the Google's dashboard as well. 
+ frontendRedirectURI: "http://localhost:9000/api/auth/callback/google", + }); + + /* + Example value of authUrl: https://accounts.google.com/o/oauth2/v2/auth/oauthchooseaccount?scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fuserinfo.email&access_type=offline&include_granted_scopes=true&response_type=code&client_id=1060725074195-kmeum4crr01uirfl2op9kd5acmi9jutn.apps.googleusercontent.com&state=5a489996a28cafc83ddff&redirect_uri=https%3A%2F%2Fsupertokens.io%2Fdev%2Foauth%2Fredirect-to-app&flowName=GeneralOAuthFlow + */ + + // we redirect the user to google for auth. + window.location.assign(authUrl); + } catch (err: any) { + if (err.isSuperTokensGeneralError === true) { + // this may be a custom error message sent from the API by you. + toast.error(err.message); + } else { + toast.error("Oops! Something went wrong."); + } + } }, - 'Apple': () => { - console.log('Apple login'); + Apple: async () => { + try { + const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ + thirdPartyId: "apple", + + // This is where Apple should redirect the user back after login or error. + frontendRedirectURI: "http://localhost:9000/api/auth/callback/apple", + }); + + + // we redirect the user to Apple for auth. + window.location.assign(authUrl); + } catch (err: any) { + if (err.isSuperTokensGeneralError === true) { + // this may be a custom error message sent from the API by you. + toast.error(err.message); + } else { + toast.error("Oops! Something went wrong."); + } + } } }[name](); - } + }; return ( -
+
{AccountTabs.map((text) => ( -
{ - setActiveTab(text) - }} className={clsx("relative flex-1 border-b border-app-line p-2.5 text-center", - text === 'Login' ? 'rounded-tl-md' : 'rounded-tr-md', - )}> -

{text}

- {text === activeTab && ( - { + setActiveTab(text); }} - layoutId='tab' className={clsx("absolute inset-x-0 top-0 z-0 size-full bg-app-line/60" - )} /> + className={clsx( + 'relative flex-1 border-b border-app-line p-2.5 text-center', + text === 'Login' ? 'rounded-tl-md' : 'rounded-tr-md' + )} + > +

+ {text} +

+ {text === activeTab && ( + )}
))}
-
- {activeTab === 'Login' ? : } -
- -

OR

- -
-
+
+ {activeTab === 'Login' ? : } +
+ +

OR

+ +
+
{SocialLogins.map((social) => ( - -
socialLoginHandlers(social.name)} key={social.name} className='rounded-full border border-app-line bg-app-input p-3'> - -
+ + ))} +
-
- ) -} - + ); +}; export default Tabs; From a1e7df67a8dd03d560c3c223ea05fc70715bb745 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 26 Jul 2024 23:57:37 +0300 Subject: [PATCH 012/218] wip: Use Keychain instead of Localhost for Storage We store auth creds in the actual keychain of the device and not in localstorage. Currently works, but will fail if we move completely away from localStorage. --- Cargo.lock | Bin 302565 -> 303613 bytes core/Cargo.toml | 2 + core/src/api/keys.rs | 93 +++++++++++++++++- core/src/api/mod.rs | 2 +- .../settings/client/account/Login.tsx | 3 - .../client/account/handlers/cookieHandler.ts | 25 +++++ interface/index.tsx | 1 - packages/client/src/core.ts | 2 + 8 files changed, 118 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1fc7cd4eea97f4ff9fc82d171cf27fb653edd341..30c170cf182a8d50bd53a552b3ce6cc7be6d1799 100644 GIT binary patch delta 512 zcmZY5ziSjh6u@zI=fa7k@z6vu$a00E7@0ToW@kn)F!(@>(0IybxTJ(sLD+EH198E_HP*g>Ph}x98v!n+a02t*u$jFfe5xoE`xk#`}7Shg_eEwK5^#324rpx(K zitow!wE8aVKN-!kb~MXhj{6&bo7%7E$ZOIsh7azJ&An#-+sSmBkwfjP3uNObskTCz zUdhd6t9)0+Ek-7S#0Uywi4M61L9K=mgDg>UEi5vobmS@sD$3F;ri8G~8Z4%3XM6MQ zsWZKLY3=~48VhE{AR3E08eeh@O2uwFs;I5y#(EZ+^pa5o$xQbGC!N+x1_} AlphaRouter { -// R.router() +pub(crate) fn mount() -> AlphaRouter { + R.router() + .procedure("set", { + R.mutation(|_, key: String| async move { + let username = whoami::username(); + let entry = match Entry::new("spacedrive-auth-service", username.as_str()) { + Ok(entry) => entry, + Err(e) => { + error!("Error creating entry: {}", e); + return Err(rspc::Error::new( + rspc::ErrorCode::InternalServerError, + "Error creating entry".to_string(), + )); + } + }; + + // Check if the key already exists -> if it does, delete it first before setting the new key + // if entry.get_password().is_ok() { + // debug!("Key already exists. 
Deleting key first"); + // match entry.delete_credential() { + // Ok(_) => debug!("Key deleted successfully"), + // Err(e) => { + // error!("Error deleting key: {}", e); + // return Err(rspc::Error::new( + // rspc::ErrorCode::InternalServerError, + // "Error deleting key".to_string(), + // )); + // } + // } + // } + + match entry.set_password(key.as_str()) { + Ok(_) => debug!("Key set successfully"), + Err(e) => { + error!("Error setting key: {}", e); + return Err(rspc::Error::new( + rspc::ErrorCode::InternalServerError, + "Error setting key".to_string(), + )); + } + } + + debug!( + "Key set successfully: key={key}, service={service}, user={user}", + key = key, + service = "spacedrive-auth-service", + ); + Ok(()) + }) + }) + .procedure("get", { + R.query(|_, _: ()| async move { + let username = whoami::username(); + let entry = match Entry::new("spacedrive-auth-service", username.as_str()) { + Ok(entry) => entry, + Err(e) => { + error!("Error creating entry: {}", e); + return Err(rspc::Error::new( + rspc::ErrorCode::InternalServerError, + "Error creating entry".to_string(), + )); + } + }; + + let data = match entry.get_password() { + Ok(key) => key, + Err(e) => { + error!("Error retrieving key: {}. 
Does the key exist yet?", e); + return Ok("".to_string()); + } + }; + + debug!( + "Key retrieved successfully: service={service}, user={user}", + service = "spacedrive-auth-service", + ); + + Ok(data) + }) + }) +} + +//NOTE(@Rocky43007): OLD PROCEDURES -> MAY BE USEFUL FOR REFERENCE AND COULD BE USED IN THE FUTURE // .procedure("list", { // R.with2(library()) // .query(|(_, library), _: ()| async move { Ok(library.key_manager.dump_keystore()) }) diff --git a/core/src/api/mod.rs b/core/src/api/mod.rs index 9a05264bd..62f12d346 100644 --- a/core/src/api/mod.rs +++ b/core/src/api/mod.rs @@ -201,7 +201,7 @@ pub(crate) fn mount() -> Arc { .merge("tags.", tags::mount()) .merge("labels.", labels::mount()) // .merge("categories.", categories::mount()) - // .merge("keys.", keys::mount()) + .merge("keys.", keys::mount()) .merge("locations.", locations::mount()) .merge("ephemeralFiles.", ephemeral_files::mount()) .merge("files.", files::mount()) diff --git a/interface/app/$libraryId/settings/client/account/Login.tsx b/interface/app/$libraryId/settings/client/account/Login.tsx index d6c51571e..8354c6d37 100644 --- a/interface/app/$libraryId/settings/client/account/Login.tsx +++ b/interface/app/$libraryId/settings/client/account/Login.tsx @@ -19,7 +19,6 @@ async function signInClicked(email: string, password: string) { } ] }); - console.log('[signInClicked] response', response); if (response.status === 'FIELD_ERROR') { response.formFields.forEach((formField) => { @@ -68,7 +67,6 @@ const Login = () => {
{ // handle login submission - console.log(data); await signInClicked(data.email, data.password); })} form={form} @@ -122,7 +120,6 @@ const Login = () => { className="mx-auto mt-2 w-full" variant="accent" onClick={form.handleSubmit(async (data) => { - console.log(data); await signInClicked(data.email, data.password); })} disabled={form.formState.isSubmitting} diff --git a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts index 2de7bb498..7ffe897f1 100644 --- a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts +++ b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts @@ -1,8 +1,24 @@ import { CookieHandlerInterface } from "supertokens-website/utils/cookieHandler/types"; +import { NonLibraryProceduresDef, rspc } from '@sd/client' const frontendCookiesKey = "frontendCookies"; +export const nonLibraryClient = rspc.dangerouslyHookIntoInternals(); function getCookiesFromStorage(): string { + // let cookiesFromStorage: string = ""; + + nonLibraryClient.query(['keys.get']).then((response) => { + // Debugging + const cookiesArrayFromStorage: string[] = JSON.parse(response); + console.log("Cookies fetched from storage: ", cookiesArrayFromStorage); + + // Actual + // cookiesFromStorage = response; + }).catch((e) => { + console.error("Error fetching cookies from storage: ", e); + }); + + const cookiesFromStorage = window.localStorage.getItem(frontendCookiesKey); if (cookiesFromStorage === null) { @@ -89,6 +105,15 @@ function setCookieToStorage(cookieString: string) { cookiesArray.push(cookieString); } + nonLibraryClient.mutation(['keys.set', JSON.stringify(cookiesArray)]).then(() => { + console.log("Cookies set successfully"); + }).catch((e) => { + new Error("Error setting cookies to storage: ", e); + return; + }) + + console.log("Setting cookies to storage: ", cookiesArray); + window.localStorage.setItem(frontendCookiesKey, 
JSON.stringify(cookiesArray)); } diff --git a/interface/index.tsx b/interface/index.tsx index 626b03b0a..4370b2312 100644 --- a/interface/index.tsx +++ b/interface/index.tsx @@ -49,7 +49,6 @@ import('@sentry/browser').then(({ init, Integrations }) => { }); SuperTokens.init({ - // enableDebugLogs: true, appInfo: { apiDomain: 'http://localhost:9000', apiBasePath: '/api/auth', diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index 98019c02c..bfb244a16 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -18,6 +18,7 @@ export type Procedures = { { key: "invalidation.test-invalidate", input: never, result: number } | { key: "jobs.isActive", input: LibraryArgs, result: boolean } | { key: "jobs.reports", input: LibraryArgs, result: JobGroup[] } | + { key: "keys.get", input: never, result: string } | { key: "labels.count", input: LibraryArgs, result: number } | { key: "labels.get", input: LibraryArgs, result: Label | null } | { key: "labels.getForObject", input: LibraryArgs, result: Label[] } | @@ -98,6 +99,7 @@ export type Procedures = { { key: "jobs.objectValidator", input: LibraryArgs, result: null } | { key: "jobs.pause", input: LibraryArgs, result: null } | { key: "jobs.resume", input: LibraryArgs, result: null } | + { key: "keys.set", input: string, result: null } | { key: "labels.delete", input: LibraryArgs, result: null } | { key: "library.create", input: CreateLibraryArgs, result: LibraryConfigWrapped } | { key: "library.delete", input: string, result: null } | From 4bdb8ebb476553e6324437c05de830cf692e1ee9 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Sat, 27 Jul 2024 17:20:47 +0300 Subject: [PATCH 013/218] Update keys.rs --- core/src/api/keys.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/api/keys.rs b/core/src/api/keys.rs index 6e4bd04c8..7c3e90095 100644 --- a/core/src/api/keys.rs +++ b/core/src/api/keys.rs @@ -99,7 +99,7 @@ 
pub(crate) fn mount() -> AlphaRouter { } debug!( - "Key set successfully: key={key}, service={service}, user={user}", + "Key set successfully: key={key}, service={service}", key = key, service = "spacedrive-auth-service", ); @@ -129,7 +129,7 @@ pub(crate) fn mount() -> AlphaRouter { }; debug!( - "Key retrieved successfully: service={service}, user={user}", + "Key retrieved successfully: service={service}", service = "spacedrive-auth-service", ); From cbf9c93e0af20238152d477894bd90566efa20bc Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Sat, 27 Jul 2024 20:48:15 +0300 Subject: [PATCH 014/218] Working auth key storage in Keychain Credentials like the auth access token are stored in the user's Keychain instead of in localStorage for security reasons. --- core/src/api/keys.rs | 15 ------- .../client/account/handlers/cookieHandler.ts | 44 ++++++++++++------- 2 files changed, 27 insertions(+), 32 deletions(-) diff --git a/core/src/api/keys.rs b/core/src/api/keys.rs index 7c3e90095..6fa72cfbb 100644 --- a/core/src/api/keys.rs +++ b/core/src/api/keys.rs @@ -72,21 +72,6 @@ pub(crate) fn mount() -> AlphaRouter { } }; - // Check if the key already exists -> if it does, delete it first before setting the new key - // if entry.get_password().is_ok() { - // debug!("Key already exists. 
Deleting key first"); - // match entry.delete_credential() { - // Ok(_) => debug!("Key deleted successfully"), - // Err(e) => { - // error!("Error deleting key: {}", e); - // return Err(rspc::Error::new( - // rspc::ErrorCode::InternalServerError, - // "Error deleting key".to_string(), - // )); - // } - // } - // } - match entry.set_password(key.as_str()) { Ok(_) => debug!("Key set successfully"), Err(e) => { diff --git a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts index 7ffe897f1..e549730ca 100644 --- a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts +++ b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts @@ -1,28 +1,22 @@ import { CookieHandlerInterface } from "supertokens-website/utils/cookieHandler/types"; -import { NonLibraryProceduresDef, rspc } from '@sd/client' - -const frontendCookiesKey = "frontendCookies"; -export const nonLibraryClient = rspc.dangerouslyHookIntoInternals(); +import { nonLibraryClient } from '@sd/client' function getCookiesFromStorage(): string { - // let cookiesFromStorage: string = ""; + let cookiesFromStorage: string = ""; nonLibraryClient.query(['keys.get']).then((response) => { // Debugging + console.log("rspc response: ", response); const cookiesArrayFromStorage: string[] = JSON.parse(response); console.log("Cookies fetched from storage: ", cookiesArrayFromStorage); // Actual - // cookiesFromStorage = response; + cookiesFromStorage = response; }).catch((e) => { console.error("Error fetching cookies from storage: ", e); }); - - const cookiesFromStorage = window.localStorage.getItem(frontendCookiesKey); - - if (cookiesFromStorage === null) { - window.localStorage.setItem(frontendCookiesKey, "[]"); + if (cookiesFromStorage.length === 0) { return ""; } @@ -67,17 +61,35 @@ function getCookiesFromStorage(): string { * After processing and removing expired cookies we need to update 
the cookies * in storage so we dont have to process the expired ones again */ - window.localStorage.setItem(frontendCookiesKey, JSON.stringify(cookieArrayToReturn)); + // window.localStorage.setItem(frontendCookiesKey, JSON.stringify(cookieArrayToReturn)); + nonLibraryClient.mutation(['keys.set', JSON.stringify(cookieArrayToReturn)]).then(() => { + console.log("Cookies set successfully"); + }).catch((e) => { + console.error("Error setting cookies to storage: ", e); + return; + }) return cookieArrayToReturn.join("; "); } function setCookieToStorage(cookieString: string) { const cookieName = cookieString.split(";")[0]?.split("=")[0]; - const cookiesFromStorage = window.localStorage.getItem(frontendCookiesKey); + + let cookiesFromStorage: string = ""; + nonLibraryClient.query(['keys.get']).then((response) => { + // Debugging + const cookiesArrayFromStorage: string[] = JSON.parse(response); + console.log("Cookies fetched from storage: ", cookiesArrayFromStorage); + + // Actual + cookiesFromStorage = response; + }).catch((e) => { + console.error("Error fetching cookies from storage: ", e); + }); + let cookiesArray: string[] = []; - if (cookiesFromStorage !== null) { + if (cookiesFromStorage.length !== 0) { const cookiesArrayFromStorage: string[] = JSON.parse(cookiesFromStorage); cookiesArray = cookiesArrayFromStorage; } @@ -108,13 +120,11 @@ function setCookieToStorage(cookieString: string) { nonLibraryClient.mutation(['keys.set', JSON.stringify(cookiesArray)]).then(() => { console.log("Cookies set successfully"); }).catch((e) => { - new Error("Error setting cookies to storage: ", e); + console.error("Error setting cookies to storage: ", e); return; }) console.log("Setting cookies to storage: ", cookiesArray); - - window.localStorage.setItem(frontendCookiesKey, JSON.stringify(cookiesArray)); } export default function getCookieHandler(original: CookieHandlerInterface): CookieHandlerInterface { From dc23c3fdfea8ed08bdfbfbb77bafe1dfdfe8d925 Mon Sep 17 00:00:00 2001 From: 
Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Sun, 28 Jul 2024 19:48:06 +0000 Subject: [PATCH 015/218] Update SDCoreModule.kt --- .../android/src/main/java/com/spacedrive/core/SDCoreModule.kt | 1 - 1 file changed, 1 deletion(-) diff --git a/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt b/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt index 053d1a173..8b7cc069b 100644 --- a/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt +++ b/apps/mobile/modules/sd-core/android/src/main/java/com/spacedrive/core/SDCoreModule.kt @@ -3,7 +3,6 @@ package com.spacedrive.core import expo.modules.kotlin.Promise import expo.modules.kotlin.modules.Module import expo.modules.kotlin.modules.ModuleDefinition -import android.provider.Settings class SDCoreModule : Module() { private var registeredWithRust = false From 4130f792f651e1cf12f2c631ef7a1595f98ecca8 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Tue, 30 Jul 2024 11:00:59 +0300 Subject: [PATCH 016/218] wip: Getting User Data from backend --- core/src/api/keys.rs | 56 ++++++++++++++++++- .../settings/client/account/Login.tsx | 7 ++- .../client/account/handlers/cookieHandler.ts | 56 +++++-------------- .../settings/client/account/index.tsx | 23 +++++++- packages/client/src/core.ts | 1 + 5 files changed, 94 insertions(+), 49 deletions(-) diff --git a/core/src/api/keys.rs b/core/src/api/keys.rs index 6fa72cfbb..4d63d4f2d 100644 --- a/core/src/api/keys.rs +++ b/core/src/api/keys.rs @@ -3,6 +3,7 @@ use keyring::Entry; use rspc::alpha::AlphaRouter; +use serde_json::Value; use tracing::{debug, error}; // use sd_crypto::keys::keymanager::{StoredKey, StoredKeyType}; // use sd_crypto::primitives::SECRET_KEY_IDENTIFIER; @@ -73,7 +74,7 @@ pub(crate) fn mount() -> AlphaRouter { }; match entry.set_password(key.as_str()) { - Ok(_) => debug!("Key set 
successfully"), + Ok(_) => (), Err(e) => { error!("Error setting key: {}", e); return Err(rspc::Error::new( @@ -114,13 +115,64 @@ pub(crate) fn mount() -> AlphaRouter { }; debug!( - "Key retrieved successfully: service={service}", + "Key retrieved successfully: service={service}, data={_data}", + _data = data, service = "spacedrive-auth-service", ); Ok(data) }) }) + .procedure("getAccessToken", { + R.query(|_, _: ()| async move { + let username = whoami::username(); + let entry = match Entry::new("spacedrive-auth-service", username.as_str()) { + Ok(entry) => entry, + Err(e) => { + error!("Error creating entry: {}", e); + return Err(rspc::Error::new( + rspc::ErrorCode::InternalServerError, + "Error creating entry".to_string(), + )); + } + }; + + let data = match entry.get_password() { + Ok(key) => key, + Err(e) => { + error!("Error retrieving key: {}. Does the key exist yet?", e); + return Ok("".to_string()); + } + }; + + let json_value: Value = match serde_json::from_str(&data) { + Ok(json_value) => json_value, + Err(e) => { + error!("Error parsing JSON value: {}", e); + return Ok("".to_string()); + } + }; + let cookie_str = match json_value[0].as_str() { + Some(cookie_str) => cookie_str, + None => { + error!("Error parsing JSON value: {}", "No cookie string found"); + return Ok("".to_string()); + } + }; + + // Extract the sFrontToken value + let token_start = "st-access-token="; + let token_end = ";"; + let token = cookie_str[token_start.len()..cookie_str.find(token_end).expect("Failed to find token end")].to_string(); + + debug!( + "Key retrieved successfully: service={service}", + service = "spacedrive-auth-service", + ); + + Ok(token) + }) + }) } //NOTE(@Rocky43007): OLD PROCEDURES -> MAY BE USEFUL FOR REFERENCE AND COULD BE USED IN THE FUTURE diff --git a/interface/app/$libraryId/settings/client/account/Login.tsx b/interface/app/$libraryId/settings/client/account/Login.tsx index 8354c6d37..dce7c310c 100644 --- 
a/interface/app/$libraryId/settings/client/account/Login.tsx +++ b/interface/app/$libraryId/settings/client/account/Login.tsx @@ -1,8 +1,8 @@ -import { useZodForm } from '@sd/client'; -import { Button, Form, Input, toast, z } from '@sd/ui'; -import { useState } from 'react'; +import { useEffect, useState } from 'react'; import { Controller } from 'react-hook-form'; import { signIn } from 'supertokens-web-js/recipe/emailpassword'; +import { nonLibraryClient, useZodForm } from '@sd/client'; +import { Button, Form, Input, toast, z } from '@sd/ui'; import ShowPassword from './ShowPassword'; async function signInClicked(email: string, password: string) { @@ -63,6 +63,7 @@ const Login = () => { password: '' } }); + return ( { diff --git a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts index e549730ca..f4ac8cb15 100644 --- a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts +++ b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts @@ -1,20 +1,10 @@ import { CookieHandlerInterface } from "supertokens-website/utils/cookieHandler/types"; import { nonLibraryClient } from '@sd/client' -function getCookiesFromStorage(): string { - let cookiesFromStorage: string = ""; +async function getCookiesFromStorage(): Promise { + const cookiesFromStorage = await nonLibraryClient.query(['keys.get']) - nonLibraryClient.query(['keys.get']).then((response) => { - // Debugging - console.log("rspc response: ", response); - const cookiesArrayFromStorage: string[] = JSON.parse(response); - console.log("Cookies fetched from storage: ", cookiesArrayFromStorage); - - // Actual - cookiesFromStorage = response; - }).catch((e) => { - console.error("Error fetching cookies from storage: ", e); - }); + console.log("Cookies from storage (getCookie): ", cookiesFromStorage); if (cookiesFromStorage.length === 0) { return ""; @@ -61,31 +51,18 @@ function 
getCookiesFromStorage(): string { * After processing and removing expired cookies we need to update the cookies * in storage so we dont have to process the expired ones again */ - // window.localStorage.setItem(frontendCookiesKey, JSON.stringify(cookieArrayToReturn)); - nonLibraryClient.mutation(['keys.set', JSON.stringify(cookieArrayToReturn)]).then(() => { - console.log("Cookies set successfully"); - }).catch((e) => { - console.error("Error setting cookies to storage: ", e); - return; - }) + await nonLibraryClient.mutation(['keys.set', JSON.stringify(cookieArrayToReturn)]) return cookieArrayToReturn.join("; "); } -function setCookieToStorage(cookieString: string) { +async function setCookieToStorage(cookieString: string): Promise { const cookieName = cookieString.split(";")[0]?.split("=")[0]; + console.log("Setting cookie: ", cookieName); - let cookiesFromStorage: string = ""; - nonLibraryClient.query(['keys.get']).then((response) => { - // Debugging - const cookiesArrayFromStorage: string[] = JSON.parse(response); - console.log("Cookies fetched from storage: ", cookiesArrayFromStorage); + const cookiesFromStorage = await nonLibraryClient.query(['keys.get']) - // Actual - cookiesFromStorage = response; - }).catch((e) => { - console.error("Error fetching cookies from storage: ", e); - }); + console.log("Cookies from storage: ", cookiesFromStorage); let cookiesArray: string[] = []; @@ -93,6 +70,7 @@ function setCookieToStorage(cookieString: string) { const cookiesArrayFromStorage: string[] = JSON.parse(cookiesFromStorage); cookiesArray = cookiesArrayFromStorage; } + console.log("Cookies array: ", cookiesArray); let cookieIndex = -1; @@ -104,6 +82,7 @@ function setCookieToStorage(cookieString: string) { break; } } + console.log("Cookie index: ", cookieIndex); /** * If a cookie with the same name already exists (index != -1) then we @@ -116,26 +95,19 @@ function setCookieToStorage(cookieString: string) { } else { cookiesArray.push(cookieString); } + 
console.log("Updated cookies array: ", cookiesArray); - nonLibraryClient.mutation(['keys.set', JSON.stringify(cookiesArray)]).then(() => { - console.log("Cookies set successfully"); - }).catch((e) => { - console.error("Error setting cookies to storage: ", e); - return; - }) - - console.log("Setting cookies to storage: ", cookiesArray); + await nonLibraryClient.mutation(['keys.set', JSON.stringify(cookiesArray)]) } export default function getCookieHandler(original: CookieHandlerInterface): CookieHandlerInterface { return { ...original, getCookie: async function () { - const cookies = getCookiesFromStorage(); - return cookies; + return getCookiesFromStorage(); }, setCookie: async function (cookieString: string) { - setCookieToStorage(cookieString); + return setCookieToStorage(cookieString); }, }; } diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index adc6c2762..751a1c349 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -1,6 +1,7 @@ +import { useEffect, useState } from 'react'; +import Session from 'supertokens-web-js/recipe/session'; import { auth, useBridgeMutation, useBridgeQuery, useFeatureFlag } from '@sd/client'; import { Button, Input, toast } from '@sd/ui'; -import { useEffect, useState } from 'react'; import { useLocale } from '~/hooks'; import { Heading } from '../../Layout'; @@ -10,7 +11,25 @@ import Tabs from './Tabs'; export const Component = () => { const { t } = useLocale(); const me = useBridgeQuery(['auth.me'], { retry: false }); + const token = useBridgeQuery(['keys.getAccessToken'], { retry: false }); const authStore = auth.useStateSnapshot(); + useEffect(() => { + async function _() { + console.log("Token data: ", token.data); + const user_data = await fetch('http://localhost:9000/api/user', { + method: 'GET', + headers: { + 'Authorization': `Bearer ${token.data ?? 
''}` + } + }); + const data = await user_data.json(); + return data; + } + _().then((data) => { + console.log("User data: ", data); + }); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []); return ( <> { />
{authStore.status === 'notLoggedIn' ? ( - + ) : ( )} diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index bfb244a16..e51821732 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -19,6 +19,7 @@ export type Procedures = { { key: "jobs.isActive", input: LibraryArgs, result: boolean } | { key: "jobs.reports", input: LibraryArgs, result: JobGroup[] } | { key: "keys.get", input: never, result: string } | + { key: "keys.getAccessToken", input: never, result: string } | { key: "labels.count", input: LibraryArgs, result: number } | { key: "labels.get", input: LibraryArgs, result: Label | null } | { key: "labels.getForObject", input: LibraryArgs, result: Label[] } | From 547bedab9b60fae457a9b93bd2263636450f93c8 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Tue, 30 Jul 2024 16:32:44 +0300 Subject: [PATCH 017/218] Actually fetch data from rspc --- apps/desktop/src/App.tsx | 42 +++++++++++++++---- .../settings/client/account/Login.tsx | 1 + .../client/account/handlers/cookieHandler.ts | 12 ++++++ .../settings/client/account/index.tsx | 2 +- interface/index.tsx | 19 --------- 5 files changed, 47 insertions(+), 29 deletions(-) diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx index b8309a53a..dd6ac6693 100644 --- a/apps/desktop/src/App.tsx +++ b/apps/desktop/src/App.tsx @@ -17,14 +17,6 @@ import { RouteTitleContext } from '@sd/interface/hooks/useRouteTitle'; import '@sd/ui/style/style.scss'; -import { useLocale } from '@sd/interface/hooks'; - -import { commands } from './commands'; -import { platform } from './platform'; -import { queryClient } from './query'; -import { createMemoryRouterWithHistory } from './router'; -import { createUpdater } from './updater'; - // TODO: Bring this back once upstream is fixed up. 
// const client = hooks.createClient({ // links: [ @@ -34,13 +26,45 @@ import { createUpdater } from './updater'; // tauriLink() // ] // }); +import SuperTokens from 'supertokens-web-js'; +import EmailPassword from 'supertokens-web-js/recipe/emailpassword'; +import Session from 'supertokens-web-js/recipe/session'; +import ThirdParty from 'supertokens-web-js/recipe/thirdparty'; +import getCookieHandler, { + setAppReady +} from '@sd/interface/app/$libraryId/settings/client/account/handlers/cookieHandler'; +import getWindowHandler from '@sd/interface/app/$libraryId/settings/client/account/handlers/windowHandler'; +import { useLocale } from '@sd/interface/hooks'; + +import { commands } from './commands'; +import { platform } from './platform'; +import { queryClient } from './query'; +import { createMemoryRouterWithHistory } from './router'; +import { createUpdater } from './updater'; + +SuperTokens.init({ + appInfo: { + apiDomain: 'http://localhost:9420', + apiBasePath: '/api/auth', + appName: 'Spacedrive Auth Service' + }, + cookieHandler: getCookieHandler, + windowHandler: getWindowHandler, + recipeList: [ + Session.init({ tokenTransferMethod: 'header' }), + EmailPassword.init(), + ThirdParty.init() + ] +}); const startupError = (window as any).__SD_ERROR__ as string | undefined; export default function App() { useEffect(() => { // This tells Tauri to show the current window because it's finished loading - commands.appReady(); + commands.appReady().then(() => { + setAppReady(); + }); }, []); useEffect(() => { diff --git a/interface/app/$libraryId/settings/client/account/Login.tsx b/interface/app/$libraryId/settings/client/account/Login.tsx index dce7c310c..9c1cbf720 100644 --- a/interface/app/$libraryId/settings/client/account/Login.tsx +++ b/interface/app/$libraryId/settings/client/account/Login.tsx @@ -44,6 +44,7 @@ async function signInClicked(email: string, password: string) { // this may be a custom error message sent from the API by you. 
toast.error(err.message); } else { + console.error(err); toast.error('Oops! Something went wrong.'); } } diff --git a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts index f4ac8cb15..bee374a77 100644 --- a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts +++ b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts @@ -1,7 +1,12 @@ import { CookieHandlerInterface } from "supertokens-website/utils/cookieHandler/types"; import { nonLibraryClient } from '@sd/client' +let APP_READY = false; + async function getCookiesFromStorage(): Promise { + if (!APP_READY) { + return ""; + } const cookiesFromStorage = await nonLibraryClient.query(['keys.get']) console.log("Cookies from storage (getCookie): ", cookiesFromStorage); @@ -57,6 +62,9 @@ async function getCookiesFromStorage(): Promise { } async function setCookieToStorage(cookieString: string): Promise { + if (!APP_READY) { + return; + } const cookieName = cookieString.split(";")[0]?.split("=")[0]; console.log("Setting cookie: ", cookieName); @@ -111,3 +119,7 @@ export default function getCookieHandler(original: CookieHandlerInterface): Cook }, }; } + +export function setAppReady() { + APP_READY = true; +} diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index 751a1c349..a6f1c280e 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -16,7 +16,7 @@ export const Component = () => { useEffect(() => { async function _() { console.log("Token data: ", token.data); - const user_data = await fetch('http://localhost:9000/api/user', { + const user_data = await fetch('http://localhost:9420/api/user', { method: 'GET', headers: { 'Authorization': `Bearer ${token.data ?? 
''}` diff --git a/interface/index.tsx b/interface/index.tsx index 4370b2312..1d4d4a8a2 100644 --- a/interface/index.tsx +++ b/interface/index.tsx @@ -5,10 +5,6 @@ import relativeTime from 'dayjs/plugin/relativeTime'; import { PropsWithChildren, Suspense } from 'react'; import { I18nextProvider } from 'react-i18next'; import { RouterProvider, RouterProviderProps } from 'react-router-dom'; -import SuperTokens from 'supertokens-web-js'; -import EmailPassword from 'supertokens-web-js/recipe/emailpassword'; -import Session from 'supertokens-web-js/recipe/session'; -import ThirdParty from 'supertokens-web-js/recipe/thirdparty'; import { InteropProviderReact, P2PContextProvider, @@ -48,21 +44,6 @@ import('@sentry/browser').then(({ init, Integrations }) => { }); }); -SuperTokens.init({ - appInfo: { - apiDomain: 'http://localhost:9000', - apiBasePath: '/api/auth', - appName: 'Spacedrive Auth Service' - }, - cookieHandler: getCookieHandler, - windowHandler: getWindowHandler, - recipeList: [ - Session.init({ tokenTransferMethod: 'header' }), - EmailPassword.init(), - ThirdParty.init() - ] -}); - export type Router = RouterProviderProps['router']; export function SpacedriveRouterProvider(props: { From af9c83befa8f3e1a3799051463aa0dbf1ddce197 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Tue, 30 Jul 2024 16:35:16 +0300 Subject: [PATCH 018/218] Cleanup cookieHandler.ts --- .../settings/client/account/handlers/cookieHandler.ts | 8 -------- 1 file changed, 8 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts index bee374a77..97bc96b3e 100644 --- a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts +++ b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts @@ -9,8 +9,6 @@ async function getCookiesFromStorage(): Promise { } const cookiesFromStorage = await 
nonLibraryClient.query(['keys.get']) - console.log("Cookies from storage (getCookie): ", cookiesFromStorage); - if (cookiesFromStorage.length === 0) { return ""; } @@ -66,19 +64,15 @@ async function setCookieToStorage(cookieString: string): Promise { return; } const cookieName = cookieString.split(";")[0]?.split("=")[0]; - console.log("Setting cookie: ", cookieName); const cookiesFromStorage = await nonLibraryClient.query(['keys.get']) - console.log("Cookies from storage: ", cookiesFromStorage); - let cookiesArray: string[] = []; if (cookiesFromStorage.length !== 0) { const cookiesArrayFromStorage: string[] = JSON.parse(cookiesFromStorage); cookiesArray = cookiesArrayFromStorage; } - console.log("Cookies array: ", cookiesArray); let cookieIndex = -1; @@ -90,7 +84,6 @@ async function setCookieToStorage(cookieString: string): Promise { break; } } - console.log("Cookie index: ", cookieIndex); /** * If a cookie with the same name already exists (index != -1) then we @@ -103,7 +96,6 @@ async function setCookieToStorage(cookieString: string): Promise { } else { cookiesArray.push(cookieString); } - console.log("Updated cookies array: ", cookiesArray); await nonLibraryClient.mutation(['keys.set', JSON.stringify(cookiesArray)]) } From 96880086f090f04cecacade9f6a23dc579c2556c Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 31 Jul 2024 23:31:40 +0300 Subject: [PATCH 019/218] Show profile info if logged in --- .../settings/client/account/Profile.tsx | 39 +++++++++---------- .../settings/client/account/index.tsx | 24 +++++++----- 2 files changed, 33 insertions(+), 30 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index e44289dfa..ac32bf688 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -1,28 +1,27 @@ -import { Envelope } 
from "@phosphor-icons/react"; +import { Envelope } from '@phosphor-icons/react'; import { Card } from '@sd/ui'; -import { TruncatedText } from "~/components"; -import { AuthRequiredOverlay } from "~/components/AuthRequiredOverlay"; - +import { TruncatedText } from '~/components'; +import { AuthRequiredOverlay } from '~/components/AuthRequiredOverlay'; const Profile = ({ email, authStore }: { email?: string; authStore: { status: string } }) => { - const emailName = authStore.status === 'loggedIn' ? email?.split('@')[0] : 'guest user'; + const emailName = email?.split('@')[0]; + const capitalizedEmailName = (emailName?.charAt(0).toUpperCase() ?? '') + emailName?.slice(1); + return ( - -
-

- Welcome {emailName}, -

-
- -
- -
- - {authStore.status === 'loggedIn' ? email : 'guestuser@outlook.com'} - -
-
+ {/* */} +
+

+ Welcome {capitalizedEmailName}, +

+
+ +
+ +
+ {email} +
+
); diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index a6f1c280e..b4a37d54a 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -8,34 +8,38 @@ import { Heading } from '../../Layout'; import Profile from './Profile'; import Tabs from './Tabs'; +type User = { + email: string; + id: string; + timejoined: number; + roles: string[]; +}; + export const Component = () => { const { t } = useLocale(); + const [userInfo, setUserInfo] = useState(null as any); const me = useBridgeQuery(['auth.me'], { retry: false }); const token = useBridgeQuery(['keys.getAccessToken'], { retry: false }); const authStore = auth.useStateSnapshot(); useEffect(() => { async function _() { - console.log("Token data: ", token.data); const user_data = await fetch('http://localhost:9420/api/user', { - method: 'GET', - headers: { - 'Authorization': `Bearer ${token.data ?? ''}` - } + method: 'GET' }); const data = await user_data.json(); return data; } _().then((data) => { - console.log("User data: ", data); + setUserInfo(data as User); }); - // eslint-disable-next-line react-hooks/exhaustive-deps + // eslint-disable-next-line react-hooks/exhaustive-deps }, []); return ( <> - {authStore.status === 'loggedIn' && ( + {userInfo !== null && (
+ ))} + + + {activeTab === 'Login' ? : } + + + OR + + + + {SocialLogins.map((social) => ( + + ))} + + + + + + ); +}; +export default AccountLogin; diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx new file mode 100644 index 000000000..d8bc06dd4 --- /dev/null +++ b/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx @@ -0,0 +1,58 @@ +import { Envelope } from 'phosphor-react-native'; +import { useEffect, useState } from 'react'; +import { Text, View } from 'react-native'; +import Card from '~/components/layout/Card'; +import ScreenContainer from '~/components/layout/ScreenContainer'; +import { tw } from '~/lib/tailwind'; +import { User } from '~/navigation/tabs/SettingsStack'; + +const AccountProfile = () => { + const [userInfo, setUserInfo] = useState(null); + useEffect(() => { + async function _() { + const user_data = await fetch('http://localhost:9420/api/user', { + method: 'GET' + }); + const data = await user_data.json(); + return data; + } + _().then((data) => { + if (data.message !== 'unauthorised') { + setUserInfo(data as User); + } + }); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []); + + const emailName = userInfo ? userInfo.email.split('@')[0] : ''; + const capitalizedEmailName = (emailName?.charAt(0).toUpperCase() ?? '') + emailName?.slice(1); + + return ( + + + + + + Welcome{' '} + {capitalizedEmailName} + + + + + + + {userInfo ? 
userInfo.email : ''} + + + + + + + ); +}; + +export default AccountProfile; diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx new file mode 100644 index 000000000..d8ce39a83 --- /dev/null +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx @@ -0,0 +1,153 @@ +import { useState } from 'react'; +import { Controller } from 'react-hook-form'; +import { Text, View } from 'react-native'; +import { z } from 'zod'; +import { useZodForm } from '@sd/client'; +import { Button } from '~/components/primitive/Button'; +import { Input } from '~/components/primitive/Input'; +import { toast } from '~/components/primitive/Toast'; +import { tw } from '~/lib/tailwind'; +import { useNavigation } from '@react-navigation/native'; +import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; + +async function signInClicked(email: string, password: string, navigator: SettingsStackScreenProps<'AccountProfile'>['navigation']) { + try { + const req = await fetch('http://localhost:9420/api/auth/signin', { + method: 'POST', + headers: { + 'Content-Type': 'application/json; charset=utf-8' + }, + body: JSON.stringify({ + formFields: [ + { + id: 'email', + value: email + }, + { + id: 'password', + value: password + } + ] + }) + }); + + const response: { + status: string; + reason?: string; + user?: { + id: string; + email: string; + timeJoined: number; + tenantIds: string[]; + }; + } = await req.json(); + + if (response.status === 'FIELD_ERROR') { + // response.reason?.forEach((formField) => { + // if (formField.id === 'email') { + // // Email validation failed (for example incorrect email syntax). 
+ // toast.error(formField.error); + // } + // }); + console.error('Field error: ', response.reason); + } else if (response.status === 'WRONG_CREDENTIALS_ERROR') { + toast.error('Email & password combination is incorrect.'); + } else if (response.status === 'SIGN_IN_NOT_ALLOWED') { + // the reason string is a user friendly message + // about what went wrong. It can also contain a support code which users + // can tell you so you know why their sign in was not allowed. + toast.error(response.reason!); + } else { + // sign in successful. The session tokens are automatically handled by + // the frontend SDK. + toast.success('Sign in successful'); + // Refresh the page to show the user is logged in + navigator.navigate('AccountProfile') + } + } catch (err: any) { + if (err.isSuperTokensGeneralError === true) { + // this may be a custom error message sent from the API by you. + toast.error(err.message); + } else { + console.error(err); + toast.error('Oops! Something went wrong.'); + } + } +} + +const LoginSchema = z.object({ + email: z.string().email(), + password: z.string().min(6) +}); + +const Login = () => { + const [showPassword, setShowPassword] = useState(false); + const form = useZodForm({ + schema: LoginSchema, + defaultValues: { + email: '', + password: '' + } + }); + const navigator = useNavigation['navigation']>(); + + return ( + + + ( + + )} + /> + {form.formState.errors.email && ( + + {form.formState.errors.email.message} + + )} + ( + + + {/* FIXME: Fix positioning of button */} + {/* */} + + )} + /> + {form.formState.errors.password && ( + + {form.formState.errors.password.message} + + )} + + + + ); +}; + +export default Login; diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx new file mode 100644 index 000000000..b3d41a3dc --- /dev/null +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx @@ -0,0 +1,176 @@ +import { zodResolver } 
from '@hookform/resolvers/zod'; +import { useNavigation } from '@react-navigation/native'; +import { useState } from 'react'; +import { Controller, useForm } from 'react-hook-form'; +import { Text, View } from 'react-native'; +import { signUp } from 'supertokens-web-js/recipe/emailpassword'; +import { z } from 'zod'; +import { telemetryState } from '@sd/client'; +import { Button } from '~/components/primitive/Button'; +import { Input } from '~/components/primitive/Input'; +import { toast } from '~/components/primitive/Toast'; +import { tw } from '~/lib/tailwind'; +import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; + +import ShowPassword from './ShowPassword'; + +const RegisterSchema = z + .object({ + email: z.string().email(), + password: z.string().min(6), + confirmPassword: z.string().min(6) + }) + .refine((data) => data.password === data.confirmPassword, { + message: 'Passwords do not match', + path: ['confirmPassword'] + }); +type RegisterType = z.infer; + +async function signUpClicked( + email: string, + password: string, + navigator: SettingsStackScreenProps<'AccountProfile'>['navigation'] +) { + try { + const req = await fetch('http://localhost:9000/api/auth/signup', { + method: 'POST', + headers: { + 'Content-Type': 'application/json; charset=utf-8' + }, + body: JSON.stringify({ + formFields: [ + { + id: 'email', + value: email + }, + { + id: 'password', + value: password + } + ] + }) + }); + + const response: { + status: string; + reason?: string; + user?: { + id: string; + email: string; + timeJoined: number; + tenantIds: string[]; + }; + } = await req.json(); + + if (response.status === 'FIELD_ERROR') { + // one of the input formFields failed validaiton + console.error('Field error: ', response.reason); + } else if (response.status === 'SIGN_UP_NOT_ALLOWED') { + // the reason string is a user friendly message + // about what went wrong. 
It can also contain a support code which users + // can tell you so you know why their sign up was not allowed. + toast.error(response.reason!); + } else { + // sign up successful. The session tokens are automatically handled by + // the frontend SDK. + toast.success('Sign up successful'); + navigator.navigate('AccountProfile'); + } + } catch (err: any) { + if (err.isSuperTokensGeneralError === true) { + // this may be a custom error message sent from the API by you. + toast.error(err.message); + } else { + toast.error('Oops! Something went wrong.'); + } + } +} + +const Register = () => { + const [showPassword, setShowPassword] = useState(false); + // useZodForm seems to be out-dated or needs + //fixing as it does not support the schema using zod.refine + const form = useForm({ + resolver: zodResolver(RegisterSchema), + defaultValues: { + email: '', + password: '', + confirmPassword: '' + } + }); + + const navigator = useNavigation['navigation']>(); + return ( + + ( + + )} + /> + {form.formState.errors.email && ( + {form.formState.errors.email.message} + )} + ( + + + {/* */} + + )} + /> + {form.formState.errors.password && ( + + {form.formState.errors.password.message} + + )} + ( + + + {/* */} + + )} + /> + {form.formState.errors.confirmPassword && ( + + {form.formState.errors.confirmPassword.message} + + )} + + + ); +}; + +export default Register; diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx new file mode 100644 index 000000000..e203c9a4a --- /dev/null +++ b/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx @@ -0,0 +1,22 @@ +import { Eye, EyeClosed } from 'phosphor-react-native'; +import { Button } from '~/components/primitive/Button'; +import { tw } from '~/lib/tailwind'; + +interface Props { + showPassword: boolean; + setShowPassword: (value: boolean) => void; +} + +const ShowPassword = ({ showPassword, setShowPassword }: 
Props) => { + return ( + + ); +}; + +export default ShowPassword; diff --git a/interface/app/$libraryId/settings/client/account/Login.tsx b/interface/app/$libraryId/settings/client/account/Login.tsx index 9c1cbf720..63ab28f69 100644 --- a/interface/app/$libraryId/settings/client/account/Login.tsx +++ b/interface/app/$libraryId/settings/client/account/Login.tsx @@ -37,7 +37,7 @@ async function signInClicked(email: string, password: string) { } else { // sign in successful. The session tokens are automatically handled by // the frontend SDK. - console.log('Sign in successful'); + toast.success('Sign in successful'); } } catch (err: any) { if (err.isSuperTokensGeneralError === true) { diff --git a/interface/app/$libraryId/settings/client/account/Tabs.tsx b/interface/app/$libraryId/settings/client/account/Tabs.tsx index a8a8a1fef..aa518f318 100644 --- a/interface/app/$libraryId/settings/client/account/Tabs.tsx +++ b/interface/app/$libraryId/settings/client/account/Tabs.tsx @@ -34,7 +34,7 @@ const Tabs = () => { thirdPartyId: "github", // This is where Github should redirect the user back after login or error. - frontendRedirectURI: "http://localhost:9000/api/auth/callback/github", + frontendRedirectURI: "http://localhost:9420/api/auth/callback/github", }); // we redirect the user to Github for auth. @@ -55,7 +55,7 @@ const Tabs = () => { // This is where Google should redirect the user back after login or error. // This URL goes on the Google's dashboard as well. - frontendRedirectURI: "http://localhost:9000/api/auth/callback/google", + frontendRedirectURI: "http://localhost:9420/api/auth/callback/google", }); /* @@ -79,7 +79,7 @@ const Tabs = () => { thirdPartyId: "apple", // This is where Apple should redirect the user back after login or error. 
- frontendRedirectURI: "http://localhost:9000/api/auth/callback/apple", + frontendRedirectURI: "http://localhost:9420/api/auth/callback/apple", }); diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index b4a37d54a..16ce25574 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -17,7 +17,7 @@ type User = { export const Component = () => { const { t } = useLocale(); - const [userInfo, setUserInfo] = useState(null as any); + const [userInfo, setUserInfo] = useState(null); const me = useBridgeQuery(['auth.me'], { retry: false }); const token = useBridgeQuery(['keys.getAccessToken'], { retry: false }); const authStore = auth.useStateSnapshot(); @@ -30,7 +30,11 @@ export const Component = () => { return data; } _().then((data) => { - setUserInfo(data as User); + if (data.message !== 'unauthorised') { + setUserInfo(data as User); + } else { + setUserInfo(null); + } }); // eslint-disable-next-line react-hooks/exhaustive-deps }, []); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index bb224ab16acc306484f9db9bce76b68ddaf09eb2..b9e7e3e6d7679baf9148caa1e85b9f3f6e9ca4db 100644 GIT binary patch delta 965 zcmaKrOK2Nc6o!#xTk_}`3(<#ZY{dcFC?1V8qmfnYM(>wxRU>I8k`ipj@<^J|NV49t z+N6ap)Mk;^jFU^CTD09{l{jQE;4DnLs3D7xP)eZ`O4@y)+tNj^+)cXag&*$aoO{pD zx!*Z`c=h(_)#GoF$z}n?mcjFBzKAQ@tjoZFTm!X1iYD zYAxwiVL=+*c?Z}ZC^4{Y(Q^9bCAQDkTV4lBqN~#3l_L09VN`!|wlh52QOIXI%BO!R zn+55sAC~AY;UcIOo(e?^c%F56Xq%l&SN*NMYS@M+LN1$|w29#$%eR?Eg2qpfJNdW8GgpH~6g2`Ao z5b0xye6(J|$vwf=j`F=EL2V6MdE8gpVtCrW5yNbZt47dRrV3d{tT~k^TT# z@uZ>w|3R(38Ls0o$@QFEwK890170+clb`B`^l?&iSS*rnG%xWjW`UBsh2c?(qj|{b>>GcnTNx1ZFvIPGA#qg12I+y}vTYp)BNVRRN z{MH|a-4D(G?Jw#;olF1AK78qI;9b@l&l@xi?ZPWgSVlo`Tr=RENm!6#3kg-{tJD-U?DYYa~&(LggAg{>e0zUT5 zJ9#;RSWC)_@{%Vja*9okNR^%L{)R7Q@&^O<=6#9n`w|(q?@MHo_ysm@x}gk{dT6A3 zR(6o3M@6uHWTj_*VSa>5cD6;nWn`tPU$|MRWn@KFYN2a>N_n||Xntz2L0)3AS#Fh2 
zNVsuWq-SYxYK}*4WoV#tdQzpUUs$ABs;RSAgrmuH#zbbh?Zq+7bywRPj97q}6^Plk zHyE*BpE$WZPH4NuQFfK}?R}*jK+L(luarwy3G66Fv*`;R?J0#CXJ}uZv+;o$f2aqda|nEVDd>6(_~RIX&_%6Zdv` zah|BI_P&KYK+L Date: Fri, 2 Aug 2024 18:53:29 -0300 Subject: [PATCH 021/218] Setup client for Cloud Services in Node --- Cargo.lock | Bin 303613 -> 312594 bytes Cargo.toml | 4 + core/Cargo.toml | 2 + core/crates/cloud-services/Cargo.toml | 28 ++++ core/crates/cloud-services/src/error.rs | 21 +++ core/crates/cloud-services/src/lib.rs | 203 ++++++++++++++++++++++++ core/src/lib.rs | 26 +++ crates/ai/Cargo.toml | 2 +- 8 files changed, 285 insertions(+), 1 deletion(-) create mode 100644 core/crates/cloud-services/Cargo.toml create mode 100644 core/crates/cloud-services/src/error.rs create mode 100644 core/crates/cloud-services/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 8db081fa4a921b8b83f7828afa1347aa5ae1669f..ee6c3188e7a14c5918a4975acfdcf636fef3f886 100644 GIT binary patch delta 7650 zcmbtZd2n4l$WJ!Kc za&TBmhGp2Yaf43-Q<^Z7&w)V(vS$w}v`>OKcfyKMCYE9$n*%?+%jG0O9xT2)c zECdyZvn~`Su$&Msbm_{X)P?bucw5>K3TcAlPSad_rkx{s+&$FTw93tIJ+E(ctDQF2 z1{R;+m#yocGwt?uvn@Y1W!h7tWL(Dno6=2-`QTTU#cW45Qa!(+cX43jc>{|tjy*E4 z9G{?coK~mnjf>B1-!@e3{?oP9n(;l8!sR7lUIe3L$s8>t$7;*c7oO!sZoPGi`@l+q z75RWa%fdNd8iM}|7yGA@^^2>n{BUnZ$9eT+@9s%UOCf_6H1|c$SnhHom1iu7oI1sb z5{fx%B*m?zCXQ%Rmco_FQ}3Or`X3*Pdmqbst0&40i#P1OSf;jp_}?851qrq~8mmZT^>!;_Uv zYlhBXMPR6PVG3&~;VLNSnNY+y>O*b?lRj4^lO)owCPUQ|KOKojZ_m!CUb<^b+;uQ{ z>*D9{x@MrdWqP>Ua^Fzg(vz%+m+#EBRhM;^FJAKXl}UAZr>C*k&c3U9;NxbJDq1ir zO0G@8f-8(FB&AY1Cmd>NI8oAC>MiF=(wr&7bf8*dEuc4)dI&|ak>eivT`Pseg z;96lG*N*45vyruJGu<|fbQ`%HT0Xe>xMOEVv6f|hulVqt1Bsci+AB0#ZhVqfWE%}yVpoXDkn zch?cO@61^BrN3LTxc9zIz44-UHdtM>tAFtm4-EDXja@etTH&PbpyMo>knNsS%q#S< z6BIR)!5WJ$#y=Ksa8Fg?WS*Cr3bdvdI=Dc+@wCht#e=Zw23c>@Wc8UJZ;tMnY}fg+ zUY?p@jF=^3+O-Yl(`Goc;fIjdV#mYQ3My)+U#YHY5oEfu56D%2SuB?a2aYwJW= z8jDtlJBnl|j;w90jQ8J_Y{4y7#<}liL(9mCdrYw0YpPxF^*oo3=2m)XIDo?{#Q=D@ zXIKZL%n3ZCwZUVk93GZ2Pvf0;CBvx6S@FT!v$Gbz`;*V5z4h>n%{Kj_dAM(ILk(US 
z&hGZLN~d(L@e&7|UH8-*s@r}!n5gQ=gS#gJfVD)Wq>~Keqi|a}#KDYY*MBeuHb?BTfYuhV5&KCWZ`>U7=fN4JLQ>E_&g(~Y@>*1=FOgpU`m zT8J1Y)y&hw@z3u_&#dlx>PL;byBBAk-k%Km1$S~Ek#P}k*_`c}tOrK0vMeyph67@4 z$gy4*0I?+Joa>qm3L>mF*c7fD=z}Vw4QGzX>e#(QtHaE~{xNVsbH+?JuMd&#NiSG@ z?3qs_%j$)qJo~nq`l+wh74pM(g!dhUByOj29Y((sbh zlY}-i(o&;cZZX0J7;d;$GWQmc%`hE8=A0Ei4@4Fv(4jkC)yO;7R_{K#GM@Q-GO_r9 zUp>(~6b`)#f5^DP{vEh)_k_c&Nw0OuL|zEz9o3}=c%bH1=YfMnOvt5jz!Rtrjok=^ z&b^|6nNU6S!J*#fj1M#Op%vfp56Rh!?xkmwu4Gs|@Vlcu@rp*WCSG`NvS$+I6qE^J zbIcNEnFg7Qk^&Vh2v;aw;@@0j-W8}BOawT=MteggJ3UT)IvKG#$=8b5eNvTkW9B`I|%b0~*0hhWg9 zM8|=bp{)v3vQWM-1cWWQ#f0F~L2*!(bCyapLYI`he`aW zOOm%uZ~^$k!%?umR?hfB@KWTZhhmYC9s&zd-g^PT2EvxU02~(%GQ?6=RtHuLRjcpm zjX$_8(VfA=$&uxaZl!N^iJ5eP0e%Ut2{3e67uJeaKS-2Yce|H}2P*{LyW+OGx|R(<&$dnQW`2?nGO-cTTUfp!d9qktuZ zL@kN}?q7H#oT8<4-h-_)0e@1)xL|R|Rmt#{V0?E5&CH>x+3UK$x+P)hLcC)+YTEapnPOR;nn{CfK z)ADi0Gs$4Q5DXlLj-+e)L#s90>VBi$o*KJ09E!I-kqq|EPw#ccO@-=Re7vW7t?Hqv zb%}__E=+b$0(7jYADckOk`g@u?c}|zG1Qs7?vH@pP~2eTt#AcX8UPFxTM({!Jm#|D z>XzmOapIosnnDR11n}Epwt6Gn6XOvgfpV=*8i9bO9RlYmcUvc8>l{(z7uh zJTF<>*Pd(60M#W>zW)cw#?I5t9T6y|m9ZuWruFkNM`+@ug#_ne?s@(3}Ni zGSd+KPy?JmvpNf6_taSkMkt<8);0kpb@wCWPS0!u1VK4 zrki$u^`%SK_11*i1Tt1%zG<+o(CYZxr;=6GhxqB8w>qsX2A^=+>!gecAoc)PCdQv1fs(b5A6F zs~7D2bm+cuaCWX$R`2I$cD`rR4-K4N^Q|~8a$b97=&-nVAw4(VIG?Vq4un11oh~p| zb(4Wh15yf?Au=Oix_S|9bTsR`%?!$bnX;slTbCa$Is2fRZEJFN{y zxCBT_YjPbQXr=3A_`fehG8+HoBMl2-=im%sr63F#mFT{*#(kCNu>2-qKrN(^thF3w z*s)-NOGU6+Vx*+44gbgmC-l-%HU3ZQss&TWFQgw{5r6UCbVK~nB_MS!R0&fnU?^dx z1P=v>+)=b?4vh)jz%ea^GC-IYBo~rlLjxrWz=@}-39goO%Q}DizVx$cEi?tDw%&=( z!VT#!(-r&2Ako3pMLh7|sX4zWEZ0t3WF3xS)4^w;?E=|?fWLq&G6AxPgJdlPxfcqG z1r(|9DOyzb|7?BtZYj*-4cDa`J6C-$y<{Yz@f*D6)1oY`E)fOPlB38qr@%zTP;|l_ zRRVHED+afypca@w2%tD9SQW-0ji^VhEBJdL_`zs znXilhmUeeP=6^}ME_L{EVjQb3|MfX1X!6USYz)OC1K7jT35zYFu#GvC2<`wPQgD$H z&u8!={$slKkWjh;1yvYL2VTsvD$}JS_^s383tvmOb{6kRvqn#Mq@w;%!%uR_19b*= zP$?AABlWQeZZx*6j(=oHk?lYmQ3Nee`5GXGyCago_68d24q5wJ+~9HFRmrA!z6sl$akIYGBK){r{6h+5sqkHURwrxa^uqp7?c1^`e)!R>zk2bU 
z{(-3gm4mlVPf<~r^TDk2D!fB$LX?8>!EaHEZ+E1 zI#@k%H#Q7XfkAp9LxM0DaNhyu(?Bes*$pfckl!J>0%vREEC`E3t&~a$cob*jfiGsm z8SuLr{rVZ5kNq~?zala5vQ}m$r6)l`q?fQQB;=?Df+;9|bSP3RjG?y$uV<}L46z<& zgIWniuQ6l9m$e&VuOZms$61mV_{7$<1DdZ;#Hbg4OCZOI_eQL*`_`%f9& z+Mc_%2}F3=oppY_JiBwSrO^X1+4cLDk4BH04{R)35^(5OI;v`t&_-?srcq| zv!S!xRBLu-Y42azS7co4ak`WW)W)suqw#LrbQOBtM6+`*3y1nIWViE6+zZv`x^r;u zo3f4Z=3gbJR`2D#ao0K7^6CS8TW9;Z*?*>IzT!E%d(ie{4w8H9BS z%8BfED~|^Zv2SN~ZM^s`*_!ym$`pu&7k1?ykMf2el1t#mDFs(nuncHXozlZ{@y*e!kMJ(p#-Bysl- zlilY-5tdRGb=Gb%a~vWh=V&y9bS3o;mI_EHl!aEs8`cPPT&>&TP%j8b(T@fxjWPWa_P`YU zS1`z{D|1Kp(siy43I1nUPv`Mn+0ir}_(puh% zD0o*R5&>uwfKm_zf|5W)fM5lkLcM7MIfiHi89oMqR(FgKzb3`GeBbLjE3e9~Srs?$ zM_%ypW;WKjtyYx#fC86C$`rBsdu}@@P+waYWYOLK1tVIn@uFX*0YD=U#98nJ74)#_REbG|Jkld9E}wS!*Rla$lZhg z5Lx08Bs?=HTH$bqAz^ZeFgTMKR-Ag(Sc*sigXvYbleYIaHuTiSnc!q^Bwlt_+S_@5 zf8(y?1T>zpwn2O1`VEbr$8U@@j>da_k-l}Zp!EqJj&tDC>%auGhX=CwyF_jYdk=RI zFs}}oGuYP{WE(J3A_PR2aPoBtwAjb_hCZ6#0g#@)3c>j;N$ z$KeS&Ti}cg=O@Uuq4)@nmT;8B5SipqI*ec@C6(wAq?!=(3`ZP{YE@W4ad+g&!_~fL z2LE54xIPK1ZhPjU&SR%H{^-iir><{Yv!=7{#>P#{;^L&h+8Nxk=~n?TxE@ F{|Dcy9gF|~ delta 4227 zcmZ`+dyHOXna}yonYIiU+o?>4p|#UWxI|mt`^DYP6p)ojS)nCrB&NJq9hi1zXQthh zY!+BIQ6#c-58-TdvqsSn39Hr5%~?^571Urwp8hY~anZlK*}t}a?2+;2>3fPj{k@OeHu7Lm zp4Z$rT3p`W`uIrEfBb8gtZCl$(Q>4}`lhWjCYWlMqCq?##lE{hEfm+G4rJReH zG!p`q=D;%#ctPhx73bVIF1^i45UU7pic6~v%{z|uURVF}S<|dJQf%p8|MO4w`X`T9 zuRm|=n};X1&gY=4w)NQmTv>ncrSCN_T~@B^&%ON4qW`1+y|<|I$*KC$lW(kld~#d; z;#X+>wO5AvKmGj+#lQW1u~;^1R~4(;iJ{`hYue{8DE_Nh8SusJ3u}vKhT2bWD6S>V ziTjG-<_~WxM(X3A-Oyb6wqm@!;ksgWb9?mt#W24h9M~IXXJ%4ZoSxmkP(O3q8|uyP zy=F)BEIT0?BP#ins0qT$z?c@%XBrdL1gB!O@FE70iA&B#LW9#biRSpjy|F9e^upYX zI}~TbpxHySx2M4`ccl5*JU_c{`JvgotGDT#X;d;Z8>Zu_8m1_B!F$6swN&R|HTB7; z91M{<2Yen3-No``p}M>&5B)hZYv+2fzRI zJU(!KW~IVGQR|nB+PgUK_Alhw`F&k^&F3B}-a1Jmr7mz8lSzW)n4%*j+C-C$mPsZv zAQO$zAx7<-p;0p}tpiS|~g ztm@}4AG@I2WIsmZW~Q<8%K6!lcp}lU6qJ$HMrwJmJQ8E5U@3z+Hpqx0wcZe}wJ=g@ zp<__ai24)t=$)@Ipwr;GJc*Uv|71dQX?}4!EiAwKs-Z2Q=9zuUYzmf2 
z2qu?S>tKWPPEo>Dee^#!-1#bcc4zonlO0fU1%RiI@sCCQ$;8i&$)!oZcX;@gTjpHz z=3TwDg8{BII9Tgcms3hRlX8aI$H+J9XwOE2k(R9j04f8!QgV2-Y~Oxlb$uim(^Kyn_63=dqt+9HJA!a%Ez( zZg=sT*c?pj^cf9iv3H*f^Rr73Tv+U6W-nNqayQoL#lh>Rv-j0vbXMIgmMDJiX$>|z zKV4i{KX==@PQ!Q76pT%lbFH{ZAqE1yMhn6cfRQ@Qt@YVNszid4I;EtLB6S$l(nWye zXM5Ujf2{a!(ZDfoo)VfW#~s0nj&@?tGOSRN1h`t*grqnokzk69(h#D-Dj{ZIH7B7{ zz2)CW-|}jyIDJy5^Ylzw8qlygdQG*lz3X3!&#!JyJY1}6dk+`C8*Ym)71hPfLq94; z+Z{hFp3zmO@AdunxG5!g@Wh)aGEBh;C|LyGNK`t2YBc7YUGyN2gk-~9f?y;IW#Hq; z91Zr zp3{~O_HOMh{o#Rs>ixRc-ulnI>&M&Aw7qlBZGZ4&uU9p5&-7Ne-+8Lne`CAzL~rA^ z_UcXL?dPV@;KWO(GYKq7m7RyCBX&4O1t&TX zVw>l6Z?vBHo1y0G2g=LZrw*1+_Xg{NRKW)V|Mv=WQA}q^;Azry$D5VUgkZ=%VH+<3 zhk@elEM*MROJ8ryGk{HSMoIP`>Z{E`7E850?*bY9IVU`SiMG z&lBZ%v+IUp_Y@cjEmO*&2gNPHPeT#4upG%DQ5u!kuzJWNVT5Y9FBE|p(4QEqn-_N# zOMABA$?{WIG{+vRZkm)rAv55ckPvi^u1LmrIewa$4NFjH{8Kb{>q9*|JyDc01W@3h z<9hSxf0x(QhsO{KRe)IvhT$M>s3=_)$N|t;BV^4cMX92gbeI{ei|L}QJd zVRMd>E0K{qr1pk&X1V?hvs`wm=E%$C>h}9Dl~=58`75Q_-0s<2y>M|ow|L`Z)`3X` z7t0XhAg3TKv4~k(gvh-Ho1I3QaY$7VSf#iFz>G{xSQS%qSRv#d|Fi0%A-bJxCpEch zydC@V>R*fI=|{_p+F$Od#)d9jGST6KX?_7Ab7IHDR=nJ7`FXi}$}rBL8X>xbhNMa) zLuDev4v3b-TaHL4xS)h&c&JWL8%P>*c2Z2J=EX~^vF87-u6|W@n>6qEY0po26{sMJ zk)&ZBkh;KPN)qmuA#O%E+&W~=fx%u7OPtIk3X3oq5rCBMB2Dw$HPsKBfA}yU>8Rl$ zBjYCzdjQQM4S2sxaSC@41to>jWoH%cE<_?h5fhB0xWFU`Pknbay2>ri?wbzHkz?g} zyZ_qiLqqMoyQ;UmYX~L7=XYLLxT#2uh58UgY-yu&i5Ykl3dSg`r;MxMQpa41h{h0a z@o54Pz(*A=Z^)kNqN+==&EadS_04ymLg`%)&5(Y$8w(tlEn$*_z#&{AfS_y0BeO11 zpeaHBGDZg2N*{uiK{mxgby54|L)GnTtImb$@liLaz-=&6c%rR`6+mDpEHX>MBX*)1 zaD@3%tePmel!7Kg5TJ+&5}KRFiVe`3HCIk-J&O}|J8gqKP9Ize<9WXRt9v#s6|wH( z(fW$f-BZj2Y^?=_I|m39g}7(Rvh*MzPdvhrNEd0ncZdTKZbzg9!>tk4+ve`0y@6C* zwjQM%+7BWg2#W{iQ+H40&E0jevAyz()qSg~?yk~2J~H5F3KZIxwAHb5Nni=j3Z4=} z1XYdTKwHEe2w1|dngiF~iEJUza39jRZgrq^;n~?<~t+R?n#myVCJ~u z8Y?P=6b(+oMj?7u#kQ8v(1h5nc6rFG4(m;u= zkN)lG8_t?nW|97unqJ*Cji*7+8-Lr|Gii0m66u$Mrvzsv1cmC5fCwRjQikKg9W2AO h+FD|$RUzZvWo, Service>), +} + +/// Cloud services are a optional feature that allows you to interact with the cloud services 
+/// of Spacedrive. +/// They're optional in two different ways: +/// - The cloud services depends on a user being logged in with our server. +/// - The user being connected to the internet to begin with. +/// As we don't want to force the user to be connected to the internet, we have to make sure +/// that core can always operate without the cloud services. +#[derive(Debug, Clone)] +pub struct CloudServices { + client_state: Arc>, + get_cloud_api_address: Url, + http_client: reqwest::Client, + domain_name: String, +} + +impl CloudServices { + /// Creates a new cloud services client that can be used to interact with the cloud services. + /// The client will try to connect to the cloud services on a best effort basis, as the user + /// might not be connected to the internet. + /// If the client fails to connect, it will try again the next time it's used. + pub async fn new( + get_cloud_api_address: impl IntoUrl, + domain_name: String, + ) -> Result { + let http_client_builder = reqwest::Client::builder().timeout(Duration::from_secs(3)); + + #[cfg(not(debug_assertions))] + { + builder = builder.https_only(true); + } + + let http_client = http_client_builder.build().map_err(Error::HttpClientInit)?; + let get_cloud_api_address = get_cloud_api_address + .into_url() + .map_err(Error::InvalidUrl)?; + + let client_state = match Self::init_client( + &http_client, + get_cloud_api_address.clone(), + domain_name.clone(), + ) + .await + { + Ok(client) => Arc::new(RwLock::new(ClientState::Connected(client))), + Err(e) => { + warn!( + ?e, + "Failed to initialize cloud services client; \ + This is a best effort and we will continue in Not Connected mode" + ); + Arc::new(RwLock::new(ClientState::NotConnected)) + } + }; + + Ok(Self { + client_state, + get_cloud_api_address, + http_client, + domain_name, + }) + } + + async fn init_client( + http_client: &reqwest::Client, + get_cloud_api_address: Url, + domain_name: String, + ) -> Result, Service>, Error> { + let cloud_api_address = 
http_client + .get(get_cloud_api_address) + .send() + .await + .map_err(Error::FailedToRequestApiAddress)? + .error_for_status() + .map_err(Error::AuthServerError)? + .text() + .await + .map_err(Error::FailedToExtractApiAddress)? + .parse::()?; + + let crypto_config = { + #[cfg(debug_assertions)] + { + struct SkipServerVerification; + impl rustls_old::client::ServerCertVerifier for SkipServerVerification { + fn verify_server_cert( + &self, + _end_entity: &rustls_old::Certificate, + _intermediates: &[rustls_old::Certificate], + _server_name: &rustls_old::ServerName, + _scts: &mut dyn Iterator, + _ocsp_response: &[u8], + _now: std::time::SystemTime, + ) -> Result { + Ok(rustls_old::client::ServerCertVerified::assertion()) + } + } + + rustls_old::ClientConfig::builder() + .with_safe_defaults() + .with_custom_certificate_verifier(Arc::new(SkipServerVerification)) + .with_no_client_auth() + } + + #[cfg(not(debug_assertions))] + { + rustls_old::ClientConfig::builder() + .with_safe_defaults() + .with_no_client_auth() + } + }; + + let client_config = ClientConfig::new(Arc::new(crypto_config)); + + let mut endpoint = Endpoint::client("[::]:0".parse().expect("hardcoded address")) + .map_err(Error::FailedToCreateEndpoint)?; + endpoint.set_default_client_config(client_config); + + // TODO(@fogodev): It's possible that we can't keep the connection alive all the time, + // and need to use single shot connections. I will only be sure when we have + // actually battle-tested the cloud services in core. + Ok(Client::new(RpcClient::new(QuinnConnection::new( + endpoint, + cloud_api_address, + domain_name, + )))) + } + + /// Returns a client to the cloud services. + /// + /// If the client is not connected, it will try to connect to the cloud services. + /// Available routes documented in + /// [`sd_cloud_schema::Service`](https://github.com/spacedriveapp/cloud-services-schema). 
+ pub async fn client(&self) -> Result, Service>, Error> { + if let ClientState::Connected(client) = &*self.client_state.read().await { + return Ok(client.clone()); + } + + // If we're not connected, we need to try to connect. + let client = Self::init_client( + &self.http_client, + self.get_cloud_api_address.clone(), + self.domain_name.clone(), + ) + .await?; + *self.client_state.write().await = ClientState::Connected(client.clone()); + + Ok(client) + } +} + +#[cfg(test)] +mod tests { + use sd_cloud_schema::{auth, devices}; + + use super::*; + + #[tokio::test] + async fn test_client() { + let response = CloudServices::new( + "http://localhost:9420/cloud-api-address", + "localhost".to_string(), + ) + .await + .unwrap() + .client() + .await + .unwrap() + .devices() + .list(devices::list::Request { + access_token: auth::AccessToken("invalid".to_string()), + }) + .await + .unwrap(); + + assert!(matches!( + response, + Err(sd_cloud_schema::Error::Client( + sd_cloud_schema::error::ClientSideError::Unauthorized + )) + )) + } +} diff --git a/core/src/lib.rs b/core/src/lib.rs index bac56e73f..57bfdd11e 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -6,6 +6,7 @@ use crate::{ location::LocationManagerError, }; +use sd_core_cloud_services::CloudServices; use sd_core_heavy_lifting::{media_processor::ThumbnailKind, JobSystem}; use sd_core_prisma_helpers::CasId; @@ -80,6 +81,7 @@ pub struct Node { pub http: reqwest::Client, pub task_system: TaskSystem, pub job_system: JobSystem>, + pub cloud_services: Arc, #[cfg(feature = "ai")] pub old_image_labeller: Option, } @@ -128,6 +130,25 @@ impl Node { let (old_jobs, jobs_actor) = old_job::OldJobs::new(); let libraries = library::Libraries::new(data_dir.join("libraries")).await?; + let (get_cloud_api_address, cloud_services_domain_name) = { + #[cfg(debug_assertions)] + { + ( + std::env::var("SD_CLOUD_API_ADDRESS_URL") + .unwrap_or_else(|_| "http://localhost:9420/cloud-api-address".to_string()), + 
std::env::var("SD_CLOUD_API_DOMAIN_NAME") + .unwrap_or_else(|_| "localhost".to_string()), + ) + } + #[cfg(not(debug_assertions))] + { + ( + "https://auth.spacedrive.com/cloud-api-address".to_string(), + "api.spacedrive.com".to_string(), + ) + } + }; + let task_system = TaskSystem::new(); let (p2p, start_p2p) = p2p::P2PManager::new(config.clone(), libraries.clone()) @@ -149,6 +170,9 @@ impl Node { )), http: reqwest::Client::new(), env, + cloud_services: Arc::new( + CloudServices::new(&get_cloud_api_address, cloud_services_domain_name).await?, + ), #[cfg(feature = "ai")] old_image_labeller: OldImageLabeler::new( YoloV8::model(image_labeler_version)?, @@ -441,6 +465,8 @@ pub enum NodeError { Logger(#[from] FromEnvError), #[error(transparent)] JobSystem(#[from] sd_core_heavy_lifting::JobSystemError), + #[error(transparent)] + CloudServices(#[from] sd_core_cloud_services::Error), #[cfg(feature = "ai")] #[error("ai error: {0}")] diff --git a/crates/ai/Cargo.toml b/crates/ai/Cargo.toml index a1cb437e6..edb1a36ea 100644 --- a/crates/ai/Cargo.toml +++ b/crates/ai/Cargo.toml @@ -36,12 +36,12 @@ thiserror = { workspace = true } tokio = { workspace = true, features = ["fs"] } tokio-stream = { workspace = true } tracing = { workspace = true } +url = { workspace = true } uuid = { workspace = true, features = ["v4", "serde"] } # Note: half and ndarray version must be the same as used in ort half = { version = "2.1", features = ['num-traits'] } ndarray = "0.15" -url = '2.5.0' # Microsoft does not provide a release for osx-gpu. See: https://github.com/microsoft/onnxruntime/releases # "gpu" means CUDA or TensorRT EP. Thus, the ort crate cannot download them at build time. 
From acda40ba78b757794f5639fa78c39c972bab5832 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Sat, 3 Aug 2024 21:01:56 +0300 Subject: [PATCH 022/218] Use `secureTextEntry` for password fields --- .../settings/client/AccountSettings/Login.tsx | 7 +++--- .../client/AccountSettings/Register.tsx | 11 ++++---- .../client/AccountSettings/ShowPassword.tsx | 25 +++++++++++++------ 3 files changed, 26 insertions(+), 17 deletions(-) diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx index d8ce39a83..989fa4e39 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx @@ -9,6 +9,7 @@ import { toast } from '~/components/primitive/Toast'; import { tw } from '~/lib/tailwind'; import { useNavigation } from '@react-navigation/native'; import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; +import ShowPassword from './ShowPassword'; async function signInClicked(email: string, password: string, navigator: SettingsStackScreenProps<'AccountProfile'>['navigation']) { try { @@ -121,12 +122,12 @@ const Login = () => { placeholder="Password" style={tw`w-full`} onChangeText={field.onChange} + secureTextEntry={!showPassword} /> - {/* FIXME: Fix positioning of button */} - {/* */} + /> )} /> diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx index b3d41a3dc..277dde897 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx @@ -122,11 +122,8 @@ const Register = () => { placeholder="Password" style={tw`w-full`} onChangeText={field.onChange} + secureTextEntry={!showPassword} /> - {/* */} )} /> @@ -145,11 +142,13 @@ const Register = () => 
{ placeholder="Confirm Password" style={tw`w-full`} onChangeText={field.onChange} + secureTextEntry={!showPassword} /> - {/* */} + plural={true} + /> )} /> diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx index e203c9a4a..068a948cb 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx @@ -1,21 +1,30 @@ import { Eye, EyeClosed } from 'phosphor-react-native'; +import { Text, View } from 'react-native'; import { Button } from '~/components/primitive/Button'; import { tw } from '~/lib/tailwind'; interface Props { showPassword: boolean; setShowPassword: (value: boolean) => void; + plural?: boolean; } -const ShowPassword = ({ showPassword, setShowPassword }: Props) => { +const ShowPassword = ({ showPassword, setShowPassword, plural }: Props) => { return ( - + + + ); }; From c930b417efabde0a9a5161d3d0458f6e521f0af9 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Sat, 3 Aug 2024 21:25:47 +0300 Subject: [PATCH 023/218] Update ShowPassword.tsx --- .../screens/settings/client/AccountSettings/ShowPassword.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx index 068a948cb..5da608d46 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx @@ -14,7 +14,7 @@ const ShowPassword = ({ showPassword, setShowPassword, plural }: Props) => { -
))}
diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index 4c62391d6..6145a7dca 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -8,9 +8,13 @@ export type Procedures = { { key: "buildInfo", input: never, result: BuildInfo } | { key: "cloud.devices.get", input: DeviceGetRequest, result: Device } | { key: "cloud.devices.list", input: DeviceListRequest, result: Device[] } | + { key: "cloud.libraries.get", input: LibraryGetRequest, result: Library } | + { key: "cloud.libraries.list", input: LibraryListRequest, result: Library[] } | { key: "cloud.library.get", input: LibraryArgs, result: null } | { key: "cloud.library.list", input: never, result: null } | - { key: "cloud.locations.list", input: never, result: CloudLocation[] } | + { key: "cloud.locations.list", input: never, result: Core_CloudLocation[] } | + { key: "cloud.new_locations.get", input: LocationGetRequest, result: CloudLocation } | + { key: "cloud.new_locations.list", input: LocationListRequest, result: CloudLocation[] } | { key: "ephemeralFiles.getMediaData", input: string, result: MediaData | null } | { key: "files.get", input: LibraryArgs, result: ObjectWithFilePaths2 | null } | { key: "files.getConvertibleImageExtensions", input: never, result: string[] } | @@ -67,11 +71,17 @@ export type Procedures = { { key: "cloud.bootstrap", input: AccessToken, result: null } | { key: "cloud.devices.delete", input: DeviceDeleteRequest, result: null } | { key: "cloud.devices.update", input: DeviceUpdateRequest, result: null } | + { key: "cloud.libraries.create", input: LibraryArgs, result: null } | + { key: "cloud.libraries.join", input: string, result: null } | + { key: "cloud.libraries.update", input: LibraryUpdateRequest, result: null } | { key: "cloud.library.create", input: LibraryArgs, result: null } | { key: "cloud.library.join", input: string, result: null } | { key: "cloud.library.sync", input: LibraryArgs, result: null } | - { key: "cloud.locations.create", 
input: string, result: CloudLocation } | - { key: "cloud.locations.remove", input: string, result: CloudLocation } | + { key: "cloud.locations.create", input: string, result: Core_CloudLocation } | + { key: "cloud.locations.remove", input: string, result: Core_CloudLocation } | + { key: "cloud.new_locations.create", input: LocationCreateRequest, result: null } | + { key: "cloud.new_locations.delete", input: LocationDeleteRequest, result: null } | + { key: "cloud.new_locations.update", input: LocationUpdateRequest, result: null } | { key: "ephemeralFiles.copyFiles", input: LibraryArgs, result: null } | { key: "ephemeralFiles.createFile", input: LibraryArgs, result: string } | { key: "ephemeralFiles.createFolder", input: LibraryArgs, result: string } | @@ -183,7 +193,7 @@ export type ChangeNodeNameArgs = { name: string | null; p2p_port: Port | null; p export type Chapter = { id: number; start: [number, number]; end: [number, number]; time_base_den: number; time_base_num: number; metadata: Metadata } -export type CloudLocation = { id: string; name: string } +export type CloudLocation = { pub_id: LocationPubId; name: string; device: Device | null; library: Library | null; created_at: string; updated_at: string } export type Codec = { kind: string | null; sub_kind: string | null; tag: string | null; name: string | null; profile: string | null; bit_rate: number; props: Props | null } @@ -217,6 +227,8 @@ export type ConvertImageArgs = { location_id: number; file_path_id: number; dele export type ConvertibleExtension = "bmp" | "dib" | "ff" | "gif" | "ico" | "jpg" | "jpeg" | "png" | "pnm" | "qoi" | "tga" | "icb" | "vda" | "vst" | "tiff" | "tif" | "hif" | "heif" | "heifs" | "heic" | "heics" | "avif" | "avci" | "avcs" | "svg" | "svgz" | "pdf" | "webp" +export type Core_CloudLocation = { id: string; name: string } + export type CreateEphemeralFileArgs = { path: string; context: EphemeralFileCreateContextTypes; name: string | null } export type CreateEphemeralFolderArgs = { 
path: string; name: string | null } @@ -411,6 +423,8 @@ export type Label = { id: number; name: string; date_created: string | null; dat export type LabelWithObjects = { id: number; name: string; date_created: string | null; date_modified: string | null; label_objects: { object: { id: number; file_paths: FilePath[] } }[] } +export type Library = { pub_id: LibraryPubId; name: string; original_device: Device | null; created_at: string; updated_at: string } + /** * Can wrap a query argument to require it to contain a `library_id` and provide helpers for working with libraries. */ @@ -442,10 +456,18 @@ export type LibraryConfigVersion = "V0" | "V1" | "V2" | "V3" | "V4" | "V5" | "V6 export type LibraryConfigWrapped = { uuid: string; instance_id: string; instance_public_key: RemoteIdentity; config: LibraryConfig } +export type LibraryGetRequest = { access_token: AccessToken; pub_id: LibraryPubId; with_device: boolean } + +export type LibraryListRequest = { access_token: AccessToken; with_device: boolean } + export type LibraryName = string export type LibraryPreferences = { location?: { [key in string]: LocationSettings }; tag?: { [key in string]: TagSettings } } +export type LibraryPubId = string + +export type LibraryUpdateRequest = { access_token: AccessToken; pub_id: LibraryPubId; name: string } + export type LightScanArgs = { location_id: number; sub_path: string } export type ListenerState = { type: "Listening" } | { type: "Error"; error: string } | { type: "NotListening" } @@ -461,6 +483,16 @@ export type Location = { id: number; pub_id: number[]; name: string | null; path */ export type LocationCreateArgs = { path: string; dry_run: boolean; indexer_rules_ids: number[] } +export type LocationCreateRequest = { access_token: AccessToken; pub_id: LocationPubId; name: string; library_pub_id: LibraryPubId; device_pub_id: DevicePubId } + +export type LocationDeleteRequest = { access_token: AccessToken; pub_id: LocationPubId } + +export type LocationGetRequest = { 
access_token: AccessToken; pub_id: LocationPubId; with_library: boolean; with_device: boolean } + +export type LocationListRequest = { access_token: AccessToken; with_library: boolean; with_device: boolean } + +export type LocationPubId = string + export type LocationSettings = { explorer: ExplorerSettings } /** @@ -473,6 +505,8 @@ export type LocationSettings = { explorer: ExplorerSettings } */ export type LocationUpdateArgs = { id: number; name: string | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; indexer_rules_ids: number[]; path: string | null } +export type LocationUpdateRequest = { access_token: AccessToken; pub_id: LocationPubId; name: string } + export type LocationWithIndexerRule = { id: number; pub_id: number[]; name: string | null; path: string | null; total_capacity: number | null; available_capacity: number | null; size_in_bytes: number[] | null; is_archived: boolean | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; date_created: string | null; instance_id: number | null; indexer_rules: IndexerRule[] } export type MaybeUndefined = null | T From f3bbee09e0ff0592fd7a6c22bac46471cf747ed3 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 7 Aug 2024 18:16:06 +0300 Subject: [PATCH 031/218] Add create library route to `libraries.rs` --- core/src/api/cloud/libraries.rs | 46 ++++++++++++++------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/core/src/api/cloud/libraries.rs b/core/src/api/cloud/libraries.rs index 609d24e53..b821f248d 100644 --- a/core/src/api/cloud/libraries.rs +++ b/core/src/api/cloud/libraries.rs @@ -42,33 +42,27 @@ pub fn mount() -> AlphaRouter { }) }) .procedure("create", { - R.with2(library()) - .mutation(|(node, library), _: ()| async move { - // let node_config = node.config.get().await; - // let cloud_library = 
sd_cloud_api::library::create( - // node.cloud_api_config().await, - // library.id, - // &library.config().await.name, - // library.instance_uuid, - // library.identity.to_remote_identity(), - // node_config.id, - // node_config.identity.to_remote_identity(), - // &node.p2p.peer_metadata(), - // ) - // .await?; - // node.libraries - // .edit( - // library.id, - // None, - // MaybeUndefined::Undefined, - // MaybeUndefined::Value(cloud_library.id), - // None, - // ) - // .await?; + #[derive(Debug, serde::Serialize, serde::Deserialize, specta::Type)] + struct LibrariesCreateArgs { + access_token: AccessToken, + device_pub_id: devices::PubId, + } - invalidate_query!(library, "cloud.library.get"); - // invalidate_query!(library, "cloud.library.get"); - debug!("TODO: Functionality not implemented"); + R.with2(library()) + .mutation(|(node, library), args: LibrariesCreateArgs | async move { + let req = libraries::create::Request { + name: library.config().await.name.to_string(), + access_token: args.access_token, + pub_id: library.id, + device_pub_id: args.device_pub_id, + }; + super::handle_comm_error( + try_get_cloud_services_client!(node)? 
+ .libraries() + .create(req) + .await, + "Failed to create library;", + )??; Ok(()) }) From fa7de373feee6ba20dab1e6002776704fd537150 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 7 Aug 2024 18:20:20 +0300 Subject: [PATCH 032/218] Forgot to import types D: --- core/src/api/cloud/libraries.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/api/cloud/libraries.rs b/core/src/api/cloud/libraries.rs index b821f248d..20db0908e 100644 --- a/core/src/api/cloud/libraries.rs +++ b/core/src/api/cloud/libraries.rs @@ -1,6 +1,6 @@ use crate::{api::utils::library, invalidate_query}; use rspc::alpha::AlphaRouter; -use sd_cloud_schema::libraries; +use sd_cloud_schema::{auth::AccessToken, devices, libraries}; use tracing::debug; use uuid::Uuid; From 01cc022f43e511bf52a9226091ba9047318e3481 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 7 Aug 2024 20:00:15 +0300 Subject: [PATCH 033/218] PubId, not Uuid --- core/src/api/cloud/libraries.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/api/cloud/libraries.rs b/core/src/api/cloud/libraries.rs index 20db0908e..a8f1ad446 100644 --- a/core/src/api/cloud/libraries.rs +++ b/core/src/api/cloud/libraries.rs @@ -53,7 +53,7 @@ pub fn mount() -> AlphaRouter { let req = libraries::create::Request { name: library.config().await.name.to_string(), access_token: args.access_token, - pub_id: library.id, + pub_id: libraries::PubId(library.id), device_pub_id: args.device_pub_id, }; super::handle_comm_error( From 41e21a375a018b1ee473d108606ddf832f7b8ed1 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 7 Aug 2024 23:14:56 +0300 Subject: [PATCH 034/218] Forgot library delete D: --- core/src/api/cloud/libraries.rs | 81 +++++---------------------------- 1 file changed, 11 insertions(+), 70 deletions(-) diff --git 
a/core/src/api/cloud/libraries.rs b/core/src/api/cloud/libraries.rs index a8f1ad446..9ee6fb69d 100644 --- a/core/src/api/cloud/libraries.rs +++ b/core/src/api/cloud/libraries.rs @@ -2,7 +2,6 @@ use crate::{api::utils::library, invalidate_query}; use rspc::alpha::AlphaRouter; use sd_cloud_schema::{auth::AccessToken, devices, libraries}; use tracing::debug; -use uuid::Uuid; use crate::{ api::{Ctx, R}, @@ -49,7 +48,7 @@ pub fn mount() -> AlphaRouter { } R.with2(library()) - .mutation(|(node, library), args: LibrariesCreateArgs | async move { + .mutation(|(node, library), args: LibrariesCreateArgs| async move { let req = libraries::create::Request { name: library.config().await.name.to_string(), access_token: args.access_token, @@ -67,75 +66,17 @@ pub fn mount() -> AlphaRouter { Ok(()) }) }) - .procedure("join", { - R.mutation(|node, library_id: Uuid| async move { - // let Some(cloud_library) = - // sd_cloud_api::library::get(node.cloud_api_config().await, library_id).await? - // else { - // return Err(rspc::Error::new( - // rspc::ErrorCode::NotFound, - // "Library not found".to_string(), - // )); - // }; + .procedure("delete", { + R.mutation(|node, req: libraries::delete::Request| async move { + super::handle_comm_error( + try_get_cloud_services_client!(node)? 
+ .libraries() + .delete(req) + .await, + "Failed to delete library;", + )??; - // let library = node - // .libraries - // .create_with_uuid( - // library_id, - // LibraryName::new(cloud_library.name).map_err(|e| { - // rspc::Error::new(rspc::ErrorCode::InternalServerError, e.to_string()) - // })?, - // None, - // false, - // None, - // &node, - // true, - // ) - // .await?; - // node.libraries - // .edit( - // library.id, - // None, - // MaybeUndefined::Undefined, - // MaybeUndefined::Value(cloud_library.id), - // None, - // ) - // .await?; - - // let node_config = node.config.get().await; - // let instances = sd_cloud_api::library::join( - // node.cloud_api_config().await, - // library_id, - // library.instance_uuid, - // library.identity.to_remote_identity(), - // node_config.id, - // node_config.identity.to_remote_identity(), - // node.p2p.peer_metadata(), - // ) - // .await?; - - // for instance in instances { - // crate::cloud::sync::receive::upsert_instance( - // library.id, - // &library.db, - // &library.sync, - // &node.libraries, - // &instance.uuid, - // instance.identity, - // &instance.node_id, - // RemoteIdentity::from_str(&instance.node_remote_identity) - // .expect("malformed remote identity in the DB"), - // instance.metadata, - // ) - // .await?; - // } - - // invalidate_query!(library, "cloud.library.get"); - // invalidate_query!(library, "cloud.library.list"); - - // Ok(LibraryConfigWrapped::from_library(&library).await) - - debug!("TODO: Functionality not implemented. 
Joining will be removed in the future, but for now, it's a no-op"); + debug!("Deleted library"); Ok(()) }) From 381a9e4577b1e0155114cbff104b34b498980cc9 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Thu, 8 Aug 2024 01:46:08 +0300 Subject: [PATCH 035/218] Mocked Device data for UI --- core/src/api/cloud/devices.rs | 129 ++++++++++++++++++++--- interface/app/$libraryId/debug/cloud.tsx | 49 +++++---- packages/client/src/core.ts | 18 ++-- 3 files changed, 149 insertions(+), 47 deletions(-) diff --git a/core/src/api/cloud/devices.rs b/core/src/api/cloud/devices.rs index e95c9663f..e009197e3 100644 --- a/core/src/api/cloud/devices.rs +++ b/core/src/api/cloud/devices.rs @@ -3,22 +3,49 @@ use crate::{ try_get_cloud_services_client, }; -use sd_cloud_schema::devices; +use chrono::DateTime; +use sd_cloud_schema::devices::{self, DeviceOS, PubId}; use rspc::alpha::AlphaRouter; use tracing::debug; +use uuid::Uuid; + +#[derive(Debug, serde::Serialize, serde::Deserialize, specta::Type)] +struct MockDevice { + pub_id: PubId, + name: String, + os: DeviceOS, + storage_size: u64, + created_at: DateTime, + updated_at: DateTime, +} pub fn mount() -> AlphaRouter { R.router() .procedure("get", { - R.query(|node, req: devices::get::Request| async move { - let devices::get::Response(device) = super::handle_comm_error( - try_get_cloud_services_client!(node)? - .devices() - .get(req) - .await, - "Failed to get device;", - )??; + // R.query(|node, req: devices::get::Request| async move { + R.query(|node, _: ()| async move { + // let devices::get::Response(device) = super::handle_comm_error( + // try_get_cloud_services_client!(node)? 
+ // .devices() + // .get(req) + // .await, + // "Failed to get device;", + // )??; + + let device = MockDevice { + name: "Mac Device".to_string(), + pub_id: PubId(Uuid::now_v7()), + // Date: 8th Aug 2024 12:00:00 UTC + created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + .expect("Failed to parse created_at datetime") + .with_timezone(&chrono::Utc), + // Always set to the current time + updated_at: chrono::Utc::now(), + os: DeviceOS::MacOS, + // Always set to 256 GB in bytes (u64) + storage_size: 256 * 1024 * 1024 * 1024, + }; debug!(?device, "Got device"); @@ -26,14 +53,82 @@ pub fn mount() -> AlphaRouter { }) }) .procedure("list", { - R.query(|node, req: devices::list::Request| async move { - let devices::list::Response(devices) = super::handle_comm_error( - try_get_cloud_services_client!(node)? - .devices() - .list(req) - .await, - "Failed to list devices;", - )??; + R.query(|node, _: ()| async move { + // let devices::list::Response(devices) = super::handle_comm_error( + // try_get_cloud_services_client!(node)? 
+ // .devices() + // .list(req) + // .await, + // "Failed to list devices;", + // )??; + + let devices: Vec = vec![ + MockDevice { + name: "Mac Device".to_string(), + pub_id: PubId(Uuid::now_v7()), + // Date: 8th Aug 2024 12:00:00 UTC + created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + .expect("Failed to parse created_at datetime") + .with_timezone(&chrono::Utc), + // Always set to the current time + updated_at: chrono::Utc::now(), + os: DeviceOS::MacOS, + // Randomize between 256 GB and 1 TB in bytes (u64) + storage_size: 256 * 1024 * 1024 * 1024, + }, + MockDevice { + name: "Windows Device".to_string(), + pub_id: PubId(Uuid::now_v7()), + // Date: 8th Aug 2024 12:00:00 UTC + created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + .expect("Failed to parse created_at datetime") + .with_timezone(&chrono::Utc), + // Always set to the current time + updated_at: chrono::Utc::now(), + os: DeviceOS::Windows, + // Randomize between 256 GB and 1 TB in bytes (u64) + storage_size: 256 * 1024 * 1024 * 1024, + }, + MockDevice { + name: "Linux Device".to_string(), + pub_id: PubId(Uuid::now_v7()), + // Date: 8th Aug 2024 12:00:00 UTC + created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + .expect("Failed to parse created_at datetime") + .with_timezone(&chrono::Utc), + // Always set to the current time + updated_at: chrono::Utc::now(), + os: DeviceOS::Linux, + // Always set to 256 GB in bytes (u64) + storage_size: 256 * 1024 * 1024 * 1024, + }, + MockDevice { + name: "Android Device".to_string(), + pub_id: PubId(Uuid::now_v7()), + // Date: 8th Aug 2024 12:00:00 UTC + created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + .expect("Failed to parse created_at datetime") + .with_timezone(&chrono::Utc), + // Always set to the current time + updated_at: chrono::Utc::now(), + os: DeviceOS::Android, + // Always set to 256 GB in bytes (u64) + storage_size: 256 * 1024 * 1024 * 1024, + }, + MockDevice { + name: "iOS Device".to_string(), + 
pub_id: PubId(Uuid::now_v7()), + // Date: 8th Aug 2024 12:00:00 UTC + created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + .expect("Failed to parse created_at datetime") + .with_timezone(&chrono::Utc), + // Always set to the current time + updated_at: chrono::Utc::now(), + os: DeviceOS::IOS, + // Always set to 256 GB in bytes (u64) + storage_size: 256 * 1024 * 1024 * 1024, + }, + ]; debug!(?devices, "Listed devices"); diff --git a/interface/app/$libraryId/debug/cloud.tsx b/interface/app/$libraryId/debug/cloud.tsx index 4f6a55527..9deca7fbb 100644 --- a/interface/app/$libraryId/debug/cloud.tsx +++ b/interface/app/$libraryId/debug/cloud.tsx @@ -21,29 +21,29 @@ const Count = tw.div`min-w-[20px] flex h-[20px] px-1 items-center justify-center export const Component = () => { useRouteTitle('Cloud'); - const authState = auth.useStateSnapshot(); + // const authState = auth.useStateSnapshot(); - const authSensitiveChild = () => { - if (authState.status === 'loggedIn') return ; - if (authState.status === 'notLoggedIn' || authState.status === 'loggingIn') - return ( -
- -
- -

- To access cloud related features, please login -

-
- -
-
- ); + // const authSensitiveChild = () => { + // if (authState.status === 'loggedIn') return ; + // if (authState.status === 'notLoggedIn' || authState.status === 'loggingIn') + // return ( + //
+ // + //
+ // + //

+ // To access cloud related features, please login + //

+ //
+ // + //
+ //
+ // ); - return null; - }; + // return null; + // }; - return
{authSensitiveChild()}
; + return
{Authenticated()}
; }; // million-ignore @@ -53,11 +53,16 @@ function Authenticated() { suspense: true, retry: false }); - const cloudLibraryList = useBridgeQuery(['cloud.library.list'], { + const getCloudDevice = useBridgeQuery(['cloud.devices.get'], { suspense: true, retry: false }); - console.log('[DEBUG] cloudLibraryList', cloudLibraryList); + const cloudDevicesList = useBridgeQuery(['cloud.devices.list'], { + suspense: true, + retry: false + }); + console.log('[DEBUG] fetch cloud device:', getCloudDevice.data); + console.log('[DEBUG] cloudDevicesList', cloudDevicesList.data); const createLibrary = useLibraryMutation(['cloud.library.create']); const { t } = useLocale(); diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index 6145a7dca..42560c8aa 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -6,8 +6,8 @@ export type Procedures = { { key: "auth.me", input: never, result: { id: string; email: string } } | { key: "backups.getAll", input: never, result: GetAll } | { key: "buildInfo", input: never, result: BuildInfo } | - { key: "cloud.devices.get", input: DeviceGetRequest, result: Device } | - { key: "cloud.devices.list", input: DeviceListRequest, result: Device[] } | + { key: "cloud.devices.get", input: never, result: MockDevice } | + { key: "cloud.devices.list", input: never, result: MockDevice[] } | { key: "cloud.libraries.get", input: LibraryGetRequest, result: Library } | { key: "cloud.libraries.list", input: LibraryListRequest, result: Library[] } | { key: "cloud.library.get", input: LibraryArgs, result: null } | @@ -71,8 +71,8 @@ export type Procedures = { { key: "cloud.bootstrap", input: AccessToken, result: null } | { key: "cloud.devices.delete", input: DeviceDeleteRequest, result: null } | { key: "cloud.devices.update", input: DeviceUpdateRequest, result: null } | - { key: "cloud.libraries.create", input: LibraryArgs, result: null } | - { key: "cloud.libraries.join", input: string, result: null } | + { key: 
"cloud.libraries.create", input: LibraryArgs, result: null } | + { key: "cloud.libraries.delete", input: LibraryDeleteRequest, result: null } | { key: "cloud.libraries.update", input: LibraryUpdateRequest, result: null } | { key: "cloud.library.create", input: LibraryArgs, result: null } | { key: "cloud.library.join", input: string, result: null } | @@ -247,10 +247,6 @@ export type Device = { pub_id: DevicePubId; name: string; os: DeviceOS; storage_ export type DeviceDeleteRequest = { access_token: AccessToken; pub_id: DevicePubId } -export type DeviceGetRequest = { access_token: AccessToken; pub_id: DevicePubId } - -export type DeviceListRequest = { access_token: AccessToken } - export type DeviceOS = "Linux" | "Windows" | "MacOS" | "IOS" | "Android" export type DevicePubId = string @@ -423,6 +419,8 @@ export type Label = { id: number; name: string; date_created: string | null; dat export type LabelWithObjects = { id: number; name: string; date_created: string | null; date_modified: string | null; label_objects: { object: { id: number; file_paths: FilePath[] } }[] } +export type LibrariesCreateArgs = { access_token: AccessToken; device_pub_id: DevicePubId } + export type Library = { pub_id: LibraryPubId; name: string; original_device: Device | null; created_at: string; updated_at: string } /** @@ -456,6 +454,8 @@ export type LibraryConfigVersion = "V0" | "V1" | "V2" | "V3" | "V4" | "V5" | "V6 export type LibraryConfigWrapped = { uuid: string; instance_id: string; instance_public_key: RemoteIdentity; config: LibraryConfig } +export type LibraryDeleteRequest = { access_token: AccessToken; pub_id: LibraryPubId } + export type LibraryGetRequest = { access_token: AccessToken; pub_id: LibraryPubId; with_device: boolean } export type LibraryListRequest = { access_token: AccessToken; with_device: boolean } @@ -523,6 +523,8 @@ export type MediaLocation = { latitude: number; longitude: number; pluscode: Plu export type Metadata = { album: string | null; album_artist: string 
| null; artist: string | null; comment: string | null; composer: string | null; copyright: string | null; creation_time: string | null; date: string | null; disc: number | null; encoder: string | null; encoded_by: string | null; filename: string | null; genre: string | null; language: string | null; performer: string | null; publisher: string | null; service_name: string | null; service_provider: string | null; title: string | null; track: number | null; variant_bit_rate: number | null; custom: { [key in string]: string } } +export type MockDevice = { pub_id: DevicePubId; name: string; os: DeviceOS; storage_size: bigint; created_at: string; updated_at: string } + export type NodeConfigP2P = { discovery?: P2PDiscoveryState; port: Port; disabled: boolean; disable_ipv6: boolean; disable_relay: boolean; enable_remote_access: boolean; /** * A list of peer addresses to try and manually connect to, instead of relying on discovery. From 5be1acd9415f71a1f84500a245d12d011110c425 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Thu, 8 Aug 2024 01:46:27 +0300 Subject: [PATCH 036/218] Proper state change for showing logged out page --- interface/app/$libraryId/settings/client/account/index.tsx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index a7a6ea53c..73e5ac4f9 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -29,14 +29,16 @@ export const Component = () => { return data; } _().then((data) => { - if (data.message !== 'unauthorised') { - setUserInfo(data as User); + // Check if data is the same as the user type + if (data.id) { + setUserInfo(data); } else { setUserInfo(null); } }); // eslint-disable-next-line react-hooks/exhaustive-deps }, []); + console.log('[DEBUG] userInfo', userInfo); 
return ( <> Date: Thu, 8 Aug 2024 11:10:44 +0300 Subject: [PATCH 037/218] Working Popout to default browser + Signout button Signout button should be working, and popout is working great --- .../settings/client/account/Tabs.tsx | 28 +++++++++---------- .../settings/client/account/index.tsx | 12 ++++++-- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/Tabs.tsx b/interface/app/$libraryId/settings/client/account/Tabs.tsx index aa518f318..e04f90615 100644 --- a/interface/app/$libraryId/settings/client/account/Tabs.tsx +++ b/interface/app/$libraryId/settings/client/account/Tabs.tsx @@ -1,13 +1,14 @@ import { GoogleLogo, Icon } from '@phosphor-icons/react'; import { Apple, Github } from '@sd/assets/svgs/brands'; +import { open } from '@tauri-apps/plugin-shell'; import clsx from 'clsx'; import { motion } from 'framer-motion'; import { useState } from 'react'; +import { getAuthorisationURLWithQueryParamsAndSetState } from 'supertokens-web-js/recipe/thirdparty'; import { Button, Card, Divider, toast, Tooltip } from '@sd/ui'; import Login from './Login'; import Register from './Register'; -import { getAuthorisationURLWithQueryParamsAndSetState } from 'supertokens-web-js/recipe/thirdparty'; const AccountTabs = ['Login', 'Register'] as const; @@ -31,31 +32,31 @@ const Tabs = () => { Github: async () => { try { const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ - thirdPartyId: "github", + thirdPartyId: 'github', // This is where Github should redirect the user back after login or error. - frontendRedirectURI: "http://localhost:9420/api/auth/callback/github", + frontendRedirectURI: 'http://localhost:9420/api/auth/callback/github' }); // we redirect the user to Github for auth. - window.location.assign(authUrl); + await open(authUrl); } catch (err: any) { if (err.isSuperTokensGeneralError === true) { // this may be a custom error message sent from the API by you. 
toast.error(err.message); } else { - toast.error("Oops! Something went wrong."); + toast.error('Oops! Something went wrong.'); } } }, Google: async () => { try { const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ - thirdPartyId: "google", + thirdPartyId: 'google', // This is where Google should redirect the user back after login or error. // This URL goes on the Google's dashboard as well. - frontendRedirectURI: "http://localhost:9420/api/auth/callback/google", + frontendRedirectURI: 'http://localhost:9420/api/auth/callback/google' }); /* @@ -63,34 +64,33 @@ const Tabs = () => { */ // we redirect the user to google for auth. - window.location.assign(authUrl); + await open(authUrl); } catch (err: any) { if (err.isSuperTokensGeneralError === true) { // this may be a custom error message sent from the API by you. toast.error(err.message); } else { - toast.error("Oops! Something went wrong."); + toast.error('Oops! Something went wrong.'); } } }, Apple: async () => { try { const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ - thirdPartyId: "apple", + thirdPartyId: 'apple', // This is where Apple should redirect the user back after login or error. - frontendRedirectURI: "http://localhost:9420/api/auth/callback/apple", + frontendRedirectURI: 'http://localhost:9420/api/auth/callback/apple' }); - // we redirect the user to Apple for auth. - window.location.assign(authUrl); + await open(authUrl); } catch (err: any) { if (err.isSuperTokensGeneralError === true) { // this may be a custom error message sent from the API by you. toast.error(err.message); } else { - toast.error("Oops! Something went wrong."); + toast.error('Oops! 
Something went wrong.'); } } } diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index 73e5ac4f9..f807b0cff 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -1,5 +1,5 @@ import { useEffect, useState } from 'react'; -import Session from 'supertokens-web-js/recipe/session'; +import Session, { signOut } from 'supertokens-web-js/recipe/session'; import { auth, useBridgeMutation, useBridgeQuery, useFeatureFlag } from '@sd/client'; import { Button, Input, toast } from '@sd/ui'; import { useLocale } from '~/hooks'; @@ -38,7 +38,6 @@ export const Component = () => { }); // eslint-disable-next-line react-hooks/exhaustive-deps }, []); - console.log('[DEBUG] userInfo', userInfo); return ( <> { <> {userInfo !== null && (
-
From f0c71ca74fb9db3c900a6e5994b15ae28be59954 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Thu, 8 Aug 2024 23:21:21 +0300 Subject: [PATCH 038/218] Migrate to Tauri v2 Release Canidate --- apps/desktop/src-tauri/Cargo.toml | 2 +- .../src-tauri/capabilities/migrated.json | 17 +++++++++++ apps/desktop/src-tauri/tauri.conf.json | 27 ++++++++++++++---- pnpm-lock.yaml | Bin 1037025 -> 1061383 bytes 4 files changed, 39 insertions(+), 7 deletions(-) create mode 100644 apps/desktop/src-tauri/capabilities/migrated.json diff --git a/apps/desktop/src-tauri/Cargo.toml b/apps/desktop/src-tauri/Cargo.toml index 1ab1056f6..1c8d3cfb3 100644 --- a/apps/desktop/src-tauri/Cargo.toml +++ b/apps/desktop/src-tauri/Cargo.toml @@ -35,7 +35,7 @@ uuid = { workspace = true, features = ["serde"] } # Specific Desktop dependencies # WARNING: Do NOT enable default features, as that vendors dbus (see below) opener = { version = "0.7.1", features = ["reveal"], default-features = false } -tauri = { version = "=2.0.0-beta.17", features = [ +tauri = { version = "2.0.0-beta", features = [ "macos-private-api", "unstable", "linux-libxdo", diff --git a/apps/desktop/src-tauri/capabilities/migrated.json b/apps/desktop/src-tauri/capabilities/migrated.json new file mode 100644 index 000000000..03fd3578e --- /dev/null +++ b/apps/desktop/src-tauri/capabilities/migrated.json @@ -0,0 +1,17 @@ +{ + "identifier": "migrated", + "description": "permissions that were migrated from v1", + "local": true, + "windows": [ + "main" + ], + "permissions": [ + "path:default", + "event:default", + "window:default", + "app:default", + "resources:default", + "menu:default", + "tray:default" + ] +} \ No newline at end of file diff --git a/apps/desktop/src-tauri/tauri.conf.json b/apps/desktop/src-tauri/tauri.conf.json index f6334dc17..3726b8bdc 100644 --- a/apps/desktop/src-tauri/tauri.conf.json +++ b/apps/desktop/src-tauri/tauri.conf.json @@ -29,7 +29,9 @@ "transparent": 
true, "center": true, "windowEffects": { - "effects": ["sidebar"], + "effects": [ + "sidebar" + ], "state": "followsWindowActiveState", "radius": 9 } @@ -41,7 +43,12 @@ }, "bundle": { "active": true, - "targets": ["deb", "msi", "dmg", "updater"], + "targets": [ + "deb", + "msi", + "dmg", + "updater" + ], "publisher": "Spacedrive Technology Inc.", "copyright": "Spacedrive Technology Inc.", "category": "Productivity", @@ -59,19 +66,27 @@ "files": { "/usr/share/spacedrive/models/yolov8s.onnx": "../../.deps/models/yolov8s.onnx" }, - "depends": ["libc6", "libxdo3", "dbus"] + "depends": [ + "libc6", + "libxdo3", + "dbus" + ] } }, - "macOS": { "minimumSystemVersion": "10.15", "exceptionDomain": null, "entitlements": null, - "frameworks": ["../../.deps/Spacedrive.framework"] + "frameworks": [ + "../../.deps/Spacedrive.framework" + ] }, "windows": { "certificateThumbprint": null, - "webviewInstallMode": { "type": "embedBootstrapper", "silent": true }, + "webviewInstallMode": { + "type": "embedBootstrapper", + "silent": true + }, "digestAlgorithm": "sha256", "timestampUrl": "", "wix": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c282f204c9313ee71b6802e60027a936f4d128ff..956dc8da2a76bf19b79dbe6f5d752652e5d51d2f 100644 GIT binary patch delta 14015 zcmb_@dAJ+Zeds6Mdv&kYYh#0rw`&8&%fy};X(WwEz#i?(Xc>)Wv`_-l%xE8JMjEZz zaI+^QKyM!>Xw=OV&0CIBlAc7vLS~+F(NZeeaLw ze{^T&obx-sef8OoAA9EykDvUgWvM3|#SVcJrq!LNzGC^daREMq$G6xn1*f;#?5DnL zzxDbh=TZD929&3*+ZSE zrdLR$opDcZss!*6HSVPvZM0eN`QusD->*-aov;h2dzbgDXTGxy{G2v#2FPy1+9~Ig zk1l}AZZaOoiRDq=*QgHjR9@y}A6iFHj&U}7;;3Tz|T;VhfGOAbQU8>!YdT%N`5pIn_94&M+a(2&NK(@ zIg-2APyTcZ_y%vj1pKmMSX;mEU*9%+T7h8aBI@^RuCCGuq{m#(Ck;w&wAV!oLlWh^ zleoLmEGE?m8o-NGmrTYIRA(TU%I!c3k^2>);mR~>6QUhssu@nHCX-HYG#M!u!P!m5 zIowmPdwTN%c+K@@JJ?4U4{;I;?M zn@)*8t{MOAri}4o9?sak~c?bE}>eU$PAf})-h!~6rJ*lsgldg{kN{m{z# zgU?zG=jDf_CQ(-a@e)EU!1IOf2$d(jNvB6ehDayJ*WAOzz}YD&b>G;V6|$qLT*vru zx!sNr!nu-99(x9a#EOMdRY*@+HOI+V6x_aS+WlwG>;xZ{%-g^uZn%2=fpZV8Kl=>4 
z!(C>}TzoIEw>imDmB!f9L`Or?Rn-JOF8ejFlhL9vw^?FtqaFa+Uo*zv@wN5skV`HUgBZmx=L9CULf=IuMwY~DVmCWJ3noDUG zeUV%zI$?ztS`SWDMAY&q!Ph#;Rzp$xfHA!c{SMi@bv^ssKY*vI=LvqE-@Po7@e2&j zCYfMx6803Z(O8R+%!CfqI6o$3sYq?0X6b4xK#|_IcS2>nQe7q4RMY24rz5#EnfCKu zEKwhXi$bgwEu^8z|IyM+pLRf~mi0@2cY5|~=_*>I+nJfF8P12eafQ@;S`bf0=|QJf zN2yphUrZr2K3ewsTBG)WtW~Rp2&=im;ZQ8+%{EzNoUaJZAs58CTuF>6i6&aExO$=* z_YSI^+N9xp1=65fH``C0`CZGfe)8F+rT!fDT(OhQ8eJls<$#QJBh7e|3}|f974(y- zbh=8`oo#P0kdrd;zSB2AhTcgU&H4+z4qI<0lC^Lr+v=kXq4_Dv8y$DLLmv+njec3f z;=jLtJ^p+91)U19T3k_y^q@$Ni`9CBFLY{+8gyRyaaBtcd{9>-!H(Q6y1HIJG-Qbw zCHM+Nw~-*dgG`(i&|%+AlOA71lHJvMsZ0uGz@T%RY+YY`{_B=LHQW6_uR;1~ zu3u@A!+6m55+}j?nuhzq`l6j&?bD41i_0Nrma=A`X>y^EEpX5QiR4Ei~VwJd5&t$na9rdBr3Zx`m^<$|z zQ-FjEWLw$3YZ~kC`Z`BmF;3;{@aAzP0r5@TQ!hPZb*k$nbv*tB$=taX-`fJ;U zPc?w;3BzRu9NfMI#$^#SiE6P%vX`UAp?tPVSDAQHj-(1sGB)U+q7F4FLa#1=Q z^WaxZ{K|=TF(i=) zn@LJ)4HGM6sp}?DjBQiC{+Lxu^$w?vrv)GC3{NL@*4NK7RX2-8;vtMm_pn1P1pJ&KwJ6aJd zo}M!thz4AJSrgM#r5E?dhAy|J@aRPGRpcTUuOef)qzrt)Y%V6n`FgklnAezhgPWZT zSAvr%PO+Otp9x{U$-fl539><}zn_YraTBRJ)2p40eKHu>mk!ccZ zu?1w{O0smI=#xdH9gC;Ci8h)+YVmM&9M>e&gK>RPAp;W8OZUl$IN%gElag>zj7)+# zybm`2t8ovwIc}uR1K+9td(s$M1bcsR9(gW6wupKsR1w_avOFj;K4&MFR>%S>HUdSr zr@-Ypkp4a{H;6a1z_gr7MJc{7%5Ee|2gDf3(oCPu$9fSLsfl88O!M8U2Go?Xa1qY? 
zbS!Ipn-M&F+-L`PPApq5LcA1gCgX7?+n%&Y3Qv^60RgE5D*Z{XgO73}F{QaZVXrn4 zt6a4ck&NzAGDw3a(Pw?c)#>8VWx|FO0ZSAvfanAnkCl+|YP6 zMU-moU^J8pmXlPrn25yr(qP9sW&ZT6Y8+ij%1^@hwg+n|S@^;5{hNeYdZBX&Vp+BkzRm_va!k%zvs*RGV z?66pCN(}3lk^)|epc+MZL!+uV7`6rsOO};ND~YxhTFLjz0jbz8@HPGQf46YTw<2_V zo#Q0TIPZ!q9pp4YiA-X?X{9mE`Qk9)=@vtT*UNk4T)QwFLaT#CTx=><3^~h+j7Hq` zfxnC+3DU<$r0%d%Lz)vI(5u$lE-E+<_0og1Cl1cOVQ~+5=(?pH`le?VuF5W!1*s3t z{nZM>O_)ldmPXTTi4Qts{RZ9cbrEm0>S1s)6z?V2P+#mOrwJ*OC`U+Q6L=Wn=zu852mW+0hz6r5I@eKJl*ADxt{FGwlkgyt`#HsSh|eFI)h>(8!2%9 zLYM4NxSEio$qDU88$45U=W>J;2KW5$s< zj3YCY=B`f2agFn#%~V>b^CU5<#>PDgYiAk1w;RZj&0Yr`0mD|~4sd72WCAyTVacRF z-&(XR0OwyX9lXBmN{AVXN@Xgk5K-cWzPg7NT5Q`lrTfKnhDiG5dMwk*mZiz0q9Hwk ztJUeDii}5Sal{D$yrxdtffh~lM85B$M+2oP6AYxI#jcl3Hm1&3gcCR@GFiL^Elx*T=GXS8-5^W(cXEG+m);HA064 zKUx_0D5}IvX+g*}0z49uxvr~TCS=Ap>a_J+j7vXW1oxSiE&=;am=1DuGTL^Dg?dh? z4#`@QXoaRRPRcQex2lMcJ5G(Ox(L507Ba1=lX8#aZW_%9b1X-NSU_lU$QLKR>0$TEFLT*`BcK~Nx={;J!)uSzgJL$BNYoU1FbSbF#nM5 z*W+wo!jfEyLne4Q;Hq#eN-;HWm+RCJbd>1zS`BZ!DvvZi+TyDjx;?t~T98?_?*)$r z7uWQE+p@HM76RkHT{ytGrmkUF8Yg0*PBR*EO#{hn*Hi7F5I9$n1S;FXs5lwK8cJds>{1_2u&rVfy-BP`=^CKFOuj9F}7Mugwz~M_=73vdqP1* z#8|@BY%t_7R(6fJL=f|;4Xq?rr){ajHeiIO0{p8BJHaEvm91d1e`&vd`0&!V+?%=` zsiQT@?IV@4(CSvJfO>QZDT)QJgwjzy?#a7?A(%Ag8$K2f6{o=v8%kB_Ua?z`I3ozs z9hQ2^B=DQ`=Wbl;uYhlj><-{Z zmJf2{Rx>RL)lMSPV*BZlR&r;iUapVkJaVs+VR|aosUzh;+RghqEq98}%jG^59mu&v zvyp*Th)PLFiz<;sTp+vMcxzfBNft0SEM2bG>r0ng&Fv$-a;GLiAwBWV#Y0@NA5kP% zK1wIZQn5j3`C!c%pSYw-cIxcG0!(n+X^TXq#rQEy5Kz1{@=^It0BiWBS`Y?pjiJU2 z%Q0V~Oiv&ykh(vQO=jWnmHM0eODCN6BU)eRcB+jUbVy#XZ_DBVzC@`#x;?DHeB)iRsv(ndU9y!}6l`E}8HPEF|$ww*` ze~6CpXfYsB`maB}^aKVz^u&@G1}k@c0BU`wtVhltEoUWo3XA&c48hZynv=%turY9! z6n|}sc{#GlYaLPbcvZ22Dt$~Hmh-`qtJ=yT(Y~t(pT)O07sV(=7J(ZSDff{ z*z&Q737(4tG<+0}hr4Jp@6RL)?y)x^4C!(+pNkB-vM?2yf(OqGi|$Fn!}3&Mq;#cm zzDwe-!HSJ(U?lO`QpD$p<5&d?OR!iy=m>Ng?-ty?T(nEc(N=0uDaH|BE#EKp6?cmq z5{-(d8ywg06mk;D3h3G)yrnRY1o>r&nY;YW$=2>5p=d-+LfCyPnE;5X! 
zs}ZqEPve267Y$;Wo}|&$iHamgDJQ0lqGLLb__;!>TlSOv00St~GR%@rF0KIlYtY?Z z`mpH`_{P1a_s>CcZm;UEr(IK~+U3xChA6e^yt@vI0kCLY^%p#TA|a1LCY0NZJ5 z@xC}a9|eo|ne+`HSfogeFuEm2Dl$dlVM$3al0xGN4kg`z$6ZtoV?F;{|1oG$nW<=D@vrD>X(v( z>6ll?Y9yBKcidrDJfZXzGBa(k-V!16WEpKzxJbZ1)3u@;EIUX;jwP5+Y};oI#!wHCykg>Ovxi>r{k~j zC`KVkcfOp<)k+>Q9-6wuX)|5P^d`YhOU-A7u?QW=Xbo`B-HTTN^Zll8Yyc@thCU@! zRMLG)>h;>9S77Ow&c1e zUM`HHTo0j0pI-?jJ+t1=Rr(k1H|fR=D4}A&9iY8cq{2>ns@TFSlXgN4ry}J_wGne? zf`T^gX2xEu8P-T&e2V1iLb@-6Qr#LEuMl(y##o+6D_v}lbGaSAh>cXu4$%E?1~yDL3gW$!x3^FH$g@D~{UJTp<*#Qf#R#_;STeAXOPQQ@IWn ziY8R08%L3t?2fpSr5w>MO^ZXK*cncqWy>@Yukx8GPyV1vaVB({o*RtW-J%t0H0FfTWDrmz?V`I?&igBvJa+nNbX-Y>gK=WyS6qF5$oex{0_|xqlPF_j|BQh9 zz>W7Aw}Rsj!NB`1Uo<_nfmd6i#=e?HAx>Bg?gylN!c)l6{>so<^>T{;^1Pnc7`j-vCn5L>J zI!I=xK3EKhi|L%Ig`Hh*Bi9sy$z)OFSsym_AS9lfiq0U`4(55K-S=kkdRb&W<53cs z$hAV4$u*N6-ZKh!OXUDI7eDa(hfH7D01^&n8d$+QQp#01m<<&qyj`og^3zamtQG2> zgrbs2Y}Ajq+*zlVoG8v#IHm^0eqHR=2g9I-$5Ay%Wra{i_3~&l)Xr7#)+>p(1n#`o z^cwxA51S%a+XNWVmo=dd{W}JjkHCGb=PAAQyTGDr^$_7O=*yjJl^7<#o#oXDftsYHI0IML)*9A57B zIVMzRGESTryIlF!XzYqJ$ze=Jv{;u|T(dd^>J8g-S=NG`fCP;a=(Y*F^)}glWDq@6^0$YQgD@u z0Z$F)vAtj=JP6C#pz6&8i3!Zl=tPR4JuPRmL-lyF-Ykym-a22WN`6n@7Y#;~5;%dG z4(igEm&0!0++{kLE4YY8lnSOQsbIC|N#|HCMn&^1H5?|GY_UeCJ>r03%Y4-vck-00 zJ(N?$N|%bJ2Xw$$AC!l9Y*Z`7wMj^b2NL`=JWjLVqznzl8>>qeJ^g={5AO%3*H+Bn z@2_9Hn(OjXD}xIOErtYg!C^FC40A$K>nBhQ8_84%y6{9bB~n?L9N?oAUuSFOf|nvi zf*ZuzT+A(zL?bjx*Zba5rkItdoR9N>+usGX8GF&V2(aIqHtD0iD>e~4sI4v=U^8Ba zv@B*|XbfAEy%5e#Xc}#|b9oA58^N?16DX!gOI*_{#Ea8Hw4KdSog`cHDH)02YIqp2_nU`D&?68qf$|nnoT)d%LHn| zM8=x~I_h#dJrl~?4~63in8gG6&8xe>dqc}>df+1~3wu_MX!AaSXP$Bv##%TY9f)I+ zbq3k)pxI1{cwWj6JKjVhAI^r@QZ6accvjJNubTpIK8nsYZ#9bYc z0@OakUU2uC`4iyTJ?2Gl#ho8?LU>qIBZG1pQyR{Aq$!8U za-PfuOQR|09S+7qBrDP=QOhtR##;}%sU!;?Z$lTf*JEDPw`1n;u9wGcZrX0$4W3V! 
ztp?Y+_uE&1f4tLr8MrNL-Vg4k&DSo%QJFeO9kc8M@6VdoW|iM0?BAaJ-Xb`u*;c_d zb<1qyxo=;gKTn$vFTe#%(!6(J)@`_W37=pfKO!1QK*{FWXw;3@^u;w9zpmOG2@Te0XGwg)K_LCsz<=`Q*DlO* z+Bg5eya!(8#Oux5!EF`u<+JqwSIn1Pm@(LN8GLi_cm)nGwNY4shRHYVmBK~mK5V`W zJe4;u!z~y%6SMAwf4IN{E%R=8;1@HNwfXajGv*!hL*I4QaR9uh0Qu(8|F&SpH>O44 z(zAbQ-4AX%W46whow&)e6?AU0T&I8Wdh=t3joFUbo!6gzaRmlH=GFPK*%fEKaq#aC znQagg5`6LW>JEcT&)i_ztN{7rYvzdxL@%&7A+ zU!VE5d1XEM<7F^un76Or_A7^u)Xd*CUYM}>f4~4-n2z|)Y$#$*inW&ciut$#*w!*1 zp8vHx!)L~D_$@Lcoc-mYf*JMT>+SQV|N5EVt%A$jP=t>wkZ>mWH|F1`&QtT z&D*wZh`iDexr5obEUdK0R67B=FeYuzEQ15 z_F6u9?H^7cg4-RI-2i>P<)g3qa_&2pE5NfqvtFVfy3w+Ac{YToAFo;z*zAJ6;uUC0 zAk;U!3EUGhZq-kYEnhOsTe`E$Z&;kOV~YnGp3FxLJHS!axCPwwF3Zv))M|Xb2f#M$ z8o;cU9pI+7T6XJ?OfBCv!1d?1egBP?-4G@Qo`0jo4=Ig-hYwk|K=W9B!FHM6ev{>n z<#`KtY1YF1Lzu5LeUHD@;<^a#nU5J30c^us!SD^1E#QTlEeB>dNP=U>jVt<{Z?{~! z(Vu-4Ca*jd8q^)L8#s(FgMT$73%KbQ&}qQ))8Kx}xLLpEJ(kl3a9`+rBX)UyeMs7$ zzt{4kd1Dmfzi)l5y9fOBt(G)ve&ecQ|%}$97p^Hn7cle#cFFtTz4F zZtJfXAYpO9;Fg_$+-t3azr4q21}4Pn1F1``2Z3Xs)elBjT3-*o`E!Q_{L>ZI_g&a< zxenidWdTOzRMT=Tc=m{u*B?7zT{J=t-G0#ecJP_QRtr$>HLd^$V%-XEK5V@KUJnMI z_Zauyi&&pq0-t59n;@}TaPcyF}MYTzV9i*`dw&f?{{16OY^sPJ!0Lh z|Ku9$_qWb^>E~$cj@j>-v~@eY#O9QFuf9EF-PL`S9773T1IqWUCH=84TF)3>_5FgJ zHvsxQ>xy3ed+UvRU$tm{$nw+H%B#NI^0f6G7rivi27V760@OcQeY)~P>y@ttNd1ZJ zHTu8)$a>eImDw!uOpCxhr3Ei&Z?;{j|8&)+>_0yfym{Ta2Yec{S;0ZDuoe8J+m-@X zY_?r)p9Qq21U>k?yM5Pj7>8hK$mdUQb*z97Wo?)0KXcnwmQA8`-03_D&T%lh_~z?u z4wDBKETN1%@~MTxy8T+){>5b|Tk{*-^mhw~^a^GB;2{`Fl#Px(VD*EmvodkBWrJ3_ zWqZtep))+Q1j}t(bp?K%MKj>@X=uK0)odS!FOJ`Axg2!ImKCVVuT&VQ96P~Y1`7VI zCoD^UOl$NlL)#AzZ*+std~J0vICH1XVZilg?zG*#^y<#-KG@&-l*fMXwLAW>-*g-j`sj=qZ_*!8?X+p$8iG;jj5xYsZ2zyti#j;! z&ec8oci(8gW7{9JS?=;UcIoV&+n?S8UihrtyzGQ}G2_AMZ!N8XM?`2jZ#ZSY`+RT! 
zdl)W2ow6TUoGk+s36=1U+CoZ#SZVL1G4 z&boQy;+;IqYMyaBUI)BigGIitUggl|>jC;(#}44vO*_EN`yJai-t8^g7q|XFukV_@ zv&&xnTgM^rleBg7%mTl4zoWge!E&|Z=mb6!d}1}a5z_S%mS5ImS9Kx2(&ri_Wh14fU;y;289ET;Ku6reAaduc=Q1D zfpdW4Bhcy9|L?-Uifzj*o%W`%RaEYY#y6n(fx%?&~jt z?a*M%Ib#S6;GqrIt%n>(?Jr$z2e`uHfUXS^7hsP;k20s&F^>aIGQoH2nO{K(&sZFg z>Kozz1@d19wjOhApCJ#4zh>=LpyXml$yxMy?ieIhm2@P)Bd1NUx0Frb}^wVC4W#j5Ear%6U_4yV|M=wf@<^nLtLnWNEJ^T>NcfUzG zo|_;4ip_JKeRVWDKj6N!)n(;b z?~!AU{pasLm%giB^7_~4f9rGXFkT>`<>jzi9iGn;|Jz9U)w$m@DrC-MuXDWfBAIwu z_TO=>=#y#re8JDX5|KX}Z6g6|!3G-j{IzjIuB5h)&ct>a$8@^Y;hoc$Zy z9($!X8;+Xk9R?-qp1y6T-c=mjC4NP>t8jlK6wy4EUv;4&MWM*Vg0Nm5otrtaS3sAFRZUvV< zxCchMO2uLavCpk?=a-ywt!@Kf_}uE1a}@NaKeu{?=_M;Z|Ks%O)t%6ZedqM*8uU27 zJH5IKCSLe?y^vmQ+MzoJuahl1^+z9A{etoT-&tD>y7#bj3-DARM d4(v_(N4~y#`@)LchNV`RVd{q-UA=GV{{lliCKvz! delta 3945 zcma)c*_t+~qHWr2&E7O^QYci0O=J)7 z&UJjWGsqy0Lb2)@P!=DTs~}iV?&yGsGs2)C@?6wW2IPJTbG`G&{Bi%ulXK3y<-G5C ze{a@(x#WvIi^q2p5}SRXyF^}I1UGGLX8iC9*F-TH;EDga9XHTv@gO6HaSJ7yI7V-I z7@ieLXU8XZi{a25ih^1VpPh)i)xyO70~6Q%2MzJlW1aB)LSb55XMb=)`KBNpZja*W zarTpa6aAmwJ*fYcasy=V_4Iua5&^ax7O`xaT>e3 zZThNerC!k=bvY^=K4n=$ag96L91CkA{;;R9!xgSE)Ra{f>xzS34^-U|P_7@h*}RbmBk>yJmEZVx^kzWyD~#8+O+ooK(EB7n^=;j<=m_gXlmz1(4p zhaCgmJzNBY0Q(~ZpU1P}AN(T^y(`2PVzBT9JRK&**lw7IWACG*1ooyE9?T)8CPM;m z6k7vb!vFs*@5AcfGL;8TuqiFHfMiov| zxvQ;5UK*X>*kPzwIhC40W3N{24t2KFwravn)y)-UYMZ)7-7@HvSJ+&Na7Tr?r{5`e zwD;E)D_tRzYsk>r8)}Xk;LS4L6!esw$L7Jer-&(N$jH0JgQIh~IM%iE2pmRB2?31# zCqV+YEh@zMk^a*80)O*=2fgRk7YbU1pJ$bg4Eyn=O4xOwOaH~*qQbib6~&1Kk} zHfw#K$5^3_no6y)hCnD_squK`2P&*FRcl9iq{12$o$PcN?&`gjf+rOW2|->TFQp1Bf*NMHn-hAG=#(_`4bf=Diq8zq8f|f zS7P*9m!L-iHLRe>GSnWdtJStTT-L^h;Z~z&u(zyQ?uiA;`-T;rrWSoH7BMQ-I(@IZ zsl#it`a4@=H64+dQLa{nYW!7=&F<=e%AwJO^frajZE<#ZJvzgpMI-RR4Js2pXc6S1 zkKKYrB*?#`vf-MATVIU`SO#7@A}TNk6;0;mn8B$usaqSg?r86jt=>}XuhV)08h4e_ zYVgH62KsFCEqqBOBzr|DR>V)o> zB)MqYYJrpwZ;Qo+W?5USyFNT%Gx)0Oh6a?~p1wYfQx$ABwwYpmrFu_?LEahAl@0p` ze9nqkbI4?j4%K!m-PI05bA4aPHYl2oe zKsB9;?qvy22tZUvr^2<>!d&#qOyNvE+Fl@ZNZ>E4g@xuG3ujI*>ru8T9Nx~xCW9$5 
z?6w9%>K1QXale1i(b?w+s$Inm>ZS@!t3{>s=qvoOj=^5rP@h2=k-Ix=x@KjhqTJ!M zcQ&^L4BpOaHHwr9J4txutfbJ~=g3Lb@@i|R+ba+BxVt@$4xOUk7^>^@%6(yv#^%*1j8-`Fksu$I zoW=x@*UFy`qbq3%+EyvtiNo372@1`+n(&af#-x*J8*7FwW|?Ks*{Sf>DZD0yrK+{jW%5u`3lg!DdMIQ zv_T_YjweOYxn{9-5Bl=I#pNpai$ze_7Ey6k9CL;ni!JSCy_)t?y;o`K?`aL1Ee40r z>az?)>g?JYi(}ZMbSqo^_5ObQKzX#O!{cex_O)nDrSkGvOM6AIw#r)-@rQhUUN2xs zkc}n^C92tIcdLZxMeYg7#%#3ej6|J-T5n4JmVq)T{H<#Agb{xz1wG!2%d!%z+l*3H z;guZdR^j{fSFy~kJG&s2i*TN+SD}s+~L@I1tNjv~uzsJ+y+**7p zMAza2pd7`O@yv_q&^b<|1G@&F4I``ZY|fAvcCN#pj^Djh34d6Rmx6f}2eD_@;51xX zg%fb$7{P$_Jz^#VO*{ctS;PVeJb{0Pu37L-0UY7#{cV!e0XHfp7#E9c% z7nbBC;yC#gFUiNa?j9G(Gx#F7`FniJeNeu&7caOE%9+pOxrhU0cao2r5@<|=#W_S4 zNO$5HaBn{@hHpqrjHEB(n?s1h7bS-g@Q!iKFz;DncX9@S z@1)Xv7}ZkKQ1%$Xiuv-AGE};k2&Uc#+sB89gIEG=6|nR;u^+KVh(-*$4inYz^id*} zFDp^O!J~u*D}%fYDjTI7Bld`s^g|r|4hRs(yy}0!9eQAr*eCekfeFrhOoZU5l@ej) zAocR{k@Fm}bPbI9NddfWB@OVK07;?;9wu*bG+j#8!XsT|CCDBnx1*wNl4}FfUUDrO zx0Ao+ey)QYMkk%5ix2vIb`fmZL26;FgG@uGJmk+z&aMI81 zao*YzAG3ft#S@{se)5(uAuWRO5IKqlBjgf(!aD&Q2FR)KLX4}zrTxS-_&G|x{EJxu zIuau>VNwviw}QNt1}DM{3n$|w1-VZ#`Jmp#Ooz&C^5JfKgen^(IN6u0m=ScHiwXQd)#g7g4*5eo2zSM5V*= zc8Y|?06znbo2d9~(fkrsIhXs#hpFQD`(q@mh*4%_9Hu1V`?JI=k5lsmaAufFgY=X9 zEQlH@Hz-{~;kn zlQ*a(Ld4;ImPo8vkhg4vAYc)Y)8VDn0t&6XP3_0{WhDwEyF-;L(63AB+M)IMB6=L20Qq~j&W44}8u8L)MfR-&z8dPK}UC^<-% z(Pfw_K7MpMJiLrf#T0NRL{Eo5brYFzWr)s!W6S6%;N8Fya5albf#U;&2rO5a9FXSF zB2cslQ()5woz4BXFu|P>`UTL8&@Hgo!MWo6DbV7T^uYzdAuI*<@#!=Oi0P@Q=`c-- z(5~b37Xozn9eQ{gTJ{ zjOB7o>7sy9!*>GF43s5cZu6mQoDx9rNt%L-Go&-&KMpV?m?TVE^78v6Oqm#6V3}np zkn=RxkAKW(lnC;f*CL9Pc3lr=VqpmQs z1WOQ94Bu|x!de_*l6sk_Wrzu32@{!UNsRe{N~p6SyDUmeO70vZXTU@2m>CI?S+FIG ztIeO+Gv^WxeoF9F41VBlmUGWtUKW)P`i*1;?5Tbv=yf)j2ocRvXzRwyq?Jc4`K(h>~q%arcpVDN8}`~S8QCeV>W=>|Ts J&Xw*L{txzZq~QPn From b797610571ac523656a0cf0cd0f1e5ef2151f4e4 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 9 Aug 2024 00:01:55 +0300 Subject: [PATCH 039/218] Formatting --- .../src-tauri/capabilities/migrated.json | 30 
+++++++++---------- apps/desktop/src-tauri/tauri.conf.json | 21 +++---------- 2 files changed, 18 insertions(+), 33 deletions(-) diff --git a/apps/desktop/src-tauri/capabilities/migrated.json b/apps/desktop/src-tauri/capabilities/migrated.json index 03fd3578e..e2006e737 100644 --- a/apps/desktop/src-tauri/capabilities/migrated.json +++ b/apps/desktop/src-tauri/capabilities/migrated.json @@ -1,17 +1,15 @@ { - "identifier": "migrated", - "description": "permissions that were migrated from v1", - "local": true, - "windows": [ - "main" - ], - "permissions": [ - "path:default", - "event:default", - "window:default", - "app:default", - "resources:default", - "menu:default", - "tray:default" - ] -} \ No newline at end of file + "identifier": "migrated", + "description": "permissions that were migrated from v1", + "local": true, + "windows": ["main"], + "permissions": [ + "path:default", + "event:default", + "window:default", + "app:default", + "resources:default", + "menu:default", + "tray:default" + ] +} diff --git a/apps/desktop/src-tauri/tauri.conf.json b/apps/desktop/src-tauri/tauri.conf.json index 3726b8bdc..6a4a45128 100644 --- a/apps/desktop/src-tauri/tauri.conf.json +++ b/apps/desktop/src-tauri/tauri.conf.json @@ -29,9 +29,7 @@ "transparent": true, "center": true, "windowEffects": { - "effects": [ - "sidebar" - ], + "effects": ["sidebar"], "state": "followsWindowActiveState", "radius": 9 } @@ -43,12 +41,7 @@ }, "bundle": { "active": true, - "targets": [ - "deb", - "msi", - "dmg", - "updater" - ], + "targets": ["deb", "msi", "dmg", "updater"], "publisher": "Spacedrive Technology Inc.", "copyright": "Spacedrive Technology Inc.", "category": "Productivity", @@ -66,20 +59,14 @@ "files": { "/usr/share/spacedrive/models/yolov8s.onnx": "../../.deps/models/yolov8s.onnx" }, - "depends": [ - "libc6", - "libxdo3", - "dbus" - ] + "depends": ["libc6", "libxdo3", "dbus"] } }, "macOS": { "minimumSystemVersion": "10.15", "exceptionDomain": null, "entitlements": null, - 
"frameworks": [ - "../../.deps/Spacedrive.framework" - ] + "frameworks": ["../../.deps/Spacedrive.framework"] }, "windows": { "certificateThumbprint": null, From 6e4cbe7a701f23d4ef430863a68af48783b203e8 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 9 Aug 2024 18:25:27 +0300 Subject: [PATCH 040/218] Working signout button Turns out, I forgot an await --- interface/app/$libraryId/settings/client/account/index.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index f807b0cff..236883694 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -48,8 +48,8 @@ export const Component = () => { +
+ ); +}; diff --git a/interface/app/$libraryId/settings/node/libraries/ListItem.tsx b/interface/app/$libraryId/settings/node/libraries/ListItem.tsx index 06a95d443..f48ac9669 100644 --- a/interface/app/$libraryId/settings/node/libraries/ListItem.tsx +++ b/interface/app/$libraryId/settings/node/libraries/ListItem.tsx @@ -1,10 +1,13 @@ -import { Pencil, Trash } from '@phosphor-icons/react'; -import { LibraryConfigWrapped } from '@sd/client'; +import { CaretDown, CaretRight, Pencil, Trash } from '@phosphor-icons/react'; +import { AnimatePresence, motion } from 'framer-motion'; +import { Key, useState } from 'react'; +import { LibraryConfigWrapped, useBridgeQuery } from '@sd/client'; import { Button, ButtonLink, Card, dialogManager, Tooltip } from '@sd/ui'; import { Icon } from '~/components'; import { useLocale } from '~/hooks'; import DeleteDialog from './DeleteDialog'; +import DeviceItem from './DeviceItem'; interface Props { library: LibraryConfigWrapped; @@ -13,51 +16,115 @@ interface Props { export default (props: Props) => { const { t } = useLocale(); + const [isExpanded, setIsExpanded] = useState(false); + + const cloudDevicesList = useBridgeQuery(['cloud.devices.list'], { + suspense: true, + retry: false + }); + + const toggleExpansion = () => { + setIsExpanded((prev) => !prev); + }; return ( - - {/* */} - -
-

- {props.library.config.name} - {props.current && ( - - {t('current')} - - )} -

-

{props.library.uuid}

-
-
- {/* */} - - - - - - -
-
+
+ +
+ +
+

+ {props.library.config.name} + {props.current && ( + + {t('current')} + + )} +

+

{props.library.uuid}

+
+
+
+ + + + + + + +
+
+ + + {isExpanded && ( + +
+ + {cloudDevicesList.data?.map( + ( + device: { + pub_id: Key | null | undefined; + name: string; + os: string; + storage_size: number; + created_at: string; + }, + index: number + ) => ( +
+ +
+
+ +
+
+
+ ) + )} +
+ )} +
+
); }; diff --git a/interface/locales/en/common.json b/interface/locales/en/common.json index 3a9a0c514..4769085af 100644 --- a/interface/locales/en/common.json +++ b/interface/locales/en/common.json @@ -1,4 +1,8 @@ { + "Add Device Description": "Scan the QR code or authenticate your device UUID to add a device.", + "Connect": "Connect", + "Connecting": "Connecting", + "Delete device": "Remove this device from library", "about": "About", "about_vision_text": "Many of us have multiple cloud accounts, drives that aren’t backed up and data at risk of loss. We depend on cloud services like Google Photos and iCloud, but are locked in with limited capacity and almost zero interoperability between services and operating systems. Photo albums shouldn’t be stuck in a device ecosystem, or harvested for advertising data. They should be OS agnostic, permanent and personally owned. Data we create is our legacy, that will long outlive us—open source technology is the only way to ensure we retain absolute control over the data that defines our lives, at unlimited scale.", "about_vision_title": "Vision", @@ -8,7 +12,6 @@ "account": "Account", "actions": "Actions", "add": "Add", - "Add Device Description": "Scan the QR code or authenticate your device UUID to add a device.", "add_device": "Add Device", "add_file_extension_rule": "Add a file extension to the current rule", "add_filter": "Add Filter", @@ -19,6 +22,7 @@ "add_location_tooltip": "Add path as an indexed location", "add_locations": "Add Locations", "add_tag": "Add Tag", + "added": "added to library on", "added_location": "Added Location {{name}}", "adding_location": "Adding Location {{name}}", "advanced": "Advanced", @@ -102,14 +106,12 @@ "config_other": "Configs", "configure_location": "Configure Location", "confirm": "Confirm", - "Connect": "Connect", "connect_cloud": "Connect a cloud", "connect_cloud_description": "Connect your cloud accounts to Spacedrive.", "connect_device": "Connect a device", 
"connect_device_description": "Spacedrive works best on all your devices.", "connect_library_to_cloud": "Connect library to Spacedrive Cloud", "connected": "Connected", - "Connecting": "Connecting", "connecting_library_to_cloud": "Connecting library to Spacedrive Cloud...", "contacts": "Contacts", "contacts_description": "Manage your contacts in Spacedrive.", @@ -169,7 +171,7 @@ "delete_forever": "Delete Forever", "delete_info": "This will not delete the actual folder on disk. Preview media will be deleted.", "delete_library": "Delete Library", - "delete_library_description": "This is permanent, your files will not be deleted, only the Spacedrive library.", + "delete_library_description": "This is permanent! Original files will not be deleted, only the Spacedrive library.", "delete_location": "Delete Location", "delete_location_description": "Deleting a location will also remove all files associated with it from the Spacedrive database, the files themselves will not be deleted.", "delete_object": "Delete object", @@ -226,7 +228,7 @@ "encrypt": "Encrypt", "encrypt_library": "Encrypt Library", "encrypt_library_coming_soon": "Library encryption coming soon", - "encrypt_library_description": "Enable encryption for this library, this will only encrypt the Spacedrive database, not the files themselves.", + "encrypt_library_description": "Enable encryption for this library. 
This will only encrypt the Spacedrive database, not the files themselves.", "encrypted": "Encrypted", "encrypted_one": "Encrypted", "encrypted_other": "Encrypted", @@ -323,8 +325,8 @@ "general_settings": "General Settings", "general_settings_description": "General settings related to this client.", "general_shortcut_description": "General usage shortcuts", - "generate_checksums": "Generate Checksums", "generatePreviewMedia_label": "Generate preview media for this Location", + "generate_checksums": "Generate Checksums", "gitignore": "Git Ignore", "glob_description": "Glob (e.g., **/.git)", "go_back": "Go Back", @@ -712,10 +714,10 @@ "switch_to_next_tab": "Switch to next tab", "switch_to_previous_tab": "Switch to previous tab", "sync": "Sync", + "syncPreviewMedia_label": "Sync preview media for this Location with your devices", "sync_description": "Manage how Spacedrive syncs.", "sync_with_library": "Sync with Library", "sync_with_library_description": "If enabled, your keybinds will be synced with library, otherwise they will apply only to this client.", - "syncPreviewMedia_label": "Sync preview media for this Location with your devices", "system": "System", "tag": "Tag", "tag_one": "Tag", @@ -744,6 +746,7 @@ "thumbnailer_cpu_usage": "Thumbnailer CPU usage", "thumbnailer_cpu_usage_description": "Limit how much CPU the thumbnailer can use for background processing.", "to": "to", + "toggle devices": "Toggle Library Devices", "toggle_all": "Toggle All", "toggle_command_palette": "Toggle command palette", "toggle_hidden_files": "Toggle hidden files", @@ -803,4 +806,4 @@ "zoom": "Zoom", "zoom_in": "Zoom In", "zoom_out": "Zoom Out" -} \ No newline at end of file +} From 749d228a586d455e7d8a7c99b58cbf0872cca4a9 Mon Sep 17 00:00:00 2001 From: myung03 Date: Tue, 13 Aug 2024 19:13:22 -0700 Subject: [PATCH 054/218] added min width for list and device items --- interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx | 2 +- 
interface/app/$libraryId/settings/node/libraries/ListItem.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx b/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx index 0cb27984e..470c72e7e 100644 --- a/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx +++ b/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx @@ -17,7 +17,7 @@ export default (props: DeviceItemProps) => { const { t } = useLocale(); return ( - + { return (
- +
From 1aa459ad23b4e540ca6f172da52b937219a0a648 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Wed, 14 Aug 2024 16:51:16 -0300 Subject: [PATCH 055/218] Revamping crypto subcrate --- Cargo.toml | 10 +- apps/deps-generator/Cargo.toml | 20 - apps/deps-generator/src/main.rs | 75 - apps/deps-generator/src/types/backend.rs | 12 - apps/deps-generator/src/types/cli.rs | 33 - apps/deps-generator/src/types/frontend.rs | 36 - apps/deps-generator/src/types/mod.rs | 3 - apps/desktop/src-tauri/Cargo.toml | 62 +- apps/desktop/src-tauri/src/main.rs | 7 +- apps/mobile/modules/sd-core/core/src/lib.rs | 4 +- apps/server/src/main.rs | 14 +- core/Cargo.toml | 28 +- .../crates/cloud-services/src/cloud_client.rs | 1 + core/crates/cloud-services/src/lib.rs | 4 +- core/src/api/auth.rs | 153 - core/src/api/cloud/devices.rs | 139 +- core/src/api/cloud/locations.rs | 59 +- core/src/api/cloud/mod.rs | 65 +- core/src/api/cloud/new_locations.rs | 89 - core/src/api/mod.rs | 69 +- core/src/api/nodes.rs | 66 - core/src/api/web_api.rs | 12 +- core/src/cloud/sync/send.rs | 11 +- core/src/env.rs | 15 - core/src/lib.rs | 102 +- core/src/library/library.rs | 10 - core/src/library/manager/mod.rs | 193 +- core/src/node/config.rs | 81 +- core/src/p2p/manager.rs | 3 +- crates/crypto/Cargo.toml | 149 +- crates/crypto/README.md | 22 +- crates/crypto/assets/eff_large_wordlist.txt | 7776 ----------------- .../crypto/benches/crypto/aes-256-gcm-siv.rs | 66 - crates/crypto/benches/crypto/aes-256-gcm.rs | 66 - .../benches/crypto/xchacha20-poly1305.rs | 66 - crates/crypto/benches/hashing/argon2id.rs | 43 - .../crypto/benches/hashing/blake3-balloon.rs | 43 - crates/crypto/benches/hashing/blake3-kdf.rs | 24 - crates/crypto/benches/hashing/blake3.rs | 31 - crates/crypto/examples/file_encryption.rs | 119 - crates/crypto/examples/secure_erase.rs | 15 +- crates/crypto/src/cloud/decrypt.rs | 105 + crates/crypto/src/cloud/encrypt.rs | 99 + crates/crypto/src/cloud/mod.rs | 3 + crates/crypto/src/cloud/secret_key.rs | 189 + 
crates/crypto/src/crypto/mod.rs | 1 - crates/crypto/src/crypto/stream.rs | 1 - crates/crypto/src/ct.rs | 68 +- crates/crypto/src/encoding/bincode.rs | 29 - crates/crypto/src/encoding/file/header.rs | 321 - crates/crypto/src/encoding/file/keyslot.rs | 74 - crates/crypto/src/encoding/file/mod.rs | 525 -- crates/crypto/src/encoding/file/object.rs | 92 - crates/crypto/src/encoding/mod.rs | 6 - crates/crypto/src/encrypted.rs | 143 - crates/crypto/src/{sys/fs => }/erase.rs | 196 +- crates/crypto/src/error.rs | 115 +- crates/crypto/src/hashing.rs | 399 - crates/crypto/src/keyring/apple/ios.rs | 40 - crates/crypto/src/keyring/apple/macos.rs | 54 - crates/crypto/src/keyring/apple/mod.rs | 9 - crates/crypto/src/keyring/identifier.rs | 48 - crates/crypto/src/keyring/linux/keyutils.rs | 69 - crates/crypto/src/keyring/linux/mod.rs | 7 - .../src/keyring/linux/secret_service.rs | 73 - crates/crypto/src/keyring/mod.rs | 212 - crates/crypto/src/keyring/session.rs | 39 - crates/crypto/src/keyring/windows.rs | 1 - crates/crypto/src/lib.rs | 24 +- crates/crypto/src/primitives.rs | 132 +- crates/crypto/src/protected.rs | 12 +- crates/crypto/src/rng/csprng.rs | 82 + crates/crypto/src/rng/csprng/chacha20.rs | 80 - crates/crypto/src/rng/csprng/mod.rs | 18 - crates/crypto/src/rng/mod.rs | 3 +- crates/crypto/src/sys/fs/mod.rs | 6 - crates/crypto/src/sys/mod.rs | 1 - crates/crypto/src/types.rs | 744 -- crates/crypto/src/utils.rs | 54 - crates/crypto/src/vault/ephemeral.rs | 84 - crates/crypto/src/vault/mod.rs | 5 - crates/crypto/src/vault/persistent.rs | 80 - .../Layout/Sidebar/DebugPopover.tsx | 32 - packages/client/src/stores/featureFlags.tsx | 6 +- rust-toolchain.toml | 2 +- 85 files changed, 1196 insertions(+), 12883 deletions(-) delete mode 100644 apps/deps-generator/Cargo.toml delete mode 100644 apps/deps-generator/src/main.rs delete mode 100644 apps/deps-generator/src/types/backend.rs delete mode 100644 apps/deps-generator/src/types/cli.rs delete mode 100644 
apps/deps-generator/src/types/frontend.rs delete mode 100644 apps/deps-generator/src/types/mod.rs delete mode 100644 core/src/api/auth.rs delete mode 100644 core/src/api/cloud/new_locations.rs delete mode 100644 core/src/env.rs delete mode 100644 crates/crypto/assets/eff_large_wordlist.txt delete mode 100644 crates/crypto/benches/crypto/aes-256-gcm-siv.rs delete mode 100644 crates/crypto/benches/crypto/aes-256-gcm.rs delete mode 100644 crates/crypto/benches/crypto/xchacha20-poly1305.rs delete mode 100644 crates/crypto/benches/hashing/argon2id.rs delete mode 100644 crates/crypto/benches/hashing/blake3-balloon.rs delete mode 100644 crates/crypto/benches/hashing/blake3-kdf.rs delete mode 100644 crates/crypto/benches/hashing/blake3.rs delete mode 100644 crates/crypto/examples/file_encryption.rs create mode 100644 crates/crypto/src/cloud/decrypt.rs create mode 100644 crates/crypto/src/cloud/encrypt.rs create mode 100644 crates/crypto/src/cloud/mod.rs create mode 100644 crates/crypto/src/cloud/secret_key.rs delete mode 100644 crates/crypto/src/encoding/bincode.rs delete mode 100644 crates/crypto/src/encoding/file/header.rs delete mode 100644 crates/crypto/src/encoding/file/keyslot.rs delete mode 100644 crates/crypto/src/encoding/file/mod.rs delete mode 100644 crates/crypto/src/encoding/file/object.rs delete mode 100644 crates/crypto/src/encoding/mod.rs delete mode 100644 crates/crypto/src/encrypted.rs rename crates/crypto/src/{sys/fs => }/erase.rs (71%) delete mode 100644 crates/crypto/src/hashing.rs delete mode 100644 crates/crypto/src/keyring/apple/ios.rs delete mode 100644 crates/crypto/src/keyring/apple/macos.rs delete mode 100644 crates/crypto/src/keyring/apple/mod.rs delete mode 100644 crates/crypto/src/keyring/identifier.rs delete mode 100644 crates/crypto/src/keyring/linux/keyutils.rs delete mode 100644 crates/crypto/src/keyring/linux/mod.rs delete mode 100644 crates/crypto/src/keyring/linux/secret_service.rs delete mode 100644 crates/crypto/src/keyring/mod.rs 
delete mode 100644 crates/crypto/src/keyring/session.rs delete mode 100644 crates/crypto/src/keyring/windows.rs create mode 100644 crates/crypto/src/rng/csprng.rs delete mode 100644 crates/crypto/src/rng/csprng/chacha20.rs delete mode 100644 crates/crypto/src/rng/csprng/mod.rs delete mode 100644 crates/crypto/src/sys/fs/mod.rs delete mode 100644 crates/crypto/src/sys/mod.rs delete mode 100644 crates/crypto/src/types.rs delete mode 100644 crates/crypto/src/utils.rs delete mode 100644 crates/crypto/src/vault/ephemeral.rs delete mode 100644 crates/crypto/src/vault/mod.rs delete mode 100644 crates/crypto/src/vault/persistent.rs diff --git a/Cargo.toml b/Cargo.toml index b240bfc9b..574fd7962 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,5 @@ [workspace] -exclude = ["crates/crypto"] members = [ - "apps/deps-generator", "apps/desktop/crates/*", "apps/desktop/src-tauri", "apps/mobile/modules/sd-core/android/crate", @@ -18,6 +16,7 @@ resolver = "2" edition = "2021" license = "AGPL-3.0-only" repository = "https://github.com/spacedriveapp/spacedrive" +rust-version = "1.80" [workspace.dependencies] # First party dependencies @@ -25,11 +24,12 @@ sd-cloud-schema = { git = "https://github.com/spacedriveapp/cloud-services-schem # Third party dependencies used by one or more of our crates async-channel = "2.3" +async-stream = "0.3.5" async-trait = "0.1.80" axum = "0.6.20" # Update blocked by hyper base64 = "0.22.1" base91 = "0.1.0" -blake3 = "1.5.0" # Update blocked by custom patch below +blake3 = "1.5.3" # Update blocked by custom patch below chrono = "0.4.38" directories = "5.0" ed25519-dalek = "2.1.1" @@ -114,10 +114,6 @@ rev = "a005656df7" git = "https://github.com/spacedriveapp/rust-libp2p.git" rev = "a005656df7" -[patch.crates-io.blake3] -git = "https://github.com/spacedriveapp/blake3.git" -rev = "d3aab416c1" - [profile.dev] # Make compilation faster on macOS codegen-units = 256 diff --git a/apps/deps-generator/Cargo.toml b/apps/deps-generator/Cargo.toml deleted file 
mode 100644 index e775b7913..000000000 --- a/apps/deps-generator/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "sd-deps-generator" -version = "0.0.0" - -authors = ["Jake Robinson "] -description = "A tool to compile all Spacedrive dependencies and their respective licenses" -edition.workspace = true -license.workspace = true -repository.workspace = true - -[dependencies] -# Workspace dependencies -reqwest = { workspace = true, features = ["blocking", "native-tls-vendored"] } -serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } - -# Specific Deps Generator dependencies -anyhow = "1.0" -cargo_metadata = "0.18.1" -clap = { version = "4.5", features = ["derive"] } diff --git a/apps/deps-generator/src/main.rs b/apps/deps-generator/src/main.rs deleted file mode 100644 index 42c81ec84..000000000 --- a/apps/deps-generator/src/main.rs +++ /dev/null @@ -1,75 +0,0 @@ -use anyhow::Result; -use cargo_metadata::CargoOpt; -use clap::Parser; -use std::{fs::File, path::PathBuf}; -use types::{ - backend::BackendDependency, - cli::{Action, Arguments}, - frontend::FrontendDependency, -}; - -pub mod types; - -const FOSSA_BASE_URL: &str = - "https://app.fossa.com/api/revisions/git%2Bgithub.com%2Fspacedriveapp%2Fspacedrive%24"; - -fn main() -> Result<()> { - let args = Arguments::parse(); - - match args.action { - Action::Frontend(sub_args) => write_frontend_deps(sub_args.revision, sub_args.path), - Action::Backend(sub_args) => { - write_backend_deps(sub_args.manifest_path, sub_args.output_path) - } - } -} - -fn write_backend_deps(manifest_path: PathBuf, output_path: PathBuf) -> Result<()> { - let cmd = cargo_metadata::MetadataCommand::new() - .manifest_path(manifest_path) - .features(CargoOpt::AllFeatures) - .exec()?; - - let deps: Vec = cmd - .packages - .into_iter() - .filter_map(|p| { - (!cmd.workspace_members.iter().any(|t| &p.id == t)).then_some(BackendDependency { - title: p.name, - description: p.description, - url: p.repository, 
- version: p.version.to_string(), - authors: p.authors, - license: p.license, - }) - }) - .collect(); - - let mut file = File::create(output_path)?; - serde_json::to_writer(&mut file, &deps)?; - - Ok(()) -} - -fn write_frontend_deps(rev: String, path: PathBuf) -> Result<()> { - let url = format!("{FOSSA_BASE_URL}{rev}/dependencies"); - - let response = reqwest::blocking::get(url)?.text()?; - let json: Vec = serde_json::from_str(&response)?; - - let deps: Vec<_> = json - .into_iter() - .map(|dep| FrontendDependency { - title: dep.project.title, - authors: dep.project.authors, - description: dep.project.description, - url: dep.project.url, - license: dep.licenses, - }) - .collect(); - - let mut file = File::create(path)?; - serde_json::to_writer(&mut file, &deps)?; - - Ok(()) -} diff --git a/apps/deps-generator/src/types/backend.rs b/apps/deps-generator/src/types/backend.rs deleted file mode 100644 index c78440e5d..000000000 --- a/apps/deps-generator/src/types/backend.rs +++ /dev/null @@ -1,12 +0,0 @@ -use serde::Serialize; - -#[allow(clippy::module_name_repetitions)] -#[derive(Serialize)] -pub struct BackendDependency { - pub title: String, - pub description: Option, - pub url: Option, - pub version: String, - pub authors: Vec, - pub license: Option, -} diff --git a/apps/deps-generator/src/types/cli.rs b/apps/deps-generator/src/types/cli.rs deleted file mode 100644 index 0ad792921..000000000 --- a/apps/deps-generator/src/types/cli.rs +++ /dev/null @@ -1,33 +0,0 @@ -use std::path::PathBuf; - -use clap::{Args, Parser, Subcommand}; - -#[derive(Parser)] -pub struct Arguments { - #[command(subcommand)] - pub action: Action, -} - -#[derive(Subcommand)] -pub enum Action { - Frontend(FrontendArgs), - Backend(BackendArgs), -} - -#[derive(Args)] -pub struct FrontendArgs { - // could source this from `$GITHUB_SHA` for CI, if not set - #[arg(help = "the git revision")] - pub revision: String, - #[arg(help = "the output path")] - pub path: PathBuf, -} - -#[derive(Args)] -pub 
struct BackendArgs { - // could use `Cargo.toml` as the default from current dir (if not set) - #[arg(help = "path to the cargo manifest")] - pub manifest_path: PathBuf, - #[arg(help = "the output path")] - pub output_path: PathBuf, -} diff --git a/apps/deps-generator/src/types/frontend.rs b/apps/deps-generator/src/types/frontend.rs deleted file mode 100644 index dcc0b3e02..000000000 --- a/apps/deps-generator/src/types/frontend.rs +++ /dev/null @@ -1,36 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize)] -pub struct Dependency { - pub project: Project, - pub licenses: Vec, -} - -#[derive(Serialize, Deserialize)] -pub struct Project { - // pub locator: Option, - pub title: String, - pub description: Option, - pub url: Option, - pub authors: Vec>, -} - -#[derive(Serialize, Deserialize)] -pub struct License { - pub text: Option, - // pub license_id: Option, // always null AFAIK - pub copyright: Option, - // pub license_group_id: i64, - // pub ignored: bool, // always false from my testing - // pub revision_id: Option, -} - -#[allow(clippy::module_name_repetitions)] -#[derive(Serialize)] -pub struct FrontendDependency { - pub title: String, - pub description: Option, - pub url: Option, - pub authors: Vec>, - pub license: Vec, -} diff --git a/apps/deps-generator/src/types/mod.rs b/apps/deps-generator/src/types/mod.rs deleted file mode 100644 index 8e51afb58..000000000 --- a/apps/deps-generator/src/types/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod backend; -pub mod cli; -pub mod frontend; diff --git a/apps/desktop/src-tauri/Cargo.toml b/apps/desktop/src-tauri/Cargo.toml index dd9d30393..b296ec80e 100644 --- a/apps/desktop/src-tauri/Cargo.toml +++ b/apps/desktop/src-tauri/Cargo.toml @@ -1,43 +1,47 @@ [package] -name = "sd-desktop" +name = "sd-desktop" version = "0.4.1" -authors = ["Spacedrive Technology Inc "] -default-run = "sd-desktop" -description = "The universal file manager." 
-edition.workspace = true -license.workspace = true +authors = ["Spacedrive Technology Inc "] +default-run = "sd-desktop" +description = "The universal file manager." +edition.workspace = true +license.workspace = true repository.workspace = true [dependencies] # Spacedrive Sub-crates -sd-core = { path = "../../../core", features = ["ffmpeg", "heif"] } -sd-fda = { path = "../../../crates/fda" } +sd-core = { path = "../../../core", features = ["ffmpeg", "heif"] } +sd-fda = { path = "../../../crates/fda" } sd-prisma = { path = "../../../crates/prisma" } # Workspace dependencies -axum = { workspace = true, features = ["headers", "query"] } -directories = { workspace = true } -futures = { workspace = true } -http = { workspace = true } -hyper = { workspace = true } +axum = { workspace = true, features = ["headers", "query"] } +directories = { workspace = true } +futures = { workspace = true } +http = { workspace = true } +hyper = { workspace = true } prisma-client-rust = { workspace = true } -rand = { workspace = true } -rspc = { workspace = true, features = ["tauri", "tracing"] } -serde = { workspace = true } -serde_json = { workspace = true } -specta = { workspace = true } -specta-typescript = { workspace = true } -strum = { workspace = true, features = ["derive"] } -thiserror = { workspace = true } -tokio = { workspace = true, features = ["sync"] } -tracing = { workspace = true } -uuid = { workspace = true, features = ["serde"] } +rand = { workspace = true } +rspc = { workspace = true, features = ["tauri", "tracing"] } +serde = { workspace = true } +serde_json = { workspace = true } +specta = { workspace = true } +specta-typescript = { workspace = true } +strum = { workspace = true, features = ["derive"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["sync"] } +tracing = { workspace = true } +uuid = { workspace = true, features = ["serde"] } # Specific Desktop dependencies # WARNING: Do NOT enable default features, as that vendors dbus 
(see below) opener = { version = "0.7.1", features = ["reveal"], default-features = false } -tauri = { version = "=2.0.0-rc.2", features = ["linux-libxdo", "macos-private-api", "unstable"] } +tauri = { version = "=2.0.0-rc.2", features = [ + "linux-libxdo", + "macos-private-api", + "unstable", +] } tauri-plugin-dialog = "=2.0.0-rc.0" tauri-plugin-os = "=2.0.0-rc.0" tauri-plugin-shell = "=2.0.0-rc.0" @@ -45,7 +49,7 @@ tauri-plugin-updater = "=2.0.0-rc.0" tauri-runtime = { version = "=2.0.0-rc.2" } tauri-specta = { git = "https://github.com/spacedriveapp/tauri-specta", rev = "6069a05029", features = [ "derive", - "typescript" + "typescript", ] } tauri-utils = { version = "=2.0.0-rc.2" } @@ -72,7 +76,7 @@ sd-desktop-windows = { path = "../crates/windows" } tauri-build = "=2.0.0-rc.2" [features] -ai-models = ["sd-core/ai"] +ai-models = ["sd-core/ai"] custom-protocol = ["tauri/custom-protocol"] -default = ["custom-protocol"] -devtools = ["tauri/devtools"] +default = ["custom-protocol"] +devtools = ["tauri/devtools"] diff --git a/apps/desktop/src-tauri/src/main.rs b/apps/desktop/src-tauri/src/main.rs index 7442bf8f6..c2aafdc93 100644 --- a/apps/desktop/src-tauri/src/main.rs +++ b/apps/desktop/src-tauri/src/main.rs @@ -176,8 +176,6 @@ pub enum DragAndDropEvent { Cancelled, } -const CLIENT_ID: &str = "2abb241e-40b8-4517-a3e3-5594375c8fbb"; - #[tokio::main] async fn main() -> tauri::Result<()> { #[cfg(target_os = "linux")] @@ -235,10 +233,7 @@ async fn main() -> tauri::Result<()> { // The `_guard` must be assigned to variable for flushing remaining logs on main exit through Drop let (_guard, result) = match Node::init_logger(&data_dir) { - Ok(guard) => ( - Some(guard), - Node::new(data_dir, sd_core::Env::new(CLIENT_ID)).await, - ), + Ok(guard) => (Some(guard), Node::new(data_dir).await), Err(err) => (None, Err(NodeError::Logger(err))), }; diff --git a/apps/mobile/modules/sd-core/core/src/lib.rs b/apps/mobile/modules/sd-core/core/src/lib.rs index 575447ca4..688d63c80 100644 
--- a/apps/mobile/modules/sd-core/core/src/lib.rs +++ b/apps/mobile/modules/sd-core/core/src/lib.rs @@ -32,8 +32,6 @@ pub static SUBSCRIPTIONS: Lazy> = OnceCell::new(); -pub const CLIENT_ID: &str = "d068776a-05b6-4aaa-9001-4d01734e1944"; - pub struct MobileSender<'a> { resp: &'a mut Option, } @@ -74,7 +72,7 @@ pub fn handle_core_msg( None => { let _guard = Node::init_logger(&data_dir); - let new_node = match Node::new(data_dir, sd_core::Env::new(CLIENT_ID)).await { + let new_node = match Node::new(data_dir).await { Ok(node) => node, Err(e) => { error!(?e, "Failed to initialize node;"); diff --git a/apps/server/src/main.rs b/apps/server/src/main.rs index 8690d6286..f5b2baec0 100644 --- a/apps/server/src/main.rs +++ b/apps/server/src/main.rs @@ -145,19 +145,7 @@ async fn main() { let state = AppState { auth }; - let (node, router) = match Node::new( - data_dir, - sd_core::Env { - api_url: tokio::sync::Mutex::new( - std::env::var("SD_API_URL") - .unwrap_or_else(|_| "https://api.spacedrive.com".to_string()), - ), - client_id: std::env::var("SD_CLIENT_ID") - .unwrap_or_else(|_| "04701823-a498-406e-aef9-22081c1dae34".to_string()), - }, - ) - .await - { + let (node, router) = match Node::new(data_dir).await { Ok(d) => d, Err(e) => { panic!("{}", e.to_string()) diff --git a/core/Cargo.toml b/core/Cargo.toml index 575a0c201..036963745 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -2,12 +2,12 @@ name = "sd-core" version = "0.4.1" -authors = ["Spacedrive Technology Inc "] -description = "Virtual distributed filesystem engine that powers Spacedrive." -edition.workspace = true -license.workspace = true -repository.workspace = true -rust-version = "1.78" +authors = ["Spacedrive Technology Inc "] +description = "Virtual distributed filesystem engine that powers Spacedrive." 
+edition.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true [features] default = [] @@ -20,8 +20,8 @@ heif = ["sd-images/heif"] [dependencies] # Inner Core Sub-crates -sd-cloud-schema = { workspace = true } -sd-core-cloud-services = { path = "./crates/cloud-services" } +sd-cloud-schema = { workspace = true } +sd-core-cloud-services = { path = "./crates/cloud-services" } sd-core-file-path-helper = { path = "./crates/file-path-helper" } sd-core-heavy-lifting = { path = "./crates/heavy-lifting" } sd-core-indexer-rules = { path = "./crates/indexer-rules" } @@ -47,6 +47,7 @@ sd-utils = { path = "../crates/utils" } # Workspace dependencies async-channel = { workspace = true } +async-stream = { workspace = true } async-trait = { workspace = true } axum = { workspace = true, features = ["ws"] } base64 = { workspace = true } @@ -91,15 +92,11 @@ tokio-stream = { workspace = true, features = ["fs"] } tokio-util = { workspace = true, features = ["io"] } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } -uuid = { workspace = true, features = ["v4", "v7", "std", "serde"] } +uuid = { workspace = true, features = ["serde", "std", "v4", "v7"] } webp = { workspace = true } # Specific Core dependencies -async-recursion = "1.1" async-stream = "0.3.5" -aws-config = "1.5" -aws-credential-types = "1.2" -aws-sdk-s3 = { version = "1.34", features = ["behavior-version-latest"] } bytes = "1.6" ctor = "0.2.8" flate2 = "1.0" @@ -107,6 +104,11 @@ hostname = "0.4.0" http-body = "0.4.6" # Update blocked by http http-range = "0.1.5" int-enum = "0.5" # Update blocked due to API breaking changes +keyring = { version = "3.0.4", features = [ + "apple-native", + "sync-secret-service", + "windows-native" +] } mini-moka = "0.10.3" notify = { git = "https://github.com/notify-rs/notify.git", rev = "c3929ed114", default-features = false, features = [ "macos_fsevent" diff --git 
a/core/crates/cloud-services/src/cloud_client.rs b/core/crates/cloud-services/src/cloud_client.rs index 08b896bc4..593739581 100644 --- a/core/crates/cloud-services/src/cloud_client.rs +++ b/core/crates/cloud-services/src/cloud_client.rs @@ -22,6 +22,7 @@ enum ClientState { /// They're optional in two different ways: /// - The cloud services depends on a user being logged in with our server. /// - The user being connected to the internet to begin with. +/// /// As we don't want to force the user to be connected to the internet, we have to make sure /// that core can always operate without the cloud services. #[derive(Debug, Clone)] diff --git a/core/crates/cloud-services/src/lib.rs b/core/crates/cloud-services/src/lib.rs index 9264826e1..e663a6ed2 100644 --- a/core/crates/cloud-services/src/lib.rs +++ b/core/crates/cloud-services/src/lib.rs @@ -1,9 +1,9 @@ - mod error; mod cloud_client; mod cloud_p2p; -pub use error::Error; pub use cloud_client::CloudServices; +pub use error::Error; +pub use quic_rpc::transport::quinn::QuinnConnection; diff --git a/core/src/api/auth.rs b/core/src/api/auth.rs deleted file mode 100644 index 52673323e..000000000 --- a/core/src/api/auth.rs +++ /dev/null @@ -1,153 +0,0 @@ -use std::time::Duration; - -use reqwest::StatusCode; -use rspc::alpha::AlphaRouter; -use serde::{Deserialize, Serialize}; -use specta::Type; - -use super::{Ctx, R}; - -pub(crate) fn mount() -> AlphaRouter { - R.router() - .procedure("loginSession", { - #[derive(Serialize, Type)] - #[specta(inline)] - enum Response { - Start { - user_code: String, - verification_url: String, - verification_url_complete: String, - }, - Complete, - Error(String), - } - - R.subscription(|node, _: ()| async move { - #[derive(Deserialize, Type)] - struct DeviceAuthorizationResponse { - device_code: String, - user_code: String, - verification_url: String, - verification_uri_complete: String, - } - - async_stream::stream! 
{ - let device_type = if cfg!(target_arch = "wasm32") { - "web".to_string() - } else if cfg!(target_os = "ios") || cfg!(target_os = "android") { - "mobile".to_string() - } else { - "desktop".to_string() - }; - - let auth_response = match match node - .http - .post(&format!( - "{}/login/device/code", - &node.env.api_url.lock().await - )) - .form(&[("client_id", &node.env.client_id), ("device", &device_type)]) - .send() - .await - .map_err(|e| e.to_string()) - { - Ok(r) => r.json::().await.map_err(|e| e.to_string()), - Err(e) => { - yield Response::Error(e.to_string()); - return - }, - } { - Ok(v) => v, - Err(e) => { - yield Response::Error(e.to_string()); - return - }, - }; - - yield Response::Start { - user_code: auth_response.user_code.clone(), - verification_url: auth_response.verification_url.clone(), - verification_url_complete: auth_response.verification_uri_complete.clone(), - }; - - yield loop { - tokio::time::sleep(Duration::from_secs(5)).await; - - let token_resp = match node.http - .post(&format!("{}/login/oauth/access_token", &node.env.api_url.lock().await)) - .form(&[ - ("grant_type", sd_cloud_api::auth::DEVICE_CODE_URN), - ("device_code", &auth_response.device_code), - ("client_id", &node.env.client_id) - ]) - .send() - .await { - Ok(v) => v, - Err(e) => break Response::Error(e.to_string()) - }; - - match token_resp.status() { - StatusCode::OK => { - let token = match token_resp.json().await { - Ok(v) => v, - Err(e) => break Response::Error(e.to_string()) - }; - - if let Err(e) = node.config - .write(|c| c.auth_token = Some(token)) - .await { - break Response::Error(e.to_string()); - }; - - - break Response::Complete; - }, - StatusCode::BAD_REQUEST => { - #[derive(Debug, Deserialize)] - struct OAuth400 { - error: String - } - - let resp = match token_resp.json::().await { - Ok(v) => v, - Err(e) => break Response::Error(e.to_string()) - }; - - match resp.error.as_str() { - "authorization_pending" => continue, - e => { - break 
Response::Error(e.to_string()) - } - } - }, - s => { - break Response::Error(s.to_string()); - } - } - } - } - }) - }) - .procedure( - "logout", - R.mutation(|node, _: ()| async move { - node.config - .write(|c| c.auth_token = None) - .await - .map(|_| ()) - .map_err(|_| { - rspc::Error::new( - rspc::ErrorCode::InternalServerError, - "Failed to write config".to_string(), - ) - }) - }), - ) - .procedure("me", { - R.query(|node, _: ()| async move { - let resp = sd_cloud_api::user::me(node.cloud_api_config().await).await?; - - Ok(resp) - }) - }) -} diff --git a/core/src/api/cloud/devices.rs b/core/src/api/cloud/devices.rs index e009197e3..a5b982ce0 100644 --- a/core/src/api/cloud/devices.rs +++ b/core/src/api/cloud/devices.rs @@ -1,15 +1,25 @@ -use crate::{ - api::{Ctx, R}, - try_get_cloud_services_client, +use crate::api::{Ctx, R}; + +use futures::{SinkExt, StreamExt}; +use sd_cloud_schema::{ + auth::AccessToken, + devices::{self, DeviceOS, PubId}, + opaque_ke::{ + rand::rngs::OsRng, ClientLogin, ClientLoginFinishParameters, ClientLoginFinishResult, + ClientLoginStartResult, + }, + Client, Service, SpacedriveCipherSuite, }; +use sd_core_cloud_services::QuinnConnection; +use blake3::Hash; use chrono::DateTime; -use sd_cloud_schema::devices::{self, DeviceOS, PubId}; - use rspc::alpha::AlphaRouter; -use tracing::debug; +use tracing::{debug, error}; use uuid::Uuid; +use super::{handle_comm_error, try_get_cloud_services_client}; + #[derive(Debug, serde::Serialize, serde::Deserialize, specta::Type)] struct MockDevice { pub_id: PubId, @@ -138,7 +148,8 @@ pub fn mount() -> AlphaRouter { .procedure("delete", { R.mutation(|node, req: devices::delete::Request| async move { super::handle_comm_error( - try_get_cloud_services_client!(node)? + try_get_cloud_services_client(&node) + .await? 
.devices() .delete(req) .await, @@ -153,7 +164,8 @@ pub fn mount() -> AlphaRouter { .procedure("update", { R.mutation(|node, req: devices::update::Request| async move { super::handle_comm_error( - try_get_cloud_services_client!(node)? + try_get_cloud_services_client(&node) + .await? .devices() .update(req) .await, @@ -166,3 +178,114 @@ pub fn mount() -> AlphaRouter { }) }) } + +pub async fn hello( + client: &Client, Service>, + access_token: AccessToken, + device_pub_id: PubId, + hashed_pub_id: Hash, +) -> Result<(), rspc::Error> { + use devices::hello::{Request, RequestUpdate, Response, State}; + + let ClientLoginStartResult { message, state } = ClientLogin::::start( + &mut OsRng, + hashed_pub_id.as_bytes().as_slice(), + ) + .map_err(|e| { + error!(?e, "OPAQUE error initializing device hello request;"); + rspc::Error::new( + rspc::ErrorCode::InternalServerError, + "Failed to initialize device login".into(), + ) + })?; + + let (mut hello_continuation, mut res_stream) = handle_comm_error( + client + .devices() + .hello(Request { + access_token, + pub_id: device_pub_id, + opaque_login_message: Box::new(message), + }) + .await, + "Failed to send device hello request;", + )?; + + let Some(res) = res_stream.next().await else { + let message = "Server did not send a device hello response;"; + error!("{message}"); + return Err(rspc::Error::new( + rspc::ErrorCode::InternalServerError, + message.to_string(), + )); + }; + + let login_response = + match handle_comm_error(res, "Communication error on device hello response;")? { + Ok(Response(State::LoginResponse(login_response))) => login_response, + Ok(Response(State::End)) => { + unreachable!("Device hello response MUST not be End here, this is a serious bug and should crash;"); + } + Err(e) => { + error!(?e, "Device hello response error;"); + return Err(e.into()); + } + }; + + let ClientLoginFinishResult { + message, + export_key, + .. 
+ } = state + .finish( + hashed_pub_id.as_bytes().as_slice(), + *login_response, + ClientLoginFinishParameters::default(), + ) + .map_err(|e| { + error!(?e, "Device hello finish error;"); + rspc::Error::new( + rspc::ErrorCode::InternalServerError, + "Failed to finish device login".into(), + ) + })?; + + hello_continuation + .send(RequestUpdate { + opaque_login_finish: Box::new(message), + }) + .await + .map_err(|e| { + error!(?e, "Failed to send device hello request continuation;"); + rspc::Error::new( + rspc::ErrorCode::InternalServerError, + "Failed to finish device login procedure;".into(), + ) + })?; + + let Some(res) = res_stream.next().await else { + let message = "Server did not send a device hello END response;"; + error!("{message}"); + return Err(rspc::Error::new( + rspc::ErrorCode::InternalServerError, + message.to_string(), + )); + }; + + match handle_comm_error(res, "Communication error on device hello response;")? { + Ok(Response(State::LoginResponse(_))) => { + unreachable!("Device hello final response MUST be End here, this is a serious bug and should crash;"); + } + Ok(Response(State::End)) => {} + Err(e) => { + error!(?e, "Device hello final response error;"); + return Err(e.into()); + } + }; + + Ok(()) +} + +pub async fn device_registration() -> Result<(), rspc::Error> { + Ok(()) +} diff --git a/core/src/api/cloud/locations.rs b/core/src/api/cloud/locations.rs index a6c9c80cc..e7320d315 100644 --- a/core/src/api/cloud/locations.rs +++ b/core/src/api/cloud/locations.rs @@ -1,31 +1,58 @@ -// This file is being deprecated in favor of libraries.rs -// This is due to the migration to the new API system, but the frontend is still using this file - use crate::api::{Ctx, R}; +use sd_cloud_schema::locations; + use rspc::alpha::AlphaRouter; +use tracing::debug; pub fn mount() -> AlphaRouter { R.router() .procedure("list", { - R.query(|node, _: ()| async move { - sd_cloud_api::locations::list(node.cloud_api_config().await) - .await - .map_err(Into::into) + 
R.query(|node, req: locations::list::Request| async move { + let locations::list::Response(locations) = super::handle_comm_error( + super::try_get_cloud_services_client(&node) + .await? + .locations() + .list(req) + .await, + "Failed to list locations;", + )??; + + debug!(?locations, "Got locations"); + + Ok(locations) }) }) .procedure("create", { - R.mutation(|node, name: String| async move { - sd_cloud_api::locations::create(node.cloud_api_config().await, name) - .await - .map_err(Into::into) + R.mutation(|node, req: locations::create::Request| async move { + super::handle_comm_error( + super::try_get_cloud_services_client(&node) + .await? + .locations() + .create(req) + .await, + "Failed to list locations;", + )??; + + debug!("Created cloud location"); + + Ok(()) }) }) - .procedure("remove", { - R.mutation(|node, id: String| async move { - sd_cloud_api::locations::create(node.cloud_api_config().await, id) - .await - .map_err(Into::into) + .procedure("delete", { + R.mutation(|node, req: locations::delete::Request| async move { + super::handle_comm_error( + super::try_get_cloud_services_client(&node) + .await? + .locations() + .delete(req) + .await, + "Failed to list locations;", + )??; + + debug!("Created cloud location"); + + Ok(()) }) }) } diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index 0b9ef5065..ec3d0177b 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -1,6 +1,11 @@ -// use crate::{api::libraries::LibraryConfigWrapped, invalidate_query, library::LibraryName}; +use crate::Node; -use sd_cloud_schema::{auth, users}; +use sd_cloud_schema::{ + auth, + error::{ClientSideError, Error}, + users, Client, Service, +}; +use sd_core_cloud_services::QuinnConnection; use rspc::alpha::AlphaRouter; use tracing::error; @@ -9,23 +14,19 @@ use uuid::Uuid; use super::{Ctx, R}; mod devices; +mod libraries; mod library; mod locations; -mod new_locations; -mod libraries; -#[macro_export] -macro_rules! 
try_get_cloud_services_client { - ($node:expr) => {{ - let node: &$crate::Node = &$node; - - node.cloud_services - .client() - .await - .map_err(::sd_utils::error::report_error( - "Failed to get cloud services client;", - )) - }}; +async fn try_get_cloud_services_client( + node: &Node, +) -> Result, Service>, sd_core_cloud_services::Error> { + node.cloud_services + .client() + .await + .map_err(::sd_utils::error::report_error( + "Failed to get cloud services client;", + )) } pub(crate) fn mount() -> AlphaRouter { @@ -33,11 +34,12 @@ pub(crate) fn mount() -> AlphaRouter { .merge("library.", library::mount()) .merge("libraries.", libraries::mount()) .merge("locations.", locations::mount()) - .merge("new_locations.", new_locations::mount()) .merge("devices.", devices::mount()) .procedure("bootstrap", { R.mutation(|node, access_token: auth::AccessToken| async move { - let client = try_get_cloud_services_client!(node)?; + use sd_cloud_schema::devices; + + let client = try_get_cloud_services_client(&node).await?; // create user route is idempotent, so we can safely keep creating the same user over and over handle_comm_error( @@ -50,6 +52,33 @@ pub(crate) fn mount() -> AlphaRouter { "Failed to create user;", )??; + let device_pub_id = devices::PubId(node.config.get().await.id); + let mut hasher = blake3::Hasher::new(); + hasher.update(device_pub_id.0.as_bytes().as_slice()); + let hashed_pub_id = hasher.finalize(); + + match handle_comm_error( + client + .devices() + .get(devices::get::Request { + access_token: access_token.clone(), + pub_id: device_pub_id, + }) + .await, + "Failed to get device on cloud bootstrap;", + )? 
{ + Ok(_) => { + // Device registered, we execute a device hello flow + self::devices::hello(&client, access_token, device_pub_id, hashed_pub_id) + .await + } + Err(Error::Client(ClientSideError::NotFound(_))) => { + // Device not registered, we execute a device register flow + todo!() + } + Err(e) => return Err(e.into()), + } + // TODO: figure out a way to know if we need to register the device or send a device hello request // TODO: in case of a device register request, we use the OPAQUE key to encrypt iroh's secret key (NodeId) diff --git a/core/src/api/cloud/new_locations.rs b/core/src/api/cloud/new_locations.rs deleted file mode 100644 index 6ac9f5f37..000000000 --- a/core/src/api/cloud/new_locations.rs +++ /dev/null @@ -1,89 +0,0 @@ -use crate::{ - api::{Ctx, R}, - try_get_cloud_services_client, -}; - -use rspc::alpha::AlphaRouter; -use sd_cloud_schema::locations; -use tracing::debug; - -pub fn mount() -> AlphaRouter { - R.router() - .procedure("get", { - R.query(|node, req: locations::get::Request| async move { - let locations::get::Response(location) = super::handle_comm_error( - try_get_cloud_services_client!(node)? - .locations() - .get(req) - .await, - "Failed to get location;", - )??; - - debug!(?location, "Got location"); - - Ok(location) - }) - }) - .procedure("list", { - R.query(|node, req: locations::list::Request| async move { - let locations::list::Response(locations) = super::handle_comm_error( - try_get_cloud_services_client!(node)? - .locations() - .list(req) - .await, - "Failed to list locations;", - )??; - - debug!(?locations, "Listed locations"); - - Ok(locations) - }) - }) - .procedure("create", { - R.mutation(|node, req: locations::create::Request| async move { - super::handle_comm_error( - try_get_cloud_services_client!(node)? - .locations() - .create(req) - .await, - "Failed to create location;", - )??; - - debug!("Created location"); - - // Should we invalidate the location list cache here? 
- - Ok(()) - }) - }) - .procedure("delete", { - R.mutation(|node, req: locations::delete::Request| async move { - super::handle_comm_error( - try_get_cloud_services_client!(node)? - .locations() - .delete(req) - .await, - "Failed to delete location;", - )??; - - debug!("Deleted location"); - - Ok(()) - }) - }) - .procedure("update", { - R.mutation(|node, req: locations::update::Request| async move { - super::handle_comm_error( - try_get_cloud_services_client!(node)? - .locations() - .update(req) - .await, - "Failed to update location;", - )??; - - debug!("Updated location"); - - Ok(()) - }) - }) -} diff --git a/core/src/api/mod.rs b/core/src/api/mod.rs index b8f0a36db..0ca79309c 100644 --- a/core/src/api/mod.rs +++ b/core/src/api/mod.rs @@ -13,15 +13,14 @@ use sd_core_heavy_lifting::media_processor::ThumbKey; use sd_p2p::RemoteIdentity; use sd_prisma::prisma::file_path; -use std::sync::{atomic::Ordering, Arc}; +use std::sync::Arc; use itertools::Itertools; -use rspc::{alpha::Rspc, Config, ErrorCode}; +use rspc::{alpha::Rspc, Config}; use serde::{Deserialize, Serialize}; use specta::Type; use uuid::Uuid; -mod auth; mod backups; mod cloud; // mod categories; @@ -68,49 +67,26 @@ pub enum CoreEvent { InvalidateOperation(InvalidateOperationEvent), } -/// All of the feature flags provided by the core itself. The frontend has it's own set of feature flags! 
-/// -/// If you want a variant of this to show up on the frontend it must be added to `backendFeatures` in `useFeatureFlag.tsx` -#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Type)] -#[serde(rename_all = "camelCase")] -pub enum BackendFeature { - CloudSync, -} - -impl BackendFeature { - pub fn restore(&self, node: &Node) { - match self { - BackendFeature::CloudSync => { - node.cloud_sync_flag.store(true, Ordering::Relaxed); - } - } - } -} - -// A version of [NodeConfig] that is safe to share with the frontend +/// A version of [`NodeConfig`] that is safe to share with the frontend #[derive(Debug, Serialize, Deserialize, Clone, Type)] -pub struct SanitisedNodeConfig { +pub struct SanitizedNodeConfig { /// id is a unique identifier for the current node. Each node has a public identifier (this one) and is given a local id for each library (done within the library code). pub id: Uuid, /// name is the display name of the current node. This is set by the user and is shown in the UI. 
// TODO: Length validation so it can fit in DNS record pub name: String, pub identity: RemoteIdentity, pub p2p: NodeConfigP2P, - pub features: Vec, pub preferences: NodePreferences, - pub image_labeler_version: Option, } -impl From for SanitisedNodeConfig { +impl From for SanitizedNodeConfig { fn from(value: NodeConfig) -> Self { Self { id: value.id, name: value.name, identity: value.identity.to_remote_identity(), p2p: value.p2p, - features: value.features, preferences: value.preferences, - image_labeler_version: value.image_labeler_version, } } } @@ -118,7 +94,7 @@ impl From for SanitisedNodeConfig { #[derive(Serialize, Debug, Type)] struct NodeState { #[serde(flatten)] - config: SanitisedNodeConfig, + config: SanitizedNodeConfig, data_path: String, device_model: Option, is_in_docker: bool, @@ -161,40 +137,7 @@ pub(crate) fn mount() -> Arc { }) }) }) - .procedure("toggleFeatureFlag", { - R.mutation(|node, feature: BackendFeature| async move { - let config = node.config.get().await; - - let enabled = if config.features.iter().contains(&feature) { - node.config - .write(|cfg| { - cfg.features.retain(|f| *f != feature); - }) - .await - .map(|_| false) - } else { - node.config - .write(|cfg| { - cfg.features.push(feature.clone()); - }) - .await - .map(|_| true) - } - .map_err(|e| rspc::Error::new(ErrorCode::InternalServerError, e.to_string()))?; - - match feature { - BackendFeature::CloudSync => { - node.cloud_sync_flag.store(enabled, Ordering::Relaxed); - } - } - - invalidate_query!(node; node, "nodeState"); - - Ok(()) - }) - }) .merge("api.", web_api::mount()) - .merge("auth.", auth::mount()) .merge("cloud.", cloud::mount()) .merge("search.", search::mount()) .merge("library.", libraries::mount()) diff --git a/core/src/api/nodes.rs b/core/src/api/nodes.rs index 0f422b593..09fb102fb 100644 --- a/core/src/api/nodes.rs +++ b/core/src/api/nodes.rs @@ -28,8 +28,6 @@ pub(crate) fn mount() -> AlphaRouter { pub p2p_discovery: Option, pub p2p_remote_access: Option, pub 
p2p_manual_peers: Option>, - #[cfg(feature = "ai")] - pub image_labeler_version: Option, } R.mutation(|node, args: ChangeNodeNameArgs| async move { if let Some(name) = &args.name { @@ -41,9 +39,6 @@ pub(crate) fn mount() -> AlphaRouter { } } - #[cfg(feature = "ai")] - let mut new_model = None; - node.config .write(|config| { if let Some(name) = args.name { @@ -71,29 +66,6 @@ pub(crate) fn mount() -> AlphaRouter { if let Some(manual_peers) = args.p2p_manual_peers { config.p2p.manual_peers = manual_peers; }; - - #[cfg(feature = "ai")] - if let Some(version) = args.image_labeler_version { - if config - .image_labeler_version - .as_ref() - .map(|node_version| version != *node_version) - .unwrap_or(true) - { - new_model = sd_ai::old_image_labeler::YoloV8::model(Some(&version)) - .map_err(|e| { - error!( - %version, - ?e, - "Failed to crate image_detection model;", - ); - }) - .ok(); - if new_model.is_some() { - config.image_labeler_version = Some(version); - } - } - } }) .await .map_err(|e| { @@ -109,44 +81,6 @@ pub(crate) fn mount() -> AlphaRouter { invalidate_query!(node; node, "nodeState"); - #[cfg(feature = "ai")] - { - use super::notifications::{NotificationData, NotificationKind}; - - if let Some(model) = new_model { - let version = model.version().to_string(); - tokio::spawn(async move { - let notification = if let Some(image_labeller) = - node.old_image_labeller.as_ref() - { - if let Err(e) = image_labeller.change_model(model).await { - NotificationData { - title: String::from( - "Failed to change image detection model", - ), - content: format!("Error: {e}"), - kind: NotificationKind::Error, - } - } else { - NotificationData { - title: String::from("Model download completed"), - content: format!("Successfully loaded model: {version}"), - kind: NotificationKind::Success, - } - } - } else { - NotificationData { - title: String::from("Failed to change image detection model"), - content: "The AI system is disabled due to a previous error. 
Contact support for help.".to_string(), - kind: NotificationKind::Success, - } - }; - - node.emit_notification(notification, None).await; - }); - } - } - Ok(()) }) }) diff --git a/core/src/api/web_api.rs b/core/src/api/web_api.rs index 49802bb5b..677d77865 100644 --- a/core/src/api/web_api.rs +++ b/core/src/api/web_api.rs @@ -15,12 +15,12 @@ pub(crate) fn mount() -> AlphaRouter { } |node, args: Feedback| async move { - sd_cloud_api::feedback::send( - node.cloud_api_config().await, - args.message, - args.emoji, - ) - .await?; + // sd_cloud_api::feedback::send( + // node.cloud_api_config().await, + // args.message, + // args.emoji, + // ) + // .await?; Ok(()) } diff --git a/core/src/cloud/sync/send.rs b/core/src/cloud/sync/send.rs index 82a049f49..11bf68f28 100644 --- a/core/src/cloud/sync/send.rs +++ b/core/src/cloud/sync/send.rs @@ -1,9 +1,6 @@ -use futures::FutureExt; -use futures_concurrency::future::Race; -use sd_core_sync::{SyncMessage, NTP64}; - use sd_actors::Stopper; -use sd_cloud_api::RequestConfigProvider; +use sd_core_cloud_services::CloudServices; +use sd_core_sync::{SyncMessage, NTP64}; use std::{ future::IntoFuture, @@ -14,6 +11,8 @@ use std::{ time::Duration, }; +use futures::FutureExt; +use futures_concurrency::future::Race; use tokio::{ sync::{broadcast, Notify}, time::sleep, @@ -31,7 +30,7 @@ enum RaceNotifiedOrStopped { pub async fn run_actor( library_id: Uuid, sync: Arc, - cloud_api_config_provider: Arc, + cloud_services: CloudServices, state: Arc, state_notify: Arc, stop: Stopper, diff --git a/core/src/env.rs b/core/src/env.rs deleted file mode 100644 index 41f09c2cf..000000000 --- a/core/src/env.rs +++ /dev/null @@ -1,15 +0,0 @@ -use tokio::sync::Mutex; - -pub struct Env { - pub api_url: Mutex, - pub client_id: String, -} - -impl Env { - pub fn new(client_id: &str) -> Self { - Self { - api_url: Mutex::new("https://api.spacedrive.com".to_string()), - client_id: client_id.to_string(), - } - } -} diff --git a/core/src/lib.rs b/core/src/lib.rs 
index 57bfdd11e..72eeaf6f2 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -43,7 +43,6 @@ mod context; #[cfg(feature = "crypto")] pub(crate) mod crypto; pub mod custom_uri; -mod env; pub mod library; pub(crate) mod location; pub(crate) mod node; @@ -56,8 +55,6 @@ pub(crate) mod preferences; pub mod util; pub(crate) mod volume; -pub use env::Env; - use api::notifications::{Notification, NotificationData, NotificationId}; use context::{JobContext, NodeContext}; use node::config; @@ -77,13 +74,10 @@ pub struct Node { pub event_bus: (broadcast::Sender, broadcast::Receiver), pub notifications: Notifications, pub cloud_sync_flag: Arc, - pub env: Arc, pub http: reqwest::Client, pub task_system: TaskSystem, pub job_system: JobSystem>, pub cloud_services: Arc, - #[cfg(feature = "ai")] - pub old_image_labeller: Option, } impl fmt::Debug for Node { @@ -95,16 +89,11 @@ impl fmt::Debug for Node { } impl Node { - pub async fn new( - data_dir: impl AsRef, - env: env::Env, - ) -> Result<(Arc, Arc), NodeError> { + pub async fn new(data_dir: impl AsRef) -> Result<(Arc, Arc), NodeError> { let data_dir = data_dir.as_ref(); info!(data_directory = %data_dir.display(), "Starting core;"); - let env = Arc::new(env); - #[cfg(debug_assertions)] let init_data = util::debug_initializer::InitConfig::load(data_dir).await?; @@ -116,16 +105,6 @@ impl Node { .await .map_err(NodeError::FailedToInitializeConfig)?; - if let Some(url) = config.get().await.sd_api_origin { - *env.api_url.lock().await = url; - } - - #[cfg(feature = "ai")] - let image_labeler_version = { - sd_ai::init()?; - config.get().await.image_labeler_version - }; - let (locations, locations_actor) = location::Locations::new(); let (old_jobs, jobs_actor) = old_job::OldJobs::new(); let libraries = library::Libraries::new(data_dir.join("libraries")).await?; @@ -169,30 +148,11 @@ impl Node { cfg!(target_os = "ios") || cfg!(target_os = "android"), )), http: reqwest::Client::new(), - env, cloud_services: Arc::new( 
CloudServices::new(&get_cloud_api_address, cloud_services_domain_name).await?, ), - #[cfg(feature = "ai")] - old_image_labeller: OldImageLabeler::new( - YoloV8::model(image_labeler_version)?, - data_dir, - ) - .await - .map_err(|e| { - error!( - ?e, - "Failed to initialize image labeller. AI features will be disabled;" - ); - }) - .ok(), }); - // Restore backend feature flags - for feature in node.config.get().await.features { - feature.restore(&node); - } - // Setup start actors that depend on the `Node` #[cfg(debug_assertions)] if let Some(init_data) = init_data { @@ -346,10 +306,6 @@ impl Node { .join() .await; - #[cfg(feature = "ai")] - if let Some(image_labeller) = &self.old_image_labeller { - image_labeller.shutdown().await; - } info!("Spacedrive Core shutdown successful!"); } @@ -394,55 +350,6 @@ impl Node { } } } - - pub async fn add_auth_header(&self, mut req: RequestBuilder) -> RequestBuilder { - if let Some(auth_token) = self.config.get().await.auth_token { - req = req.header("authorization", auth_token.to_header()); - }; - - req - } - - pub async fn authed_api_request(&self, req: RequestBuilder) -> Result { - let Some(auth_token) = self.config.get().await.auth_token else { - return Err(rspc::Error::new( - rspc::ErrorCode::Unauthorized, - "No auth token".to_string(), - )); - }; - - let req = req.header("authorization", auth_token.to_header()); - - req.send().await.map_err(|_| { - rspc::Error::new( - rspc::ErrorCode::InternalServerError, - "Request failed".to_string(), - ) - }) - } - - pub async fn api_request(&self, req: RequestBuilder) -> Result { - req.send().await.map_err(|_| { - rspc::Error::new( - rspc::ErrorCode::InternalServerError, - "Request failed".to_string(), - ) - }) - } - - pub async fn cloud_api_config(&self) -> sd_cloud_api::RequestConfig { - sd_cloud_api::RequestConfig { - client: self.http.clone(), - api_url: self.env.api_url.lock().await.clone(), - auth_token: self.config.get().await.auth_token, - } - } -} - -impl 
sd_cloud_api::RequestConfigProvider for Node { - async fn get_request_config(self: &Arc) -> sd_cloud_api::RequestConfig { - Node::cloud_api_config(self).await - } } /// Error type for Node related errors. @@ -467,11 +374,4 @@ pub enum NodeError { JobSystem(#[from] sd_core_heavy_lifting::JobSystemError), #[error(transparent)] CloudServices(#[from] sd_core_cloud_services::Error), - - #[cfg(feature = "ai")] - #[error("ai error: {0}")] - AI(#[from] sd_ai::Error), - #[cfg(feature = "ai")] - #[error("Failed to download model: {0}")] - DownloadModel(#[from] DownloadModelError), } diff --git a/core/src/library/library.rs b/core/src/library/library.rs index 795714ab8..593c3509b 100644 --- a/core/src/library/library.rs +++ b/core/src/library/library.rs @@ -21,14 +21,6 @@ use uuid::Uuid; use super::{LibraryConfig, LibraryManagerError}; -// TODO: Finish this -// pub enum LibraryNew { -// InitialSync, -// Encrypted, -// Loaded(LoadedLibrary), -// Deleting, -// } - pub struct Library { /// id holds the ID of the current library. pub id: Uuid, @@ -48,7 +40,6 @@ pub struct Library { pub instance_uuid: Uuid, do_cloud_sync: broadcast::Sender<()>, - pub env: Arc, // Look, I think this shouldn't be here but our current invalidation system needs it. // TODO(@Oscar): Get rid of this with the new invalidation system. 
@@ -95,7 +86,6 @@ impl Library { // orphan_remover: OrphanRemoverActor::spawn(db), instance_uuid, do_cloud_sync, - env: node.env.clone(), event_bus_tx: node.event_bus.0.clone(), actors, }) diff --git a/core/src/library/manager/mod.rs b/core/src/library/manager/mod.rs index 6dfb20091..a8e187f39 100644 --- a/core/src/library/manager/mod.rs +++ b/core/src/library/manager/mod.rs @@ -598,109 +598,110 @@ impl Libraries { async move { loop { debug!("Syncing library with cloud!"); + // TODO(fogodev): re-implement this with new Cloud Services API - if library.config().await.cloud_id.is_some() { - if let Ok(lib) = - sd_cloud_api::library::get(node.cloud_api_config().await, library.id) - .await - { - match lib { - Some(lib) => { - if let Some(this_instance) = lib - .instances - .iter() - .find(|i| i.uuid == library.instance_uuid) - { - let node_config = node.config.get().await; - let curr_metadata: Option> = - instance.metadata.as_ref().map(|metadata| { - serde_json::from_slice(metadata) - .expect("invalid metadata") - }); - let should_update = this_instance.node_id != node_config.id - || RemoteIdentity::from_str( - &this_instance.node_remote_identity, - ) - .ok() != Some( - node_config.identity.to_remote_identity(), - ) || curr_metadata - != Some(node.p2p.peer_metadata()); + // if library.config().await.cloud_id.is_some() { + // if let Ok(lib) = + // sd_cloud_api::library::get(node.cloud_api_config().await, library.id) + // .await + // { + // match lib { + // Some(lib) => { + // if let Some(this_instance) = lib + // .instances + // .iter() + // .find(|i| i.uuid == library.instance_uuid) + // { + // let node_config = node.config.get().await; + // let curr_metadata: Option> = + // instance.metadata.as_ref().map(|metadata| { + // serde_json::from_slice(metadata) + // .expect("invalid metadata") + // }); + // let should_update = this_instance.node_id != node_config.id + // || RemoteIdentity::from_str( + // &this_instance.node_remote_identity, + // ) + // .ok() != Some( + // 
node_config.identity.to_remote_identity(), + // ) || curr_metadata + // != Some(node.p2p.peer_metadata()); - if should_update { - warn!("Library instance on cloud is outdated. Updating..."); + // if should_update { + // warn!("Library instance on cloud is outdated. Updating..."); - if let Err(e) = sd_cloud_api::library::update_instance( - node.cloud_api_config().await, - library.id, - this_instance.uuid, - Some(node_config.id), - Some(node_config.identity.to_remote_identity()), - Some(node.p2p.peer_metadata()), - ) - .await - { - error!( - instance_uuid = %this_instance.uuid, - ?e, - "Failed to updating instance on cloud;", - ); - } - } - } + // if let Err(e) = sd_cloud_api::library::update_instance( + // node.cloud_api_config().await, + // library.id, + // this_instance.uuid, + // Some(node_config.id), + // Some(node_config.identity.to_remote_identity()), + // Some(node.p2p.peer_metadata()), + // ) + // .await + // { + // error!( + // instance_uuid = %this_instance.uuid, + // ?e, + // "Failed to updating instance on cloud;", + // ); + // } + // } + // } - if lib.name != *library.config().await.name { - warn!("Library name on cloud is outdated. Updating..."); + // if lib.name != *library.config().await.name { + // warn!("Library name on cloud is outdated. 
Updating..."); - if let Err(e) = sd_cloud_api::library::update( - node.cloud_api_config().await, - library.id, - Some(lib.name), - ) - .await - { - error!(?e, "Failed to update library name on cloud;"); - } - } + // if let Err(e) = sd_cloud_api::library::update( + // node.cloud_api_config().await, + // library.id, + // Some(lib.name), + // ) + // .await + // { + // error!(?e, "Failed to update library name on cloud;"); + // } + // } - for instance in lib.instances { - if let Err(e) = cloud::sync::receive::upsert_instance( - library.id, - &library.db, - &library.sync, - &node.libraries, - &instance.uuid, - instance.identity, - &instance.node_id, - RemoteIdentity::from_str( - &instance.node_remote_identity, - ) - .expect("malformed remote identity from API"), - instance.metadata, - ) - .await - { - error!(?e, "Failed to create instance on cloud;"); - } - } - } - None => { - warn!( - "Library not found on cloud. Removing from local node..." - ); + // for instance in lib.instances { + // if let Err(e) = cloud::sync::receive::upsert_instance( + // library.id, + // &library.db, + // &library.sync, + // &node.libraries, + // &instance.uuid, + // instance.identity, + // &instance.node_id, + // RemoteIdentity::from_str( + // &instance.node_remote_identity, + // ) + // .expect("malformed remote identity from API"), + // instance.metadata, + // ) + // .await + // { + // error!(?e, "Failed to create instance on cloud;"); + // } + // } + // } + // None => { + // warn!( + // "Library not found on cloud. Removing from local node..." + // ); - let _ = this - .edit( - library.id, - None, - MaybeUndefined::Undefined, - MaybeUndefined::Null, - None, - ) - .await; - } - } - } - } + // let _ = this + // .edit( + // library.id, + // None, + // MaybeUndefined::Undefined, + // MaybeUndefined::Null, + // None, + // ) + // .await; + // } + // } + // } + // } tokio::select! 
{ // Update instances every 2 minutes diff --git a/core/src/node/config.rs b/core/src/node/config.rs index 565146ab7..75ec1a9ed 100644 --- a/core/src/node/config.rs +++ b/core/src/node/config.rs @@ -1,9 +1,10 @@ use crate::{ - api::{notifications::Notification, BackendFeature}, + api::notifications::Notification, /*object::media::old_thumbnail::preferences::ThumbnailerPreferences,*/ util::version_manager::{Kind, ManagedVersion, VersionManager, VersionManagerError}, }; +use sd_cloud_schema::devices::DeviceOS; use sd_p2p::Identity; use sd_utils::error::FileIOError; @@ -110,7 +111,8 @@ impl Default for NodeConfigP2P { } } -/// NodeConfig is the configuration for a node. This is shared between all libraries and is stored in a JSON file on disk. +/// NodeConfig is the configuration for a node. +/// This is shared between all libraries and is stored in a JSON file on disk. #[derive(Debug, Clone, Serialize, Deserialize)] // If you are adding `specta::Type` on this your probably about to leak the P2P private key pub struct NodeConfig { /// id is a unique identifier for the current node. Each node has a public identifier (this one) and is given a local id for each library (done within the library code). @@ -123,24 +125,16 @@ pub struct NodeConfig { /// The p2p identity keypair for this node. This is used to identify the node on the network. /// This keypair does effectively nothing except for provide libp2p with a stable peer_id. #[serde(with = "identity_serde")] + // TODO(@fogodev): remove these from here, we must not store secret keys in plaintext... 
+ // Put then on secret storage when we have a keyring compatible with all our supported platforms pub identity: Identity, /// P2P config #[serde(default)] pub p2p: NodeConfigP2P, - /// Feature flags enabled on the node - #[serde(default)] - pub features: Vec, - /// Authentication for Spacedrive Accounts - pub auth_token: Option, - /// URL of the Spacedrive API - #[serde(default, skip_serializing_if = "Option::is_none")] - pub sd_api_origin: Option, /// The aggregation of many different preferences for the node pub preferences: NodePreferences, - // Model version for the image labeler - pub image_labeler_version: Option, - // Operating System of the node -> "linux", "macos", "windows", "android", "ios" - pub os: String, + // Operating System of the node + pub os: DeviceOS, version: NodeConfigVersion, } @@ -185,6 +179,7 @@ pub enum NodeConfigVersion { V2 = 2, V3 = 3, V4 = 4, + V5 = 5, } impl ManagedVersion for NodeConfig { @@ -196,12 +191,7 @@ impl ManagedVersion for NodeConfig { let mut name = generate_device_name(); name.truncate(255); - #[cfg(feature = "ai")] - let image_labeler_version = Some(sd_ai::old_image_labeler::DEFAULT_MODEL_VERSION.to_string()); - #[cfg(not(feature = "ai"))] - let image_labeler_version = None; - - let os = std::env::consts::OS; + let os = DeviceOS::from_env(); Some(Self { id: Uuid::now_v7(), @@ -209,13 +199,9 @@ impl ManagedVersion for NodeConfig { identity: Identity::default(), p2p: NodeConfigP2P::default(), version: Self::LATEST_VERSION, - features: vec![], notifications: vec![], - auth_token: None, - sd_api_origin: None, preferences: NodePreferences::default(), - image_labeler_version, - os: os.to_string(), + os, }) } } @@ -335,6 +321,38 @@ impl NodeConfig { .map_err(|e| FileIOError::from((path, e)))?; } + (NodeConfigVersion::V4, NodeConfigVersion::V5) => { + let mut config: Map = + serde_json::from_slice(&fs::read(path).await.map_err(|e| { + FileIOError::from(( + path, + e, + "Failed to read node config file for migration", + )) + 
})?) + .map_err(VersionManagerError::SerdeJson)?; + + config.insert( + String::from("os"), + serde_json::to_value(DeviceOS::from_env()) + .map_err(VersionManagerError::SerdeJson)?, + ); + + config.remove("features"); + config.remove("auth_token"); + config.remove("sd_api_origin"); + config.remove("image_labeler_version"); + + fs::write( + path, + serde_json::to_vec(&config).map_err(VersionManagerError::SerdeJson)?, + ) + .await + .map_err(|e| { + FileIOError::from((path, e, "Failed to write back updated config")) + })?; + } + _ => { error!(current_version = ?current, "Node config version is not handled;"); return Err(VersionManagerError::UnexpectedMigration { @@ -376,18 +394,7 @@ impl Manager { let data_directory_path = data_directory_path.as_ref().to_path_buf(); let config_file_path = data_directory_path.join(NODE_STATE_CONFIG_NAME); - let mut config = NodeConfig::load(&config_file_path).await?; - - #[cfg(feature = "ai")] - if config.image_labeler_version.is_none() { - config.image_labeler_version = - Some(sd_ai::old_image_labeler::DEFAULT_MODEL_VERSION.to_string()); - } - - #[cfg(not(feature = "ai"))] - { - config.image_labeler_version = None; - } + let config = NodeConfig::load(&config_file_path).await?; let (preferences_watcher_tx, _preferences_watcher_rx) = watch::channel(config.preferences.clone()); diff --git a/core/src/p2p/manager.rs b/core/src/p2p/manager.rs index 7dfcb95ea..b986e5f3b 100644 --- a/core/src/p2p/manager.rs +++ b/core/src/p2p/manager.rs @@ -116,7 +116,8 @@ impl P2PManager { let client = reqwest::Client::new(); loop { match client - .get(format!("{}/api/p2p/relays", node.env.api_url.lock().await)) + // FIXME(@fogodev): hardcoded URL for now as I'm moving stuff around + .get(format!("{}/api/p2p/relays", "https://app.spacedrive.com")) .send() .await { diff --git a/crates/crypto/Cargo.toml b/crates/crypto/Cargo.toml index 3e8810500..3431ed194 100644 --- a/crates/crypto/Cargo.toml +++ b/crates/crypto/Cargo.toml @@ -2,141 +2,52 @@ name = 
"sd-crypto" version = "0.0.0" -authors = ["Jake Robinson "] +authors = ["Ericson Soares ", "Jake Robinson "] description = """ A cryptographic library that provides safe and high-level encryption, hashing, and encoding interfaces. """ -edition.workspace = true -keywords = ["crypto"] -license.workspace = true -readme = "README.md" -repository.workspace = true -rust-version = "1.72" - -[features] -experimental = [] -keyring = ["dep:linux-keyutils", "dep:security-framework"] -secret-service = [ - "dep:secret-service", - "dep:zbus", - "keyring" -] # explicit enabling required as the secret service api requires `zbus` and is messy -serde = ["bincode/serde", "dep:serde", "dep:serde-big-array", "dep:serde_json", "dep:serdect"] -sys = [] +edition.workspace = true +keywords = ["crypto"] +license.workspace = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true [dependencies] -# rng -rand = "0.9.0-alpha.0" -rand_chacha = "0.9.0-alpha.0" -rand_core = "0.9.0-alpha.0" +# Workspace dependencies +async-stream = { workspace = true } +blake3 = { workspace = true } +futures = { workspace = true } +serde = { workspace = true, features = ["derive"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["io-util", "macros", "rt-multi-thread", "sync"] } -# hashing -argon2 = { version = "0.6.0-pre.0", default_features = false, features = ["alloc", "zeroize"] } -balloon-hash = { version = "0.5.0-pre.0", default_features = false, features = [ - "alloc", - "zeroize" -] } -blake3 = { version = "1.5.0", features = ["traits-preview", "zeroize"] } +# External dependencies +aead = { version = "0.6.0-rc.0", default-features = false, features = ["stream"] } +chacha20poly1305 = "0.11.0-pre.1" +cmov = "0.3.1" +# Some deps use this same version, so we just use it too +generic-array = { version = "=0.14.7", features = ["serde", "zeroize"] } +hex = "0.4.3" +rand = "0.9.0-alpha.2" +rand_chacha = "0.9.0-alpha.2" +rand_core = "0.9.0-alpha.2" 
+serde-big-array = { version = "0.5.1" } +serdect = { version = "0.3.0-pre.0" } +typenum = "1.17.0" +zeroize = { version = "1.7.0", features = ["aarch64", "derive"] } -# constant time -cmov = "0.3.1" -# aeads -aead = { version = "0.5.2", default-features = false, features = ["stream"] } -aes-gcm-siv = "0.11.1" -bincode = { version = "2.0.0-rc.3", features = ["alloc", "derive"] } -chacha20poly1305 = "0.10.1" -thiserror = "1.0.57" - -zeroize = { version = "1.7.0", features = ["aarch64", "derive"] } - -serde = { version = "1.0.197", features = ["derive"], optional = true } -serde-big-array = { version = "0.5.1", optional = true } -serde_json = { version = "1.0.114", optional = true } -serdect = { version = "0.3.0-pre.0", optional = true } - -specta = { workspace = true, optional = true } - -# for asynchronous crypto -tokio = { workspace = true, features = [ - "io-util", - "macros", - "rt-multi-thread", - "sync" -], optional = true } - -redb = "1.5.0" - -hex = "0.4.3" - -uuid = { version = "1.7.0", features = ["v4"] } - -# ed25519-dalek = { version = "2.1.1", feature = ["std", "zeroize"] } -# x25519-dalek = { version = "2.0.1", feature = [ -# "std", -# "zeroize", -# ] } # ReusableSecrets feature may have to come out for X3DH - -# linux OS keyring -[target.'cfg(target_os = "linux")'.dependencies] -linux-keyutils = { version = "0.2.4", features = ["std"], optional = true } -secret-service = { version = "3.0.1", features = [ - "crypto-rust", - "rt-tokio-crypto-rust" -], optional = true } - -# this needs to remain at versions < 4, as they made some changes and i can't get it -# to compile for the time being -zbus = { version = "4.0", default_features = false, features = [ - "blocking", - "tokio" -], optional = true } - -[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies] -security-framework = { version = "2.9.2", optional = true } [dev-dependencies] -criterion = "0.5.1" paste = "1.0.14" tempfile = "3.10.1" -[clippy] -allow = ["unwrap_in_tests"] 
-[[bench]] -harness = false -name = "aes-256-gcm-siv" -path = "benches/crypto/aes-256-gcm-siv.rs" -[[bench]] -harness = false -name = "xchacha20-poly1305" -path = "benches/crypto/xchacha20-poly1305.rs" - -[[bench]] -bench = false -harness = false -name = "argon2id" -path = "benches/hashing/argon2id.rs" - -[[bench]] -bench = false -harness = false -name = "blake3-balloon" -path = "benches/hashing/blake3-balloon.rs" - -[[bench]] -harness = false -name = "blake3" -path = "benches/hashing/blake3.rs" - -[[bench]] -harness = false -name = "blake3-kdf" -path = "benches/hashing/blake3-kdf.rs" [[example]] -name = "file_encryption" -path = "examples/file_encryption.rs" +path = "examples/secure_erase.rs" +name = "secure_erase" diff --git a/crates/crypto/README.md b/crates/crypto/README.md index de00fbe84..58f4b2306 100644 --- a/crates/crypto/README.md +++ b/crates/crypto/README.md @@ -4,40 +4,20 @@ This crate contains Spacedrive's cryptographic modules. This includes things such as: -- The key manager - Encryption and decryption -- Encrypted file header formats (with extremely fast serialization and deserialization) -- Key hashing and derivation -- Keyring interfaces to access native OS keystores It has support for the following cryptographic functions: -- `Argon2id` -- `Balloon` hashing -- `BLAKE3` key derivation - `XChaCha20-Poly1305` -- `AES-256-GCM-SIV` It aims to be (relatively) lightweight, easy to maintain and platform-agnostic where possible. It does contain some platform-specific code, although it's only built if the target matches. 
-## Features - -A list of all features can be found below (NOTE: none of these features are enabled by default) - -- `serde` - provides integration with `serde` and `serde_json` - -- `tokio` - provides integration with the `tokio` crate -- `specta` - provides integration with the `specta` crate -- `bincode` - provides integration with the `bincode` crate (this will likely become part of the crate) -- `keyring` - provides a unified interface for interacting with OS-keyrings (currently only supports MacOS/iOS/Linux `keyutils`). `keyutils` is not persistent, so is best used in a headless server/docker environment, as keys are wiped on-reboot. The Secret Service API is not practically available in headless environments. -- `secret-service` - enables `keyring` but also enables the Secret Service API (a persistent keyring targeted at Gnome/KDE (via `gnome-keyring` and `kwallet` respectively)). Is a pretty heavy dependency. - ## Security Notice This crate has NOT received any security audit - however, a couple of our upstream libraries (provided by [RustCrypto](https://github.com/RustCrypto)) have. You may find them below: -- AES-GCM and XChaCha20-Poly1305 audit by NCC group ([link](https://research.nccgroup.com/wp-content/uploads/2020/02/NCC_Group_MobileCoin_RustCrypto_AESGCM_ChaCha20Poly1305_Implementation_Review_2020-02-12_v1.0.pdf)) +- XChaCha20-Poly1305 audit by NCC group ([link](https://research.nccgroup.com/wp-content/uploads/2020/02/NCC_Group_MobileCoin_RustCrypto_AESGCM_ChaCha20Poly1305_Implementation_Review_2020-02-12_v1.0.pdf)) Breaking changes are very likely! Use at your own risk - no stability or security is guaranteed. 
diff --git a/crates/crypto/assets/eff_large_wordlist.txt b/crates/crypto/assets/eff_large_wordlist.txt deleted file mode 100644 index 20a63e458..000000000 --- a/crates/crypto/assets/eff_large_wordlist.txt +++ /dev/null @@ -1,7776 +0,0 @@ -abacus -abdomen -abdominal -abide -abiding -ability -ablaze -able -abnormal -abrasion -abrasive -abreast -abridge -abroad -abruptly -absence -absentee -absently -absinthe -absolute -absolve -abstain -abstract -absurd -accent -acclaim -acclimate -accompany -account -accuracy -accurate -accustom -acetone -achiness -aching -acid -acorn -acquaint -acquire -acre -acrobat -acronym -acting -action -activate -activator -active -activism -activist -activity -actress -acts -acutely -acuteness -aeration -aerobics -aerosol -aerospace -afar -affair -affected -affecting -affection -affidavit -affiliate -affirm -affix -afflicted -affluent -afford -affront -aflame -afloat -aflutter -afoot -afraid -afterglow -afterlife -aftermath -aftermost -afternoon -aged -ageless -agency -agenda -agent -aggregate -aghast -agile -agility -aging -agnostic -agonize -agonizing -agony -agreeable -agreeably -agreed -agreeing -agreement -aground -ahead -ahoy -aide -aids -aim -ajar -alabaster -alarm -albatross -album -alfalfa -algebra -algorithm -alias -alibi -alienable -alienate -aliens -alike -alive -alkaline -alkalize -almanac -almighty -almost -aloe -aloft -aloha -alone -alongside -aloof -alphabet -alright -although -altitude -alto -aluminum -alumni -always -amaretto -amaze -amazingly -amber -ambiance -ambiguity -ambiguous -ambition -ambitious -ambulance -ambush -amendable -amendment -amends -amenity -amiable -amicably -amid -amigo -amino -amiss -ammonia -ammonium -amnesty -amniotic -among -amount -amperage -ample -amplifier -amplify -amply -amuck -amulet -amusable -amused -amusement -amuser -amusing -anaconda -anaerobic -anagram -anatomist -anatomy -anchor -anchovy -ancient -android -anemia -anemic -aneurism -anew -angelfish -angelic -anger -angled -angler -angles 
-angling -angrily -angriness -anguished -angular -animal -animate -animating -animation -animator -anime -animosity -ankle -annex -annotate -announcer -annoying -annually -annuity -anointer -another -answering -antacid -antarctic -anteater -antelope -antennae -anthem -anthill -anthology -antibody -antics -antidote -antihero -antiquely -antiques -antiquity -antirust -antitoxic -antitrust -antiviral -antivirus -antler -antonym -antsy -anvil -anybody -anyhow -anymore -anyone -anyplace -anything -anytime -anyway -anywhere -aorta -apache -apostle -appealing -appear -appease -appeasing -appendage -appendix -appetite -appetizer -applaud -applause -apple -appliance -applicant -applied -apply -appointee -appraisal -appraiser -apprehend -approach -approval -approve -apricot -april -apron -aptitude -aptly -aqua -aqueduct -arbitrary -arbitrate -ardently -area -arena -arguable -arguably -argue -arise -armadillo -armband -armchair -armed -armful -armhole -arming -armless -armoire -armored -armory -armrest -army -aroma -arose -around -arousal -arrange -array -arrest -arrival -arrive -arrogance -arrogant -arson -art -ascend -ascension -ascent -ascertain -ashamed -ashen -ashes -ashy -aside -askew -asleep -asparagus -aspect -aspirate -aspire -aspirin -astonish -astound -astride -astrology -astronaut -astronomy -astute -atlantic -atlas -atom -atonable -atop -atrium -atrocious -atrophy -attach -attain -attempt -attendant -attendee -attention -attentive -attest -attic -attire -attitude -attractor -attribute -atypical -auction -audacious -audacity -audible -audibly -audience -audio -audition -augmented -august -authentic -author -autism -autistic -autograph -automaker -automated -automatic -autopilot -available -avalanche -avatar -avenge -avenging -avenue -average -aversion -avert -aviation -aviator -avid -avoid -await -awaken -award -aware -awhile -awkward -awning -awoke -awry -axis -babble -babbling -babied -baboon -backache -backboard -backboned -backdrop -backed -backer -backfield 
-backfire -backhand -backing -backlands -backlash -backless -backlight -backlit -backlog -backpack -backpedal -backrest -backroom -backshift -backside -backslid -backspace -backspin -backstab -backstage -backtalk -backtrack -backup -backward -backwash -backwater -backyard -bacon -bacteria -bacterium -badass -badge -badland -badly -badness -baffle -baffling -bagel -bagful -baggage -bagged -baggie -bagginess -bagging -baggy -bagpipe -baguette -baked -bakery -bakeshop -baking -balance -balancing -balcony -balmy -balsamic -bamboo -banana -banish -banister -banjo -bankable -bankbook -banked -banker -banking -banknote -bankroll -banner -bannister -banshee -banter -barbecue -barbed -barbell -barber -barcode -barge -bargraph -barista -baritone -barley -barmaid -barman -barn -barometer -barrack -barracuda -barrel -barrette -barricade -barrier -barstool -bartender -barterer -bash -basically -basics -basil -basin -basis -basket -batboy -batch -bath -baton -bats -battalion -battered -battering -battery -batting -battle -bauble -bazooka -blabber -bladder -blade -blah -blame -blaming -blanching -blandness -blank -blaspheme -blasphemy -blast -blatancy -blatantly -blazer -blazing -bleach -bleak -bleep -blemish -blend -bless -blighted -blimp -bling -blinked -blinker -blinking -blinks -blip -blissful -blitz -blizzard -bloated -bloating -blob -blog -bloomers -blooming -blooper -blot -blouse -blubber -bluff -bluish -blunderer -blunt -blurb -blurred -blurry -blurt -blush -blustery -boaster -boastful -boasting -boat -bobbed -bobbing -bobble -bobcat -bobsled -bobtail -bodacious -body -bogged -boggle -bogus -boil -bok -bolster -bolt -bonanza -bonded -bonding -bondless -boned -bonehead -boneless -bonelike -boney -bonfire -bonnet -bonsai -bonus -bony -boogeyman -boogieman -book -boondocks -booted -booth -bootie -booting -bootlace -bootleg -boots -boozy -borax -boring -borough -borrower -borrowing -boss -botanical -botanist -botany -botch -both -bottle -bottling -bottom -bounce -bouncing 
-bouncy -bounding -boundless -bountiful -bovine -boxcar -boxer -boxing -boxlike -boxy -breach -breath -breeches -breeching -breeder -breeding -breeze -breezy -brethren -brewery -brewing -briar -bribe -brick -bride -bridged -brigade -bright -brilliant -brim -bring -brink -brisket -briskly -briskness -bristle -brittle -broadband -broadcast -broaden -broadly -broadness -broadside -broadways -broiler -broiling -broken -broker -bronchial -bronco -bronze -bronzing -brook -broom -brought -browbeat -brownnose -browse -browsing -bruising -brunch -brunette -brunt -brush -brussels -brute -brutishly -bubble -bubbling -bubbly -buccaneer -bucked -bucket -buckle -buckshot -buckskin -bucktooth -buckwheat -buddhism -buddhist -budding -buddy -budget -buffalo -buffed -buffer -buffing -buffoon -buggy -bulb -bulge -bulginess -bulgur -bulk -bulldog -bulldozer -bullfight -bullfrog -bullhorn -bullion -bullish -bullpen -bullring -bullseye -bullwhip -bully -bunch -bundle -bungee -bunion -bunkbed -bunkhouse -bunkmate -bunny -bunt -busboy -bush -busily -busload -bust -busybody -buzz -cabana -cabbage -cabbie -cabdriver -cable -caboose -cache -cackle -cacti -cactus -caddie -caddy -cadet -cadillac -cadmium -cage -cahoots -cake -calamari -calamity -calcium -calculate -calculus -caliber -calibrate -calm -caloric -calorie -calzone -camcorder -cameo -camera -camisole -camper -campfire -camping -campsite -campus -canal -canary -cancel -candied -candle -candy -cane -canine -canister -cannabis -canned -canning -cannon -cannot -canola -canon -canopener -canopy -canteen -canyon -capable -capably -capacity -cape -capillary -capital -capitol -capped -capricorn -capsize -capsule -caption -captivate -captive -captivity -capture -caramel -carat -caravan -carbon -cardboard -carded -cardiac -cardigan -cardinal -cardstock -carefully -caregiver -careless -caress -caretaker -cargo -caring -carless -carload -carmaker -carnage -carnation -carnival -carnivore -carol -carpenter -carpentry -carpool -carport -carried 
-carrot -carrousel -carry -cartel -cartload -carton -cartoon -cartridge -cartwheel -carve -carving -carwash -cascade -case -cash -casing -casino -casket -cassette -casually -casualty -catacomb -catalog -catalyst -catalyze -catapult -cataract -catatonic -catcall -catchable -catcher -catching -catchy -caterer -catering -catfight -catfish -cathedral -cathouse -catlike -catnap -catnip -catsup -cattail -cattishly -cattle -catty -catwalk -caucasian -caucus -causal -causation -cause -causing -cauterize -caution -cautious -cavalier -cavalry -caviar -cavity -cedar -celery -celestial -celibacy -celibate -celtic -cement -census -ceramics -ceremony -certainly -certainty -certified -certify -cesarean -cesspool -chafe -chaffing -chain -chair -chalice -challenge -chamber -chamomile -champion -chance -change -channel -chant -chaos -chaperone -chaplain -chapped -chaps -chapter -character -charbroil -charcoal -charger -charging -chariot -charity -charm -charred -charter -charting -chase -chasing -chaste -chastise -chastity -chatroom -chatter -chatting -chatty -cheating -cheddar -cheek -cheer -cheese -cheesy -chef -chemicals -chemist -chemo -cherisher -cherub -chess -chest -chevron -chevy -chewable -chewer -chewing -chewy -chief -chihuahua -childcare -childhood -childish -childless -childlike -chili -chill -chimp -chip -chirping -chirpy -chitchat -chivalry -chive -chloride -chlorine -choice -chokehold -choking -chomp -chooser -choosing -choosy -chop -chosen -chowder -chowtime -chrome -chubby -chuck -chug -chummy -chump -chunk -churn -chute -cider -cilantro -cinch -cinema -cinnamon -circle -circling -circular -circulate -circus -citable -citadel -citation -citizen -citric -citrus -city -civic -civil -clad -claim -clambake -clammy -clamor -clamp -clamshell -clang -clanking -clapped -clapper -clapping -clarify -clarinet -clarity -clash -clasp -class -clatter -clause -clavicle -claw -clay -clean -clear -cleat -cleaver -cleft -clench -clergyman -clerical -clerk -clever -clicker -client 
-climate -climatic -cling -clinic -clinking -clip -clique -cloak -clobber -clock -clone -cloning -closable -closure -clothes -clothing -cloud -clover -clubbed -clubbing -clubhouse -clump -clumsily -clumsy -clunky -clustered -clutch -clutter -coach -coagulant -coastal -coaster -coasting -coastland -coastline -coat -coauthor -cobalt -cobbler -cobweb -cocoa -coconut -cod -coeditor -coerce -coexist -coffee -cofounder -cognition -cognitive -cogwheel -coherence -coherent -cohesive -coil -coke -cola -cold -coleslaw -coliseum -collage -collapse -collar -collected -collector -collide -collie -collision -colonial -colonist -colonize -colony -colossal -colt -coma -come -comfort -comfy -comic -coming -comma -commence -commend -comment -commerce -commode -commodity -commodore -common -commotion -commute -commuting -compacted -compacter -compactly -compactor -companion -company -compare -compel -compile -comply -component -composed -composer -composite -compost -composure -compound -compress -comprised -computer -computing -comrade -concave -conceal -conceded -concept -concerned -concert -conch -concierge -concise -conclude -concrete -concur -condense -condiment -condition -condone -conducive -conductor -conduit -cone -confess -confetti -confidant -confident -confider -confiding -configure -confined -confining -confirm -conflict -conform -confound -confront -confused -confusing -confusion -congenial -congested -congrats -congress -conical -conjoined -conjure -conjuror -connected -connector -consensus -consent -console -consoling -consonant -constable -constant -constrain -constrict -construct -consult -consumer -consuming -contact -container -contempt -contend -contented -contently -contents -contest -context -contort -contour -contrite -control -contusion -convene -convent -copartner -cope -copied -copier -copilot -coping -copious -copper -copy -coral -cork -cornball -cornbread -corncob -cornea -corned -corner -cornfield -cornflake -cornhusk -cornmeal -cornstalk -corny 
-coronary -coroner -corporal -corporate -corral -correct -corridor -corrode -corroding -corrosive -corsage -corset -cortex -cosigner -cosmetics -cosmic -cosmos -cosponsor -cost -cottage -cotton -couch -cough -could -countable -countdown -counting -countless -country -county -courier -covenant -cover -coveted -coveting -coyness -cozily -coziness -cozy -crabbing -crabgrass -crablike -crabmeat -cradle -cradling -crafter -craftily -craftsman -craftwork -crafty -cramp -cranberry -crane -cranial -cranium -crank -crate -crave -craving -crawfish -crawlers -crawling -crayfish -crayon -crazed -crazily -craziness -crazy -creamed -creamer -creamlike -crease -creasing -creatable -create -creation -creative -creature -credible -credibly -credit -creed -creme -creole -crepe -crept -crescent -crested -cresting -crestless -crevice -crewless -crewman -crewmate -crib -cricket -cried -crier -crimp -crimson -cringe -cringing -crinkle -crinkly -crisped -crisping -crisply -crispness -crispy -criteria -critter -croak -crock -crook -croon -crop -cross -crouch -crouton -crowbar -crowd -crown -crucial -crudely -crudeness -cruelly -cruelness -cruelty -crumb -crummiest -crummy -crumpet -crumpled -cruncher -crunching -crunchy -crusader -crushable -crushed -crusher -crushing -crust -crux -crying -cryptic -crystal -cubbyhole -cube -cubical -cubicle -cucumber -cuddle -cuddly -cufflink -culinary -culminate -culpable -culprit -cultivate -cultural -culture -cupbearer -cupcake -cupid -cupped -cupping -curable -curator -curdle -cure -curfew -curing -curled -curler -curliness -curling -curly -curry -curse -cursive -cursor -curtain -curtly -curtsy -curvature -curve -curvy -cushy -cusp -cussed -custard -custodian -custody -customary -customer -customize -customs -cut -cycle -cyclic -cycling -cyclist -cylinder -cymbal -cytoplasm -cytoplast -dab -dad -daffodil -dagger -daily -daintily -dainty -dairy -daisy -dallying -dance -dancing -dandelion -dander -dandruff -dandy -danger -dangle -dangling -daredevil 
-dares -daringly -darkened -darkening -darkish -darkness -darkroom -darling -darn -dart -darwinism -dash -dastardly -data -datebook -dating -daughter -daunting -dawdler -dawn -daybed -daybreak -daycare -daydream -daylight -daylong -dayroom -daytime -dazzler -dazzling -deacon -deafening -deafness -dealer -dealing -dealmaker -dealt -dean -debatable -debate -debating -debit -debrief -debtless -debtor -debug -debunk -decade -decaf -decal -decathlon -decay -deceased -deceit -deceiver -deceiving -december -decency -decent -deception -deceptive -decibel -decidable -decimal -decimeter -decipher -deck -declared -decline -decode -decompose -decorated -decorator -decoy -decrease -decree -dedicate -dedicator -deduce -deduct -deed -deem -deepen -deeply -deepness -deface -defacing -defame -default -defeat -defection -defective -defendant -defender -defense -defensive -deferral -deferred -defiance -defiant -defile -defiling -define -definite -deflate -deflation -deflator -deflected -deflector -defog -deforest -defraud -defrost -deftly -defuse -defy -degraded -degrading -degrease -degree -dehydrate -deity -dejected -delay -delegate -delegator -delete -deletion -delicacy -delicate -delicious -delighted -delirious -delirium -deliverer -delivery -delouse -delta -deluge -delusion -deluxe -demanding -demeaning -demeanor -demise -democracy -democrat -demote -demotion -demystify -denatured -deniable -denial -denim -denote -dense -density -dental -dentist -denture -deny -deodorant -deodorize -departed -departure -depict -deplete -depletion -deplored -deploy -deport -depose -depraved -depravity -deprecate -depress -deprive -depth -deputize -deputy -derail -deranged -derby -derived -desecrate -deserve -deserving -designate -designed -designer -designing -deskbound -desktop -deskwork -desolate -despair -despise -despite -destiny -destitute -destruct -detached -detail -detection -detective -detector -detention -detergent -detest -detonate -detonator -detoxify -detract -deuce -devalue 
-deviancy -deviant -deviate -deviation -deviator -device -devious -devotedly -devotee -devotion -devourer -devouring -devoutly -dexterity -dexterous -diabetes -diabetic -diabolic -diagnoses -diagnosis -diagram -dial -diameter -diaper -diaphragm -diary -dice -dicing -dictate -dictation -dictator -difficult -diffused -diffuser -diffusion -diffusive -dig -dilation -diligence -diligent -dill -dilute -dime -diminish -dimly -dimmed -dimmer -dimness -dimple -diner -dingbat -dinghy -dinginess -dingo -dingy -dining -dinner -diocese -dioxide -diploma -dipped -dipper -dipping -directed -direction -directive -directly -directory -direness -dirtiness -disabled -disagree -disallow -disarm -disarray -disaster -disband -disbelief -disburse -discard -discern -discharge -disclose -discolor -discount -discourse -discover -discuss -disdain -disengage -disfigure -disgrace -dish -disinfect -disjoin -disk -dislike -disliking -dislocate -dislodge -disloyal -dismantle -dismay -dismiss -dismount -disobey -disorder -disown -disparate -disparity -dispatch -dispense -dispersal -dispersed -disperser -displace -display -displease -disposal -dispose -disprove -dispute -disregard -disrupt -dissuade -distance -distant -distaste -distill -distinct -distort -distract -distress -district -distrust -ditch -ditto -ditzy -dividable -divided -dividend -dividers -dividing -divinely -diving -divinity -divisible -divisibly -division -divisive -divorcee -dizziness -dizzy -doable -docile -dock -doctrine -document -dodge -dodgy -doily -doing -dole -dollar -dollhouse -dollop -dolly -dolphin -domain -domelike -domestic -dominion -dominoes -donated -donation -donator -donor -donut -doodle -doorbell -doorframe -doorknob -doorman -doormat -doornail -doorpost -doorstep -doorstop -doorway -doozy -dork -dormitory -dorsal -dosage -dose -dotted -doubling -douche -dove -down -dowry -doze -drab -dragging -dragonfly -dragonish -dragster -drainable -drainage -drained -drainer -drainpipe -dramatic -dramatize -drank -drapery 
-drastic -draw -dreaded -dreadful -dreadlock -dreamboat -dreamily -dreamland -dreamless -dreamlike -dreamt -dreamy -drearily -dreary -drench -dress -drew -dribble -dried -drier -drift -driller -drilling -drinkable -drinking -dripping -drippy -drivable -driven -driver -driveway -driving -drizzle -drizzly -drone -drool -droop -drop-down -dropbox -dropkick -droplet -dropout -dropper -drove -drown -drowsily -drudge -drum -dry -dubbed -dubiously -duchess -duckbill -ducking -duckling -ducktail -ducky -duct -dude -duffel -dugout -duh -duke -duller -dullness -duly -dumping -dumpling -dumpster -duo -dupe -duplex -duplicate -duplicity -durable -durably -duration -duress -during -dusk -dust -dutiful -duty -duvet -dwarf -dweeb -dwelled -dweller -dwelling -dwindle -dwindling -dynamic -dynamite -dynasty -dyslexia -dyslexic -each -eagle -earache -eardrum -earflap -earful -earlobe -early -earmark -earmuff -earphone -earpiece -earplugs -earring -earshot -earthen -earthlike -earthling -earthly -earthworm -earthy -earwig -easeful -easel -easiest -easily -easiness -easing -eastbound -eastcoast -easter -eastward -eatable -eaten -eatery -eating -eats -ebay -ebony -ebook -ecard -eccentric -echo -eclair -eclipse -ecologist -ecology -economic -economist -economy -ecosphere -ecosystem -edge -edginess -edging -edgy -edition -editor -educated -education -educator -eel -effective -effects -efficient -effort -eggbeater -egging -eggnog -eggplant -eggshell -egomaniac -egotism -egotistic -either -eject -elaborate -elastic -elated -elbow -eldercare -elderly -eldest -electable -election -elective -elephant -elevate -elevating -elevation -elevator -eleven -elf -eligible -eligibly -eliminate -elite -elitism -elixir -elk -ellipse -elliptic -elm -elongated -elope -eloquence -eloquent -elsewhere -elude -elusive -elves -email -embargo -embark -embassy -embattled -embellish -ember -embezzle -emblaze -emblem -embody -embolism -emboss -embroider -emcee -emerald -emergency -emission -emit -emote -emoticon 
-emotion -empathic -empathy -emperor -emphases -emphasis -emphasize -emphatic -empirical -employed -employee -employer -emporium -empower -emptier -emptiness -empty -emu -enable -enactment -enamel -enchanted -enchilada -encircle -enclose -enclosure -encode -encore -encounter -encourage -encroach -encrust -encrypt -endanger -endeared -endearing -ended -ending -endless -endnote -endocrine -endorphin -endorse -endowment -endpoint -endurable -endurance -enduring -energetic -energize -energy -enforced -enforcer -engaged -engaging -engine -engorge -engraved -engraver -engraving -engross -engulf -enhance -enigmatic -enjoyable -enjoyably -enjoyer -enjoying -enjoyment -enlarged -enlarging -enlighten -enlisted -enquirer -enrage -enrich -enroll -enslave -ensnare -ensure -entail -entangled -entering -entertain -enticing -entire -entitle -entity -entomb -entourage -entrap -entree -entrench -entrust -entryway -entwine -enunciate -envelope -enviable -enviably -envious -envision -envoy -envy -enzyme -epic -epidemic -epidermal -epidermis -epidural -epilepsy -epileptic -epilogue -epiphany -episode -equal -equate -equation -equator -equinox -equipment -equity -equivocal -eradicate -erasable -erased -eraser -erasure -ergonomic -errand -errant -erratic -error -erupt -escalate -escalator -escapable -escapade -escapist -escargot -eskimo -esophagus -espionage -espresso -esquire -essay -essence -essential -establish -estate -esteemed -estimate -estimator -estranged -estrogen -etching -eternal -eternity -ethanol -ether -ethically -ethics -euphemism -evacuate -evacuee -evade -evaluate -evaluator -evaporate -evasion -evasive -even -everglade -evergreen -everybody -everyday -everyone -evict -evidence -evident -evil -evoke -evolution -evolve -exact -exalted -example -excavate -excavator -exceeding -exception -excess -exchange -excitable -exciting -exclaim -exclude -excluding -exclusion -exclusive -excretion -excretory -excursion -excusable -excusably -excuse -exemplary -exemplify -exemption 
-exerciser -exert -exes -exfoliate -exhale -exhaust -exhume -exile -existing -exit -exodus -exonerate -exorcism -exorcist -expand -expanse -expansion -expansive -expectant -expedited -expediter -expel -expend -expenses -expensive -expert -expire -expiring -explain -expletive -explicit -explode -exploit -explore -exploring -exponent -exporter -exposable -expose -exposure -express -expulsion -exquisite -extended -extending -extent -extenuate -exterior -external -extinct -extortion -extradite -extras -extrovert -extrude -extruding -exuberant -fable -fabric -fabulous -facebook -facecloth -facedown -faceless -facelift -faceplate -faceted -facial -facility -facing -facsimile -faction -factoid -factor -factsheet -factual -faculty -fade -fading -failing -falcon -fall -false -falsify -fame -familiar -family -famine -famished -fanatic -fancied -fanciness -fancy -fanfare -fang -fanning -fantasize -fantastic -fantasy -fascism -fastball -faster -fasting -fastness -faucet -favorable -favorably -favored -favoring -favorite -fax -feast -federal -fedora -feeble -feed -feel -feisty -feline -felt-tip -feminine -feminism -feminist -feminize -femur -fence -fencing -fender -ferment -fernlike -ferocious -ferocity -ferret -ferris -ferry -fervor -fester -festival -festive -festivity -fetal -fetch -fever -fiber -fiction -fiddle -fiddling -fidelity -fidgeting -fidgety -fifteen -fifth -fiftieth -fifty -figment -figure -figurine -filing -filled -filler -filling -film -filter -filth -filtrate -finale -finalist -finalize -finally -finance -financial -finch -fineness -finer -finicky -finished -finisher -finishing -finite -finless -finlike -fiscally -fit -five -flaccid -flagman -flagpole -flagship -flagstick -flagstone -flail -flakily -flaky -flame -flammable -flanked -flanking -flannels -flap -flaring -flashback -flashbulb -flashcard -flashily -flashing -flashy -flask -flatbed -flatfoot -flatly -flatness -flatten -flattered -flatterer -flattery -flattop -flatware -flatworm -flavored -flavorful 
-flavoring -flaxseed -fled -fleshed -fleshy -flick -flier -flight -flinch -fling -flint -flip -flirt -float -flock -flogging -flop -floral -florist -floss -flounder -flyable -flyaway -flyer -flying -flyover -flypaper -foam -foe -fog -foil -folic -folk -follicle -follow -fondling -fondly -fondness -fondue -font -food -fool -footage -football -footbath -footboard -footer -footgear -foothill -foothold -footing -footless -footman -footnote -footpad -footpath -footprint -footrest -footsie -footsore -footwear -footwork -fossil -foster -founder -founding -fountain -fox -foyer -fraction -fracture -fragile -fragility -fragment -fragrance -fragrant -frail -frame -framing -frantic -fraternal -frayed -fraying -frays -freckled -freckles -freebase -freebee -freebie -freedom -freefall -freehand -freeing -freeload -freely -freemason -freeness -freestyle -freeware -freeway -freewill -freezable -freezing -freight -french -frenzied -frenzy -frequency -frequent -fresh -fretful -fretted -friction -friday -fridge -fried -friend -frighten -frightful -frigidity -frigidly -frill -fringe -frisbee -frisk -fritter -frivolous -frolic -from -front -frostbite -frosted -frostily -frosting -frostlike -frosty -froth -frown -frozen -fructose -frugality -frugally -fruit -frustrate -frying -gab -gaffe -gag -gainfully -gaining -gains -gala -gallantly -galleria -gallery -galley -gallon -gallows -gallstone -galore -galvanize -gambling -game -gaming -gamma -gander -gangly -gangrene -gangway -gap -garage -garbage -garden -gargle -garland -garlic -garment -garnet -garnish -garter -gas -gatherer -gathering -gating -gauging -gauntlet -gauze -gave -gawk -gazing -gear -gecko -geek -geiger -gem -gender -generic -generous -genetics -genre -gentile -gentleman -gently -gents -geography -geologic -geologist -geology -geometric -geometry -geranium -gerbil -geriatric -germicide -germinate -germless -germproof -gestate -gestation -gesture -getaway -getting -getup -giant -gibberish -giblet -giddily -giddiness -giddy 
-gift -gigabyte -gigahertz -gigantic -giggle -giggling -giggly -gigolo -gilled -gills -gimmick -girdle -giveaway -given -giver -giving -gizmo -gizzard -glacial -glacier -glade -gladiator -gladly -glamorous -glamour -glance -glancing -glandular -glare -glaring -glass -glaucoma -glazing -gleaming -gleeful -glider -gliding -glimmer -glimpse -glisten -glitch -glitter -glitzy -gloater -gloating -gloomily -gloomy -glorified -glorifier -glorify -glorious -glory -gloss -glove -glowing -glowworm -glucose -glue -gluten -glutinous -glutton -gnarly -gnat -goal -goatskin -goes -goggles -going -goldfish -goldmine -goldsmith -golf -goliath -gonad -gondola -gone -gong -good -gooey -goofball -goofiness -goofy -google -goon -gopher -gore -gorged -gorgeous -gory -gosling -gossip -gothic -gotten -gout -gown -grab -graceful -graceless -gracious -gradation -graded -grader -gradient -grading -gradually -graduate -graffiti -grafted -grafting -grain -granddad -grandkid -grandly -grandma -grandpa -grandson -granite -granny -granola -grant -granular -grape -graph -grapple -grappling -grasp -grass -gratified -gratify -grating -gratitude -gratuity -gravel -graveness -graves -graveyard -gravitate -gravity -gravy -gray -grazing -greasily -greedily -greedless -greedy -green -greeter -greeting -grew -greyhound -grid -grief -grievance -grieving -grievous -grill -grimace -grimacing -grime -griminess -grimy -grinch -grinning -grip -gristle -grit -groggily -groggy -groin -groom -groove -grooving -groovy -grope -ground -grouped -grout -grove -grower -growing -growl -grub -grudge -grudging -grueling -gruffly -grumble -grumbling -grumbly -grumpily -grunge -grunt -guacamole -guidable -guidance -guide -guiding -guileless -guise -gulf -gullible -gully -gulp -gumball -gumdrop -gumminess -gumming -gummy -gurgle -gurgling -guru -gush -gusto -gusty -gutless -guts -gutter -guy -guzzler -gyration -habitable -habitant -habitat -habitual -hacked -hacker -hacking -hacksaw -had -haggler -haiku -half -halogen -halt 
-halved -halves -hamburger -hamlet -hammock -hamper -hamster -hamstring -handbag -handball -handbook -handbrake -handcart -handclap -handclasp -handcraft -handcuff -handed -handful -handgrip -handgun -handheld -handiness -handiwork -handlebar -handled -handler -handling -handmade -handoff -handpick -handprint -handrail -handsaw -handset -handsfree -handshake -handstand -handwash -handwork -handwoven -handwrite -handyman -hangnail -hangout -hangover -hangup -hankering -hankie -hanky -haphazard -happening -happier -happiest -happily -happiness -happy -harbor -hardcopy -hardcore -hardcover -harddisk -hardened -hardener -hardening -hardhat -hardhead -hardiness -hardly -hardness -hardship -hardware -hardwired -hardwood -hardy -harmful -harmless -harmonica -harmonics -harmonize -harmony -harness -harpist -harsh -harvest -hash -hassle -haste -hastily -hastiness -hasty -hatbox -hatchback -hatchery -hatchet -hatching -hatchling -hate -hatless -hatred -haunt -haven -hazard -hazelnut -hazily -haziness -hazing -hazy -headache -headband -headboard -headcount -headdress -headed -header -headfirst -headgear -heading -headlamp -headless -headlock -headphone -headpiece -headrest -headroom -headscarf -headset -headsman -headstand -headstone -headway -headwear -heap -heat -heave -heavily -heaviness -heaving -hedge -hedging -heftiness -hefty -helium -helmet -helper -helpful -helping -helpless -helpline -hemlock -hemstitch -hence -henchman -henna -herald -herbal -herbicide -herbs -heritage -hermit -heroics -heroism -herring -herself -hertz -hesitancy -hesitant -hesitate -hexagon -hexagram -hubcap -huddle -huddling -huff -hug -hula -hulk -hull -human -humble -humbling -humbly -humid -humiliate -humility -humming -hummus -humongous -humorist -humorless -humorous -humpback -humped -humvee -hunchback -hundredth -hunger -hungrily -hungry -hunk -hunter -hunting -huntress -huntsman -hurdle -hurled -hurler -hurling -hurray -hurricane -hurried -hurry -hurt -husband -hush -husked -huskiness -hut 
-hybrid -hydrant -hydrated -hydration -hydrogen -hydroxide -hyperlink -hypertext -hyphen -hypnoses -hypnosis -hypnotic -hypnotism -hypnotist -hypnotize -hypocrisy -hypocrite -ibuprofen -ice -iciness -icing -icky -icon -icy -idealism -idealist -idealize -ideally -idealness -identical -identify -identity -ideology -idiocy -idiom -idly -igloo -ignition -ignore -iguana -illicitly -illusion -illusive -image -imaginary -imagines -imaging -imbecile -imitate -imitation -immature -immerse -immersion -imminent -immobile -immodest -immorally -immortal -immovable -immovably -immunity -immunize -impaired -impale -impart -impatient -impeach -impeding -impending -imperfect -imperial -impish -implant -implement -implicate -implicit -implode -implosion -implosive -imply -impolite -important -importer -impose -imposing -impotence -impotency -impotent -impound -imprecise -imprint -imprison -impromptu -improper -improve -improving -improvise -imprudent -impulse -impulsive -impure -impurity -iodine -iodize -ion -ipad -iphone -ipod -irate -irk -iron -irregular -irrigate -irritable -irritably -irritant -irritate -islamic -islamist -isolated -isolating -isolation -isotope -issue -issuing -italicize -italics -item -itinerary -itunes -ivory -ivy -jab -jackal -jacket -jackknife -jackpot -jailbird -jailbreak -jailer -jailhouse -jalapeno -jam -janitor -january -jargon -jarring -jasmine -jaundice -jaunt -java -jawed -jawless -jawline -jaws -jaybird -jaywalker -jazz -jeep -jeeringly -jellied -jelly -jersey -jester -jet -jiffy -jigsaw -jimmy -jingle -jingling -jinx -jitters -jittery -job -jockey -jockstrap -jogger -jogging -john -joining -jokester -jokingly -jolliness -jolly -jolt -jot -jovial -joyfully -joylessly -joyous -joyride -joystick -jubilance -jubilant -judge -judgingly -judicial -judiciary -judo -juggle -juggling -jugular -juice -juiciness -juicy -jujitsu -jukebox -july -jumble -jumbo -jump -junction -juncture -june -junior -juniper -junkie -junkman -junkyard -jurist -juror -jury 
-justice -justifier -justify -justly -justness -juvenile -kabob -kangaroo -karaoke -karate -karma -kebab -keenly -keenness -keep -keg -kelp -kennel -kept -kerchief -kerosene -kettle -kick -kiln -kilobyte -kilogram -kilometer -kilowatt -kilt -kimono -kindle -kindling -kindly -kindness -kindred -kinetic -kinfolk -king -kinship -kinsman -kinswoman -kissable -kisser -kissing -kitchen -kite -kitten -kitty -kiwi -kleenex -knapsack -knee -knelt -knickers -knoll -koala -kooky -kosher -krypton -kudos -kung -labored -laborer -laboring -laborious -labrador -ladder -ladies -ladle -ladybug -ladylike -lagged -lagging -lagoon -lair -lake -lance -landed -landfall -landfill -landing -landlady -landless -landline -landlord -landmark -landmass -landmine -landowner -landscape -landside -landslide -language -lankiness -lanky -lantern -lapdog -lapel -lapped -lapping -laptop -lard -large -lark -lash -lasso -last -latch -late -lather -latitude -latrine -latter -latticed -launch -launder -laundry -laurel -lavender -lavish -laxative -lazily -laziness -lazy -lecturer -left -legacy -legal -legend -legged -leggings -legible -legibly -legislate -lego -legroom -legume -legwarmer -legwork -lemon -lend -length -lens -lent -leotard -lesser -letdown -lethargic -lethargy -letter -lettuce -level -leverage -levers -levitate -levitator -liability -liable -liberty -librarian -library -licking -licorice -lid -life -lifter -lifting -liftoff -ligament -likely -likeness -likewise -liking -lilac -lilly -lily -limb -limeade -limelight -limes -limit -limping -limpness -line -lingo -linguini -linguist -lining -linked -linoleum -linseed -lint -lion -lip -liquefy -liqueur -liquid -lisp -list -litigate -litigator -litmus -litter -little -livable -lived -lively -liver -livestock -lividly -living -lizard -lubricant -lubricate -lucid -luckily -luckiness -luckless -lucrative -ludicrous -lugged -lukewarm -lullaby -lumber -luminance -luminous -lumpiness -lumping -lumpish -lunacy -lunar -lunchbox -luncheon -lunchroom 
-lunchtime -lung -lurch -lure -luridness -lurk -lushly -lushness -luster -lustfully -lustily -lustiness -lustrous -lusty -luxurious -luxury -lying -lyrically -lyricism -lyricist -lyrics -macarena -macaroni -macaw -mace -machine -machinist -magazine -magenta -maggot -magical -magician -magma -magnesium -magnetic -magnetism -magnetize -magnifier -magnify -magnitude -magnolia -mahogany -maimed -majestic -majesty -majorette -majority -makeover -maker -makeshift -making -malformed -malt -mama -mammal -mammary -mammogram -manager -managing -manatee -mandarin -mandate -mandatory -mandolin -manger -mangle -mango -mangy -manhandle -manhole -manhood -manhunt -manicotti -manicure -manifesto -manila -mankind -manlike -manliness -manly -manmade -manned -mannish -manor -manpower -mantis -mantra -manual -many -map -marathon -marauding -marbled -marbles -marbling -march -mardi -margarine -margarita -margin -marigold -marina -marine -marital -maritime -marlin -marmalade -maroon -married -marrow -marry -marshland -marshy -marsupial -marvelous -marxism -mascot -masculine -mashed -mashing -massager -masses -massive -mastiff -matador -matchbook -matchbox -matcher -matching -matchless -material -maternal -maternity -math -mating -matriarch -matrimony -matrix -matron -matted -matter -maturely -maturing -maturity -mauve -maverick -maximize -maximum -maybe -mayday -mayflower -moaner -moaning -mobile -mobility -mobilize -mobster -mocha -mocker -mockup -modified -modify -modular -modulator -module -moisten -moistness -moisture -molar -molasses -mold -molecular -molecule -molehill -mollusk -mom -monastery -monday -monetary -monetize -moneybags -moneyless -moneywise -mongoose -mongrel -monitor -monkhood -monogamy -monogram -monologue -monopoly -monorail -monotone -monotype -monoxide -monsieur -monsoon -monstrous -monthly -monument -moocher -moodiness -moody -mooing -moonbeam -mooned -moonlight -moonlike -moonlit -moonrise -moonscape -moonshine -moonstone -moonwalk -mop -morale -morality 
-morally -morbidity -morbidly -morphine -morphing -morse -mortality -mortally -mortician -mortified -mortify -mortuary -mosaic -mossy -most -mothball -mothproof -motion -motivate -motivator -motive -motocross -motor -motto -mountable -mountain -mounted -mounting -mourner -mournful -mouse -mousiness -moustache -mousy -mouth -movable -move -movie -moving -mower -mowing -much -muck -mud -mug -mulberry -mulch -mule -mulled -mullets -multiple -multiply -multitask -multitude -mumble -mumbling -mumbo -mummified -mummify -mummy -mumps -munchkin -mundane -municipal -muppet -mural -murkiness -murky -murmuring -muscular -museum -mushily -mushiness -mushroom -mushy -music -musket -muskiness -musky -mustang -mustard -muster -mustiness -musty -mutable -mutate -mutation -mute -mutilated -mutilator -mutiny -mutt -mutual -muzzle -myself -myspace -mystified -mystify -myth -nacho -nag -nail -name -naming -nanny -nanometer -nape -napkin -napped -napping -nappy -narrow -nastily -nastiness -national -native -nativity -natural -nature -naturist -nautical -navigate -navigator -navy -nearby -nearest -nearly -nearness -neatly -neatness -nebula -nebulizer -nectar -negate -negation -negative -neglector -negligee -negligent -negotiate -nemeses -nemesis -neon -nephew -nerd -nervous -nervy -nest -net -neurology -neuron -neurosis -neurotic -neuter -neutron -never -next -nibble -nickname -nicotine -niece -nifty -nimble -nimbly -nineteen -ninetieth -ninja -nintendo -ninth -nuclear -nuclei -nucleus -nugget -nullify -number -numbing -numbly -numbness -numeral -numerate -numerator -numeric -numerous -nuptials -nursery -nursing -nurture -nutcase -nutlike -nutmeg -nutrient -nutshell -nuttiness -nutty -nuzzle -nylon -oaf -oak -oasis -oat -obedience -obedient -obituary -object -obligate -obliged -oblivion -oblivious -oblong -obnoxious -oboe -obscure -obscurity -observant -observer -observing -obsessed -obsession -obsessive -obsolete -obstacle -obstinate -obstruct -obtain -obtrusive -obtuse -obvious 
-occultist -occupancy -occupant -occupier -occupy -ocean -ocelot -octagon -octane -october -octopus -ogle -oil -oink -ointment -okay -old -olive -olympics -omega -omen -ominous -omission -omit -omnivore -onboard -oncoming -ongoing -onion -online -onlooker -only -onscreen -onset -onshore -onslaught -onstage -onto -onward -onyx -oops -ooze -oozy -opacity -opal -open -operable -operate -operating -operation -operative -operator -opium -opossum -opponent -oppose -opposing -opposite -oppressed -oppressor -opt -opulently -osmosis -other -otter -ouch -ought -ounce -outage -outback -outbid -outboard -outbound -outbreak -outburst -outcast -outclass -outcome -outdated -outdoors -outer -outfield -outfit -outflank -outgoing -outgrow -outhouse -outing -outlast -outlet -outline -outlook -outlying -outmatch -outmost -outnumber -outplayed -outpost -outpour -output -outrage -outrank -outreach -outright -outscore -outsell -outshine -outshoot -outsider -outskirts -outsmart -outsource -outspoken -outtakes -outthink -outward -outweigh -outwit -oval -ovary -oven -overact -overall -overarch -overbid -overbill -overbite -overblown -overboard -overbook -overbuilt -overcast -overcoat -overcome -overcook -overcrowd -overdraft -overdrawn -overdress -overdrive -overdue -overeager -overeater -overexert -overfed -overfeed -overfill -overflow -overfull -overgrown -overhand -overhang -overhaul -overhead -overhear -overheat -overhung -overjoyed -overkill -overlabor -overlaid -overlap -overlay -overload -overlook -overlord -overlying -overnight -overpass -overpay -overplant -overplay -overpower -overprice -overrate -overreach -overreact -override -overripe -overrule -overrun -overshoot -overshot -oversight -oversized -oversleep -oversold -overspend -overstate -overstay -overstep -overstock -overstuff -oversweet -overtake -overthrow -overtime -overtly -overtone -overture -overturn -overuse -overvalue -overview -overwrite -owl -oxford -oxidant -oxidation -oxidize -oxidizing -oxygen -oxymoron -oyster 
-ozone -paced -pacemaker -pacific -pacifier -pacifism -pacifist -pacify -padded -padding -paddle -paddling -padlock -pagan -pager -paging -pajamas -palace -palatable -palm -palpable -palpitate -paltry -pampered -pamperer -pampers -pamphlet -panama -pancake -pancreas -panda -pandemic -pang -panhandle -panic -panning -panorama -panoramic -panther -pantomime -pantry -pants -pantyhose -paparazzi -papaya -paper -paprika -papyrus -parabola -parachute -parade -paradox -paragraph -parakeet -paralegal -paralyses -paralysis -paralyze -paramedic -parameter -paramount -parasail -parasite -parasitic -parcel -parched -parchment -pardon -parish -parka -parking -parkway -parlor -parmesan -parole -parrot -parsley -parsnip -partake -parted -parting -partition -partly -partner -partridge -party -passable -passably -passage -passcode -passenger -passerby -passing -passion -passive -passivism -passover -passport -password -pasta -pasted -pastel -pastime -pastor -pastrami -pasture -pasty -patchwork -patchy -paternal -paternity -path -patience -patient -patio -patriarch -patriot -patrol -patronage -patronize -pauper -pavement -paver -pavestone -pavilion -paving -pawing -payable -payback -paycheck -payday -payee -payer -paying -payment -payphone -payroll -pebble -pebbly -pecan -pectin -peculiar -peddling -pediatric -pedicure -pedigree -pedometer -pegboard -pelican -pellet -pelt -pelvis -penalize -penalty -pencil -pendant -pending -penholder -penknife -pennant -penniless -penny -penpal -pension -pentagon -pentagram -pep -perceive -percent -perch -percolate -perennial -perfected -perfectly -perfume -periscope -perish -perjurer -perjury -perkiness -perky -perm -peroxide -perpetual -perplexed -persecute -persevere -persuaded -persuader -pesky -peso -pessimism -pessimist -pester -pesticide -petal -petite -petition -petri -petroleum -petted -petticoat -pettiness -petty -petunia -phantom -phobia -phoenix -phonebook -phoney -phonics -phoniness -phony -phosphate -photo -phrase -phrasing -placard 
-placate -placidly -plank -planner -plant -plasma -plaster -plastic -plated -platform -plating -platinum -platonic -platter -platypus -plausible -plausibly -playable -playback -player -playful -playgroup -playhouse -playing -playlist -playmaker -playmate -playoff -playpen -playroom -playset -plaything -playtime -plaza -pleading -pleat -pledge -plentiful -plenty -plethora -plexiglas -pliable -plod -plop -plot -plow -ploy -pluck -plug -plunder -plunging -plural -plus -plutonium -plywood -poach -pod -poem -poet -pogo -pointed -pointer -pointing -pointless -pointy -poise -poison -poker -poking -polar -police -policy -polio -polish -politely -polka -polo -polyester -polygon -polygraph -polymer -poncho -pond -pony -popcorn -pope -poplar -popper -poppy -popsicle -populace -popular -populate -porcupine -pork -porous -porridge -portable -portal -portfolio -porthole -portion -portly -portside -poser -posh -posing -possible -possibly -possum -postage -postal -postbox -postcard -posted -poster -posting -postnasal -posture -postwar -pouch -pounce -pouncing -pound -pouring -pout -powdered -powdering -powdery -power -powwow -pox -praising -prance -prancing -pranker -prankish -prankster -prayer -praying -preacher -preaching -preachy -preamble -precinct -precise -precision -precook -precut -predator -predefine -predict -preface -prefix -preflight -preformed -pregame -pregnancy -pregnant -preheated -prelaunch -prelaw -prelude -premiere -premises -premium -prenatal -preoccupy -preorder -prepaid -prepay -preplan -preppy -preschool -prescribe -preseason -preset -preshow -president -presoak -press -presume -presuming -preteen -pretended -pretender -pretense -pretext -pretty -pretzel -prevail -prevalent -prevent -preview -previous -prewar -prewashed -prideful -pried -primal -primarily -primary -primate -primer -primp -princess -print -prior -prism -prison -prissy -pristine -privacy -private -privatize -prize -proactive -probable -probably -probation -probe -probing -probiotic -problem 
-procedure -process -proclaim -procreate -procurer -prodigal -prodigy -produce -product -profane -profanity -professed -professor -profile -profound -profusely -progeny -prognosis -program -progress -projector -prologue -prolonged -promenade -prominent -promoter -promotion -prompter -promptly -prone -prong -pronounce -pronto -proofing -proofread -proofs -propeller -properly -property -proponent -proposal -propose -props -prorate -protector -protegee -proton -prototype -protozoan -protract -protrude -proud -provable -proved -proven -provided -provider -providing -province -proving -provoke -provoking -provolone -prowess -prowler -prowling -proximity -proxy -prozac -prude -prudishly -prune -pruning -pry -psychic -public -publisher -pucker -pueblo -pug -pull -pulmonary -pulp -pulsate -pulse -pulverize -puma -pumice -pummel -punch -punctual -punctuate -punctured -pungent -punisher -punk -pupil -puppet -puppy -purchase -pureblood -purebred -purely -pureness -purgatory -purge -purging -purifier -purify -purist -puritan -purity -purple -purplish -purposely -purr -purse -pursuable -pursuant -pursuit -purveyor -pushcart -pushchair -pusher -pushiness -pushing -pushover -pushpin -pushup -pushy -putdown -putt -puzzle -puzzling -pyramid -pyromania -python -quack -quadrant -quail -quaintly -quake -quaking -qualified -qualifier -qualify -quality -qualm -quantum -quarrel -quarry -quartered -quarterly -quarters -quartet -quench -query -quicken -quickly -quickness -quicksand -quickstep -quiet -quill -quilt -quintet -quintuple -quirk -quit -quiver -quizzical -quotable -quotation -quote -rabid -race -racing -racism -rack -racoon -radar -radial -radiance -radiantly -radiated -radiation -radiator -radio -radish -raffle -raft -rage -ragged -raging -ragweed -raider -railcar -railing -railroad -railway -raisin -rake -raking -rally -ramble -rambling -ramp -ramrod -ranch -rancidity -random -ranged -ranger -ranging -ranked -ranking -ransack -ranting -rants -rare -rarity -rascal -rash -rasping 
-ravage -raven -ravine -raving -ravioli -ravishing -reabsorb -reach -reacquire -reaction -reactive -reactor -reaffirm -ream -reanalyze -reappear -reapply -reappoint -reapprove -rearrange -rearview -reason -reassign -reassure -reattach -reawake -rebalance -rebate -rebel -rebirth -reboot -reborn -rebound -rebuff -rebuild -rebuilt -reburial -rebuttal -recall -recant -recapture -recast -recede -recent -recess -recharger -recipient -recital -recite -reckless -reclaim -recliner -reclining -recluse -reclusive -recognize -recoil -recollect -recolor -reconcile -reconfirm -reconvene -recopy -record -recount -recoup -recovery -recreate -rectal -rectangle -rectified -rectify -recycled -recycler -recycling -reemerge -reenact -reenter -reentry -reexamine -referable -referee -reference -refill -refinance -refined -refinery -refining -refinish -reflected -reflector -reflex -reflux -refocus -refold -reforest -reformat -reformed -reformer -reformist -refract -refrain -refreeze -refresh -refried -refueling -refund -refurbish -refurnish -refusal -refuse -refusing -refutable -refute -regain -regalia -regally -reggae -regime -region -register -registrar -registry -regress -regretful -regroup -regular -regulate -regulator -rehab -reheat -rehire -rehydrate -reimburse -reissue -reiterate -rejoice -rejoicing -rejoin -rekindle -relapse -relapsing -relatable -related -relation -relative -relax -relay -relearn -release -relenting -reliable -reliably -reliance -reliant -relic -relieve -relieving -relight -relish -relive -reload -relocate -relock -reluctant -rely -remake -remark -remarry -rematch -remedial -remedy -remember -reminder -remindful -remission -remix -remnant -remodeler -remold -remorse -remote -removable -removal -removed -remover -removing -rename -renderer -rendering -rendition -renegade -renewable -renewably -renewal -renewed -renounce -renovate -renovator -rentable -rental -rented -renter -reoccupy -reoccur -reopen -reorder -repackage -repacking -repaint -repair -repave 
-repaying -repayment -repeal -repeated -repeater -repent -rephrase -replace -replay -replica -reply -reporter -repose -repossess -repost -repressed -reprimand -reprint -reprise -reproach -reprocess -reproduce -reprogram -reps -reptile -reptilian -repugnant -repulsion -repulsive -repurpose -reputable -reputably -request -require -requisite -reroute -rerun -resale -resample -rescuer -reseal -research -reselect -reseller -resemble -resend -resent -reset -reshape -reshoot -reshuffle -residence -residency -resident -residual -residue -resigned -resilient -resistant -resisting -resize -resolute -resolved -resonant -resonate -resort -resource -respect -resubmit -result -resume -resupply -resurface -resurrect -retail -retainer -retaining -retake -retaliate -retention -rethink -retinal -retired -retiree -retiring -retold -retool -retorted -retouch -retrace -retract -retrain -retread -retreat -retrial -retrieval -retriever -retry -return -retying -retype -reunion -reunite -reusable -reuse -reveal -reveler -revenge -revenue -reverb -revered -reverence -reverend -reversal -reverse -reversing -reversion -revert -revisable -revise -revision -revisit -revivable -revival -reviver -reviving -revocable -revoke -revolt -revolver -revolving -reward -rewash -rewind -rewire -reword -rework -rewrap -rewrite -rhyme -ribbon -ribcage -rice -riches -richly -richness -rickety -ricotta -riddance -ridden -ride -riding -rifling -rift -rigging -rigid -rigor -rimless -rimmed -rind -rink -rinse -rinsing -riot -ripcord -ripeness -ripening -ripping -ripple -rippling -riptide -rise -rising -risk -risotto -ritalin -ritzy -rival -riverbank -riverbed -riverboat -riverside -riveter -riveting -roamer -roaming -roast -robbing -robe -robin -robotics -robust -rockband -rocker -rocket -rockfish -rockiness -rocking -rocklike -rockslide -rockstar -rocky -rogue -roman -romp -rope -roping -roster -rosy -rotten -rotting -rotunda -roulette -rounding -roundish -roundness -roundup -roundworm -routine -routing -rover 
-roving -royal -rubbed -rubber -rubbing -rubble -rubdown -ruby -ruckus -rudder -rug -ruined -rule -rumble -rumbling -rummage -rumor -runaround -rundown -runner -running -runny -runt -runway -rupture -rural -ruse -rush -rust -rut -sabbath -sabotage -sacrament -sacred -sacrifice -sadden -saddlebag -saddled -saddling -sadly -sadness -safari -safeguard -safehouse -safely -safeness -saffron -saga -sage -sagging -saggy -said -saint -sake -salad -salami -salaried -salary -saline -salon -saloon -salsa -salt -salutary -salute -salvage -salvaging -salvation -same -sample -sampling -sanction -sanctity -sanctuary -sandal -sandbag -sandbank -sandbar -sandblast -sandbox -sanded -sandfish -sanding -sandlot -sandpaper -sandpit -sandstone -sandstorm -sandworm -sandy -sanitary -sanitizer -sank -santa -sapling -sappiness -sappy -sarcasm -sarcastic -sardine -sash -sasquatch -sassy -satchel -satiable -satin -satirical -satisfied -satisfy -saturate -saturday -sauciness -saucy -sauna -savage -savanna -saved -savings -savior -savor -saxophone -say -scabbed -scabby -scalded -scalding -scale -scaling -scallion -scallop -scalping -scam -scandal -scanner -scanning -scant -scapegoat -scarce -scarcity -scarecrow -scared -scarf -scarily -scariness -scarring -scary -scavenger -scenic -schedule -schematic -scheme -scheming -schilling -schnapps -scholar -science -scientist -scion -scoff -scolding -scone -scoop -scooter -scope -scorch -scorebook -scorecard -scored -scoreless -scorer -scoring -scorn -scorpion -scotch -scoundrel -scoured -scouring -scouting -scouts -scowling -scrabble -scraggly -scrambled -scrambler -scrap -scratch -scrawny -screen -scribble -scribe -scribing -scrimmage -script -scroll -scrooge -scrounger -scrubbed -scrubber -scruffy -scrunch -scrutiny -scuba -scuff -sculptor -sculpture -scurvy -scuttle -secluded -secluding -seclusion -second -secrecy -secret -sectional -sector -secular -securely -security -sedan -sedate -sedation -sedative -sediment -seduce -seducing -segment 
-seismic -seizing -seldom -selected -selection -selective -selector -self -seltzer -semantic -semester -semicolon -semifinal -seminar -semisoft -semisweet -senate -senator -send -senior -senorita -sensation -sensitive -sensitize -sensually -sensuous -sepia -september -septic -septum -sequel -sequence -sequester -series -sermon -serotonin -serpent -serrated -serve -service -serving -sesame -sessions -setback -setting -settle -settling -setup -sevenfold -seventeen -seventh -seventy -severity -shabby -shack -shaded -shadily -shadiness -shading -shadow -shady -shaft -shakable -shakily -shakiness -shaking -shaky -shale -shallot -shallow -shame -shampoo -shamrock -shank -shanty -shape -shaping -share -sharpener -sharper -sharpie -sharply -sharpness -shawl -sheath -shed -sheep -sheet -shelf -shell -shelter -shelve -shelving -sherry -shield -shifter -shifting -shiftless -shifty -shimmer -shimmy -shindig -shine -shingle -shininess -shining -shiny -ship -shirt -shivering -shock -shone -shoplift -shopper -shopping -shoptalk -shore -shortage -shortcake -shortcut -shorten -shorter -shorthand -shortlist -shortly -shortness -shorts -shortwave -shorty -shout -shove -showbiz -showcase -showdown -shower -showgirl -showing -showman -shown -showoff -showpiece -showplace -showroom -showy -shrank -shrapnel -shredder -shredding -shrewdly -shriek -shrill -shrimp -shrine -shrink -shrivel -shrouded -shrubbery -shrubs -shrug -shrunk -shucking -shudder -shuffle -shuffling -shun -shush -shut -shy -siamese -siberian -sibling -siding -sierra -siesta -sift -sighing -silenced -silencer -silent -silica -silicon -silk -silliness -silly -silo -silt -silver -similarly -simile -simmering -simple -simplify -simply -sincere -sincerity -singer -singing -single -singular -sinister -sinless -sinner -sinuous -sip -siren -sister -sitcom -sitter -sitting -situated -situation -sixfold -sixteen -sixth -sixties -sixtieth -sixtyfold -sizable -sizably -size -sizing -sizzle -sizzling -skater -skating -skedaddle 
-skeletal -skeleton -skeptic -sketch -skewed -skewer -skid -skied -skier -skies -skiing -skilled -skillet -skillful -skimmed -skimmer -skimming -skimpily -skincare -skinhead -skinless -skinning -skinny -skintight -skipper -skipping -skirmish -skirt -skittle -skydiver -skylight -skyline -skype -skyrocket -skyward -slab -slacked -slacker -slacking -slackness -slacks -slain -slam -slander -slang -slapping -slapstick -slashed -slashing -slate -slather -slaw -sled -sleek -sleep -sleet -sleeve -slept -sliceable -sliced -slicer -slicing -slick -slider -slideshow -sliding -slighted -slighting -slightly -slimness -slimy -slinging -slingshot -slinky -slip -slit -sliver -slobbery -slogan -sloped -sloping -sloppily -sloppy -slot -slouching -slouchy -sludge -slug -slum -slurp -slush -sly -small -smartly -smartness -smasher -smashing -smashup -smell -smelting -smile -smilingly -smirk -smite -smith -smitten -smock -smog -smoked -smokeless -smokiness -smoking -smoky -smolder -smooth -smother -smudge -smudgy -smuggler -smuggling -smugly -smugness -snack -snagged -snaking -snap -snare -snarl -snazzy -sneak -sneer -sneeze -sneezing -snide -sniff -snippet -snipping -snitch -snooper -snooze -snore -snoring -snorkel -snort -snout -snowbird -snowboard -snowbound -snowcap -snowdrift -snowdrop -snowfall -snowfield -snowflake -snowiness -snowless -snowman -snowplow -snowshoe -snowstorm -snowsuit -snowy -snub -snuff -snuggle -snugly -snugness -speak -spearfish -spearhead -spearman -spearmint -species -specimen -specked -speckled -specks -spectacle -spectator -spectrum -speculate -speech -speed -spellbind -speller -spelling -spendable -spender -spending -spent -spew -sphere -spherical -sphinx -spider -spied -spiffy -spill -spilt -spinach -spinal -spindle -spinner -spinning -spinout -spinster -spiny -spiral -spirited -spiritism -spirits -spiritual -splashed -splashing -splashy -splatter -spleen -splendid -splendor -splice -splicing -splinter -splotchy -splurge -spoilage -spoiled -spoiler 
-spoiling -spoils -spoken -spokesman -sponge -spongy -sponsor -spoof -spookily -spooky -spool -spoon -spore -sporting -sports -sporty -spotless -spotlight -spotted -spotter -spotting -spotty -spousal -spouse -spout -sprain -sprang -sprawl -spray -spree -sprig -spring -sprinkled -sprinkler -sprint -sprite -sprout -spruce -sprung -spry -spud -spur -sputter -spyglass -squabble -squad -squall -squander -squash -squatted -squatter -squatting -squeak -squealer -squealing -squeamish -squeegee -squeeze -squeezing -squid -squiggle -squiggly -squint -squire -squirt -squishier -squishy -stability -stabilize -stable -stack -stadium -staff -stage -staging -stagnant -stagnate -stainable -stained -staining -stainless -stalemate -staleness -stalling -stallion -stamina -stammer -stamp -stand -stank -staple -stapling -starboard -starch -stardom -stardust -starfish -stargazer -staring -stark -starless -starlet -starlight -starlit -starring -starry -starship -starter -starting -startle -startling -startup -starved -starving -stash -state -static -statistic -statue -stature -status -statute -statutory -staunch -stays -steadfast -steadier -steadily -steadying -steam -steed -steep -steerable -steering -steersman -stegosaur -stellar -stem -stench -stencil -step -stereo -sterile -sterility -sterilize -sterling -sternness -sternum -stew -stick -stiffen -stiffly -stiffness -stifle -stifling -stillness -stilt -stimulant -stimulate -stimuli -stimulus -stinger -stingily -stinging -stingray -stingy -stinking -stinky -stipend -stipulate -stir -stitch -stock -stoic -stoke -stole -stomp -stonewall -stoneware -stonework -stoning -stony -stood -stooge -stool -stoop -stoplight -stoppable -stoppage -stopped -stopper -stopping -stopwatch -storable -storage -storeroom -storewide -storm -stout -stove -stowaway -stowing -straddle -straggler -strained -strainer -straining -strangely -stranger -strangle -strategic -strategy -stratus -straw -stray -streak -stream -street -strength -strenuous -strep -stress 
-stretch -strewn -stricken -strict -stride -strife -strike -striking -strive -striving -strobe -strode -stroller -strongbox -strongly -strongman -struck -structure -strudel -struggle -strum -strung -strut -stubbed -stubble -stubbly -stubborn -stucco -stuck -student -studied -studio -study -stuffed -stuffing -stuffy -stumble -stumbling -stump -stung -stunned -stunner -stunning -stunt -stupor -sturdily -sturdy -styling -stylishly -stylist -stylized -stylus -suave -subarctic -subatomic -subdivide -subdued -subduing -subfloor -subgroup -subheader -subject -sublease -sublet -sublevel -sublime -submarine -submerge -submersed -submitter -subpanel -subpar -subplot -subprime -subscribe -subscript -subsector -subside -subsiding -subsidize -subsidy -subsoil -subsonic -substance -subsystem -subtext -subtitle -subtly -subtotal -subtract -subtype -suburb -subway -subwoofer -subzero -succulent -such -suction -sudden -sudoku -suds -sufferer -suffering -suffice -suffix -suffocate -suffrage -sugar -suggest -suing -suitable -suitably -suitcase -suitor -sulfate -sulfide -sulfite -sulfur -sulk -sullen -sulphate -sulphuric -sultry -superbowl -superglue -superhero -superior -superjet -superman -supermom -supernova -supervise -supper -supplier -supply -support -supremacy -supreme -surcharge -surely -sureness -surface -surfacing -surfboard -surfer -surgery -surgical -surging -surname -surpass -surplus -surprise -surreal -surrender -surrogate -surround -survey -survival -survive -surviving -survivor -sushi -suspect -suspend -suspense -sustained -sustainer -swab -swaddling -swagger -swampland -swan -swapping -swarm -sway -swear -sweat -sweep -swell -swept -swerve -swifter -swiftly -swiftness -swimmable -swimmer -swimming -swimsuit -swimwear -swinger -swinging -swipe -swirl -switch -swivel -swizzle -swooned -swoop -swoosh -swore -sworn -swung -sycamore -sympathy -symphonic -symphony -symptom -synapse -syndrome -synergy -synopses -synopsis -synthesis -synthetic -syrup -system -t-shirt -tabasco 
-tabby -tableful -tables -tablet -tableware -tabloid -tackiness -tacking -tackle -tackling -tacky -taco -tactful -tactical -tactics -tactile -tactless -tadpole -taekwondo -tag -tainted -take -taking -talcum -talisman -tall -talon -tamale -tameness -tamer -tamper -tank -tanned -tannery -tanning -tantrum -tapeless -tapered -tapering -tapestry -tapioca -tapping -taps -tarantula -target -tarmac -tarnish -tarot -tartar -tartly -tartness -task -tassel -taste -tastiness -tasting -tasty -tattered -tattle -tattling -tattoo -taunt -tavern -thank -that -thaw -theater -theatrics -thee -theft -theme -theology -theorize -thermal -thermos -thesaurus -these -thesis -thespian -thicken -thicket -thickness -thieving -thievish -thigh -thimble -thing -think -thinly -thinner -thinness -thinning -thirstily -thirsting -thirsty -thirteen -thirty -thong -thorn -those -thousand -thrash -thread -threaten -threefold -thrift -thrill -thrive -thriving -throat -throbbing -throng -throttle -throwaway -throwback -thrower -throwing -thud -thumb -thumping -thursday -thus -thwarting -thyself -tiara -tibia -tidal -tidbit -tidiness -tidings -tidy -tiger -tighten -tightly -tightness -tightrope -tightwad -tigress -tile -tiling -till -tilt -timid -timing -timothy -tinderbox -tinfoil -tingle -tingling -tingly -tinker -tinkling -tinsel -tinsmith -tint -tinwork -tiny -tipoff -tipped -tipper -tipping -tiptoeing -tiptop -tiring -tissue -trace -tracing -track -traction -tractor -trade -trading -tradition -traffic -tragedy -trailing -trailside -train -traitor -trance -tranquil -transfer -transform -translate -transpire -transport -transpose -trapdoor -trapeze -trapezoid -trapped -trapper -trapping -traps -trash -travel -traverse -travesty -tray -treachery -treading -treadmill -treason -treat -treble -tree -trekker -tremble -trembling -tremor -trench -trend -trespass -triage -trial -triangle -tribesman -tribunal -tribune -tributary -tribute -triceps -trickery -trickily -tricking -trickle -trickster -tricky 
-tricolor -tricycle -trident -tried -trifle -trifocals -trillion -trilogy -trimester -trimmer -trimming -trimness -trinity -trio -tripod -tripping -triumph -trivial -trodden -trolling -trombone -trophy -tropical -tropics -trouble -troubling -trough -trousers -trout -trowel -truce -truck -truffle -trump -trunks -trustable -trustee -trustful -trusting -trustless -truth -try -tubby -tubeless -tubular -tucking -tuesday -tug -tuition -tulip -tumble -tumbling -tummy -turban -turbine -turbofan -turbojet -turbulent -turf -turkey -turmoil -turret -turtle -tusk -tutor -tutu -tux -tweak -tweed -tweet -tweezers -twelve -twentieth -twenty -twerp -twice -twiddle -twiddling -twig -twilight -twine -twins -twirl -twistable -twisted -twister -twisting -twisty -twitch -twitter -tycoon -tying -tyke -udder -ultimate -ultimatum -ultra -umbilical -umbrella -umpire -unabashed -unable -unadorned -unadvised -unafraid -unaired -unaligned -unaltered -unarmored -unashamed -unaudited -unawake -unaware -unbaked -unbalance -unbeaten -unbend -unbent -unbiased -unbitten -unblended -unblessed -unblock -unbolted -unbounded -unboxed -unbraided -unbridle -unbroken -unbuckled -unbundle -unburned -unbutton -uncanny -uncapped -uncaring -uncertain -unchain -unchanged -uncharted -uncheck -uncivil -unclad -unclaimed -unclamped -unclasp -uncle -unclip -uncloak -unclog -unclothed -uncoated -uncoiled -uncolored -uncombed -uncommon -uncooked -uncork -uncorrupt -uncounted -uncouple -uncouth -uncover -uncross -uncrown -uncrushed -uncured -uncurious -uncurled -uncut -undamaged -undated -undaunted -undead -undecided -undefined -underage -underarm -undercoat -undercook -undercut -underdog -underdone -underfed -underfeed -underfoot -undergo -undergrad -underhand -underline -underling -undermine -undermost -underpaid -underpass -underpay -underrate -undertake -undertone -undertook -undertow -underuse -underwear -underwent -underwire -undesired -undiluted -undivided -undocked -undoing -undone -undrafted -undress 
-undrilled -undusted -undying -unearned -unearth -unease -uneasily -uneasy -uneatable -uneaten -unedited -unelected -unending -unengaged -unenvied -unequal -unethical -uneven -unexpired -unexposed -unfailing -unfair -unfasten -unfazed -unfeeling -unfiled -unfilled -unfitted -unfitting -unfixable -unfixed -unflawed -unfocused -unfold -unfounded -unframed -unfreeze -unfrosted -unfrozen -unfunded -unglazed -ungloved -unglue -ungodly -ungraded -ungreased -unguarded -unguided -unhappily -unhappy -unharmed -unhealthy -unheard -unhearing -unheated -unhelpful -unhidden -unhinge -unhitched -unholy -unhook -unicorn -unicycle -unified -unifier -uniformed -uniformly -unify -unimpeded -uninjured -uninstall -uninsured -uninvited -union -uniquely -unisexual -unison -unissued -unit -universal -universe -unjustly -unkempt -unkind -unknotted -unknowing -unknown -unlaced -unlatch -unlawful -unleaded -unlearned -unleash -unless -unleveled -unlighted -unlikable -unlimited -unlined -unlinked -unlisted -unlit -unlivable -unloaded -unloader -unlocked -unlocking -unlovable -unloved -unlovely -unloving -unluckily -unlucky -unmade -unmanaged -unmanned -unmapped -unmarked -unmasked -unmasking -unmatched -unmindful -unmixable -unmixed -unmolded -unmoral -unmovable -unmoved -unmoving -unnamable -unnamed -unnatural -unneeded -unnerve -unnerving -unnoticed -unopened -unopposed -unpack -unpadded -unpaid -unpainted -unpaired -unpaved -unpeeled -unpicked -unpiloted -unpinned -unplanned -unplanted -unpleased -unpledged -unplowed -unplug -unpopular -unproven -unquote -unranked -unrated -unraveled -unreached -unread -unreal -unreeling -unrefined -unrelated -unrented -unrest -unretired -unrevised -unrigged -unripe -unrivaled -unroasted -unrobed -unroll -unruffled -unruly -unrushed -unsaddle -unsafe -unsaid -unsalted -unsaved -unsavory -unscathed -unscented -unscrew -unsealed -unseated -unsecured -unseeing -unseemly -unseen -unselect -unselfish -unsent -unsettled -unshackle -unshaken -unshaved -unshaven 
-unsheathe -unshipped -unsightly -unsigned -unskilled -unsliced -unsmooth -unsnap -unsocial -unsoiled -unsold -unsolved -unsorted -unspoiled -unspoken -unstable -unstaffed -unstamped -unsteady -unsterile -unstirred -unstitch -unstopped -unstuck -unstuffed -unstylish -unsubtle -unsubtly -unsuited -unsure -unsworn -untagged -untainted -untaken -untamed -untangled -untapped -untaxed -unthawed -unthread -untidy -untie -until -untimed -untimely -untitled -untoasted -untold -untouched -untracked -untrained -untreated -untried -untrimmed -untrue -untruth -unturned -untwist -untying -unusable -unused -unusual -unvalued -unvaried -unvarying -unveiled -unveiling -unvented -unviable -unvisited -unvocal -unwanted -unwarlike -unwary -unwashed -unwatched -unweave -unwed -unwelcome -unwell -unwieldy -unwilling -unwind -unwired -unwitting -unwomanly -unworldly -unworn -unworried -unworthy -unwound -unwoven -unwrapped -unwritten -unzip -upbeat -upchuck -upcoming -upcountry -update -upfront -upgrade -upheaval -upheld -uphill -uphold -uplifted -uplifting -upload -upon -upper -upright -uprising -upriver -uproar -uproot -upscale -upside -upstage -upstairs -upstart -upstate -upstream -upstroke -upswing -uptake -uptight -uptown -upturned -upward -upwind -uranium -urban -urchin -urethane -urgency -urgent -urging -urologist -urology -usable -usage -useable -used -uselessly -user -usher -usual -utensil -utility -utilize -utmost -utopia -utter -vacancy -vacant -vacate -vacation -vagabond -vagrancy -vagrantly -vaguely -vagueness -valiant -valid -valium -valley -valuables -value -vanilla -vanish -vanity -vanquish -vantage -vaporizer -variable -variably -varied -variety -various -varmint -varnish -varsity -varying -vascular -vaseline -vastly -vastness -veal -vegan -veggie -vehicular -velcro -velocity -velvet -vendetta -vending -vendor -veneering -vengeful -venomous -ventricle -venture -venue -venus -verbalize -verbally -verbose -verdict -verify -verse -version -versus -vertebrae -vertical 
-vertigo -very -vessel -vest -veteran -veto -vexingly -viability -viable -vibes -vice -vicinity -victory -video -viewable -viewer -viewing -viewless -viewpoint -vigorous -village -villain -vindicate -vineyard -vintage -violate -violation -violator -violet -violin -viper -viral -virtual -virtuous -virus -visa -viscosity -viscous -viselike -visible -visibly -vision -visiting -visitor -visor -vista -vitality -vitalize -vitally -vitamins -vivacious -vividly -vividness -vixen -vocalist -vocalize -vocally -vocation -voice -voicing -void -volatile -volley -voltage -volumes -voter -voting -voucher -vowed -vowel -voyage -wackiness -wad -wafer -waffle -waged -wager -wages -waggle -wagon -wake -waking -walk -walmart -walnut -walrus -waltz -wand -wannabe -wanted -wanting -wasabi -washable -washbasin -washboard -washbowl -washcloth -washday -washed -washer -washhouse -washing -washout -washroom -washstand -washtub -wasp -wasting -watch -water -waviness -waving -wavy -whacking -whacky -wham -wharf -wheat -whenever -whiff -whimsical -whinny -whiny -whisking -whoever -whole -whomever -whoopee -whooping -whoops -why -wick -widely -widen -widget -widow -width -wieldable -wielder -wife -wifi -wikipedia -wildcard -wildcat -wilder -wildfire -wildfowl -wildland -wildlife -wildly -wildness -willed -willfully -willing -willow -willpower -wilt -wimp -wince -wincing -wind -wing -winking -winner -winnings -winter -wipe -wired -wireless -wiring -wiry -wisdom -wise -wish -wisplike -wispy -wistful -wizard -wobble -wobbling -wobbly -wok -wolf -wolverine -womanhood -womankind -womanless -womanlike -womanly -womb -woof -wooing -wool -woozy -word -work -worried -worrier -worrisome -worry -worsening -worshiper -worst -wound -woven -wow -wrangle -wrath -wreath -wreckage -wrecker -wrecking -wrench -wriggle -wriggly -wrinkle -wrinkly -wrist -writing -written -wrongdoer -wronged -wrongful -wrongly -wrongness -wrought -xbox -xerox -yahoo -yam -yanking -yapping -yard -yarn -yeah -yearbook -yearling 
-yearly -yearning -yeast -yelling -yelp -yen -yesterday -yiddish -yield -yin -yippee -yo-yo -yodel -yoga -yogurt -yonder -yoyo -yummy -zap -zealous -zebra -zen -zeppelin -zero -zestfully -zesty -zigzagged -zipfile -zipping -zippy -zips -zit -zodiac -zombie -zone -zoning -zookeeper -zoologist -zoology -zoom \ No newline at end of file diff --git a/crates/crypto/benches/crypto/aes-256-gcm-siv.rs b/crates/crypto/benches/crypto/aes-256-gcm-siv.rs deleted file mode 100644 index ac3bcfec1..000000000 --- a/crates/crypto/benches/crypto/aes-256-gcm-siv.rs +++ /dev/null @@ -1,66 +0,0 @@ -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use sd_crypto::{ - crypto::{Decryptor, Encryptor}, - primitives::{BLOCK_LEN, KEY_LEN}, - types::{Aad, Algorithm, Key, Nonce}, -}; - -const ALGORITHM: Algorithm = Algorithm::Aes256GcmSiv; -const SIZES: [usize; 3] = [BLOCK_LEN, BLOCK_LEN * 2, BLOCK_LEN * 4]; - -fn bench(c: &mut Criterion) { - let mut group = c.benchmark_group(ALGORITHM.to_string().to_ascii_lowercase()); - - let key = Key::generate(); - let nonce = Nonce::generate(ALGORITHM); - - { - group.throughput(Throughput::Bytes(KEY_LEN as u64)); - - let test_key = Key::generate(); - let test_key_encrypted = - Encryptor::encrypt_key(&key, &nonce, ALGORITHM, &test_key, Aad::Null).unwrap(); - - group.bench_function(BenchmarkId::new("encrypt", "key"), |b| { - b.iter(|| { - Encryptor::encrypt_key(&key, &nonce, ALGORITHM, &test_key, Aad::Null).unwrap() - }); - }); - - group.bench_function(BenchmarkId::new("decrypt", "key"), |b| { - b.iter(|| { - Decryptor::decrypt_key(&key, ALGORITHM, &test_key_encrypted, Aad::Null).unwrap() - }); - }); - } - - for size in SIZES { - group.throughput(Throughput::Bytes(size as u64)); - - let buf = vec![0u8; size].into_boxed_slice(); - - let encrypted_bytes = - Encryptor::encrypt_bytes(&key, &nonce, ALGORITHM, &buf, Aad::Null).unwrap(); // bytes to decrypt - - group.bench_function(BenchmarkId::new("encrypt", size), |b| { - 
b.iter(|| Encryptor::encrypt_bytes(&key, &nonce, ALGORITHM, &buf, Aad::Null).unwrap()); - }); - - group.bench_function(BenchmarkId::new("decrypt", size), |b| { - b.iter(|| { - Decryptor::decrypt_bytes(&key, &nonce, ALGORITHM, &encrypted_bytes, Aad::Null) - .unwrap() - }) - }); - } - - group.finish(); -} - -criterion_group!( - name = benches; - config = Criterion::default(); - targets = bench -); - -criterion_main!(benches); diff --git a/crates/crypto/benches/crypto/aes-256-gcm.rs b/crates/crypto/benches/crypto/aes-256-gcm.rs deleted file mode 100644 index cf270b892..000000000 --- a/crates/crypto/benches/crypto/aes-256-gcm.rs +++ /dev/null @@ -1,66 +0,0 @@ -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use sd_crypto::{ - crypto::{Decryptor, Encryptor}, - primitives::{BLOCK_LEN, KEY_LEN}, - types::{Aad, Algorithm, Key, Nonce}, -}; - -const ALGORITHM: Algorithm = Algorithm::Aes256Gcm; -const SIZES: [usize; 3] = [BLOCK_LEN, BLOCK_LEN * 2, BLOCK_LEN * 4]; - -fn bench(c: &mut Criterion) { - let mut group = c.benchmark_group(ALGORITHM.to_string().to_ascii_lowercase()); - - let key = Key::generate(); - let nonce = Nonce::generate(ALGORITHM); - - { - group.throughput(Throughput::Bytes(KEY_LEN as u64)); - - let test_key = Key::generate(); - let test_key_encrypted = - Encryptor::encrypt_key(&key, &nonce, ALGORITHM, &test_key, Aad::Null).unwrap(); - - group.bench_function(BenchmarkId::new("encrypt", "key"), |b| { - b.iter(|| { - Encryptor::encrypt_key(&key, &nonce, ALGORITHM, &test_key, Aad::Null).unwrap() - }); - }); - - group.bench_function(BenchmarkId::new("decrypt", "key"), |b| { - b.iter(|| { - Decryptor::decrypt_key(&key, ALGORITHM, &test_key_encrypted, Aad::Null).unwrap() - }); - }); - } - - for size in SIZES { - group.throughput(Throughput::Bytes(size as u64)); - - let buf = vec![0u8; size].into_boxed_slice(); - - let encrypted_bytes = - Encryptor::encrypt_bytes(&key, &nonce, ALGORITHM, &buf, Aad::Null).unwrap(); // bytes to 
decrypt - - group.bench_function(BenchmarkId::new("encrypt", size), |b| { - b.iter(|| Encryptor::encrypt_bytes(&key, &nonce, ALGORITHM, &buf, Aad::Null).unwrap()); - }); - - group.bench_function(BenchmarkId::new("decrypt", size), |b| { - b.iter(|| { - Decryptor::decrypt_bytes(&key, &nonce, ALGORITHM, &encrypted_bytes, Aad::Null) - .unwrap() - }) - }); - } - - group.finish(); -} - -criterion_group!( - name = benches; - config = Criterion::default(); - targets = bench -); - -criterion_main!(benches); diff --git a/crates/crypto/benches/crypto/xchacha20-poly1305.rs b/crates/crypto/benches/crypto/xchacha20-poly1305.rs deleted file mode 100644 index b8608d018..000000000 --- a/crates/crypto/benches/crypto/xchacha20-poly1305.rs +++ /dev/null @@ -1,66 +0,0 @@ -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use sd_crypto::{ - crypto::{Decryptor, Encryptor}, - primitives::{BLOCK_LEN, KEY_LEN}, - types::{Aad, Algorithm, Key, Nonce}, -}; - -const ALGORITHM: Algorithm = Algorithm::XChaCha20Poly1305; -const SIZES: [usize; 3] = [BLOCK_LEN, BLOCK_LEN * 2, BLOCK_LEN * 4]; - -fn bench(c: &mut Criterion) { - let mut group = c.benchmark_group(ALGORITHM.to_string().to_ascii_lowercase()); - - let key = Key::generate(); - let nonce = Nonce::generate(ALGORITHM); - - { - group.throughput(Throughput::Bytes(KEY_LEN as u64)); - - let test_key = Key::generate(); - let test_key_encrypted = - Encryptor::encrypt_key(&key, &nonce, ALGORITHM, &test_key, Aad::Null).unwrap(); - - group.bench_function(BenchmarkId::new("encrypt", "key"), |b| { - b.iter(|| { - Encryptor::encrypt_key(&key, &nonce, ALGORITHM, &test_key, Aad::Null).unwrap() - }); - }); - - group.bench_function(BenchmarkId::new("decrypt", "key"), |b| { - b.iter(|| { - Decryptor::decrypt_key(&key, ALGORITHM, &test_key_encrypted, Aad::Null).unwrap() - }); - }); - } - - for size in SIZES { - group.throughput(Throughput::Bytes(size as u64)); - - let buf = vec![0u8; size].into_boxed_slice(); - - let 
encrypted_bytes = - Encryptor::encrypt_bytes(&key, &nonce, ALGORITHM, &buf, Aad::Null).unwrap(); // bytes to decrypt - - group.bench_function(BenchmarkId::new("encrypt", size), |b| { - b.iter(|| Encryptor::encrypt_bytes(&key, &nonce, ALGORITHM, &buf, Aad::Null).unwrap()); - }); - - group.bench_function(BenchmarkId::new("decrypt", size), |b| { - b.iter(|| { - Decryptor::decrypt_bytes(&key, &nonce, ALGORITHM, &encrypted_bytes, Aad::Null) - .unwrap() - }) - }); - } - - group.finish(); -} - -criterion_group!( - name = benches; - config = Criterion::default(); - targets = bench -); - -criterion_main!(benches); diff --git a/crates/crypto/benches/hashing/argon2id.rs b/crates/crypto/benches/hashing/argon2id.rs deleted file mode 100644 index c4f16b9cf..000000000 --- a/crates/crypto/benches/hashing/argon2id.rs +++ /dev/null @@ -1,43 +0,0 @@ -use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; -use sd_crypto::{ - hashing::Hasher, - rng::CryptoRng, - types::{HashingAlgorithm, Params, Salt, SecretKey}, - Protected, -}; - -const PARAMS: [Params; 3] = [Params::Standard, Params::Hardened, Params::Paranoid]; - -fn bench(c: &mut Criterion) { - let mut group = c.benchmark_group("argon2id"); - group.sample_size(10); // TODO(brxken128): probably remove this - - for param in PARAMS { - let password: Protected> = CryptoRng::generate_vec(16).into(); - let salt = Salt::generate(); - let hashing_algorithm = HashingAlgorithm::Argon2id(param); - - group.bench_function( - BenchmarkId::new("hash", hashing_algorithm.get_parameters().0), - |b| { - b.iter_batched( - || (password.clone(), salt), - |(password, salt)| { - Hasher::hash_password(hashing_algorithm, &password, salt, &SecretKey::Null) - }, - BatchSize::LargeInput, - ) - }, - ); - } - - group.finish(); -} - -criterion_group!( - name = benches; - config = Criterion::default(); - targets = bench -); - -criterion_main!(benches); diff --git a/crates/crypto/benches/hashing/blake3-balloon.rs 
b/crates/crypto/benches/hashing/blake3-balloon.rs deleted file mode 100644 index 5e1cffe8f..000000000 --- a/crates/crypto/benches/hashing/blake3-balloon.rs +++ /dev/null @@ -1,43 +0,0 @@ -use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; -use sd_crypto::{ - hashing::Hasher, - rng::CryptoRng, - types::{HashingAlgorithm, Params, Salt, SecretKey}, - Protected, -}; - -const PARAMS: [Params; 3] = [Params::Standard, Params::Hardened, Params::Paranoid]; - -fn bench(c: &mut Criterion) { - let mut group = c.benchmark_group("blake3-balloon"); - group.sample_size(10); // TODO(brxken128): probably remove this - - for param in PARAMS { - let password: Protected> = CryptoRng::generate_vec(16).into(); - let salt = Salt::generate(); - let hashing_algorithm = HashingAlgorithm::Blake3Balloon(param); - - group.bench_function( - BenchmarkId::new("hash", hashing_algorithm.get_parameters().0), - |b| { - b.iter_batched( - || (password.clone(), salt), - |(password, salt)| { - Hasher::hash_password(hashing_algorithm, &password, salt, &SecretKey::Null) - }, - BatchSize::LargeInput, - ) - }, - ); - } - - group.finish(); -} - -criterion_group!( - name = benches; - config = Criterion::default(); - targets = bench -); - -criterion_main!(benches); diff --git a/crates/crypto/benches/hashing/blake3-kdf.rs b/crates/crypto/benches/hashing/blake3-kdf.rs deleted file mode 100644 index 7c93fe3a1..000000000 --- a/crates/crypto/benches/hashing/blake3-kdf.rs +++ /dev/null @@ -1,24 +0,0 @@ -use criterion::{criterion_group, criterion_main, Criterion}; -use sd_crypto::{ - hashing::Hasher, - types::{DerivationContext, Key, Salt}, -}; - -const CONTEXT: DerivationContext = - DerivationContext::new("crypto 2023-03-21 11:31:38 benchmark testing context"); - -fn bench(c: &mut Criterion) { - let key = Key::generate(); - let salt = Salt::generate(); - c.bench_function("blake3-kdf", |b| { - b.iter(|| Hasher::derive_key(&key, salt, CONTEXT)) - }); -} - -criterion_group!( - name = 
benches; - config = Criterion::default(); - targets = bench -); - -criterion_main!(benches); diff --git a/crates/crypto/benches/hashing/blake3.rs b/crates/crypto/benches/hashing/blake3.rs deleted file mode 100644 index f4fa6b22d..000000000 --- a/crates/crypto/benches/hashing/blake3.rs +++ /dev/null @@ -1,31 +0,0 @@ -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use sd_crypto::{ - hashing::Hasher, - primitives::{BLOCK_LEN, KEY_LEN}, -}; - -const SIZES: [usize; 2] = [KEY_LEN, BLOCK_LEN]; - -fn bench(c: &mut Criterion) { - let mut group = c.benchmark_group("blake3"); - - for size in SIZES { - let buf = vec![0u8; size].into_boxed_slice(); - - group.throughput(Throughput::Bytes(size as u64)); - - group.bench_function(BenchmarkId::new("hash", size), |b| { - b.iter(|| Hasher::blake3(&buf)) - }); - } - - group.finish(); -} - -criterion_group!( - name = benches; - config = Criterion::default(); - targets = bench -); - -criterion_main!(benches); diff --git a/crates/crypto/examples/file_encryption.rs b/crates/crypto/examples/file_encryption.rs deleted file mode 100644 index dcc38fc8c..000000000 --- a/crates/crypto/examples/file_encryption.rs +++ /dev/null @@ -1,119 +0,0 @@ -use sd_crypto::{ - crypto::{Decryptor, Encryptor}, - encoding::Header, - hashing::Hasher, - types::{ - Algorithm, DerivationContext, HashingAlgorithm, Key, MagicBytes, Params, Salt, SecretKey, - }, - Protected, -}; -use std::io::{Cursor, Read, Seek, Write}; - -const MAGIC_BYTES: MagicBytes<6> = MagicBytes::new(*b"crypto"); - -const HEADER_KEY_CONTEXT: DerivationContext = - DerivationContext::new("crypto 2023-03-21 11:24:53 example header key context"); - -const HEADER_OBJECT_CONTEXT: DerivationContext = - DerivationContext::new("crypto 2023-03-21 11:25:08 example header object context"); - -const ALGORITHM: Algorithm = Algorithm::XChaCha20Poly1305; -const HASHING_ALGORITHM: HashingAlgorithm = HashingAlgorithm::Argon2id(Params::Standard); - -const OBJECT_DATA: [u8; 
15] = *b"a nice mountain"; - -fn encrypt(reader: &mut R, writer: &mut W) -where - R: Read, - W: Write + Seek, -{ - let password = Protected::new(b"password".to_vec()); - - // This needs to be generated here, otherwise we won't have access to it for encryption - let master_key = Key::generate(); - - // These should ideally be done by a key management system - let content_salt = Salt::generate(); - let hashed_password = - Hasher::hash_password(HASHING_ALGORITHM, &password, content_salt, &SecretKey::Null) - .unwrap(); - - // Create the header for the encrypted file - let mut header = Header::new(ALGORITHM); - - // Create a keyslot to be added to the header - header - .add_keyslot( - HASHING_ALGORITHM, - content_salt, - &hashed_password, - &master_key, - HEADER_KEY_CONTEXT, - ) - .unwrap(); - - header - .add_object( - "FileMetadata", - HEADER_OBJECT_CONTEXT, - &master_key, - &OBJECT_DATA, - ) - .unwrap(); - - // Write the header to the file - header.to_writer(writer, MAGIC_BYTES).unwrap(); - - // Use the nonce created by the header to initialize an encryptor - let encryptor = Encryptor::new(&master_key, &header.nonce, header.algorithm).unwrap(); - - // Encrypt the data from the reader, and write it to the writer - // Use AAD so the header can be authenticated against every block of data - encryptor - .encrypt_streams(reader, writer, header.generate_aad()) - .unwrap(); -} - -fn decrypt(reader: &mut R, writer: &mut W) -> Vec -where - R: Read + Seek, - W: Write, -{ - let password = Protected::new(b"password".to_vec()); - - // Deserialize the header from the encrypted file - let (header, aad) = Header::from_reader(reader, MAGIC_BYTES).unwrap(); - - let (master_key, index) = header - .decrypt_master_key_with_password(&password, HEADER_KEY_CONTEXT) - .unwrap(); - - println!("key is in slot: {index}"); - - let decryptor = Decryptor::new(&master_key, &header.nonce, header.algorithm).unwrap(); - - // Decrypt data the from the reader, and write it to the writer - 
decryptor.decrypt_streams(reader, writer, aad).unwrap(); - - // Decrypt the object - let object = header - .decrypt_object("FileMetadata", HEADER_OBJECT_CONTEXT, &master_key) - .unwrap(); - - object.into_inner() -} - -fn main() { - // Open both the source and the output file - let mut source = Cursor::new(vec![5u8; 256]); - let mut dest = Cursor::new(vec![]); - let mut source_comparison = Cursor::new(vec![]); - - encrypt(&mut source, &mut dest); - - dest.rewind().unwrap(); - - let object_data = decrypt(&mut dest, &mut source_comparison); - - assert_eq!(&object_data, &OBJECT_DATA); -} diff --git a/crates/crypto/examples/secure_erase.rs b/crates/crypto/examples/secure_erase.rs index 8d0bfb55a..23921c7dc 100644 --- a/crates/crypto/examples/secure_erase.rs +++ b/crates/crypto/examples/secure_erase.rs @@ -1,20 +1,19 @@ -use sd_crypto::rng::CryptoRng; +use sd_crypto::{erase::erase_sync, rng::CryptoRng}; + use std::io::{Seek, Write}; + use tempfile::tempfile; fn main() { let mut file = tempfile().unwrap(); - let data = CryptoRng::generate_vec(1048576 * 16); + let mut rng = CryptoRng::new(); + let data = rng.generate_vec(1048576 * 16); file.write_all(&data).unwrap(); file.rewind().unwrap(); - #[cfg(feature = "sys")] - { - use sd_crypto::sys::fs; - // Erase the file (the size would normally be obtained via `fs::Metadata::len()` or similar) - fs::erase(&mut file, 1048576 * 16, 2).unwrap(); - } + // Erase the file (the size would normally be obtained via `fs::Metadata::len()` or similar) + erase_sync(&mut file, 1048576 * 16, 2).unwrap(); // Truncate the file to a length of zero file.set_len(0).unwrap(); diff --git a/crates/crypto/src/cloud/decrypt.rs b/crates/crypto/src/cloud/decrypt.rs new file mode 100644 index 000000000..1ba41f35b --- /dev/null +++ b/crates/crypto/src/cloud/decrypt.rs @@ -0,0 +1,105 @@ +use crate::{ + primitives::{EncryptedBlock, StreamNonce}, + Error, +}; + +use std::future::Future; + +use aead::{stream::DecryptorLE31, Aead, KeyInit}; +use 
chacha20poly1305::XChaCha20Poly1305; +use tokio::io::{AsyncBufReadExt, AsyncRead, AsyncWrite, AsyncWriteExt, BufReader, BufWriter}; + +use super::secret_key::SecretKey; + +pub trait OneShotDecryption { + fn decrypt(&self, cipher_text: &EncryptedBlock) -> Result, Error>; +} + +pub trait StreamDecryption { + fn decrypt( + &self, + nonce: &StreamNonce, + reader: impl AsyncRead + Unpin + Send, + writer: impl AsyncWrite + Unpin + Send, + ) -> impl Future> + Send; +} + +impl OneShotDecryption for SecretKey { + fn decrypt( + &self, + EncryptedBlock { nonce, cipher_text }: &EncryptedBlock, + ) -> Result, Error> { + XChaCha20Poly1305::new(&self.0) + .decrypt(nonce, cipher_text.as_slice()) + .map_err(|aead::Error| Error::Decrypt) + } +} + +impl StreamDecryption for SecretKey { + async fn decrypt( + &self, + nonce: &StreamNonce, + reader: impl AsyncRead + Unpin + Send, + writer: impl AsyncWrite + Unpin + Send, + ) -> Result<(), Error> { + let mut reader = BufReader::with_capacity(EncryptedBlock::CIPHER_TEXT_SIZE, reader); + let mut writer = BufWriter::with_capacity(EncryptedBlock::PLAIN_TEXT_SIZE * 5, writer); + + let mut buf = Vec::with_capacity(EncryptedBlock::CIPHER_TEXT_SIZE); + + let mut decryptor = DecryptorLE31::from_aead(XChaCha20Poly1305::new(&self.0), nonce); + + loop { + match reader.fill_buf().await { + Ok([]) => { + // Jobs done + break; + } + + Ok(bytes) => { + let total_bytes = bytes.len(); + + buf.clear(); + buf.extend_from_slice(bytes); + + reader.consume(total_bytes); + + if total_bytes == EncryptedBlock::CIPHER_TEXT_SIZE { + decryptor + .decrypt_next_in_place(b"", &mut buf) + .map_err(|aead::Error| Error::Decrypt)?; + + writer.write_all(&buf).await.map_err(|e| Error::DecryptIo { + context: "Writing decrypted block to writer", + source: e, + })?; + } else { + decryptor + .decrypt_last_in_place(b"", &mut buf) + .map_err(|aead::Error| Error::Decrypt)?; + + writer.write_all(&buf).await.map_err(|e| Error::DecryptIo { + context: "Writing last decrypted block to 
writer", + source: e, + })?; + break; + } + } + + Err(e) => { + return Err(Error::DecryptIo { + context: "Reading a block from the reader", + source: e, + }); + } + } + } + + writer.flush().await.map_err(|e| Error::DecryptIo { + context: "Flushing writer", + source: e, + })?; + + Ok(()) + } +} diff --git a/crates/crypto/src/cloud/encrypt.rs b/crates/crypto/src/cloud/encrypt.rs new file mode 100644 index 000000000..efdcd670a --- /dev/null +++ b/crates/crypto/src/cloud/encrypt.rs @@ -0,0 +1,99 @@ +use crate::{ + primitives::{EncryptedBlock, StreamNonce}, + Error, +}; + +use aead::{stream::EncryptorLE31, Aead, KeyInit}; +use async_stream::stream; +use chacha20poly1305::{XChaCha20Poly1305, XNonce}; +use futures::Stream; +use rand::CryptoRng; +use tokio::io::{AsyncBufReadExt, AsyncRead, BufReader}; + +use super::secret_key::SecretKey; + +pub trait OneShotEncryption { + fn encrypt(&self, plaintext: &[u8], rng: &mut impl CryptoRng) -> Result; +} + +pub trait StreamEncryption { + fn encrypt( + &self, + reader: impl AsyncRead + Unpin + Send, + rng: &mut (impl CryptoRng + Send), + ) -> ( + StreamNonce, + impl Stream, Error>> + Send, + ); +} + +impl OneShotEncryption for SecretKey { + fn encrypt(&self, plaintext: &[u8], rng: &mut impl CryptoRng) -> Result { + if plaintext.len() > EncryptedBlock::PLAIN_TEXT_SIZE { + return Err(Error::BlockTooBig(plaintext.len())); + } + + let cipher = XChaCha20Poly1305::new(&self.0); + let mut nonce = XNonce::default(); + rng.fill_bytes(&mut nonce); + + Ok(EncryptedBlock { + nonce, + cipher_text: cipher + .encrypt(&nonce, plaintext) + .map_err(|aead::Error| Error::Encrypt)?, + }) + } +} + +impl StreamEncryption for SecretKey { + fn encrypt( + &self, + reader: impl AsyncRead + Unpin + Send, + rng: &mut (impl CryptoRng + Send), + ) -> ( + StreamNonce, + impl Stream, Error>> + Send, + ) { + let mut nonce = StreamNonce::default(); + rng.fill_bytes(&mut nonce); + + ( + nonce, + stream! 
{ + let mut reader = BufReader::with_capacity(EncryptedBlock::PLAIN_TEXT_SIZE, reader); + let mut encryptor = EncryptorLE31::from_aead(XChaCha20Poly1305::new(&self.0), &nonce); + + loop { + match reader.fill_buf().await { + Ok([]) => { + // Jobs done + break; + } + + Ok(bytes) => { + let total_bytes = bytes.len(); + if bytes.len() == EncryptedBlock::PLAIN_TEXT_SIZE { + let cipher_text = encryptor.encrypt_next(bytes).map_err(|aead::Error| Error::Encrypt)?; + assert_eq!(cipher_text.len(), EncryptedBlock::CIPHER_TEXT_SIZE); + yield Ok(cipher_text); + reader.consume(total_bytes); + } else { + yield encryptor.encrypt_last(bytes).map_err(|aead::Error| Error::Encrypt); + break; + } + } + + Err(e) => { + yield Err(Error::EncryptIo { + context: "Reading a block from the reader", + source: e, + }); + break; + } + } + } + }, + ) + } +} diff --git a/crates/crypto/src/cloud/mod.rs b/crates/crypto/src/cloud/mod.rs new file mode 100644 index 000000000..4a09a47d8 --- /dev/null +++ b/crates/crypto/src/cloud/mod.rs @@ -0,0 +1,3 @@ +pub mod decrypt; +pub mod encrypt; +pub mod secret_key; diff --git a/crates/crypto/src/cloud/secret_key.rs b/crates/crypto/src/cloud/secret_key.rs new file mode 100644 index 000000000..dcef9e90e --- /dev/null +++ b/crates/crypto/src/cloud/secret_key.rs @@ -0,0 +1,189 @@ +use crate::{ + ct::{Choice, ConstantTimeEq, ConstantTimeEqNull}, + rng::CryptoRng, +}; + +use std::fmt; + +use aead::array::Array; +use blake3::{Hash, Hasher}; +use generic_array::GenericArray; +use serde::{Deserialize, Serialize}; +use typenum::consts::U32; +use zeroize::{Zeroize, ZeroizeOnDrop}; + +/// This should be used for encrypting and decrypting data. +/// +/// You can pass an existing key to [`SecretKey::new`] or you may also generate +/// a secure random key with [`SecretKey::generate`], passing the [`CryptoRng`] to generate +/// random bytes. 
+#[derive(Clone, Zeroize, ZeroizeOnDrop)] +#[repr(transparent)] +pub struct SecretKey(pub(crate) Array); + +impl fmt::Debug for SecretKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("SecretKey()") + } +} + +impl SecretKey { + #[inline] + #[must_use] + pub fn new(v: impl Into>) -> Self { + Self(v.into()) + } + + #[inline] + #[must_use] + pub fn generate(rng: &mut CryptoRng) -> Self { + let mut key_candidate = rng.generate_fixed(); + + while bool::from(key_candidate.ct_eq_null()) { + key_candidate = rng.generate_fixed(); + } + + Self(key_candidate.into()) + } + + #[must_use] + pub fn to_hash(&self) -> Hash { + let mut hasher = Hasher::new(); + hasher.update(&self.0); + hasher.finalize() + } +} + +impl ConstantTimeEq for SecretKey { + fn ct_eq(&self, rhs: &Self) -> Choice { + self.0.ct_eq(&rhs.0) + } +} + +impl PartialEq for SecretKey { + fn eq(&self, other: &Self) -> bool { + self.ct_eq(other).into() + } +} + +impl Serialize for SecretKey { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serdect::array::serialize_hex_lower_or_bin(&self.0, serializer) + } +} + +impl<'de> Deserialize<'de> for SecretKey { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let mut buf = [0u8; 32]; + serdect::array::deserialize_hex_or_bin(&mut buf, deserializer)?; + Ok(Self::new(buf)) + } +} + +impl From<&SecretKey> for Array { + fn from(SecretKey(key): &SecretKey) -> Self { + *key + } +} + +impl From> for SecretKey { + fn from(key: GenericArray) -> Self { + Self(Array([ + key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7], key[8], key[9], + key[10], key[11], key[12], key[13], key[14], key[15], key[16], key[17], key[18], + key[19], key[20], key[21], key[22], key[23], key[24], key[25], key[26], key[27], + key[28], key[29], key[30], key[31], + ])) + } +} + +#[cfg(test)] +mod tests { + use std::pin::pin; + + use futures::StreamExt; + use rand::RngCore; + + use 
crate::primitives::EncryptedBlock; + + use super::*; + + #[test] + fn one_shot_test() { + use super::super::{decrypt::OneShotDecryption, encrypt::OneShotEncryption}; + let mut rng = CryptoRng::new(); + + let message = b"Eu queria um apartamento no Guarujah; \ + Mas o melhor que eu consegui foi um barraco em Itaquah."; + + let key = SecretKey::generate(&mut rng); + + let encrypted_block = key.encrypt(message, &mut rng).unwrap(); + let decrypted_message = key.decrypt(&encrypted_block).unwrap(); + + assert_eq!(message, decrypted_message.as_slice()); + } + + async fn stream_test(rng: &mut CryptoRng, message: &[u8]) { + use super::super::{decrypt::StreamDecryption, encrypt::StreamEncryption}; + + let key = SecretKey::generate(rng); + + let mut encrypted_message = vec![]; + + let (nonce, stream) = key.encrypt(message, rng); + + let mut stream = pin!(stream); + + while let Some(res) = stream.next().await { + encrypted_message.extend(res.unwrap()); + } + + let mut decrypted_message = vec![]; + + key.decrypt(&nonce, encrypted_message.as_slice(), &mut decrypted_message) + .await + .unwrap(); + + assert_eq!(message, decrypted_message.as_slice()); + } + + #[tokio::test] + async fn stream_test_small() { + let message = b"Eu sou cagado, veja so como eh que eh; \ + Se der uma chuva de Xuxa, no meu colo cai Peleh; \ + E como aquele ditado que jah dizia; \ + Pau que nasce torto mija fora da bacia"; + + stream_test(&mut CryptoRng::new(), message).await; + } + + #[tokio::test] + async fn stream_test_big() { + let mut rng = CryptoRng::new(); + + let mut message = + vec![0u8; EncryptedBlock::PLAIN_TEXT_SIZE * 10 + EncryptedBlock::PLAIN_TEXT_SIZE / 2]; + + rng.fill_bytes(&mut message); + + stream_test(&mut rng, &message).await; + } + + #[tokio::test] + async fn stream_test_big_exact() { + let mut rng = CryptoRng::new(); + + let mut message = vec![0u8; EncryptedBlock::PLAIN_TEXT_SIZE * 20]; + + rng.fill_bytes(&mut message); + + stream_test(&mut rng, &message).await; + } +} diff --git 
a/crates/crypto/src/crypto/mod.rs b/crates/crypto/src/crypto/mod.rs index 487454fba..6ab19cca1 100644 --- a/crates/crypto/src/crypto/mod.rs +++ b/crates/crypto/src/crypto/mod.rs @@ -341,7 +341,6 @@ mod tests { } #[tokio::test] - #[cfg(feature = "tokio")] #[cfg_attr(miri, ignore)] async fn aes_256_gcm_siv_encrypt_and_decrypt_5_blocks_async() { let buf = CryptoRng::generate_vec(BLOCK_LEN * 5); diff --git a/crates/crypto/src/crypto/stream.rs b/crates/crypto/src/crypto/stream.rs index 22781eead..dce48d57b 100644 --- a/crates/crypto/src/crypto/stream.rs +++ b/crates/crypto/src/crypto/stream.rs @@ -46,7 +46,6 @@ macro_rules! impl_stream { /// For more information, view `Key::validate()` and `Nonce::validate()` pub fn new(key: &Key, nonce: &Nonce, algorithm: Algorithm) -> Result { nonce.validate(algorithm)?; - key.validate()?; let s = match algorithm { $( diff --git a/crates/crypto/src/ct.rs b/crates/crypto/src/ct.rs index 70b1708c1..e7edf6a89 100644 --- a/crates/crypto/src/ct.rs +++ b/crates/crypto/src/ct.rs @@ -1,3 +1,4 @@ +use aead::array::{Array, ArraySize}; use cmov::{Cmov, CmovEq}; // The basic principle of most `ct_eq()` functions is to "null" out `x` (which is = 1 by default) @@ -46,6 +47,21 @@ where } } +impl ConstantTimeEq for Array +where + T: CmovEq, +{ + fn ct_eq(&self, rhs: &Self) -> Choice { + let mut x = 1u8; + + self.iter() + .zip(rhs.iter()) + .for_each(|(l, r)| l.cmovne(r, 0u8, &mut x)); + + Choice::from(x) + } +} + impl ConstantTimeEq for [T] { fn ct_eq(&self, rhs: &Self) -> Choice { // Here we can short-circuit as it's obvious that they're not equal @@ -86,7 +102,7 @@ impl Choice { #[inline] #[must_use] pub fn unwrap_u8(&self) -> u8 { - // could use an unsafe volatile read as an optimisation barrier + // could use an unsafe volatile read as an optimization barrier // i think cmov does a great job at being the barrier as well though let mut x = 0u8; x.cmovnz(&1, self.0); @@ -166,31 +182,65 @@ impl ConstantTimeEqNull for [u8] { #[cfg(test)] mod tests { 
- use crate::{ - ct::{ConstantTimeEq, ConstantTimeEqNull}, - primitives::SALT_LEN, - }; + + use aead::array::Array; + use typenum::consts::U32; + + use crate::ct::{ConstantTimeEq, ConstantTimeEqNull}; #[test] fn eq_null() { - assert!(bool::from([0u8; SALT_LEN].ct_eq_null())); + assert!(bool::from([0u8; 16].ct_eq_null())); } #[test] #[should_panic(expected = "assertion")] fn eq_null_fail() { - assert!(bool::from([1u8; SALT_LEN].ct_eq_null())); + assert!(bool::from([1u8; 16].ct_eq_null())); } #[test] fn ne_null() { - assert!(bool::from([1u8; SALT_LEN].ct_ne_null())); + assert!(bool::from([1u8; 16].ct_ne_null())); } #[test] #[should_panic(expected = "assertion")] fn ne_null_fail() { - assert!(bool::from([0u8; SALT_LEN].ct_ne_null())); + assert!(bool::from([0u8; 16].ct_ne_null())); + } + + #[test] + fn generic_array_eq() { + assert!(bool::from(Array::::ct_eq( + &Array::from([0u8; 32]), + &Array::from([0u8; 32]), + ))); + } + #[test] + #[should_panic(expected = "assertion")] + fn generic_array_eq_fail() { + assert!(bool::from(Array::::ct_eq( + &Array::from([0u8; 32]), + &Array::from([1u8; 32]), + ))); + } + + #[test] + fn generic_array_ne() { + assert!(bool::from(Array::::ct_ne( + &Array::from([0u8; 32]), + &Array::from([1u8; 32]), + ))); + } + + #[test] + #[should_panic(expected = "assertion")] + fn generic_array_ne_fail() { + assert!(bool::from(Array::::ct_ne( + &Array::from([0u8; 32]), + &Array::from([0u8; 32]), + ))); } macro_rules! 
create_tests { diff --git a/crates/crypto/src/encoding/bincode.rs b/crates/crypto/src/encoding/bincode.rs deleted file mode 100644 index 2a32e31ad..000000000 --- a/crates/crypto/src/encoding/bincode.rs +++ /dev/null @@ -1,29 +0,0 @@ -pub const CONFIG: Configuration = bincode::config::standard(); - -use crate::{Error, Result}; -use bincode::{config::Configuration, de::read::Reader}; - -pub fn decode(bytes: &[u8]) -> Result -where - T: bincode::Decode, -{ - bincode::decode_from_slice::(bytes, CONFIG) - .map(|t| t.0) - .map_err(Error::BincodeDecode) -} - -pub fn decode_from_reader(reader: R) -> Result -where - T: bincode::Decode, -{ - bincode::decode_from_reader::(reader, CONFIG).map_err(Error::BincodeDecode) -} - -pub fn encode(object: &T) -> Result> -where - T: bincode::Encode, -{ - bincode::encode_to_vec(object, CONFIG).map_err(Error::BincodeEncode) -} - -// TODO(brxken128): this should probably go but it's convenient diff --git a/crates/crypto/src/encoding/file/header.rs b/crates/crypto/src/encoding/file/header.rs deleted file mode 100644 index 695a3e633..000000000 --- a/crates/crypto/src/encoding/file/header.rs +++ /dev/null @@ -1,321 +0,0 @@ -use std::io::Cursor; - -use super::{keyslot::Keyslot, object::HeaderObject, HeaderEncode, KEYSLOT_LIMIT, OBJECT_LIMIT}; -use crate::{ - hashing::Hasher, - primitives::AAD_HEADER_LEN, - types::{ - Aad, Algorithm, DerivationContext, HashingAlgorithm, Key, MagicBytes, Nonce, Salt, - SecretKey, - }, - utils::ToArray, - Error, Protected, Result, -}; - -pub struct Header { - pub version: HeaderVersion, - pub algorithm: Algorithm, - pub nonce: Nonce, - pub keyslots: Vec, - pub objects: Vec, -} - -#[derive(Eq, PartialEq, Debug)] -pub enum HeaderVersion { - V1, -} - -impl std::fmt::Display for HeaderVersion { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::V1 => write!(f, "V1"), - } - } -} - -impl Header { - #[must_use] - pub fn new(algorithm: Algorithm) -> Self { - Self { - version: 
HeaderVersion::V1, - algorithm, - nonce: Nonce::generate(algorithm), - keyslots: vec![], - objects: vec![], - } - } - - pub fn to_writer( - &self, - writer: &mut W, - magic_bytes: MagicBytes, - ) -> Result<()> - where - W: std::io::Write, - { - let b = self.as_bytes()?; - - writer.write_all(magic_bytes.inner())?; - - // we're good here for up to 4096mib~ (headers should never be this large) - writer.write_all( - &(TryInto::::try_into(b.len()).map_err(|_| Error::Validity)?).to_le_bytes(), - )?; - writer.write_all(&b)?; - - Ok(()) - } - - #[cfg(feature = "tokio")] - pub async fn to_writer_async( - &self, - writer: &mut W, - magic_bytes: MagicBytes, - ) -> Result<()> - where - W: tokio::io::AsyncWriteExt + tokio::io::AsyncSeekExt + Unpin + Send, - { - let b = self.as_bytes()?; - - writer.write_all(magic_bytes.inner()).await?; - - // we're good here for up to 4096mib~ (headers should never be this large) - writer - .write_all( - &(TryInto::::try_into(b.len()).map_err(|_| Error::Validity)?).to_le_bytes(), - ) - .await?; - writer.write_all(&b).await?; - - Ok(()) - } - - pub fn from_reader( - reader: &mut R, - magic_bytes: MagicBytes, - ) -> Result<(Self, Aad)> - where - R: std::io::Read + std::io::Seek, - { - let mut b = [0u8; I]; - reader.read_exact(&mut b)?; - - if &b != magic_bytes.inner() { - return Err(Error::Validity); - } - - let mut len = [0u8; 4]; - reader.read_exact(&mut len)?; - let len = u32::from_le_bytes(len); - - let mut header_bytes = vec![0u8; len.try_into().map_err(|_| Error::Validity)?]; - reader.read_exact(&mut header_bytes)?; - let h = Self::from_reader_raw(&mut Cursor::new(&header_bytes))?; - - Ok((h, Aad::Header(header_bytes[..AAD_HEADER_LEN].to_array()?))) - } - - #[cfg(feature = "tokio")] - pub async fn from_reader_async( - reader: &mut R, - magic_bytes: MagicBytes, - ) -> Result<(Self, Aad)> - where - R: tokio::io::AsyncReadExt + tokio::io::AsyncSeekExt + Unpin + Send, - { - let mut b = [0u8; I]; - reader.read_exact(&mut b).await?; - - if &b != 
magic_bytes.inner() { - return Err(Error::Validity); - } - - let mut len = [0u8; 4]; - reader.read_exact(&mut len).await?; - let len = u32::from_le_bytes(len); - - let mut header_bytes = vec![0u8; len.try_into().map_err(|_| Error::Validity)?]; - reader.read_exact(&mut header_bytes).await?; - let h = Self::from_reader_raw(&mut Cursor::new(&header_bytes))?; - - Ok((h, Aad::Header(header_bytes[..AAD_HEADER_LEN].to_array()?))) - } - - #[must_use] - pub fn generate_aad(&self) -> Aad { - let mut o = [0u8; 38]; - o[..2].copy_from_slice(&[0xFA, 0xDA]); - o[2..4].copy_from_slice(&self.version.as_bytes()); - o[4..6].copy_from_slice(&self.algorithm.as_bytes()); - o[6..38].copy_from_slice(&self.nonce.as_bytes()); - Aad::Header(o) - } - - pub fn remove_keyslot(&mut self, index: usize) -> Result<()> { - if index > self.keyslots.len() - 1 { - return Err(Error::Validity); - } - - self.keyslots.remove(index); - Ok(()) - } - - pub fn decrypt_object( - &self, - name: &'static str, - context: DerivationContext, - master_key: &Key, - ) -> Result>> { - let rhs = Hasher::blake3(name.as_bytes()); - - self.objects - .iter() - .filter_map(|o| { - o.identifier - .decrypt_id(master_key, self.algorithm, context, self.generate_aad()) - .ok() - .and_then(|i| (i == rhs).then_some(o)) - }) - // .cloned() - .collect::>() - .first() - .ok_or(Error::NoObjects)? 
- .decrypt(self.algorithm, self.generate_aad(), master_key) - } - - pub fn add_keyslot( - &mut self, - hashing_algorithm: HashingAlgorithm, - hash_salt: Salt, - hashed_password: &Key, - master_key: &Key, - context: DerivationContext, - ) -> Result<()> { - if self.keyslots.len() + 1 > KEYSLOT_LIMIT { - return Err(Error::TooManyKeyslots); - } - - self.keyslots.push(Keyslot::new( - self.algorithm, - hashing_algorithm, - hash_salt, - hashed_password, - master_key, - self.generate_aad(), - context, - )?); - - Ok(()) - } - - pub fn add_object( - &mut self, - name: &'static str, - context: DerivationContext, - master_key: &Key, - data: &[u8], - ) -> Result<()> { - if self.objects.len() + 1 > OBJECT_LIMIT { - return Err(Error::TooManyObjects); - } - - let rhs = Hasher::blake3(name.as_bytes()); - - if self - .objects - .iter() - .filter_map(|o| { - o.identifier - .decrypt_id(master_key, self.algorithm, context, self.generate_aad()) - .ok() - .map(|i| i == rhs) - }) - .any(|x| x) - { - return Err(Error::TooManyObjects); - } - - self.objects.push(HeaderObject::new( - name, - self.algorithm, - master_key, - context, - self.generate_aad(), - data, - )?); - Ok(()) - } - - pub fn decrypt_master_key( - &self, - keys: &[Key], - context: DerivationContext, - ) -> Result<(Key, usize)> { - if self.keyslots.is_empty() { - return Err(Error::NoKeyslots); - } - - keys.iter() - .enumerate() - .find_map(|(i, k)| { - self.keyslots.iter().find_map(|z| { - z.decrypt(self.algorithm, k, self.generate_aad(), context) - .ok() - .map(|x| (x, i)) - }) - }) - .ok_or(Error::Decrypt) - } - - pub fn decrypt_master_key_with_password( - &self, - password: &Protected>, - context: DerivationContext, - ) -> Result<(Key, usize)> { - if self.keyslots.is_empty() { - return Err(Error::NoKeyslots); - } - - self.keyslots - .iter() - .enumerate() - .find_map(|(i, z)| { - let k = Hasher::hash_password( - z.hashing_algorithm, - password, - z.hash_salt, - &SecretKey::Null, - ) - .ok()?; - z.decrypt(self.algorithm, &k, 
self.generate_aad(), context) - .ok() - .map(|x| (x, i)) - }) - .ok_or(Error::Decrypt) - } -} - -#[cfg(test)] -mod tests { - use crate::{ct::ConstantTimeEq, encoding::Header, types::MagicBytes}; - use std::io::{Cursor, Seek}; - - const MAGIC_BYTES: MagicBytes<6> = MagicBytes::new(*b"crypto"); - - #[test] - fn encode_and_decode() { - let mut w = Cursor::new(vec![]); - let h_source = Header::new(crate::types::Algorithm::XChaCha20Poly1305); - - h_source.to_writer(&mut w, MAGIC_BYTES).unwrap(); - w.rewind().unwrap(); - - let (h_read, aad) = Header::from_reader(&mut w, MAGIC_BYTES).unwrap(); - - assert_eq!(w.into_inner().len(), 294); - assert_eq!(h_source.algorithm, h_read.algorithm); - assert_eq!(h_source.version, h_read.version); - assert!(bool::from(h_source.nonce.ct_eq(&h_read.nonce))); - assert!(bool::from(h_source.generate_aad().ct_eq(&aad))); - } -} diff --git a/crates/crypto/src/encoding/file/keyslot.rs b/crates/crypto/src/encoding/file/keyslot.rs deleted file mode 100644 index 9c8d72b8b..000000000 --- a/crates/crypto/src/encoding/file/keyslot.rs +++ /dev/null @@ -1,74 +0,0 @@ -use crate::{ - crypto::{Decryptor, Encryptor}, - hashing::Hasher, - rng::CryptoRng, - types::{Aad, Algorithm, DerivationContext, EncryptedKey, HashingAlgorithm, Key, Nonce, Salt}, - Result, -}; - -pub struct Keyslot { - pub hashing_algorithm: HashingAlgorithm, // password hashing algorithm - pub hash_salt: Salt, // salt to hash the password with - pub salt: Salt, // the salt used for key derivation with the hash digest - pub encrypted_key: EncryptedKey, // encrypted -} - -impl Keyslot { - pub fn new( - algorithm: Algorithm, - hashing_algorithm: HashingAlgorithm, - hash_salt: Salt, - hashed_password: &Key, - master_key: &Key, - aad: Aad, - context: DerivationContext, - ) -> Result { - let nonce = Nonce::generate(algorithm); - let salt = Salt::generate(); - - let encrypted_key = Encryptor::encrypt_key( - &Hasher::derive_key(hashed_password, salt, context), - &nonce, - algorithm, - 
master_key, - aad, - )?; - - Ok(Self { - hashing_algorithm, - hash_salt, - salt, - encrypted_key, - }) - } - - pub(super) fn decrypt( - &self, - algorithm: Algorithm, - key: &Key, - aad: Aad, - context: DerivationContext, - ) -> Result { - Decryptor::decrypt_key( - &Hasher::derive_key(key, self.salt, context), - algorithm, - &self.encrypted_key.clone(), - aad, - ) - } -} - -impl Keyslot { - #[must_use] - pub fn random() -> Self { - Self { - hash_salt: Salt::generate(), - hashing_algorithm: HashingAlgorithm::default(), - encrypted_key: EncryptedKey::new( - CryptoRng::generate_fixed(), - Nonce::generate(Algorithm::default()), - ), - salt: Salt::generate(), - } - } -} diff --git a/crates/crypto/src/encoding/file/mod.rs b/crates/crypto/src/encoding/file/mod.rs deleted file mode 100644 index b685376df..000000000 --- a/crates/crypto/src/encoding/file/mod.rs +++ /dev/null @@ -1,525 +0,0 @@ -use std::mem; - -use crate::{ - primitives::{ - AES_256_GCM_SIV_NONCE_LEN, ENCRYPTED_KEY_LEN, SALT_LEN, XCHACHA20_POLY1305_NONCE_LEN, - }, - types::{Algorithm, EncryptedKey, HashingAlgorithm, Nonce, Params, Salt}, - utils::ToArray, - Error, Result, -}; - -use self::{ - header::HeaderVersion, - keyslot::Keyslot, - object::{HeaderObject, HeaderObjectIdentifier}, -}; - -use super::Header; - -pub mod header; -pub mod keyslot; -pub mod object; - -const KEYSLOT_LIMIT: usize = 2; -const OBJECT_LIMIT: usize = 2; - -pub trait HeaderEncode { - const OUTPUT_LEN: usize; - type Identifier; - type Output: Default; - - fn as_bytes(&self) -> Self::Output; - - fn from_bytes(b: Self::Output) -> Result - where - Self: Sized; - - fn from_reader(reader: &mut R) -> Result - where - Self: Sized, - R: std::io::Read + std::io::Seek; // make this a provided method eventually via `hybrid-array`? -} - -// TODO(brxken128): convert as many of these as possible to vec -// also define the identifiers as consts where possble? 
-// typenum/hybrid-array/generic-array too - -impl HeaderEncode for Params { - const OUTPUT_LEN: usize = 1; - type Identifier = u8; - type Output = u8; - - fn as_bytes(&self) -> Self::Output { - match self { - Self::Standard => 18u8, - Self::Hardened => 39u8, - Self::Paranoid => 56u8, - } - } - - fn from_bytes(b: Self::Output) -> Result { - match b { - 18u8 => Ok(Self::Standard), - 39u8 => Ok(Self::Hardened), - 56u8 => Ok(Self::Paranoid), - _ => Err(Error::Validity), - } - } - - fn from_reader(reader: &mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut b = [0u8; Self::OUTPUT_LEN]; - reader.read_exact(&mut b)?; - Self::from_bytes(b[0]) - } -} - -impl HeaderEncode for HashingAlgorithm { - const OUTPUT_LEN: usize = 1 + Params::OUTPUT_LEN; - type Identifier = [u8; 2]; - type Output = [u8; Self::OUTPUT_LEN]; - - fn as_bytes(&self) -> Self::Output { - match self { - Self::Argon2id(p) => [0xF2u8, p.as_bytes()], - Self::Blake3Balloon(p) => [0xA8u8, p.as_bytes()], - } - } - - fn from_bytes(b: Self::Output) -> Result { - let x = match b[0] { - 0xF2u8 => Self::Argon2id(Params::from_bytes(b[1])?), - 0xA8u8 => Self::Blake3Balloon(Params::from_bytes(b[1])?), - _ => return Err(Error::Validity), - }; - - Ok(x) - } - - fn from_reader(reader: &mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut b = Self::Output::default(); - reader.read_exact(&mut b)?; - Self::from_bytes(b) - } -} - -impl HeaderEncode for Algorithm { - const OUTPUT_LEN: usize = 2; - type Identifier = [u8; 2]; - type Output = [u8; Self::OUTPUT_LEN]; - - fn as_bytes(&self) -> Self::Output { - let s = match self { - Self::Aes256GcmSiv => 0xD3, - Self::XChaCha20Poly1305 => 0xD5, - }; - - [13u8, s] - } - - fn from_bytes(b: Self::Output) -> Result { - if b[0] != 13u8 { - return Err(Error::Validity); - } - - let a = match b[1] { - 0xD3 => Self::Aes256GcmSiv, - 0xD5 => Self::XChaCha20Poly1305, - _ => return Err(Error::Validity), - }; - - Ok(a) - } - - fn from_reader(reader: 
&mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut b = Self::Output::default(); - reader.read_exact(&mut b)?; - Self::from_bytes(b) - } -} - -impl HeaderEncode for Salt { - const OUTPUT_LEN: usize = SALT_LEN + 2; - type Identifier = [u8; 2]; - type Output = [u8; 18]; - - fn as_bytes(&self) -> Self::Output { - let mut s = [0u8; Self::OUTPUT_LEN]; - s[0] = 12u8; - s[1] = 4u8; - s[2..].copy_from_slice(self.inner()); - s - } - - fn from_bytes(b: Self::Output) -> Result { - if b[..2] != [12u8, 4u8] { - return Err(Error::Validity); - } - - let mut o = [0u8; SALT_LEN]; - o.copy_from_slice(&b[2..]); - - Ok(Self::new(o)) - } - - fn from_reader(reader: &mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut b = Self::Output::default(); - reader.read_exact(&mut b)?; - Self::from_bytes(b) - } -} - -impl HeaderEncode for Nonce { - const OUTPUT_LEN: usize = 32; - type Identifier = [u8; 2]; - type Output = [u8; Self::OUTPUT_LEN]; - - fn as_bytes(&self) -> Self::Output { - let b = match self { - Self::Aes256GcmSiv(_) => 0xB5u8, - Self::XChaCha20Poly1305(_) => 0xB7u8, - }; - - let len = self.algorithm().nonce_len(); - - let mut s = [0u8; Self::OUTPUT_LEN]; - s[0] = 99u8; - s[1] = b; - s[2..len + 2].copy_from_slice(self.inner()); - - s[len + 2..].copy_from_slice(&self.inner()[..Self::OUTPUT_LEN - 2 - len]); - - s - } - - fn from_bytes(b: Self::Output) -> Result { - if b[0] != 99u8 { - return Err(Error::Validity); - } - - let x = match b[1] { - 0xB5u8 => Self::Aes256GcmSiv(b[2..2 + AES_256_GCM_SIV_NONCE_LEN].to_array()?), - 0xB7u8 => Self::XChaCha20Poly1305(b[2..2 + XCHACHA20_POLY1305_NONCE_LEN].to_array()?), - _ => return Err(Error::Validity), - }; - - Ok(x) - } - - fn from_reader(reader: &mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut b = Self::Output::default(); - reader.read_exact(&mut b)?; - Self::from_bytes(b) - } -} - -impl HeaderEncode for EncryptedKey { - const OUTPUT_LEN: usize = ENCRYPTED_KEY_LEN + 
Nonce::OUTPUT_LEN + 2; - type Identifier = [u8; 2]; - type Output = Vec; - - fn as_bytes(&self) -> Self::Output { - let mut s = Vec::with_capacity(Self::OUTPUT_LEN); - - s.extend_from_slice(&[0x9, 0xF3]); - s.extend_from_slice(self.inner()); - s.extend_from_slice(&self.nonce().as_bytes()); - s - } - - fn from_bytes(b: Self::Output) -> Result { - if b[..2] != [9u8, 0xF3u8] { - return Err(Error::Validity); - } - - let e = Vec::from(&b[2..ENCRYPTED_KEY_LEN]).to_array()?; - let n = Nonce::from_bytes(b[2 + ENCRYPTED_KEY_LEN..].to_array()?)?; - - Ok(Self::new(e, n)) - } - - fn from_reader(reader: &mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut b = vec![0u8; Self::OUTPUT_LEN]; - reader.read_exact(&mut b)?; - Self::from_bytes(b) - } -} - -impl HeaderEncode for Keyslot { - const OUTPUT_LEN: usize = - EncryptedKey::OUTPUT_LEN + (Salt::OUTPUT_LEN * 2) + HashingAlgorithm::OUTPUT_LEN + 2; - type Identifier = [u8; 2]; - type Output = Vec; - - fn as_bytes(&self) -> Self::Output { - let mut o = vec![0x83, 0x31]; - o.extend_from_slice(&self.hashing_algorithm.as_bytes()); - o.extend_from_slice(&self.hash_salt.as_bytes()); - o.extend_from_slice(&self.salt.as_bytes()); - o.extend_from_slice(&self.encrypted_key.as_bytes()); - o - } - - fn from_bytes(b: Self::Output) -> Result { - if b[..2] != [0x83, 0x21] { - return Err(Error::Validity); - } - - let hashing_algorithm = HashingAlgorithm::from_bytes(b[2..4].to_array()?)?; - let hash_salt = Salt::from_bytes(b[4..Salt::OUTPUT_LEN + 4].to_array()?)?; - let salt = Salt::from_bytes(b[Salt::OUTPUT_LEN + 8..Salt::OUTPUT_LEN + 12].to_array()?)?; - let ek = EncryptedKey::from_bytes(b[Salt::OUTPUT_LEN + 12..].to_vec())?; - - Ok(Self { - hashing_algorithm, - hash_salt, - salt, - encrypted_key: ek, - }) - } - - fn from_reader(reader: &mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut b = vec![0u8; Self::OUTPUT_LEN]; - reader.read_exact(&mut b)?; - Self::from_bytes(b) - } -} - -impl HeaderEncode 
for HeaderObject { - const OUTPUT_LEN: usize = 0; - type Identifier = [u8; 2]; - type Output = Vec; - - fn as_bytes(&self) -> Self::Output { - let mut o = Vec::new(); - - o.extend_from_slice(&[0xF1, 51u8]); - o.extend_from_slice(&self.identifier.as_bytes()); - o.extend_from_slice(&self.nonce.as_bytes()); - - // SAFETY: this unwrap is safe as the length of the objects is capped - // will be removed in a trait overhaul which focuses on versioning too - #[allow(clippy::unwrap_used)] - o.extend_from_slice(&(TryInto::::try_into(self.data.len()).unwrap()).to_le_bytes()); - o.extend_from_slice(&self.data); - - o - } - - fn from_bytes(b: Self::Output) -> Result { - if b[..2] != [0xF1, 51u8] { - return Err(Error::Validity); - } - - let identifier = - HeaderObjectIdentifier::from_bytes(b[2..HeaderObjectIdentifier::OUTPUT_LEN].to_vec())?; - let nonce = Nonce::from_bytes( - b[HeaderObjectIdentifier::OUTPUT_LEN + 2 - ..HeaderObjectIdentifier::OUTPUT_LEN + 2 + Nonce::OUTPUT_LEN] - .to_array()?, - )?; - let data_len = u64::from_le_bytes( - b[HeaderObjectIdentifier::OUTPUT_LEN + Nonce::OUTPUT_LEN + 2 - ..HeaderObjectIdentifier::OUTPUT_LEN + Nonce::OUTPUT_LEN + 2 + 8] - .to_array()?, - ); - let data = b[HeaderObjectIdentifier::OUTPUT_LEN + Nonce::OUTPUT_LEN + 10 - ..data_len.try_into().map_err(|_| Error::Validity)?] 
- .to_vec(); - - Ok(Self { - identifier, - nonce, - data, - }) - } - - fn from_reader(reader: &mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut buffer = [0u8; mem::size_of::()]; - reader.read_exact(&mut buffer)?; - let size = u64::from_le_bytes(buffer); - - let mut buffer = vec![0u8; size.try_into().map_err(|_| Error::Validity)?]; - reader.read_exact(&mut buffer)?; - - Self::from_bytes(buffer) - } -} - -impl HeaderEncode for HeaderObjectIdentifier { - const OUTPUT_LEN: usize = 2 + EncryptedKey::OUTPUT_LEN + Salt::OUTPUT_LEN; - type Identifier = [u8; 2]; - type Output = Vec; - - fn as_bytes(&self) -> Self::Output { - let mut o = vec![0xC2, 0xE9]; - o.extend_from_slice(&self.key.as_bytes()); - o.extend_from_slice(&self.salt.as_bytes()); - o - } - - fn from_bytes(b: Self::Output) -> Result { - if b[..2] != [0xC2, 0xE9] { - return Err(Error::Validity); - } - - let ek = EncryptedKey::from_bytes(b[2..EncryptedKey::OUTPUT_LEN].to_vec())?; - let salt = Salt::from_bytes(b[EncryptedKey::OUTPUT_LEN + 2..].to_array()?)?; - - Ok(Self { key: ek, salt }) - } - - fn from_reader(reader: &mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut b = vec![0u8; Self::OUTPUT_LEN]; - reader.read_exact(&mut b)?; - Self::from_bytes(b) - } -} - -impl HeaderEncode for HeaderVersion { - const OUTPUT_LEN: usize = 2; - type Identifier = [u8; 2]; - type Output = [u8; Self::OUTPUT_LEN]; - - fn as_bytes(&self) -> Self::Output { - match self { - Self::V1 => [0xDA; 2], - } - } - - fn from_bytes(b: Self::Output) -> Result { - match b { - [0xDA, 0xDA] => Ok(Self::V1), - _ => Err(Error::Validity), - } - } - - fn from_reader(reader: &mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut b = [0u8; Self::OUTPUT_LEN]; - reader.read_exact(&mut b)?; - Self::from_bytes(b) - } -} - -impl Header { - pub fn as_bytes(&self) -> Result> { - match self.version { - HeaderVersion::V1 => self.as_bytes_v1(), - } - } - - fn as_bytes_v1(&self) -> Result> { - 
let mut o = vec![]; - o.extend_from_slice(&[0xFA, 0xDA]); - - o.extend_from_slice(&self.version.as_bytes()); - o.extend_from_slice(&self.algorithm.as_bytes()); - o.extend_from_slice(&self.nonce.as_bytes()); - - self.keyslots - .iter() - .for_each(|k| o.extend_from_slice(&k.as_bytes())); - - (0..KEYSLOT_LIMIT - self.keyslots.len()) - .for_each(|_| o.extend_from_slice(&Keyslot::random().as_bytes())); - - o.extend_from_slice( - &(TryInto::::try_into(self.objects.len()).map_err(|_| Error::Validity)?) - .to_le_bytes(), - ); - - self.objects.iter().try_for_each(|k| { - let b = k.as_bytes(); - o.extend_from_slice( - &(TryInto::::try_into(b.len()).map_err(|_| Error::Validity)?).to_le_bytes(), - ); - o.extend_from_slice(&b); - - Ok::<_, Error>(()) - })?; - - Ok(o) - } - - pub(super) fn from_reader_raw(reader: &mut R) -> Result - where - R: std::io::Read + std::io::Seek, - { - let mut m = [0u8; 2]; - reader.read_exact(&mut m)?; - - if m != [0xFA, 0xDA] { - return Err(Error::Validity); - } - - let mut buffer = [0u8; HeaderVersion::OUTPUT_LEN]; - reader.read_exact(&mut buffer)?; - let version = HeaderVersion::from_bytes(buffer)?; - - let mut buffer = [0u8; Algorithm::OUTPUT_LEN]; - reader.read_exact(&mut buffer)?; - let algorithm = Algorithm::from_bytes(buffer)?; - - let mut nonce_buffer = [0u8; Nonce::OUTPUT_LEN]; - reader.read_exact(&mut nonce_buffer)?; - let nonce = Nonce::from_bytes(nonce_buffer)?; - nonce.validate(algorithm)?; - - // we always read the limit as there will always be extra room for additional keyslots after header creation - let keyslots = (0..KEYSLOT_LIMIT) - .filter_map(|_| { - let mut buffer = [0u8; Keyslot::OUTPUT_LEN]; - reader.read_exact(&mut buffer).ok(); - Keyslot::from_bytes(buffer.to_vec()).ok() - }) - .collect::>(); - - let mut buffer = [0u8; mem::size_of::()]; - reader.read_exact(&mut buffer)?; - let objects_len = u16::from_le_bytes(buffer); - - let objects = (0..objects_len) - .map(|_| HeaderObject::from_reader(reader)) - .collect::>>()?; - - 
let h = Self { - version, - algorithm, - nonce, - keyslots, - objects, - }; - - Ok(h) - } -} diff --git a/crates/crypto/src/encoding/file/object.rs b/crates/crypto/src/encoding/file/object.rs deleted file mode 100644 index 559210fa6..000000000 --- a/crates/crypto/src/encoding/file/object.rs +++ /dev/null @@ -1,92 +0,0 @@ -use crate::{ - crypto::{Decryptor, Encryptor}, - hashing::Hasher, - types::{Aad, Algorithm, DerivationContext, EncryptedKey, Key, Nonce, Salt}, - Protected, Result, -}; - -#[derive(Clone)] -pub struct HeaderObjectIdentifier { - pub(super) key: EncryptedKey, // technically a key, although used as an identifier here - pub(super) salt: Salt, -} - -pub struct HeaderObject { - pub identifier: HeaderObjectIdentifier, - pub nonce: Nonce, - pub data: Vec, -} - -impl HeaderObject { - pub fn new( - name: &'static str, - algorithm: Algorithm, - master_key: &Key, - context: DerivationContext, - aad: Aad, - data: &[u8], - ) -> Result { - let identifier = HeaderObjectIdentifier::new(name, master_key, algorithm, context, aad)?; - - let nonce = Nonce::generate(algorithm); - let encrypted_data = Encryptor::encrypt_bytes(master_key, &nonce, algorithm, data, aad)?; - - let object = Self { - identifier, - nonce, - data: encrypted_data, - }; - - Ok(object) - } - - pub(super) fn decrypt( - &self, - algorithm: Algorithm, - aad: Aad, - master_key: &Key, - ) -> Result>> { - Decryptor::decrypt_bytes(master_key, &self.nonce, algorithm, &self.data, aad) - } -} - -impl HeaderObjectIdentifier { - pub fn new( - name: &'static str, - master_key: &Key, - algorithm: Algorithm, - context: DerivationContext, - aad: Aad, - ) -> Result { - let salt = Salt::generate(); - let nonce = Nonce::generate(algorithm); - - let encrypted_key = Encryptor::encrypt_key( - &Hasher::derive_key(master_key, salt, context), - &nonce, - algorithm, - &Hasher::blake3(name.as_bytes()), - aad, - )?; - - Ok(Self { - key: encrypted_key, - salt, - }) - } - - pub(super) fn decrypt_id( - &self, - master_key: 
&Key, - algorithm: Algorithm, - context: DerivationContext, - aad: Aad, - ) -> Result { - Decryptor::decrypt_key( - &Hasher::derive_key(master_key, self.salt, context), - algorithm, - &self.key, - aad, - ) - } -} diff --git a/crates/crypto/src/encoding/mod.rs b/crates/crypto/src/encoding/mod.rs deleted file mode 100644 index 8d2fe1005..000000000 --- a/crates/crypto/src/encoding/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -mod bincode; -pub mod file; - -pub use self::bincode::{decode, decode_from_reader, encode}; - -pub use file::header::Header; diff --git a/crates/crypto/src/encrypted.rs b/crates/crypto/src/encrypted.rs deleted file mode 100644 index 2adc7e460..000000000 --- a/crates/crypto/src/encrypted.rs +++ /dev/null @@ -1,143 +0,0 @@ -use bincode::{Decode, Encode}; -use std::marker::PhantomData; - -use crate::{ - crypto::{Decryptor, Encryptor}, - encoding::{decode, encode}, - hashing::Hasher, - primitives::ENCRYPTED_TYPE_CONTEXT, - types::{Aad, Algorithm, Key, Nonce, Salt}, - Protected, Result, -}; - -#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "specta", derive(specta::Type))] -pub struct Encrypted { - data: Vec, - algorithm: Algorithm, - nonce: Nonce, - salt: Salt, - #[cfg_attr(feature = "specta", specta(skip))] - _type: PhantomData, -} - -impl Encrypted { - pub fn new(key: &Key, item: &T, algorithm: Algorithm) -> Result - where - T: Encode + Decode, - { - let salt = Salt::generate(); - let nonce = Nonce::generate(algorithm); - - let bytes = Encryptor::encrypt_tiny( - &Hasher::derive_key(key, salt, ENCRYPTED_TYPE_CONTEXT), - &nonce, - algorithm, - &encode(item)?, - Aad::Null, - )?; - - Ok(Self { - data: bytes, - algorithm, - salt, - nonce, - _type: PhantomData, - }) - } - - pub fn new_from_bytes( - key: &Key, - item: &Protected>, - algorithm: Algorithm, - ) -> Result { - let salt = Salt::generate(); - let nonce = Nonce::generate(algorithm); - - let bytes = 
Encryptor::encrypt_tiny( - &Hasher::derive_key(key, salt, ENCRYPTED_TYPE_CONTEXT), - &nonce, - algorithm, - item.expose(), - Aad::Null, - )?; - - Ok(Self { - data: bytes, - algorithm, - salt, - nonce, - _type: PhantomData, - }) - } - - pub fn decrypt(self, key: &Key) -> Result - where - T: Encode + Decode, - { - let bytes = Decryptor::decrypt_bytes( - &Hasher::derive_key(key, self.salt, ENCRYPTED_TYPE_CONTEXT), - &self.nonce, - self.algorithm, - &self.data, - Aad::Null, - )? - .into_inner(); - - decode(&bytes) - } - - pub fn decrypt_bytes(self, key: &Key) -> Result>> { - let bytes = Decryptor::decrypt_bytes( - &Hasher::derive_key(key, self.salt, ENCRYPTED_TYPE_CONTEXT), - &self.nonce, - self.algorithm, - &self.data, - Aad::Null, - )? - .into_inner(); - - Ok(bytes.into()) - } - - pub fn as_bytes(&self) -> Result> - where - T: Encode + Decode, - { - encode(&self) - } - - // check if key is okay - #[must_use] - pub fn validate_key(&self, key: &Key) -> bool { - Decryptor::decrypt_bytes( - &Hasher::derive_key(key, self.salt, ENCRYPTED_TYPE_CONTEXT), - &self.nonce, - self.algorithm, - &self.data, - Aad::Null, - ) - .is_ok() - } - - #[must_use] - pub fn get_bytes(&self) -> Vec { - self.data.clone() - } - - #[must_use] - pub const fn get_salt(&self) -> Salt { - self.salt - } - - #[must_use] - pub const fn get_nonce(&self) -> Nonce { - self.nonce - } - - #[must_use] - pub const fn get_algorithm(&self) -> Algorithm { - self.algorithm - } -} diff --git a/crates/crypto/src/sys/fs/erase.rs b/crates/crypto/src/erase.rs similarity index 71% rename from crates/crypto/src/sys/fs/erase.rs rename to crates/crypto/src/erase.rs index d5e1f9bc0..d13b7c086 100644 --- a/crates/crypto/src/sys/fs/erase.rs +++ b/crates/crypto/src/erase.rs @@ -1,51 +1,12 @@ -use crate::{primitives::BLOCK_LEN, rng::CryptoRng, Result}; +use crate::{rng::CryptoRng, Error}; + use std::io::{Read, Seek, Write}; use rand_core::RngCore; -#[cfg(feature = "tokio")] use tokio::io::{AsyncReadExt, AsyncSeekExt, 
AsyncWriteExt}; -/// This is used for erasing a stream. -/// -/// It requires the size, an input stream and the amount of passes (to overwrite the entire stream with random data) -/// -/// It works against `BLOCK_LEN`. -/// -/// Note, it will not be ideal on flash-based storage devices. -/// The drive will be worn down, and due to wear-levelling built into the drive's firmware no tool (short of an ATA secure erase command) -/// can guarantee a perfect erasure on solid-state drives. -/// -/// This also does not factor in temporary files, caching, thumbnails, etc. -/// -/// If you are dealing with files, ensure that you truncate the length to zero before removing it via the standard -/// filesystem deletion function. -pub fn erase(stream: &mut RW, size: usize, passes: usize) -> Result -where - RW: Read + Write + Seek, -{ - let mut count = 0usize; - - let mut buf = vec![0u8; BLOCK_LEN].into_boxed_slice(); - let mut end_buf = vec![0u8; size % BLOCK_LEN].into_boxed_slice(); - - for _ in 0..passes { - stream.rewind()?; - for _ in 0..(size / BLOCK_LEN) { - CryptoRng::new().fill_bytes(&mut buf); - stream.write_all(&buf)?; - count += BLOCK_LEN; - } - - CryptoRng::new().fill_bytes(&mut end_buf); - stream.write_all(&end_buf)?; - stream.flush()?; - count += size % BLOCK_LEN; - } - - stream.rewind()?; - - Ok(count) -} +/// Erasing in blocks of 1MiB +const BLOCK_LEN: usize = 1_048_576; /// This is used for erasing a stream asynchronously. /// @@ -61,50 +22,124 @@ where /// /// If you are dealing with files, ensure that you truncate the length to zero before removing it via the standard /// filesystem deletion function. 
-#[cfg(feature = "tokio")] -pub async fn erase_async(stream: &mut RW, size: usize, passes: usize) -> Result +pub async fn erase(stream: &mut RW, size: usize, passes: usize) -> Result where RW: AsyncReadExt + AsyncWriteExt + AsyncSeekExt + Unpin + Send, { - let mut count = 0usize; + let mut rng = CryptoRng::new(); let mut buf = vec![0u8; BLOCK_LEN].into_boxed_slice(); let mut end_buf = vec![0u8; size % BLOCK_LEN].into_boxed_slice(); + let mut count = 0usize; for _ in 0..passes { - stream.rewind().await?; + stream.rewind().await.map_err(|e| Error::EraseIo { + context: "Rewinding stream", + source: e, + })?; for _ in 0..(size / BLOCK_LEN) { - CryptoRng::new().fill_bytes(&mut buf); - stream.write_all(&buf).await?; + rng.fill_bytes(&mut buf); + stream.write_all(&buf).await.map_err(|e| Error::EraseIo { + context: "Writing random bytes to stream", + source: e, + })?; count += BLOCK_LEN; } - CryptoRng::new().fill_bytes(&mut end_buf); - stream.write_all(&end_buf).await?; - stream.flush().await?; + rng.fill_bytes(&mut end_buf); + stream + .write_all(&end_buf) + .await + .map_err(|e| Error::EraseIo { + context: "Writing last block to stream", + source: e, + })?; + stream.flush().await.map_err(|e| Error::EraseIo { + context: "Flushing stream", + source: e, + })?; count += size % BLOCK_LEN; } - stream.rewind().await?; + stream.rewind().await.map_err(|e| Error::EraseIo { + context: "Final stream rewind", + source: e, + })?; + + Ok(count) +} + +/// This is used for erasing a stream. +/// +/// It requires the size, an input stream and the amount of passes (to overwrite the entire stream with random data) +/// +/// It works against `BLOCK_LEN`. +/// +/// Note, it will not be ideal on flash-based storage devices. +/// The drive will be worn down, and due to wear-levelling built into the drive's firmware no tool (short of an ATA secure erase command) +/// can guarantee a perfect erasure on solid-state drives. 
+/// +/// This also does not factor in temporary files, caching, thumbnails, etc. +/// +/// If you are dealing with files, ensure that you truncate the length to zero before removing it via the standard +/// filesystem deletion function. +pub fn erase_sync(stream: &mut RW, size: usize, passes: usize) -> Result +where + RW: Read + Write + Seek, +{ + let mut rng = CryptoRng::new(); + + let mut buf = vec![0u8; BLOCK_LEN].into_boxed_slice(); + let mut end_buf = vec![0u8; size % BLOCK_LEN].into_boxed_slice(); + + let mut count = 0; + for _ in 0..passes { + stream.rewind().map_err(|e| Error::EraseIo { + context: "Rewinding stream", + source: e, + })?; + for _ in 0..(size / BLOCK_LEN) { + rng.fill_bytes(&mut buf); + stream.write_all(&buf).map_err(|e| Error::EraseIo { + context: "Writing random bytes to stream", + source: e, + })?; + count += BLOCK_LEN; + } + + rng.fill_bytes(&mut end_buf); + stream.write_all(&end_buf).map_err(|e| Error::EraseIo { + context: "Writing last block to stream", + source: e, + })?; + stream.flush().map_err(|e| Error::EraseIo { + context: "Flushing stream", + source: e, + })?; + count += size % BLOCK_LEN; + } + + stream.rewind().map_err(|e| Error::EraseIo { + context: "Final stream rewind", + source: e, + })?; Ok(count) } #[cfg(test)] mod tests { - use crate::{ct::ConstantTimeEqNull, primitives::BLOCK_LEN}; + use crate::ct::ConstantTimeEqNull; + use std::io::Cursor; - use super::erase; - - #[cfg(feature = "tokio")] - use super::erase_async; + use super::{erase, erase_sync, BLOCK_LEN}; #[test] #[cfg_attr(miri, ignore)] fn erase_block_one_pass() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN]); - let count = erase(&mut buffer, BLOCK_LEN, 1).unwrap(); + let count = erase_sync(&mut buffer, BLOCK_LEN, 1).unwrap(); assert_eq!(count, BLOCK_LEN); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); @@ -114,7 +149,7 @@ mod tests { #[cfg_attr(miri, ignore)] fn erase_block_two_passes() { let mut buffer = 
Cursor::new(vec![0u8; BLOCK_LEN]); - let count = erase(&mut buffer, BLOCK_LEN, 2).unwrap(); + let count = erase_sync(&mut buffer, BLOCK_LEN, 2).unwrap(); assert_eq!(count, BLOCK_LEN * 2); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); @@ -124,7 +159,7 @@ mod tests { #[cfg_attr(miri, ignore)] fn erase_5_blocks_one_pass() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN * 5]); - let count = erase(&mut buffer, BLOCK_LEN * 5, 1).unwrap(); + let count = erase_sync(&mut buffer, BLOCK_LEN * 5, 1).unwrap(); assert_eq!(count, BLOCK_LEN * 5); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); @@ -134,7 +169,7 @@ mod tests { #[cfg_attr(miri, ignore)] fn erase_5_blocks_two_passes() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN * 5]); - let count = erase(&mut buffer, BLOCK_LEN * 5, 2).unwrap(); + let count = erase_sync(&mut buffer, BLOCK_LEN * 5, 2).unwrap(); assert_eq!(count, (BLOCK_LEN * 5) * 2); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); @@ -144,7 +179,7 @@ mod tests { #[cfg_attr(miri, ignore)] fn erase_small() { let mut buffer = Cursor::new(vec![0u8; 1024]); - let count = erase(&mut buffer, 1024, 1).unwrap(); + let count = erase_sync(&mut buffer, 1024, 1).unwrap(); assert_eq!(count, 1024); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); @@ -154,7 +189,7 @@ mod tests { #[cfg_attr(miri, ignore)] fn erase_small_two_passes() { let mut buffer = Cursor::new(vec![0u8; 1024]); - let count = erase(&mut buffer, 1024, 2).unwrap(); + let count = erase_sync(&mut buffer, 1024, 2).unwrap(); assert_eq!(count, 1024 * 2); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); @@ -164,7 +199,7 @@ mod tests { #[cfg_attr(miri, ignore)] fn erase_block_plus_512() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN + 512]); - let count = erase(&mut buffer, BLOCK_LEN + 512, 1).unwrap(); + let 
count = erase_sync(&mut buffer, BLOCK_LEN + 512, 1).unwrap(); assert_eq!(count, BLOCK_LEN + 512); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); @@ -174,7 +209,7 @@ mod tests { #[cfg_attr(miri, ignore)] fn erase_block_plus_512_two_passes() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN + 512]); - let count = erase(&mut buffer, BLOCK_LEN + 512, 2).unwrap(); + let count = erase_sync(&mut buffer, BLOCK_LEN + 512, 2).unwrap(); assert_eq!(count, (BLOCK_LEN + 512) * 2); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); @@ -184,106 +219,97 @@ mod tests { #[cfg_attr(miri, ignore)] fn erase_block_eight_passes() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN]); - let count = erase(&mut buffer, BLOCK_LEN, 8).unwrap(); + let count = erase_sync(&mut buffer, BLOCK_LEN, 8).unwrap(); assert_eq!(count, BLOCK_LEN * 8); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); } #[tokio::test] - #[cfg(feature = "tokio")] #[cfg_attr(miri, ignore)] async fn erase_block_one_pass_async() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN]); - let count = erase_async(&mut buffer, BLOCK_LEN, 1).await.unwrap(); + let count = erase(&mut buffer, BLOCK_LEN, 1).await.unwrap(); assert_eq!(count, BLOCK_LEN); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); } #[tokio::test] - #[cfg(feature = "tokio")] #[cfg_attr(miri, ignore)] async fn erase_block_two_passes_async() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN]); - let count = erase_async(&mut buffer, BLOCK_LEN, 2).await.unwrap(); + let count = erase(&mut buffer, BLOCK_LEN, 2).await.unwrap(); assert_eq!(count, BLOCK_LEN * 2); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); } #[tokio::test] - #[cfg(feature = "tokio")] #[cfg_attr(miri, ignore)] async fn erase_5_blocks_one_pass_async() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN * 5]); - 
let count = erase_async(&mut buffer, BLOCK_LEN * 5, 1).await.unwrap(); + let count = erase(&mut buffer, BLOCK_LEN * 5, 1).await.unwrap(); assert_eq!(count, BLOCK_LEN * 5); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); } #[tokio::test] - #[cfg(feature = "tokio")] #[cfg_attr(miri, ignore)] async fn erase_5_blocks_two_passes_async() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN * 5]); - let count = erase_async(&mut buffer, BLOCK_LEN * 5, 2).await.unwrap(); + let count = erase(&mut buffer, BLOCK_LEN * 5, 2).await.unwrap(); assert_eq!(count, (BLOCK_LEN * 5) * 2); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); } #[tokio::test] - #[cfg(feature = "tokio")] #[cfg_attr(miri, ignore)] async fn erase_small_async() { let mut buffer = Cursor::new(vec![0u8; 1024]); - let count = erase_async(&mut buffer, 1024, 1).await.unwrap(); + let count = erase(&mut buffer, 1024, 1).await.unwrap(); assert_eq!(count, 1024); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); } #[tokio::test] - #[cfg(feature = "tokio")] #[cfg_attr(miri, ignore)] async fn erase_small_two_passes_async() { let mut buffer = Cursor::new(vec![0u8; 1024]); - let count = erase_async(&mut buffer, 1024, 2).await.unwrap(); + let count = erase(&mut buffer, 1024, 2).await.unwrap(); assert_eq!(count, 1024 * 2); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); } #[tokio::test] - #[cfg(feature = "tokio")] #[cfg_attr(miri, ignore)] async fn erase_block_plus_512_async() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN + 512]); - let count = erase_async(&mut buffer, BLOCK_LEN + 512, 1).await.unwrap(); + let count = erase(&mut buffer, BLOCK_LEN + 512, 1).await.unwrap(); assert_eq!(count, BLOCK_LEN + 512); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); } #[tokio::test] - #[cfg(feature = "tokio")] #[cfg_attr(miri, ignore)] 
async fn erase_block_plus_512_two_passes_async() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN + 512]); - let count = erase_async(&mut buffer, BLOCK_LEN + 512, 2).await.unwrap(); + let count = erase(&mut buffer, BLOCK_LEN + 512, 2).await.unwrap(); assert_eq!(count, (BLOCK_LEN + 512) * 2); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); } #[tokio::test] - #[cfg(feature = "tokio")] #[cfg_attr(miri, ignore)] async fn erase_block_eight_passes_async() { let mut buffer = Cursor::new(vec![0u8; BLOCK_LEN]); - let count = erase_async(&mut buffer, BLOCK_LEN, 8).await.unwrap(); + let count = erase(&mut buffer, BLOCK_LEN, 8).await.unwrap(); assert_eq!(count, BLOCK_LEN * 8); assert_eq!(buffer.position(), 0); assert!(bool::from(buffer.into_inner().ct_ne_null())); diff --git a/crates/crypto/src/error.rs b/crates/crypto/src/error.rs index 83856b37d..efeb5c2ca 100644 --- a/crates/crypto/src/error.rs +++ b/crates/crypto/src/error.rs @@ -1,102 +1,41 @@ //! This module contains all possible errors that this crate can return. -use std::string::FromUtf8Error; - -impl From for bincode::error::EncodeError { - fn from(value: Error) -> Self { - Self::OtherString(value.to_string()) - } -} - -pub type Result = std::result::Result; +use tokio::io; /// This enum defines all possible errors that this crate can give -#[allow(deprecated)] #[derive(thiserror::Error, Debug)] pub enum Error { - // crypto primitive errors (STREAM, hashing) - #[error("there was an error while password hashing")] - Hashing, - #[error("error while encrypting")] + // crypto errors + #[error("Block too big for oneshot encryption: size in bytes = {0}")] + BlockTooBig(usize), + + /// Encrypt and decrypt errors, AEAD crate doesn't provide any error context for these + /// as it can be a security hazard to leak information about the error. 
+ #[error("Encryption error")] Encrypt, - #[error("error while decrypting (could be: wrong key, wrong data, wrong aad, etc)")] + #[error("Decryption error")] Decrypt, - // header errors - #[error("no keyslots available")] - NoKeyslots, - #[error("tried adding too many keyslots to a header")] - TooManyKeyslots, - #[error("no header objects available (or none that match)")] - NoObjects, - #[error("tried adding too many objects to a header (or too many with the same name)")] - TooManyObjects, - #[error("read magic bytes aren't equal to the expected bytes")] - MagicByteMismatch, + #[error("I/O error while encrypting: {{context: {context}, source: {source}}}")] + EncryptIo { + context: &'static str, + #[source] + source: io::Error, + }, + #[error("I/O error while decrypting: {{context: {context}, source: {source}}}")] + DecryptIo { + context: &'static str, + #[source] + source: io::Error, + }, - #[error("error while encoding with bincode: {0}")] - BincodeEncode(#[from] bincode::error::EncodeError), - - #[error("error while decoding with bincode: {0}")] - BincodeDecode(#[from] bincode::error::DecodeError), - - // #[cfg(feature = "serde")] - // #[error("error while encoding with serde")] - // Serde, - #[error("keystore error")] - Keystore, - - #[error("redb error: {0}")] - Redb(#[from] redb::Error), - #[error("redb error: {0}")] - RedbDatabase(#[from] redb::DatabaseError), - #[error("redb error: {0}")] - RedbTransaction(#[from] redb::TransactionError), - #[error("redb error: {0}")] - RedbTable(#[from] redb::TableError), - #[error("redb error: {0}")] - RedbStorage(#[from] redb::StorageError), - #[error("redb error: {0}")] - RedbCommit(#[from] redb::CommitError), - - #[error("vault root key already exists")] - RootKeyAlreadyExists, - - // general errors - #[error("expected length differs from provided length")] - LengthMismatch, - - // TODO(brxken128): remove this, and add appropriate/correct errors - #[error("expected type/value differs from provided")] - Validity, - 
#[error("string parse error")] - StringParse(#[from] FromUtf8Error), - - // i/o - #[cfg(not(feature = "tokio"))] - #[error("I/O error: {0}")] - Io(#[from] std::io::Error), - #[cfg(feature = "tokio")] - #[error("I/O error: {0}")] - AsyncIo(#[from] tokio::io::Error), - #[cfg(feature = "tokio")] - #[error("Async task join error: {0}")] - JoinError(#[from] tokio::task::JoinError), + #[error("I/O error while erasing: {{context: {context}, source: {source}}}")] + EraseIo { + context: &'static str, + #[source] + source: io::Error, + }, #[error("hex error: {0}")] Hex(#[from] hex::FromHexError), - - // keyring - #[cfg(all(target_os = "linux", feature = "keyring"))] - #[error("error with the keyutils keyring: {0}")] - KeyUtils(#[from] linux_keyutils::KeyError), - #[cfg(all(target_os = "linux", feature = "keyring", feature = "secret-service"))] - #[error("error with the secret service keyring: {0}")] - SecretService(#[from] secret_service::Error), - #[cfg(all(any(target_os = "macos", target_os = "ios"), feature = "keyring"))] - #[error("error with the apple keyring: {0}")] - AppleKeyring(#[from] security_framework::base::Error), - #[cfg(feature = "keyring")] - #[error("generic keyring error")] - Keyring, } diff --git a/crates/crypto/src/hashing.rs b/crates/crypto/src/hashing.rs deleted file mode 100644 index 398fe6938..000000000 --- a/crates/crypto/src/hashing.rs +++ /dev/null @@ -1,399 +0,0 @@ -//! This module contains all password-hashing related functions. -//! -//! Everything contained within is used to hash a user's password into strong key material, suitable for encrypting master keys. -//! -//! # Examples -//! -//! ```rust,ignore -//! let password = Protected::new(b"password".to_vec()); -//! let hashing_algorithm = HashingAlgorithm::default(); -//! let salt = generate_salt(); -//! let hashed_password = hashing_algorithm.hash(password, salt).unwrap(); -//! 
``` - -use crate::{ - primitives::KEY_LEN, - types::{DerivationContext, HashingAlgorithm, Key, Salt, SecretKey}, - Error, Protected, Result, -}; -use argon2::Argon2; -use balloon_hash::Balloon; -use zeroize::Zeroizing; - -pub struct Hasher; - -impl Hasher { - #[must_use] - pub fn blake3(bytes: &[u8]) -> Key { - blake3::hash(bytes).into() - } - - /// This is the same as `Hasher::blake3`, but returns a lowercase hex `String` - /// - /// This is not implemented for `Key` as a safety measure. - #[must_use] - pub fn blake3_hex(bytes: &[u8]) -> String { - blake3::hash(bytes).to_hex().to_string() - } - - /// This can be used to derive a key with BLAKE3-KDF, with both a salt and a derivation context. - #[must_use] - pub fn derive_key(key: &Key, salt: Salt, context: DerivationContext) -> Key { - let k = blake3::derive_key(context.inner(), &[key.expose(), salt.inner()].concat()); - Key::new(k) - } - - pub fn hash_password( - algorithm: HashingAlgorithm, - password: &Protected>, - salt: Salt, - secret: &SecretKey, - ) -> Result { - let d = algorithm.get_parameters(); - - match algorithm { - HashingAlgorithm::Argon2id(_) => Self::argon2id(password, salt, secret, d), - HashingAlgorithm::Blake3Balloon(_) => Self::blake3_balloon(password, salt, secret, d), - } - } - - fn argon2id( - password: &Protected>, - salt: Salt, - secret: &SecretKey, - params: (u32, u32, u32), - ) -> Result { - let p = - argon2::Params::new(params.0, params.1, params.2, None).map_err(|_| Error::Hashing)?; - - let mut key = Zeroizing::new([0u8; KEY_LEN]); - - let argon2 = Argon2::new_with_secret( - secret.expose(), - argon2::Algorithm::Argon2id, - argon2::Version::V0x13, - p, - ) - .map_err(|_| Error::Hashing)?; - - argon2 - .hash_password_into(password.expose(), salt.inner(), key.as_mut_slice()) - .map_or(Err(Error::Hashing), |()| Ok(Key::new(*key))) - } - - fn blake3_balloon( - password: &Protected>, - salt: Salt, - secret: &SecretKey, - params: (u32, u32, u32), - ) -> Result { - let p = - 
balloon_hash::Params::new(params.0, params.1, params.2).map_err(|_| Error::Hashing)?; - - let mut key = Zeroizing::new([0u8; KEY_LEN]); - - let balloon = Balloon::::new( - balloon_hash::Algorithm::Balloon, - p, - Some(secret.expose()), - ); - - balloon - .hash_into(password.expose(), salt.inner(), key.as_mut_slice()) - .map_or(Err(Error::Hashing), |()| Ok(Key::new(*key))) - } -} - -#[cfg(test)] -mod tests { - use crate::{ - hashing::Hasher, - primitives::{KEY_LEN, SALT_LEN, SECRET_KEY_LEN}, - types::{DerivationContext, HashingAlgorithm, Key, Params, Salt, SecretKey}, - }; - - // don't do this in production code - use separate contexts for keys and objects - const CONTEXT: DerivationContext = - DerivationContext::new("crypto 2023-03-20 20:12:42 global test context"); - - const ARGON2ID_STANDARD: HashingAlgorithm = HashingAlgorithm::Argon2id(Params::Standard); - const ARGON2ID_HARDENED: HashingAlgorithm = HashingAlgorithm::Argon2id(Params::Hardened); - const ARGON2ID_PARANOID: HashingAlgorithm = HashingAlgorithm::Argon2id(Params::Paranoid); - const BLAKE3_BALLOON_STANDARD: HashingAlgorithm = - HashingAlgorithm::Blake3Balloon(Params::Standard); - const BLAKE3_BALLOON_HARDENED: HashingAlgorithm = - HashingAlgorithm::Blake3Balloon(Params::Hardened); - const BLAKE3_BALLOON_PARANOID: HashingAlgorithm = - HashingAlgorithm::Blake3Balloon(Params::Paranoid); - - const PASSWORD: [u8; 8] = [0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64]; - - const SALT: Salt = Salt::new([0xFF; SALT_LEN]); - const SECRET_KEY: SecretKey = SecretKey::new([0x55; SECRET_KEY_LEN]); - - #[test] - #[ignore] - fn argon2id_standard() { - let output = Hasher::hash_password( - ARGON2ID_STANDARD, - &PASSWORD.to_vec().into(), - SALT, - &SecretKey::Null, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 194, 153, 245, 125, 12, 102, 65, 30, 254, 191, 9, 125, 4, 113, 99, 209, 162, 43, - 140, 93, 217, 220, 222, 46, 105, 48, 123, 220, 180, 103, 20, 11, - ]) - ); - } - - #[test] - #[ignore] - fn 
argon2id_standard_with_secret() { - let output = Hasher::hash_password( - ARGON2ID_STANDARD, - &PASSWORD.to_vec().into(), - SALT, - &SECRET_KEY, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 132, 102, 123, 67, 87, 219, 88, 76, 81, 191, 128, 41, 246, 201, 103, 155, 200, 114, - 54, 116, 240, 66, 155, 78, 73, 44, 87, 174, 231, 196, 206, 236, - ]) - ); - } - - #[test] - #[ignore] - fn argon2id_hardened() { - let output = Hasher::hash_password( - ARGON2ID_HARDENED, - &PASSWORD.to_vec().into(), - SALT, - &SecretKey::Null, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 173, 45, 167, 171, 125, 13, 245, 47, 231, 62, 175, 215, 21, 253, 84, 188, 249, 68, - 229, 98, 16, 55, 110, 202, 105, 109, 102, 71, 216, 125, 170, 66, - ]) - ); - } - - #[test] - #[ignore] - fn argon2id_hardened_with_secret() { - let output = Hasher::hash_password( - ARGON2ID_HARDENED, - &PASSWORD.to_vec().into(), - SALT, - &SECRET_KEY, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 246, 200, 29, 33, 86, 21, 66, 177, 154, 2, 134, 181, 254, 148, 104, 205, 235, 108, - 121, 127, 184, 230, 109, 240, 128, 101, 137, 179, 212, 89, 37, 41, - ]) - ); - } - - #[test] - #[ignore] - fn argon2id_paranoid() { - let output = Hasher::hash_password( - ARGON2ID_PARANOID, - &PASSWORD.to_vec().into(), - SALT, - &SecretKey::Null, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 27, 158, 230, 75, 99, 236, 40, 137, 60, 237, 145, 119, 159, 207, 56, 50, 210, 5, - 157, 227, 162, 162, 148, 142, 230, 237, 138, 133, 112, 182, 156, 198, - ]) - ); - } - - #[test] - #[ignore] - fn argon2id_paranoid_with_secret() { - let output = Hasher::hash_password( - ARGON2ID_PARANOID, - &PASSWORD.to_vec().into(), - SALT, - &SECRET_KEY, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 3, 60, 179, 196, 172, 30, 0, 201, 15, 9, 213, 59, 37, 219, 173, 134, 132, 166, 32, - 60, 33, 216, 3, 249, 185, 120, 110, 14, 155, 242, 134, 215, - ]) - ); - } - - #[test] - #[ignore] - fn blake3_balloon_standard() { - let 
output = Hasher::hash_password( - BLAKE3_BALLOON_STANDARD, - &PASSWORD.to_vec().into(), - SALT, - &SecretKey::Null, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 105, 36, 165, 219, 22, 136, 156, 19, 32, 143, 237, 150, 236, 194, 70, 113, 73, 137, - 243, 106, 80, 31, 43, 73, 207, 210, 29, 251, 88, 6, 132, 77, - ]) - ); - } - - #[test] - #[ignore] - fn blake3_balloon_standard_with_secret() { - let output = Hasher::hash_password( - BLAKE3_BALLOON_STANDARD, - &PASSWORD.to_vec().into(), - SALT, - &SECRET_KEY, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 188, 0, 43, 39, 137, 199, 91, 142, 97, 31, 98, 6, 130, 75, 251, 71, 150, 109, 29, - 62, 237, 171, 210, 22, 139, 108, 94, 190, 91, 74, 134, 47, - ]) - ); - } - - #[test] - #[ignore] - fn blake3_balloon_hardened() { - let output = Hasher::hash_password( - BLAKE3_BALLOON_HARDENED, - &PASSWORD.to_vec().into(), - SALT, - &SecretKey::Null, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 179, 71, 60, 122, 54, 72, 132, 209, 146, 96, 15, 115, 41, 95, 5, 75, 214, 135, 6, - 122, 82, 42, 158, 9, 117, 19, 19, 40, 48, 233, 207, 237, - ]) - ); - } - - #[test] - #[ignore] - fn blake3_balloon_hardened_with_secret() { - let output = Hasher::hash_password( - BLAKE3_BALLOON_HARDENED, - &PASSWORD.to_vec().into(), - SALT, - &SECRET_KEY, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 19, 247, 102, 192, 129, 184, 29, 147, 68, 215, 234, 146, 153, 221, 65, 134, 68, - 120, 207, 209, 184, 246, 127, 131, 9, 245, 91, 250, 220, 61, 76, 248, - ]) - ); - } - - #[test] - #[ignore] - fn blake3_balloon_paranoid() { - let output = Hasher::hash_password( - BLAKE3_BALLOON_PARANOID, - &PASSWORD.to_vec().into(), - SALT, - &SecretKey::Null, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 233, 60, 62, 184, 29, 152, 111, 46, 239, 126, 98, 90, 211, 255, 151, 0, 10, 189, - 61, 84, 229, 11, 245, 228, 47, 114, 87, 74, 227, 67, 24, 141, - ]) - ); - } - - #[test] - #[ignore] - fn 
blake3_balloon_paranoid_with_secret() { - let output = Hasher::hash_password( - BLAKE3_BALLOON_PARANOID, - &PASSWORD.to_vec().into(), - SALT, - &SECRET_KEY, - ) - .unwrap(); - - assert_eq!( - output, - Key::new([ - 165, 240, 162, 25, 172, 3, 232, 2, 43, 230, 226, 128, 174, 28, 211, 61, 139, 136, - 221, 197, 16, 83, 221, 18, 212, 190, 138, 79, 239, 148, 89, 215, - ]) - ); - } - - #[test] - fn blake3_kdf() { - let output = Hasher::derive_key(&Key::new([0x23; KEY_LEN]), SALT, CONTEXT); - - assert_eq!( - output, - Key::new([ - 88, 23, 212, 172, 220, 212, 247, 196, 129, 100, 18, 49, 208, 134, 247, 53, 83, 242, - 143, 131, 58, 249, 130, 168, 70, 245, 250, 128, 106, 170, 175, 255, - ]) - ); - } - - #[test] - fn blake3_hash() { - let output = Hasher::blake3(&PASSWORD); - - assert_eq!( - output, - Key::new([ - 127, 38, 17, 186, 21, 139, 109, 206, 164, 166, 156, 34, 156, 48, 51, 88, 197, 224, - 68, 147, 171, 234, 222, 225, 6, 164, 191, 164, 100, 213, 87, 135, - ]) - ); - } -} diff --git a/crates/crypto/src/keyring/apple/ios.rs b/crates/crypto/src/keyring/apple/ios.rs deleted file mode 100644 index 2278d4710..000000000 --- a/crates/crypto/src/keyring/apple/ios.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! This is Spacedrive's `iOS` keyring integration. It has no strict dependencies. 
-use crate::{ - keyring::{Identifier, KeyringBackend, KeyringInterface}, - Error, Protected, Result, -}; -use security_framework::passwords::{ - delete_generic_password, get_generic_password, set_generic_password, -}; - -pub struct IosKeyring; - -impl KeyringInterface for IosKeyring { - fn new() -> Result { - Ok(Self {}) - } - - fn name(&self) -> KeyringBackend { - KeyringBackend::Ios - } - - fn get(&self, id: &Identifier) -> Result>> { - get_generic_password(&id.application(), &id.as_apple_identifer()) - .map_err(Error::AppleKeyring) - .map(Into::into) - } - - fn contains_key(&self, id: &Identifier) -> bool { - get_generic_password(&id.application(), &id.as_apple_identifer()).map_or(false, |_| true) - } - - fn insert(&self, id: &Identifier, value: Protected>) -> Result<()> { - set_generic_password(&id.application(), &id.as_apple_identifer(), value.expose()) - .map_err(Error::AppleKeyring) - } - - fn remove(&self, id: &Identifier) -> Result<()> { - delete_generic_password(&id.application(), &id.as_apple_identifer()) - .map_err(Error::AppleKeyring) - } -} diff --git a/crates/crypto/src/keyring/apple/macos.rs b/crates/crypto/src/keyring/apple/macos.rs deleted file mode 100644 index 8606ff493..000000000 --- a/crates/crypto/src/keyring/apple/macos.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! This is Spacedrive's `MacOS` keyring integration. It has no strict dependencies. -use crate::{ - keyring::{Identifier, KeyringBackend, KeyringInterface}, - Error, Protected, Result, -}; -use security_framework::os::macos::keychain::SecKeychain; - -pub struct MacosKeyring { - inner: SecKeychain, -} - -impl KeyringInterface for MacosKeyring { - fn new() -> Result { - Ok(Self { - inner: SecKeychain::default()?, - }) - } - - fn get(&self, id: &Identifier) -> Result>> { - Ok(self - .inner - .find_generic_password(&id.application(), &id.as_apple_identifer()) - .map_err(Error::AppleKeyring)? 
- .0 - .to_owned() - .into()) - } - - fn contains_key(&self, id: &Identifier) -> bool { - self.inner - .find_generic_password(&id.application(), &id.as_apple_identifer()) - .map_or(false, |_| true) - } - - fn insert(&self, id: &Identifier, value: Protected>) -> Result<()> { - self.inner - .set_generic_password(&id.application(), &id.as_apple_identifer(), value.expose()) - .map_err(Error::AppleKeyring) - } - - fn remove(&self, id: &Identifier) -> Result<()> { - self.inner - .find_generic_password(&id.application(), &id.as_apple_identifer()) - .map_err(Error::AppleKeyring)? - .1 - .delete(); - - Ok(()) - } - - fn name(&self) -> KeyringBackend { - KeyringBackend::MacOS - } -} diff --git a/crates/crypto/src/keyring/apple/mod.rs b/crates/crypto/src/keyring/apple/mod.rs deleted file mode 100644 index 34eb56e2d..000000000 --- a/crates/crypto/src/keyring/apple/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -#[cfg(target_os = "macos")] -mod macos; -#[cfg(target_os = "macos")] -pub use macos::MacosKeyring; - -#[cfg(target_os = "ios")] -mod ios; -#[cfg(target_os = "ios")] -pub use ios::IosKeyring; diff --git a/crates/crypto/src/keyring/identifier.rs b/crates/crypto/src/keyring/identifier.rs deleted file mode 100644 index e184023d2..000000000 --- a/crates/crypto/src/keyring/identifier.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::hashing::Hasher; - -#[derive(Clone)] -pub struct Identifier { - id: String, - usage: String, - application: String, -} - -impl Identifier { - #[inline] - #[must_use] - pub fn new(id: &str, usage: &str, application: &str) -> Self { - Self { - id: id.to_string(), - usage: usage.to_string(), - application: application.to_string(), - } - } - - pub fn application(&self) -> String { - self.application.to_string() - } - - #[inline] - #[must_use] - pub(super) fn hash(&self) -> String { - format!( - "{}:{}", - self.application, - Hasher::blake3_hex(&[self.id.as_bytes(), self.usage.as_bytes()].concat()) - ) - } - - #[inline] - #[must_use] - #[cfg(any(target_os = "ios", 
target_os = "macos"))] - pub(super) fn as_apple_identifer(&self) -> String { - format!("{} - {}", self.id, self.usage) - } - - #[inline] - #[must_use] - #[cfg(all(target_os = "linux", feature = "secret-service"))] - pub(super) fn as_sec_ser_identifier(&self) -> std::collections::HashMap<&str, &str> { - std::collections::HashMap::from([(self.id.as_str(), self.usage.as_str())]) - } -} diff --git a/crates/crypto/src/keyring/linux/keyutils.rs b/crates/crypto/src/keyring/linux/keyutils.rs deleted file mode 100644 index af282b574..000000000 --- a/crates/crypto/src/keyring/linux/keyutils.rs +++ /dev/null @@ -1,69 +0,0 @@ -//! This is Spacedrive's Linux keyring implementation, which makes use of the `keyutils` API (provided by modern Linux kernels). -use crate::keyring::{Identifier, KeyringBackend, KeyringInterface, LinuxKeyring}; -use crate::{Protected, Result}; -use linux_keyutils::{KeyPermissionsBuilder, KeyRing, KeyRingIdentifier, Permission}; - -pub struct KeyutilsKeyring { - session: KeyRing, - persistent: KeyRing, -} - -const WEEK: usize = 604_800; - -impl KeyutilsKeyring { - pub fn new() -> Result { - Ok(Self { - session: KeyRing::from_special_id(KeyRingIdentifier::Session, false)?, - persistent: KeyRing::get_persistent(KeyRingIdentifier::Session)?, - }) - } -} - -impl KeyringInterface for KeyutilsKeyring { - fn new() -> Result { - Self::new() - } - - fn name(&self) -> KeyringBackend { - KeyringBackend::Linux(LinuxKeyring::Keyutils) - } - - fn contains_key(&self, id: &Identifier) -> bool { - self.session.search(&id.hash()).map_or(false, |_| true) - } - - fn get(&self, id: &Identifier) -> Result>> { - let key = self.session.search(&id.hash())?; - - self.session.link_key(key)?; - self.persistent.link_key(key)?; - - Ok(Protected::new(key.read_to_vec()?)) - } - - fn insert(&self, id: &Identifier, value: Protected>) -> Result<()> { - let key = self.session.add_key(&id.hash(), value.expose())?; - key.set_timeout(WEEK)?; - - let perms = KeyPermissionsBuilder::builder() - 
.posessor(Permission::ALL) - .user(Permission::empty()) - .group(Permission::empty()) - .world(Permission::empty()) - .build(); - - key.set_perms(perms)?; - - self.persistent.link_key(key)?; - - Ok(()) - } - - fn remove(&self, id: &Identifier) -> Result<()> { - let key = self.session.search(&id.hash())?; - - key.invalidate()?; - - Ok(()) - } -} diff --git a/crates/crypto/src/keyring/linux/mod.rs b/crates/crypto/src/keyring/linux/mod.rs deleted file mode 100644 index 0e8bf9bef..000000000 --- a/crates/crypto/src/keyring/linux/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod keyutils; -pub use keyutils::KeyutilsKeyring; - -#[cfg(feature = "secret-service")] -mod secret_service; -#[cfg(feature = "secret-service")] -pub use secret_service::SecretServiceKeyring; diff --git a/crates/crypto/src/keyring/linux/secret_service.rs b/crates/crypto/src/keyring/linux/secret_service.rs deleted file mode 100644 index 334a52b88..000000000 --- a/crates/crypto/src/keyring/linux/secret_service.rs +++ /dev/null @@ -1,73 +0,0 @@ -//! This is Spacedrive's Linux keyring implementation, which makes use of the `secret-service` API (provided by `gnome-passwords` and `kwallet`). 
-use crate::keyring::{Identifier, KeyringBackend, KeyringInterface, LinuxKeyring}; -use crate::{Error, Protected, Result}; -use secret_service::blocking::{Collection, SecretService}; -use secret_service::EncryptionType; - -pub struct SecretServiceKeyring { - session: SecretService<'static>, -} - -impl SecretServiceKeyring { - fn new() -> Result { - Ok(Self { - session: SecretService::connect(EncryptionType::Dh)?, - }) - } - - fn get_collection(&self) -> Result> { - let k = self.session.get_default_collection()?; - k.unlock()?; - - Ok(k) - } -} - -impl KeyringInterface for SecretServiceKeyring { - fn new() -> Result { - Self::new() - } - - fn name(&self) -> KeyringBackend { - KeyringBackend::Linux(LinuxKeyring::SecretService) - } - - fn contains_key(&self, id: &Identifier) -> bool { - self.get_collection().ok().is_some_and(|k| { - k.search_items(id.as_sec_ser_identifier()) - .ok() - .map_or(false, |x| !x.is_empty()) - }) - } - - fn get(&self, id: &Identifier) -> Result>> { - self.get_collection()? - .search_items(id.as_sec_ser_identifier())? - .first() - .map_or(Err(Error::Keyring), |k| { - Ok(Protected::new(hex::decode(k.get_secret()?)?)) - }) - } - - fn insert(&self, id: &Identifier, value: Protected>) -> Result<()> { - self.get_collection()?.create_item( - &id.application(), - id.as_sec_ser_identifier(), - hex::encode(value.expose()).as_bytes(), - true, - "text/plain", - )?; - - Ok(()) - } - - fn remove(&self, id: &Identifier) -> Result<()> { - self.get_collection()? - .search_items(id.as_sec_ser_identifier())? 
- .first() - .map_or(Err(Error::Keyring), |k| { - k.delete()?; - Ok(()) - }) - } -} diff --git a/crates/crypto/src/keyring/mod.rs b/crates/crypto/src/keyring/mod.rs deleted file mode 100644 index 07cf3fff8..000000000 --- a/crates/crypto/src/keyring/mod.rs +++ /dev/null @@ -1,212 +0,0 @@ -use crate::{Error, Protected, Result}; -use std::fmt::Display; - -mod identifier; -mod session; - -use identifier::Identifier; -use session::SessionKeyring; - -#[cfg(target_os = "linux")] -mod linux; - -#[cfg(any(target_os = "macos", target_os = "ios"))] -mod apple; - -// #[cfg(target_os = "windows")] -// mod windows; - -const MAX_LEN: usize = 128; - -// TODO(brxken128): use `Encrypted` type here? - -pub trait KeyringInterface { - fn new() -> Result - where - Self: Sized; - fn name(&self) -> KeyringBackend; - fn get(&self, id: &Identifier) -> Result>>; - fn remove(&self, id: &Identifier) -> Result<()>; - fn insert(&self, id: &Identifier, value: Protected>) -> Result<()>; - fn contains_key(&self, id: &Identifier) -> bool; -} - -#[derive(Clone, Copy, PartialEq, Eq)] -pub enum KeyringBackend { - Session, - #[cfg(target_os = "macos")] - MacOS, - #[cfg(target_os = "ios")] - Ios, - #[cfg(target_os = "linux")] - Linux(LinuxKeyring), -} - -#[derive(Clone, Copy, PartialEq, Eq)] -pub enum LinuxKeyring { - #[cfg(target_os = "linux")] - Keyutils, - #[cfg(all(target_os = "linux", feature = "secret-service"))] - SecretService, -} - -impl Display for KeyringBackend { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let s = match self { - Self::Session => "Session", - #[cfg(target_os = "macos")] - Self::MacOS => "MacOS", - #[cfg(target_os = "ios")] - Self::Ios => "iOS", - #[cfg(target_os = "linux")] - Self::Linux(k) => match k { - LinuxKeyring::Keyutils => "KeyUtils", - #[cfg(feature = "secret-service")] - LinuxKeyring::SecretService => "Secret Service", - }, - }; - - f.write_str(s) - } -} - -pub struct Keyring { - inner: Box, -} - -impl Keyring { - pub fn new(backend: 
KeyringBackend) -> Result { - let inner: Box = match backend { - KeyringBackend::Session => Box::new(SessionKeyring::new()?), - #[cfg(target_os = "macos")] - KeyringBackend::MacOS => Box::new(apple::MacosKeyring::new()?), - #[cfg(target_os = "Ios")] - KeyringBackend::Ios => Box::new(apple::IosKeyring::new()?), - #[cfg(target_os = "linux")] - KeyringBackend::Linux(k) => match k { - LinuxKeyring::Keyutils => Box::new(linux::KeyutilsKeyring::new()?), - #[cfg(feature = "secret-service")] - LinuxKeyring::SecretService => Box::new(linux::SecretServiceKeyring::new()?), - }, - }; - - Ok(Self { inner }) - } - - #[inline] - pub fn get(&self, id: &Identifier) -> Result>> { - self.inner.get(id) - } - - #[inline] - #[must_use] - pub fn contains_key(&self, id: &Identifier) -> bool { - self.inner.contains_key(id) - } - - #[inline] - pub fn remove(&self, id: &Identifier) -> Result<()> { - self.inner.remove(id) - } - - #[inline] - pub fn insert(&self, id: &Identifier, value: Protected>) -> Result<()> { - if value.expose().len() > MAX_LEN { - return Err(Error::Validity); // should be "value too long" - } - - self.inner.insert(id, value) - } - - #[inline] - #[must_use] - pub fn name(&self) -> KeyringBackend { - self.inner.name() - } -} - -#[cfg(test)] -mod tests { - use crate::Protected; - - use super::{Identifier, Keyring, KeyringBackend}; - - #[test] - fn full_session() { - let password = Protected::new(b"SuperSecurePassword".to_vec()); - let identifier = Identifier::new("0000-0000-0000-0000", "Password", "Crypto"); - let keyring = Keyring::new(KeyringBackend::Session).unwrap(); - - keyring.insert(&identifier, password.clone()).unwrap(); - assert!(keyring.contains_key(&identifier)); - - let pw = keyring.get(&identifier).unwrap(); - - assert_eq!(pw.expose(), password.expose()); - - keyring.remove(&identifier).unwrap(); - - assert!(!keyring.contains_key(&identifier)); - } - - #[test] - #[cfg(target_os = "linux")] - #[ignore] - fn linux_keyutils() { - let password = 
Protected::new(b"SuperSecurePassword".to_vec()); - let identifier = Identifier::new("0000-0000-0000-0000", "Password", "Crypto"); - let keyring = Keyring::new(KeyringBackend::Linux(super::LinuxKeyring::Keyutils)).unwrap(); - - keyring.insert(&identifier, password.clone()).unwrap(); - assert!(keyring.contains_key(&identifier)); - - let pw = keyring.get(&identifier).unwrap(); - - assert_eq!(pw.expose(), password.expose()); - - keyring.remove(&identifier).unwrap(); - - assert!(!keyring.contains_key(&identifier)); - } - - #[test] - #[cfg(target_os = "linux")] - #[ignore] - fn linux_secret_service() { - let password = Protected::new(b"SuperSecurePassword".to_vec()); - let identifier = Identifier::new("0000-0000-0000-0000", "Password", "Crypto"); - let keyring = - Keyring::new(KeyringBackend::Linux(super::LinuxKeyring::SecretService)).unwrap(); - - keyring.insert(&identifier, password.clone()).unwrap(); - assert!(keyring.contains_key(&identifier)); - - let pw = keyring.get(&identifier).unwrap(); - - assert_eq!(pw.expose(), password.expose()); - - keyring.remove(&identifier).unwrap(); - - assert!(!keyring.contains_key(&identifier)); - } - - #[test] - #[cfg(target_os = "macos")] - #[ignore] - fn macos() { - let password = Protected::new(b"SuperSecurePassword".to_vec()); - let identifier = Identifier::new("0000-0000-0000-0000", "Password", "Crypto"); - let keyring = Keyring::new(KeyringBackend::MacOS).unwrap(); - - keyring.insert(&identifier, password.clone()).unwrap(); - assert!(keyring.contains_key(&identifier)); - - let pw = keyring.get(&identifier).unwrap(); - - assert_eq!(pw.expose(), password.expose()); - - keyring.remove(&identifier).unwrap(); - - assert!(!keyring.contains_key(&identifier)); - } -} diff --git a/crates/crypto/src/keyring/session.rs b/crates/crypto/src/keyring/session.rs deleted file mode 100644 index 756bbc2f9..000000000 --- a/crates/crypto/src/keyring/session.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::{types::Algorithm, vault::EphemeralVault, 
Error, Protected, Result}; - -use super::{Identifier, KeyringBackend, KeyringInterface}; - -pub struct SessionKeyring { - inner: EphemeralVault>, -} - -impl KeyringInterface for SessionKeyring { - fn new() -> Result { - Ok(Self { - inner: EphemeralVault::new(Algorithm::default()), - }) - } - - fn name(&self) -> KeyringBackend { - KeyringBackend::Session - } - - fn contains_key(&self, id: &Identifier) -> bool { - self.inner.contains_key(&id.hash()).map_or(false, |x| x) - } - - fn get(&self, id: &Identifier) -> Result>> { - Ok(Protected::new( - self.inner.get(&id.hash()).map_err(|_| Error::Keyring)?, - )) - } - - fn insert(&self, id: &Identifier, value: Protected>) -> Result<()> { - self.inner - .insert(id.hash(), value.into_inner()) - .map_err(|_| Error::Keyring) - } - - fn remove(&self, id: &Identifier) -> Result<()> { - self.inner.remove(&id.hash()).map_err(|_| Error::Keyring) - } -} diff --git a/crates/crypto/src/keyring/windows.rs b/crates/crypto/src/keyring/windows.rs deleted file mode 100644 index 73bc13012..000000000 --- a/crates/crypto/src/keyring/windows.rs +++ /dev/null @@ -1 +0,0 @@ -// This file is a placeholder diff --git a/crates/crypto/src/lib.rs b/crates/crypto/src/lib.rs index 05f21562e..1115212c6 100644 --- a/crates/crypto/src/lib.rs +++ b/crates/crypto/src/lib.rs @@ -28,29 +28,15 @@ clippy::similar_names )] -pub mod crypto; +// pub mod crypto; +pub mod cloud; pub mod ct; -pub mod encoding; -pub mod encrypted; +pub mod erase; pub mod error; -pub mod hashing; pub mod primitives; pub mod protected; pub mod rng; -pub mod types; -pub mod utils; -pub mod vault; -#[cfg(all( - feature = "keyring", - any(target_os = "macos", target_os = "ios", target_os = "linux") -))] -pub mod keyring; - -#[cfg(feature = "sys")] -pub mod sys; - -pub use self::error::{Error, Result}; -pub use aead::Payload; +pub use error::Error; pub use protected::Protected; -pub use zeroize::Zeroize; +pub use rng::CryptoRng; diff --git a/crates/crypto/src/primitives.rs 
b/crates/crypto/src/primitives.rs index 393ee4db6..efa7bd33b 100644 --- a/crates/crypto/src/primitives.rs +++ b/crates/crypto/src/primitives.rs @@ -1,130 +1,56 @@ //! This module contains constant values, functions and types that are used around the crate. -//! -//! This includes things such as cryptographically-secure random salt/master key/nonce generation, -//! lengths for master keys and even the STREAM block size. // DO NOT EDIT THIS FILE. IF THESE CONSTANTS CHANGE, THINGS CAN (AND PROBABLY WILL) BREAK -use crate::types::DerivationContext; +use aead::stream::{Nonce, StreamLE31}; +use chacha20poly1305::{Tag, XChaCha20Poly1305, XNonce}; -/// This is the salt size -pub const SALT_LEN: usize = 16; +pub type OneShotNonce = XNonce; +pub type StreamNonce = Nonce>; -/// The nonce size for XChaCha20-Poly1305, minus the last 4 bytes (due to STREAM with a 31+1 bit counter) -pub const XCHACHA20_POLY1305_NONCE_LEN: usize = 20; +#[derive(Debug, Clone)] +pub struct EncryptedBlock { + pub nonce: OneShotNonce, + pub cipher_text: Vec, +} -/// The nonce size for AES-256-GCM-SIV, minus the last 4 bytes (due to STREAM with a 31+1 bit counter) -pub const AES_256_GCM_SIV_NONCE_LEN: usize = 8; +impl EncryptedBlock { + /// The block size used for STREAM encryption/decryption. This size seems to offer + /// the best performance compared to alternatives. + /// + /// The file size gain is 24 bytes per 1MiB due to nonce of XChaCha20-Poly1305 + pub const PLAIN_TEXT_SIZE: usize = 1_048_576; -/// The length of a secret key, in bytes. -pub const SECRET_KEY_LEN: usize = 18; - -/// The block size used for STREAM encryption/decryption. This size seems to offer the best performance compared to alternatives. -/// -/// The file size gain is 16 bytes per 1048576 bytes (due to the AEAD tag), plus the size of the header. -pub const BLOCK_LEN: usize = 1_048_576; - -/// This is the default AEAD tag size for all encryption algorithms used within the crate. 
-pub const AEAD_TAG_LEN: usize = 16; - -/// Length of the AAD -pub const AAD_LEN: usize = 32; - -/// Length of the AAD (for headers) -pub const AAD_HEADER_LEN: usize = 38; - -/// The length of encrypted master keys -pub const ENCRYPTED_KEY_LEN: usize = KEY_LEN + AEAD_TAG_LEN; - -/// The length of plain master/hashed keys -pub const KEY_LEN: usize = 32; - -pub const ENCRYPTED_TYPE_CONTEXT: DerivationContext = - DerivationContext::new("2023-10-02 03:19:34 Encrypted type derivation context"); - -pub(super) const ARGON2ID_STANDARD: (u32, u32, u32) = (131_072, 8, 4); -pub(super) const ARGON2ID_HARDENED: (u32, u32, u32) = (262_144, 8, 4); -pub(super) const ARGON2ID_PARANOID: (u32, u32, u32) = (524_288, 8, 4); -pub(super) const BLAKE3_BALLOON_STANDARD: (u32, u32, u32) = (131_072, 2, 1); -pub(super) const BLAKE3_BALLOON_HARDENED: (u32, u32, u32) = (262_144, 2, 1); -pub(super) const BLAKE3_BALLOON_PARANOID: (u32, u32, u32) = (524_288, 2, 1); // could increase first value 2x, and lower 2nd value to 1? + /// The size of a encrypted block with its tag. 
+ pub const CIPHER_TEXT_SIZE: usize = Self::PLAIN_TEXT_SIZE + size_of::(); +} #[cfg(test)] mod tests { - use crate::primitives::{ - AAD_LEN, AEAD_TAG_LEN, AES_256_GCM_SIV_NONCE_LEN, ARGON2ID_HARDENED, ARGON2ID_PARANOID, - ARGON2ID_STANDARD, BLAKE3_BALLOON_HARDENED, BLAKE3_BALLOON_PARANOID, - BLAKE3_BALLOON_STANDARD, BLOCK_LEN, ENCRYPTED_KEY_LEN, KEY_LEN, SECRET_KEY_LEN, - XCHACHA20_POLY1305_NONCE_LEN, - }; + use super::*; #[test] - fn argon2id_standard_params() { - assert_eq!(ARGON2ID_STANDARD, (131_072, 8, 4)); + fn test_encrypted_block_plain_text_size() { + assert_eq!(EncryptedBlock::PLAIN_TEXT_SIZE, 1_048_576); } #[test] - fn argon2id_hardened_params() { - assert_eq!(ARGON2ID_HARDENED, (262_144, 8, 4)); + fn test_one_shot_nonce_size() { + assert_eq!(size_of::(), 24); } #[test] - fn argon2id_paranoid_params() { - assert_eq!(ARGON2ID_PARANOID, (524_288, 8, 4)); + fn test_stream_nonce_size() { + assert_eq!(size_of::(), 20); } #[test] - fn blake3_balloon_standard_params() { - assert_eq!(BLAKE3_BALLOON_STANDARD, (131_072, 2, 1)); + fn xchacha_tag_size() { + assert_eq!(size_of::(), 16); } #[test] - fn blake3_balloon_hardened_params() { - assert_eq!(BLAKE3_BALLOON_HARDENED, (262_144, 2, 1)); - } - - #[test] - fn blake3_balloon_paranoid_params() { - assert_eq!(BLAKE3_BALLOON_PARANOID, (524_288, 2, 1)); - } - - #[test] - fn block_len() { - assert_eq!(BLOCK_LEN, 1_048_576); - } - - #[test] - fn secret_key_len() { - assert_eq!(SECRET_KEY_LEN, 18); - } - - #[test] - fn key_len() { - assert_eq!(KEY_LEN, 32); - } - - #[test] - fn aead_tag_len() { - assert_eq!(AEAD_TAG_LEN, 16); - } - - #[test] - fn encrypted_key_len() { - assert_eq!(ENCRYPTED_KEY_LEN, 48); - } - - #[test] - fn aad_len() { - assert_eq!(AAD_LEN, 32); - } - - #[test] - fn xchacha20_poly1305_nonce_len() { - assert_eq!(XCHACHA20_POLY1305_NONCE_LEN, 20); - } - - #[test] - fn aes_256_gcm_siv_nonce_len() { - assert_eq!(AES_256_GCM_SIV_NONCE_LEN, 8); + fn test_encrypted_block_cipher_text_size() { + 
assert_eq!(EncryptedBlock::CIPHER_TEXT_SIZE, 1_048_576 + 16); } } diff --git a/crates/crypto/src/protected.rs b/crates/crypto/src/protected.rs index 90de4accf..63d3e97c3 100644 --- a/crates/crypto/src/protected.rs +++ b/crates/crypto/src/protected.rs @@ -28,16 +28,14 @@ //! let value = protected_data.expose(); //! ``` //! + use std::{fmt::Debug, mem}; + +use serde::{Deserialize, Serialize}; use zeroize::{Zeroize, ZeroizeOnDrop}; -#[derive(Clone, Zeroize, ZeroizeOnDrop)] -#[cfg_attr( - feature = "serde", - derive(serde::Serialize, serde::Deserialize), - serde(transparent) -)] -#[cfg_attr(feature = "specta", derive(specta::Type))] +#[derive(Clone, Zeroize, ZeroizeOnDrop, Serialize, Deserialize)] +#[serde(transparent)] pub struct Protected(T) where T: Zeroize; diff --git a/crates/crypto/src/rng/csprng.rs b/crates/crypto/src/rng/csprng.rs new file mode 100644 index 000000000..7ca85eccb --- /dev/null +++ b/crates/crypto/src/rng/csprng.rs @@ -0,0 +1,82 @@ +use rand::RngCore; +use rand_chacha::ChaCha20Rng; +use rand_core::{impl_try_crypto_rng_from_crypto_rng, SeedableRng}; +use zeroize::{Zeroize, Zeroizing}; + +/// This RNG should be used throughout the entire crate. +/// +/// On `Drop`, it re-seeds the inner RNG, erasing the previous state and making all future +/// values unpredictable. +#[derive(Debug)] +pub struct CryptoRng(ChaCha20Rng); + +impl CryptoRng { + /// This creates a new [`ChaCha20Rng`]-backed [`rand::CryptoRng`] from entropy + /// (via the [getrandom](https://docs.rs/getrandom) crate). 
+ #[inline] + #[must_use] + pub fn new() -> Self { + Self(ChaCha20Rng::from_os_rng()) + } + + /// Used to generate completely random bytes, with the use of [`ChaCha20Rng`] + /// + /// Ideally this should be used for small amounts only (as it's stack allocated) + #[inline] + #[must_use] + pub fn generate_fixed(&mut self) -> [u8; I] { + let mut bytes = Zeroizing::new([0u8; I]); + self.fill_bytes(bytes.as_mut()); + *bytes + } + + /// Used to generate completely random bytes, with the use of [`ChaCha20Rng`] + #[inline] + #[must_use] + pub fn generate_vec(&mut self, size: usize) -> Vec { + let mut bytes = vec![0u8; size]; + self.fill_bytes(bytes.as_mut()); + bytes + } +} + +impl RngCore for CryptoRng { + #[inline] + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.0.fill_bytes(dest); + } + + #[inline] + fn next_u32(&mut self) -> u32 { + self.0.next_u32() + } + + #[inline] + fn next_u64(&mut self) -> u64 { + self.0.next_u64() + } +} + +impl Zeroize for CryptoRng { + #[inline] + fn zeroize(&mut self) { + self.0 = ChaCha20Rng::from_os_rng(); + } +} + +impl rand::CryptoRng for CryptoRng {} + +impl_try_crypto_rng_from_crypto_rng!(CryptoRng); + +impl Default for CryptoRng { + fn default() -> Self { + Self::new() + } +} + +impl Drop for CryptoRng { + #[inline] + fn drop(&mut self) { + self.zeroize(); + } +} diff --git a/crates/crypto/src/rng/csprng/chacha20.rs b/crates/crypto/src/rng/csprng/chacha20.rs deleted file mode 100644 index 6bbe2e09d..000000000 --- a/crates/crypto/src/rng/csprng/chacha20.rs +++ /dev/null @@ -1,80 +0,0 @@ -use rand::RngCore; -use rand_chacha::ChaCha20Rng; -use rand_core::{block::BlockRngCore, SeedableRng}; -use zeroize::{Zeroize, Zeroizing}; - -const STATE_WORDS: usize = 16; - -/// This RNG should be used throughout the entire crate. -/// -/// On `Drop`, it re-seeds the inner RNG, erasing the previous state and making all future -/// values unpredictable. 
-pub struct CryptoRng(Box); - -impl CryptoRng { - /// This creates a new `ChaCha20Rng`-backed `CryptoRng` from entropy (via the `getrandom` crate). - #[inline] - #[must_use] - pub fn new() -> Self { - Self(Box::new(ChaCha20Rng::from_entropy())) - } - - /// Used to generate completely random bytes, with the use of `ChaCha20` - /// - /// Ideally this should be used for small amounts only (as it's stack allocated) - #[inline] - #[must_use] - pub fn generate_fixed() -> [u8; I] { - let mut bytes = Zeroizing::new([0u8; I]); - Self::new().0.fill_bytes(bytes.as_mut()); - *bytes - } - - /// Used to generate completely random bytes, with the use of `ChaCha20` - #[inline] - #[must_use] - pub fn generate_vec(size: usize) -> Vec { - let mut bytes = Zeroizing::new(vec![0u8; size]); - Self::new().fill_bytes(bytes.as_mut()); - bytes.to_vec() - } -} - -impl RngCore for CryptoRng { - #[inline] - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - #[inline] - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - #[inline] - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - #[inline] - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> { - self.0.try_fill_bytes(dest) - } -} - -impl BlockRngCore for CryptoRng { - type Item = u32; - type Results = [u32; STATE_WORDS]; - - #[inline] - fn generate(&mut self, results: &mut Self::Results) { - (0..STATE_WORDS).for_each(|i| results[i] = self.next_u32()); - } -} - -impl Zeroize for CryptoRng { - #[inline] - fn zeroize(&mut self) { - *self.0 = ChaCha20Rng::from_entropy(); - } -} diff --git a/crates/crypto/src/rng/csprng/mod.rs b/crates/crypto/src/rng/csprng/mod.rs deleted file mode 100644 index 5e594a2aa..000000000 --- a/crates/crypto/src/rng/csprng/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -use zeroize::Zeroize; -mod chacha20; -pub use chacha20::CryptoRng; - -impl rand::CryptoRng for CryptoRng {} - -impl Default for CryptoRng { - fn default() -> Self { - Self::new() - } -} - -impl 
Drop for CryptoRng { - #[inline] - fn drop(&mut self) { - self.zeroize(); - } -} diff --git a/crates/crypto/src/rng/mod.rs b/crates/crypto/src/rng/mod.rs index e0bf86300..d8df631ce 100644 --- a/crates/crypto/src/rng/mod.rs +++ b/crates/crypto/src/rng/mod.rs @@ -1,5 +1,4 @@ mod csprng; -// mod mnemonic; +/// CSPRNG stands for Cryptographically Secure Pseudo Random Number Generator pub use csprng::CryptoRng; -// pub use mnemonic::{Mnemonic, MnemonicDelimiter}; diff --git a/crates/crypto/src/sys/fs/mod.rs b/crates/crypto/src/sys/fs/mod.rs deleted file mode 100644 index b58feca00..000000000 --- a/crates/crypto/src/sys/fs/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -mod erase; - -pub use erase::erase; - -#[cfg(feature = "tokio")] -pub use erase::erase_async; diff --git a/crates/crypto/src/sys/mod.rs b/crates/crypto/src/sys/mod.rs deleted file mode 100644 index d521fbd77..000000000 --- a/crates/crypto/src/sys/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod fs; diff --git a/crates/crypto/src/types.rs b/crates/crypto/src/types.rs deleted file mode 100644 index 45416ba48..000000000 --- a/crates/crypto/src/types.rs +++ /dev/null @@ -1,744 +0,0 @@ -//! This module defines all of the possible types used throughout this crate, -//! in an effort to add additional type safety. 
-use crate::{ - ct::{Choice, ConstantTimeEq, ConstantTimeEqNull}, - rng::CryptoRng, - utils::ToArray, - Error, Protected, -}; - -use aead::generic_array::{ArrayLength, GenericArray}; -use bincode::{Decode, Encode}; -use cmov::Cmov; -use std::fmt::{Debug, Display, Write}; -use zeroize::{DefaultIsZeroes, Zeroize, ZeroizeOnDrop}; - -use crate::primitives::{ - AAD_HEADER_LEN, AAD_LEN, AES_256_GCM_SIV_NONCE_LEN, ARGON2ID_HARDENED, ARGON2ID_PARANOID, - ARGON2ID_STANDARD, BLAKE3_BALLOON_HARDENED, BLAKE3_BALLOON_PARANOID, BLAKE3_BALLOON_STANDARD, - ENCRYPTED_KEY_LEN, KEY_LEN, SALT_LEN, SECRET_KEY_LEN, XCHACHA20_POLY1305_NONCE_LEN, -}; - -#[derive(Clone, Copy)] -pub struct MagicBytes([u8; I]); - -impl MagicBytes { - #[inline] - #[must_use] - pub const fn new(bytes: [u8; I]) -> Self { - Self(bytes) - } - - #[inline] - #[must_use] - pub const fn inner(&self) -> &[u8; I] { - &self.0 - } -} - -#[derive(Clone, Copy)] -pub struct DerivationContext(&'static str); - -impl DerivationContext { - #[inline] - #[must_use] - pub const fn new(context: &'static str) -> Self { - Self(context) - } - - #[inline] - #[must_use] - pub const fn inner(&self) -> &'static str { - self.0 - } -} - -/// These parameters define the password-hashing level. -/// -/// The greater the parameter, the longer the password will take to hash. -#[derive(Clone, Copy, Default, Encode, Decode)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "specta", derive(specta::Type))] -pub enum Params { - #[default] - Standard, - Hardened, - Paranoid, -} - -/// This defines all available password hashing algorithms. 
-#[derive(Clone, Copy, Encode, Decode)] -#[cfg_attr( - feature = "serde", - derive(serde::Serialize, serde::Deserialize), - serde(tag = "name", content = "params") -)] -#[cfg_attr(feature = "specta", derive(specta::Type))] -pub enum HashingAlgorithm { - Argon2id(Params), - Blake3Balloon(Params), -} - -impl Default for HashingAlgorithm { - fn default() -> Self { - Self::Argon2id(Params::default()) - } -} - -impl HashingAlgorithm { - #[inline] - #[must_use] - pub const fn get_parameters(&self) -> (u32, u32, u32) { - match self { - Self::Argon2id(p) => match p { - Params::Standard => ARGON2ID_STANDARD, - Params::Hardened => ARGON2ID_HARDENED, - Params::Paranoid => ARGON2ID_PARANOID, - }, - Self::Blake3Balloon(p) => match p { - Params::Standard => BLAKE3_BALLOON_STANDARD, - Params::Hardened => BLAKE3_BALLOON_HARDENED, - Params::Paranoid => BLAKE3_BALLOON_PARANOID, - }, - } - } -} - -/// This should be used for providing a nonce to encrypt/decrypt functions. -/// -/// You may also generate a nonce for a given algorithm with `Nonce::generate()` -#[derive(Clone, Copy, Encode, Decode)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "specta", derive(specta::Type))] -pub enum Nonce { - Aes256GcmSiv([u8; AES_256_GCM_SIV_NONCE_LEN]), - XChaCha20Poly1305([u8; XCHACHA20_POLY1305_NONCE_LEN]), -} - -impl Nonce { - #[inline] - #[must_use] - pub fn generate(algorithm: Algorithm) -> Self { - match algorithm { - Algorithm::Aes256GcmSiv => Self::Aes256GcmSiv(CryptoRng::generate_fixed()), - Algorithm::XChaCha20Poly1305 => Self::XChaCha20Poly1305(CryptoRng::generate_fixed()), - } - } - - #[inline] - #[must_use] - pub const fn inner(&self) -> &[u8] { - match self { - Self::Aes256GcmSiv(x) => x, - Self::XChaCha20Poly1305(x) => x, - } - } - - #[inline] - #[must_use] - pub const fn len(&self) -> usize { - match self { - Self::Aes256GcmSiv(x) => x.len(), - Self::XChaCha20Poly1305(x) => x.len(), - } - } - - #[inline] - #[must_use] - pub 
const fn is_empty(&self) -> bool { - match self { - Self::Aes256GcmSiv(x) => x.is_empty(), - Self::XChaCha20Poly1305(x) => x.is_empty(), - } - } - - #[inline] - #[must_use] - pub const fn algorithm(&self) -> Algorithm { - match self { - Self::Aes256GcmSiv(_) => Algorithm::Aes256GcmSiv, - Self::XChaCha20Poly1305(_) => Algorithm::XChaCha20Poly1305, - } - } - - pub fn validate(&self, algorithm: Algorithm) -> crate::Result<()> { - let mut x = 1u8; - x.cmovz(&0, (self.algorithm().ct_eq(&algorithm)).unwrap_u8()); - x.cmovz(&0, (self.inner().ct_ne_null()).unwrap_u8()); - - bool::from(Choice::from(x)) - .then_some(()) - .ok_or(Error::Validity) - } -} - -impl ConstantTimeEq for Nonce { - fn ct_eq(&self, rhs: &Self) -> Choice { - self.inner().ct_eq(rhs.inner()) - } -} - -impl From<&Nonce> for GenericArray -where - I: ArrayLength, -{ - fn from(value: &Nonce) -> Self { - match value { - Nonce::Aes256GcmSiv(x) => Self::clone_from_slice(x), - Nonce::XChaCha20Poly1305(x) => Self::clone_from_slice(x), - } - } -} - -/// These are all possible algorithms that can be used for encryption and decryption -#[derive(Clone, Copy, Default, Encode, Decode)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "specta", derive(specta::Type))] -pub enum Algorithm { - Aes256GcmSiv, - #[default] - XChaCha20Poly1305, -} - -impl ConstantTimeEq for Algorithm { - fn ct_eq(&self, rhs: &Self) -> Choice { - #[allow(clippy::as_conversions)] - (*self as u8).ct_eq(&(*rhs as u8)) - } -} - -impl PartialEq for Algorithm { - fn eq(&self, other: &Self) -> bool { - self.ct_eq(other).into() - } -} - -impl Algorithm { - /// This function returns the nonce length for a given encryption algorithm - #[inline] - #[must_use] - pub const fn nonce_len(&self) -> usize { - match self { - Self::Aes256GcmSiv => AES_256_GCM_SIV_NONCE_LEN, - Self::XChaCha20Poly1305 => XCHACHA20_POLY1305_NONCE_LEN, - } - } -} - -/// This should be used for providing a key to functions. 
-/// -/// It can either be a random key, or a hashed key. -/// -/// You may also generate a secure random key with `Key::generate()` -#[derive(Clone, Zeroize, ZeroizeOnDrop)] -#[repr(transparent)] -pub struct Key(Box<[u8; KEY_LEN]>); - -impl Key { - #[inline] - #[must_use] - pub fn new(v: [u8; KEY_LEN]) -> Self { - Self(Box::new(v)) - } - - #[inline] - #[must_use] - pub const fn expose(&self) -> &[u8] { - self.0.as_slice() - } - - #[inline] - #[must_use] - pub fn generate() -> Self { - Self::new(CryptoRng::generate_fixed()) - } - - pub fn validate(&self) -> crate::Result<()> { - bool::from(self.expose().ct_ne_null()) - .then_some(()) - .ok_or(Error::Validity) - } -} - -impl ConstantTimeEq for Key { - fn ct_eq(&self, rhs: &Self) -> Choice { - self.expose().ct_eq(rhs.expose()) - } -} - -impl PartialEq for Key { - fn eq(&self, other: &Self) -> bool { - self.ct_eq(other).into() - } -} - -#[cfg(feature = "serde")] -impl serde::Serialize for Key { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serdect::array::serialize_hex_lower_or_bin(&self.expose(), serializer) - } -} - -#[cfg(feature = "serde")] -impl<'de> serde::Deserialize<'de> for Key { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let mut buf = [0u8; 32]; - serdect::array::deserialize_hex_or_bin(&mut buf, deserializer)?; - Ok(Self::new(buf)) - } -} - -// The `serde` feature is needed as this makes use of a crate called `serdect` which -// allows for constant-time serialization and deserialization. 
We then use `bincode`s -// compatability layer to serialize through that, so in theory it should remain constant-time -#[cfg(feature = "serde")] -impl Encode for Key { - fn encode( - &self, - encoder: &mut E, - ) -> Result<(), bincode::error::EncodeError> { - bincode::serde::Compat(self).encode(encoder)?; - - Ok(()) - } -} - -// The `serde` feature is needed as this makes use of a crate called `serdect` which -// allows for constant-time serialization and deserialization. We then use `bincode`s -// compatability layer to serialize through that, so in theory it should remain constant-time -#[cfg(feature = "serde")] -impl Decode for Key { - fn decode( - decoder: &mut D, - ) -> Result { - Ok(bincode::serde::Compat::decode(decoder)?.0) - } -} - -impl From<&Key> for GenericArray -where - I: ArrayLength, -{ - fn from(value: &Key) -> Self { - GenericArray::clone_from_slice(value.expose()) - } -} - -impl From for Key { - fn from(value: blake3::Hash) -> Self { - Self::new(value.into()) - } -} - -impl TryFrom>> for Key { - type Error = Error; - - fn try_from(value: Protected>) -> Result { - Ok(Self::new(value.into_inner().to_array()?)) - } -} - -impl TryFrom>> for Key { - type Error = Error; - - fn try_from(value: Protected>) -> Result { - Ok(Self::new(value.expose().to_array()?)) - } -} - -// -// impl bincode::Encode for Key { -// fn encode( -// &self, -// encoder: &mut E, -// ) -> Result<(), bincode::error::EncodeError> { -// serdect::array::serialize_hex_lower_or_bin(self.expose(), bincode::serde::) -// } -// } - -/// This should be used for providing a secret key to functions. 
-/// -// /// You may also generate a secret key with `SecretKey::generate()` -#[derive(Zeroize, ZeroizeOnDrop, Clone)] -pub enum SecretKey { - Standard([u8; SECRET_KEY_LEN]), - Variable(Vec), - Null, -} - -impl SecretKey { - #[inline] - #[must_use] - pub const fn new(v: [u8; SECRET_KEY_LEN]) -> Self { - Self::Standard(v) - } - - #[inline] - #[must_use] - pub fn expose(&self) -> &[u8] { - match self { - Self::Standard(v) => v, - Self::Variable(v) => v, - Self::Null => &[], - } - } - - #[inline] - #[must_use] - pub fn generate() -> Self { - Self::new(CryptoRng::generate_fixed()) - } -} - -impl TryFrom>> for SecretKey { - type Error = Error; - - fn try_from(value: Protected>) -> Result { - let sk = match value.expose().len() { - // this won't fail as we check the size - SECRET_KEY_LEN => Self::new(value.into_inner().to_array()?), - 0 => Self::Null, - _ => Self::Variable(value.into_inner()), - }; - - Ok(sk) - } -} - -impl TryFrom> for SecretKey { - type Error = Error; - - fn try_from(value: Protected) -> Result { - let mut s = value.into_inner(); - s.retain(|c| c.is_ascii_hexdigit()); - - // shouldn't fail as `SecretKey::try_from` is (essentially) infallible - hex::decode(&s) - .ok() - .map_or(Protected::new(vec![]), Protected::new) - .try_into() - .map_err(|_| Error::Validity) - } -} - -impl Display for SecretKey { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let s = hex::encode(self.expose()).to_uppercase(); - let separator_distance = s.len() / 6; - s.chars().enumerate().try_for_each(|(i, c)| { - f.write_char(c)?; - if (i + 1) % separator_distance == 0 && (i + 1) != s.len() { - f.write_char('-')?; - } - - Ok(()) - }) - } -} - -/// This should be used for passing an encrypted key around. -/// -/// The length of the encrypted key is `ENCRYPTED_KEY_LEN` (which is `KEY_LEM` + `AEAD_TAG_LEN`). -/// -/// This also stores the associated `Nonce`, in order to make the API a lot cleaner. 
-#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "specta", derive(specta::Type))] -pub struct EncryptedKey( - #[cfg_attr(feature = "serde", serde(with = "serde_big_array::BigArray"))] - [u8; ENCRYPTED_KEY_LEN], - Nonce, -); - -impl EncryptedKey { - #[inline] - #[must_use] - pub const fn new(v: [u8; ENCRYPTED_KEY_LEN], nonce: Nonce) -> Self { - Self(v, nonce) - } - - #[inline] - #[must_use] - pub const fn inner(&self) -> &[u8] { - &self.0 - } - - #[inline] - #[must_use] - pub const fn nonce(&self) -> &Nonce { - &self.1 - } -} - -impl ConstantTimeEq for EncryptedKey { - fn ct_eq(&self, rhs: &Self) -> Choice { - // short circuit if algorithm (and therefore nonce lengths) don't match - if !bool::from(self.nonce().algorithm().ct_eq(&rhs.nonce().algorithm())) { - return Choice::from(0); - } - - let mut x = 1u8; - x.cmovz(&0u8, self.nonce().ct_eq(rhs.nonce()).unwrap_u8()); - x.cmovz(&0u8, self.inner().ct_eq(rhs.inner()).unwrap_u8()); - Choice::from(x) - } -} - -impl PartialEq for EncryptedKey { - fn eq(&self, other: &Self) -> bool { - self.ct_eq(other).into() - } -} - -#[derive(Clone, Copy, Default, Encode, Decode)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "specta", derive(specta::Type))] -pub enum Aad { - Standard([u8; AAD_LEN]), - Header( - #[cfg_attr(feature = "serde", serde(with = "serde_big_array::BigArray"))] - [u8; AAD_HEADER_LEN], - ), - #[default] - Null, -} - -impl Aad { - #[inline] - #[must_use] - pub fn generate() -> Self { - Self::Standard(CryptoRng::generate_fixed()) - } - - #[inline] - #[must_use] - pub const fn inner(&self) -> &[u8] { - match self { - Self::Standard(b) => b, - Self::Header(b) => b, - Self::Null => &[], - } - } -} - -impl ConstantTimeEq for Aad { - fn ct_eq(&self, other: &Self) -> Choice { - self.inner().ct_eq(other.inner()) - } -} - -impl PartialEq for Aad { - fn eq(&self, other: &Self) -> bool { - 
self.ct_eq(other).into() - } -} - -/// This should be used for passing a salt around. -/// -/// You may also generate a salt with `Salt::generate()` -#[derive(Clone, Copy, Encode, Decode)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "specta", derive(specta::Type))] -pub struct Salt([u8; SALT_LEN]); - -impl DefaultIsZeroes for Salt {} - -impl Default for Salt { - fn default() -> Self { - Self([0u8; SALT_LEN]) - } -} - -impl Salt { - #[inline] - #[must_use] - pub fn generate() -> Self { - Self(CryptoRng::generate_fixed()) - } - - #[inline] - #[must_use] - pub const fn new(v: [u8; SALT_LEN]) -> Self { - Self(v) - } - - #[inline] - #[must_use] - pub const fn inner(&self) -> &[u8] { - &self.0 - } -} - -impl TryFrom> for Salt { - type Error = Error; - - fn try_from(value: Vec) -> Result { - Ok(Self::new(value.to_array()?)) - } -} - -impl Display for HashingAlgorithm { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match *self { - Self::Argon2id(p) => write!(f, "Argon2id ({p})"), - Self::Blake3Balloon(p) => write!(f, "BLAKE3-Balloon ({p})"), - } - } -} - -impl Display for Params { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match *self { - Self::Standard => write!(f, "Standard"), - Self::Hardened => write!(f, "Hardened"), - Self::Paranoid => write!(f, "Paranoid"), - } - } -} - -impl Display for Algorithm { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match *self { - Self::Aes256GcmSiv => write!(f, "AES-256-GCM-SIV"), - Self::XChaCha20Poly1305 => write!(f, "XChaCha20-Poly1305"), - } - } -} - -impl Debug for Algorithm { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self}") - } -} - -impl Debug for Key { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str("[REDACTED]") - } -} - -impl Debug for EncryptedKey { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { - f.write_str("[REDACTED]") - } -} - -#[cfg(test)] -mod tests { - use super::Algorithm; - use crate::{ - primitives::{ - AES_256_GCM_SIV_NONCE_LEN, ENCRYPTED_KEY_LEN, KEY_LEN, XCHACHA20_POLY1305_NONCE_LEN, - }, - types::{EncryptedKey, Key, Nonce}, - }; - - const EK: [[u8; ENCRYPTED_KEY_LEN]; 2] = [[0x20; ENCRYPTED_KEY_LEN], [0x21; ENCRYPTED_KEY_LEN]]; - const NONCES: [Nonce; 2] = [ - Nonce::XChaCha20Poly1305([5u8; XCHACHA20_POLY1305_NONCE_LEN]), - Nonce::Aes256GcmSiv([8u8; AES_256_GCM_SIV_NONCE_LEN]), - ]; - - #[test] - fn encrypted_key_eq() { - // same key and nonce - assert_eq!( - EncryptedKey::new(EK[0], NONCES[0]), - EncryptedKey::new(EK[0], NONCES[0]) - ); - - // same key, different nonce - assert_ne!( - EncryptedKey::new(EK[0], NONCES[0]), - EncryptedKey::new(EK[0], NONCES[1]) - ); - - // different key, same nonce - assert_ne!( - EncryptedKey::new(EK[0], NONCES[0]), - EncryptedKey::new(EK[1], NONCES[0]) - ); - } - - #[test] - #[should_panic(expected = "assertion")] - fn encrypted_key_eq_different_key() { - // different key, same nonce - assert_eq!( - EncryptedKey::new(EK[0], NONCES[0]), - EncryptedKey::new(EK[1], NONCES[0]) - ); - } - - #[test] - #[should_panic(expected = "assertion")] - fn encrypted_key_eq_different_nonce() { - // same key, different nonce - assert_eq!( - EncryptedKey::new(EK[0], NONCES[0]), - EncryptedKey::new(EK[0], NONCES[1]) - ); - } - - #[test] - fn key_eq() { - assert_eq!(Key::new([0x23; KEY_LEN]), Key::new([0x23; KEY_LEN])); - } - - #[test] - #[should_panic(expected = "assertion")] - fn key_eq_fail() { - assert_eq!(Key::new([0x23; KEY_LEN]), Key::new([0x24; KEY_LEN])); - } - - #[test] - fn algorithm_eq() { - assert_eq!(Algorithm::XChaCha20Poly1305, Algorithm::XChaCha20Poly1305); - } - - #[test] - #[should_panic(expected = "assertion")] - fn algorithm_eq_fail() { - assert_eq!(Algorithm::XChaCha20Poly1305, Algorithm::Aes256GcmSiv); - } - - #[test] - fn key_validate() { - Key::new([0x23; KEY_LEN]).validate().unwrap(); 
- } - - #[test] - #[should_panic(expected = "Validity")] - fn key_validate_null() { - Key::new([0u8; KEY_LEN]).validate().unwrap(); - } - - #[test] - fn nonce_validate() { - Nonce::generate(Algorithm::default()) - .validate(Algorithm::default()) - .unwrap(); - } - - #[test] - #[should_panic(expected = "Validity")] - fn nonce_validate_different_algorithms() { - Nonce::generate(Algorithm::XChaCha20Poly1305) - .validate(Algorithm::Aes256GcmSiv) - .unwrap(); - } - - #[test] - #[should_panic(expected = "Validity")] - fn nonce_validate_null() { - Nonce::XChaCha20Poly1305([0u8; XCHACHA20_POLY1305_NONCE_LEN]) - .validate(Algorithm::XChaCha20Poly1305) - .unwrap(); - } -} diff --git a/crates/crypto/src/utils.rs b/crates/crypto/src/utils.rs deleted file mode 100644 index e6b81d8fc..000000000 --- a/crates/crypto/src/utils.rs +++ /dev/null @@ -1,54 +0,0 @@ -use crate::{Error, Result}; -use zeroize::Zeroize; - -pub(crate) trait ToArray { - fn to_array(self) -> Result<[u8; I]>; -} - -impl ToArray for Vec { - /// This function uses `try_into()`, and calls `zeroize` in the event of an error. - fn to_array(self) -> Result<[u8; I]> { - self.try_into().map_err(|mut b: Self| { - b.zeroize(); - Error::LengthMismatch - }) - } -} - -impl ToArray for &[u8] { - /// **Using this can be risky - ensure that you `zeroize` the source buffer before returning.** - /// - /// `zeroize` cannot be called on the input as we do not have ownership. 
- /// - /// This function copies `self` into a `Vec`, before using the `ToArray` implementation for `Vec` - fn to_array(self) -> Result<[u8; I]> { - self.to_vec().to_array() - } -} - -#[cfg(test)] -mod tests { - use crate::{ct::ConstantTimeEqNull, primitives::SALT_LEN, utils::ToArray}; - - #[test] - fn vec_to_array() { - let vec = vec![1u8; SALT_LEN]; - let array: [u8; SALT_LEN] = vec.clone().to_array().unwrap(); - - assert!(!bool::from(vec.ct_eq_null())); - assert_eq!(vec, array); - assert_eq!(vec.len(), SALT_LEN); - assert_eq!(array.len(), SALT_LEN); - } - - #[test] - fn slice_to_array() { - let slice: &[u8] = [1u8; SALT_LEN].as_ref(); - let array: [u8; SALT_LEN] = slice.to_array().unwrap(); - - assert!(!bool::from(slice.ct_eq_null())); - assert_eq!(slice, array); - assert_eq!(slice.len(), SALT_LEN); - assert_eq!(array.len(), SALT_LEN); - } -} diff --git a/crates/crypto/src/vault/ephemeral.rs b/crates/crypto/src/vault/ephemeral.rs deleted file mode 100644 index 58a4b31f1..000000000 --- a/crates/crypto/src/vault/ephemeral.rs +++ /dev/null @@ -1,84 +0,0 @@ -use zeroize::{Zeroize, Zeroizing}; - -use crate::{ - encrypted::Encrypted, - types::{Algorithm, Key}, - Error, Result, -}; -use std::{collections::HashMap, hash::Hash, sync::Mutex}; - -pub const EPHEMERAL_VAULT_ITEM_LIMIT: usize = 128; - -pub struct EphemeralVault -where - K: Hash + Eq, -{ - key: Key, - algorithm: Algorithm, - inner: Mutex>>, -} - -impl EphemeralVault -where - K: Hash + Eq, - T: bincode::Encode + bincode::Decode + Zeroize + Clone, -{ - #[must_use] - pub fn new(algorithm: Algorithm) -> Self { - Self { - key: Key::generate(), - algorithm, - inner: Mutex::new(HashMap::new()), - } - } - - #[must_use] - pub fn new_with_key(key: Key, algorithm: Algorithm) -> Self { - Self { - key, - algorithm, - inner: Mutex::new(HashMap::new()), - } - } - - pub fn contains_key(&self, id: &K) -> Result { - self.inner - .lock() - .map_or(Err(Error::Keystore), |x| Ok(x.contains_key(id))) - } - - pub fn get(&self, id: 
&K) -> Result { - self.inner - .lock() - .map_err(|_| Error::Keystore)? - .get(id) - .cloned() - .ok_or(Error::Keystore)? - .decrypt(&self.key) - } - - pub fn insert(&self, id: K, value: T) -> Result<()> { - let value = Zeroizing::new(value); - - if self.inner.lock().map_err(|_| Error::Keystore)?.len() + 1 > EPHEMERAL_VAULT_ITEM_LIMIT { - return Err(Error::Keystore); - } - - let encrypted = Encrypted::new(&self.key.clone(), &*value, self.algorithm)?; - - self.inner - .lock() - .map_err(|_| Error::Keystore)? - .insert(id, encrypted); - - Ok(()) - } - - pub fn remove(&self, id: &K) -> Result<()> { - self.inner - .lock() - .map_err(|_| Error::Keystore)? - .remove(id) - .map_or_else(|| Err(Error::Keystore), |_| Ok(())) - } -} diff --git a/crates/crypto/src/vault/mod.rs b/crates/crypto/src/vault/mod.rs deleted file mode 100644 index 4d6412f67..000000000 --- a/crates/crypto/src/vault/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod ephemeral; -#[cfg(feature = "experimental")] -mod persistent; - -pub use ephemeral::EphemeralVault; diff --git a/crates/crypto/src/vault/persistent.rs b/crates/crypto/src/vault/persistent.rs deleted file mode 100644 index ca281b62a..000000000 --- a/crates/crypto/src/vault/persistent.rs +++ /dev/null @@ -1,80 +0,0 @@ -#![allow(dead_code)] - -use std::path::PathBuf; - -use redb::{Database, ReadableTable, TableDefinition}; - -use crate::{ - encoding, - encrypted::Encrypted, - types::{Algorithm, Key, Salt}, - Error, Result, -}; - -const SECRET_KEY_TABLE: TableDefinition<'_, &'_ [u8; 32], Vec> = - TableDefinition::new("secret_keys"); -const META_TABLE: TableDefinition<'_, &'_ str, Vec> = TableDefinition::new("meta"); - -const ROOT_KEY_ID: &str = "root_key"; -const ROOT_SALT_ID: &str = "root_salt"; - -pub struct Vault { - db: Database, - key: Option, -} - -impl Vault { - pub fn open(path: PathBuf, key: Option) -> Result { - let db = Database::create(path)?; - - let txn = db.begin_write()?; - { - txn.open_table(SECRET_KEY_TABLE)?; - 
txn.open_table(META_TABLE)?; - } - txn.commit()?; - - Ok(Self { db, key }) - } - - pub fn setup(&self, key: &Key, algorithm: Option) -> Result<()> { - // provided key should be master password, (generated) salt (store that in here, like have a `retrieve decrypt info`), - // and the vault key from the OS keyring. the vault key will have to be provided ed manually if OS keyrings/the key isn't available. - // can use a QR code, or copy/paste/type (last resort) the 16-byte (hex encoed) key - - let algorithm = algorithm.unwrap_or_default(); - - let root_key = Key::generate(); - let salt = Salt::generate(); - - let encrypted_key = Encrypted::new(key, &root_key, algorithm)?; - - let txn = self.db.begin_write()?; - - { - let mut table = txn.open_table(META_TABLE)?; - if table.get(ROOT_KEY_ID).is_err() || table.get(ROOT_SALT_ID).is_err() { - return Err(Error::RootKeyAlreadyExists); - } - - table.insert(ROOT_KEY_ID, encrypted_key.as_bytes()?)?; - table.insert(ROOT_SALT_ID, encoding::encode(&salt)?)?; - } - - txn.commit()?; - - Ok(()) - } - - pub fn unlock(&self, _key: &Key) -> Result<()> { - todo!() - } - - pub fn wipe(self) -> Result<()> { - todo!() - } - - pub const fn is_unlocked(&self) -> bool { - self.key.is_some() - } -} diff --git a/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx b/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx index d66801c6d..177a931cb 100644 --- a/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx +++ b/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx @@ -218,38 +218,6 @@ function InvalidateDebugPanel() { ); } -function FeatureFlagSelector() { - const featureFlags = useFeatureFlags(); - - return ( - <> - - Feature Flags - - } - className="z-[999] mt-1 shadow-none data-[side=bottom]:slide-in-from-top-2 dark:divide-menu-selected/30 dark:border-sidebar-line dark:bg-sidebar-box" - alignToTrigger - > - {[...features, ...backendFeatures].map((feat) => ( - toggleFeatureFlag(feat)} - className="font-medium text-white" - 
icon={ - featureFlags.find((f) => feat === f) !== undefined - ? CheckSquare - : undefined - } - /> - ))} - - - ); -} // function TestNotifications() { // const coreNotif = useBridgeMutation(['notifications.test']); diff --git a/packages/client/src/stores/featureFlags.tsx b/packages/client/src/stores/featureFlags.tsx index 4001c6f23..50b963239 100644 --- a/packages/client/src/stores/featureFlags.tsx +++ b/packages/client/src/stores/featureFlags.tsx @@ -2,8 +2,8 @@ import { useEffect } from 'react'; import { createMutable } from 'solid-js/store'; import type { BackendFeature } from '../core'; -import { nonLibraryClient, useBridgeQuery } from '../rspc'; -import { createPersistedMutable, useObserver, useSolidStore } from '../solid'; +import { useBridgeQuery } from '../rspc'; +import { createPersistedMutable, useObserver } from '../solid'; export const features = [ 'backups', @@ -82,7 +82,7 @@ export function toggleFeatureFlag(flags: FeatureFlag | FeatureFlag[]) { ); if (result) { - nonLibraryClient.mutation(['toggleFeatureFlag', f as any]); + // nonLibraryClient.mutation(['toggleFeatureFlag', f as any]); } })(); diff --git a/rust-toolchain.toml b/rust-toolchain.toml index c6e4d7d50..a56a283d2 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.79" +channel = "1.80.1" From 1fd5c5f6bff78d2dd3d9a0a79a0a2461c537e42e Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Wed, 14 Aug 2024 16:56:20 -0300 Subject: [PATCH 056/218] Bad merge --- Cargo.lock | Bin 313094 -> 300290 bytes core/Cargo.toml | 1 - 2 files changed, 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 5b75528e48ea5ae8722a2f7960728e4927690c28..65b05c1670386717e3923efcc9dd0c75f6ec72d5 100644 GIT binary patch delta 3681 zcmb7HYiu3G71r)v+i{#Yj-A|_hm#od2++pN?Ck9Blq3!y(YOL7aY~9rshORbwQpnW z>uWpIv;;y^Kot#kw`EFI)coM3@N&2_eBU|W+_kOY&K>omk6ZJaPz!Exn?FCjsku}2sA66$pLYFF* z*OVTu_|LS`)`r?8rCsa(TA@2`HQP(iUVlZVbmYb(Gxfps=Gv8tDZ$)OG4A7lD&o?R 
z$-u**LP&wrp>hPbh47>ZJm1A3auGq;=APi()?;;6POs|BROr>mthxHc&y3~Ks=kWS zo85OCkx^U{eYfY=$+Ps$-nQD()0;QVI^+MP(VA`3qOW~#)-;UZjD@1ovzyHID_qVP z7nBD|`o8NZuB0RZqbig-1Ba9Ty_7%46u|RRpBcX#F&arO`%0kaYO4&>zhMa&vh$(~>J47NRsSQvOOavsl z=}0abuE=~DEVya%TTt1RGu}-`?0=o4WiKX9JH=Htu~Fi0DZH=F%);)IP|o-C4i2Qh zNqU~AhUe&)+cMX;3J(jP5lJ~#zD+&k_%aA=#;7B0g+rf05-`UUFyVw)sxn-BoPA8z~1Pz(mM{h)>UzI`6NkDAAv^%`5SrcU0*2UpCsK zmOX)P8MWp|qkB%32Zx?t8Xeh_t(@k4?k1G$k4nPt4Oyk*RPA@-UHbf6~H)t|y)$7~F&0Ms9cV%?!`MvWY7L%ks z8*b6wy;o+ENa?;Vi=^vc-dhu0xo_8+&-~`x5Iw6joddKbwjXB_Q%-c?SYv7HFWP1` z1%v&|`jsfE0eBQQ<%MdVzDX7P^SwPyE1H@eePGO7--?0tNZ1Y*2y^L6V5;X!N+Cd= zB!1|71jxz-c9r0Sa7sKRU5a>MQ|dd>$RBbRSkhatF+aNHmtJ(}wffSFua8ZK0=gi2 z^{u(l;kVyNN(4Yt8#&{`(&2Y1>!S~k?a*Td2;#1fYMavo6+_`NkEP@Od}mL}>GH3_ z#%Skj3!}xytD*}&-kmK^s-jd=7whvntj5yf zaL3kLt~RqXx`u*cUY5IJY9j7?(zvA}$tl1-{_`_N^|W~JZsTutSv6G5_jH~GGtz=| z-}X%VNPaZa^;+=<#AJ;|q4KI%KZyJAG9_FTS9M?=lZADt{(qKR?F?@we|b z|9MH-dWx)cO@A5FdjF`I(N8~P?w&SvI)0#JuBuA-OjDVlkM1&a21}E=(t|WP#Iple zc+8cahkeGd?*_^i2`8~1N?~K+`qEW)GV6sQr-*T>n2dIA%IO`?nU^I>q}+^>YORkw zXO5pS7-#mF&sIeT_bkvIFPYQhKfPcc!1K;ZY|>ECGTvS2;?dae*0lvdDX6N4bWAcc=HCP(eD+Vp01S8?jRBYaYEO%NnmrP ze83o{5HJk&4y2wdU2r^L&^UzSa^-X8+YrP6MTX4jua27w&+Mw}ENhY8{0X#)S4pNl z{`a_fv!NdtHyfs`Ik3ZMU+E}DLdrpw32OU7a?nK}bzn$vUwSvig(ppuba`@zpRhn z&RS+oTAlRccbm0(RTBjdhZ8hqLg|$F`tHY{^ z&u_CDjH-=gp2zr0*1?*}a*@@>2J7Nl#pP*O0r9u5wz9Ve-F%}Wsj12KI7PP=fFjgM;zr7SJ~!taws4 zd??8zM1s0LNhkm}1B|%ar@6{g>*VJFGc6cUfju zw02*6{HxooH8pYFy;fg!6+{bkEsnSR(AqMy9Fvs~SxSHOp1B}?;UQ~n^~B{NZg|Qv zP2D(Zc^Ypu8&Y`2w~bo2w#7$JTKBW53PSHBnMITLGD5K@JrLu917{Ij#%#E6NeG|^ z`r8GT5rkZuyFSGTm}mPEGbw$W2+%e%DLaXvtpQgj1#K!*;zym#`pWqE%QNp*#vNB> gx~tC7K+4@dJASqP;F?TjJoB2&BPt$zCNnbUzpXilng9R* delta 10261 zcmc&)dypMfnXm4inIscPCa;;iNHU3lJZAQs)90M-M#&_=x?;B__ z=FX51ta57!gxh`lp2v5-@Av(_-`6*PYUz7!TXy)qXw}B^J2!0HX@~7-P4}tMQFUMg z>8HFuDUQUei^tm@DgLJYr^~9Rl&x3)*R_#dc68#C9WT!Bu8)mcwP$1Jemh#P?H%55 z;f9T*U-}Inj|K-gO?4uiN@S{Brb(RJEHgBbJa$>^k}S`2CDJU-1ffi&#Kv4OMKfcJ 
zqQW1$F6=(2hU;SohHP^I&Ch)!YP3H4&c1OwdSKQw*VTsg-r?qY_YUnH)w}lAbn{2G z`i?O>x~Dd*hIWkX9XjA@LqpYr<6|zJ9{cLf&bPg7L=_*syQ{rg!=1g+GXJ89whfRt zHBLLim2ftfsm-(`Iiot0Oen!M(?n8fvB8Ap#F;c9iBLpHWvQV>@BWpiPi?EY6;Hx8 zH2?!CN>xT;ZZbwhsxi2=gi>t> zgazIH@~v&>FPdI|)6D*7ivzV;QWdA1XD~`aWaQ-*a8P4`=CWJ7@VBEe>9G#b%|mG|ghc zjKols8<+7IJ4iUuTq@#Xqoj>l=5oVQD!H=>jsXj!MkP}|R!%T#jU67VsiD5H10z$O zuJFa^IPUJ5ayMMp&F;Q>eQ(p*X0G?vjGldOtQjrk81+51v0VrHRQM&kiS8{eyFQ`KPPJ$M^s4Br zh2kRL%^7L`b7@>EcZI z=U)hV4xV7&GiI;t+|OsOUgps4e)!rHc9sC#vf*80FLj5R{r3NUhwRU|Ffv)wHEQ$Fn1=KQ1P}H8_wkPJza$Gt4j-?izf=<8t+tEV5J&k|xm%MC?oW1T>uktxXyHpc;(OxQf9jwmYQSf|1tih}M}x6F6aos`Qj ztJEflHxIZn#hg|7!?ys$xk`*Bgesb63f@F0q0*$VI8GE50tz1Cj8<_XXiT_Hv1ZgM zIB*m*npDfWwq;qZW|)rT*%ilFq%;OU9rEKiJJB$Ybl zsdTJ~ww*IG`KN+tL+{LWUlSKkIJIM_vLx!&Ja#r!0I38AY&nx@rmYrnEK(e@$*?>m z1SM%%1V9JCZzRvFYHVf8R5M3EJ~~vq_O}~b*D;O6tt+AZ#j%G6H>ZN6#tQCYM3=J~ zsT-B4l$xOkMB!nmaUzETp+$lROLr`5bt5?^MEc2FgPvAIky}j=Q02v%`~%+A96x+4)@Pu<($f(2E;kx zL=lUi@x50D-G6i{)65)O<=Q2&ox<|}`g*j%kKBU2mQ`dcGFQK8R*)zIV@8#Pq~e50 zEr5|IoD$Rs6B`02aj{Q@&f5?U%>-*}pJ*EPFj834|o)fSf5uJxX*Y zVwBND=1!E`n}@+mp~`Q4GFV&jgQod<51{~;E{S=H0N`;#7-+l|iQx&tky*y@5hhFx z$ukTKSw`>4`N`g(`_IaaoV042g1HlhMXLwSfyL$;E1vVfP82XDq)q%o z|4-ZRx+5GMh&gac5>V8fMj51;arakvJPbxiQ=2dDd6uZm9hi;o6pZpnw#ZX5US ztc?wf+Wi+IX(PlaB(P00A6Z$Eu^hptZ5-#?hzwE4Qk7)H?|XmHyK4VpsKnhQkO1H_#{@Bn!+~PmxiJhVfLJj!NB$PD zE;dXYM=5uG(6d5g>>c@dZOGWs=IU_6A0c7ynpt19M()(~kcJrH0-^{CK)6iqSn5(} zmyo$KDj0GKMO~v~ONGiDgi{XFK~;jWH7UB@*mGVfZ&rGF6&Wqcb~X3Vlo)_#>NS7U zW#O9Uny}Q`emln^qS$x)_5q!wgdvcjt-72^fO-ybSW%KvXtt#lXzZwoEntQ@1y@5Q zrel1Yqw3OCWmVkvoq7Iu?~68ia((!!`O{1BZ|e;53-gpg>4q|9a)W>n;G->AI)kR7s+(zv;VSPmw)$*2K-XnRdV4 zAY`m#py>?6#W+;-P%3foC|UuUHq|U4MsUjvx&ajBZJaU!2@GIrxz1_ggFlaYirX(e zy}gQ6fB2ecm9MFAVR6k9n?B$>ilfCNC;RHTJG=e5OQONzjd$iP&lC^ec}~&czjS)j zg{Pve_`biW*!BedfS*@f{h@!}JnPHm#Y`xksO3{VGaaK7d+(-g{^BQt)e{eW?9#=> zS@$pTza9WkJDj~tloVQ6DEojhX{Es7L1ls}=t`btK&*5wMUucfBsge-{*Iz$!CDqC z#edw-CSHC2Q=P}*s%$n5d~QXN{Nwlh6FZ|bCocSa$I^iNBe#IGa9YB12Akse5YT0Y 
zgy@)1BrZzOR8nyATmeJT39-2*R0yh=QPwKpS+Vb{-3t&zFuz3kxOH{gvUAF#fW}ts zc+_cRfeg3eep=mu3^}aE#z*Z2>Yx8=xUT4b?7(K)vV|)9KSK+)rtL`7S6E?c?B3J%`P5tDgF0*rV@AydTlOwxzJ;jX- z>FU^_Rh@OKDK2ANZhL|i+m1c94wutrnY$DS0_EfvSCiv6)w)hS`Sg?1lhe|&Ewd+3 zwK4m|7ryb!mHtQngObn?pd<8?I0k_LwrPNvb~z+$6i*}tqM}TqzzM?PdTH5IVsL6S z`hZjvaMRxHKl*rh+Fe(L3;d7@*H3)(C#af#u_jpQA8T*BVl#S8ah7p33OTX?7GJH32)*KX6O1Z2&Z^?CBXL6sHC`7F5+?3UDUKCz!`d#nI_OTP3B5i$O5b zB!{{OWt0+LP22!)d3)r0uSRzmDaoR-5JQG8^J<37q9mk`TqhNrq#%M|d9Z{-S(ypO z99S9yz~V?mRvG{HqrtpVg<0*N+7T>`hW76Cz26Ad`%nH=u*9G3g6?uhsGz9t-5RWE zyl8^W6=PzepENFX!I`V30mvVEXRx+mZVdLfH=cTTuweeC6E%rjSpn@&d~eqFrhydP z8bF8ktq@Fu2ISZfSOWMvdfQNWG&+th1$JR$xfOsYASQ*oA(ax;>=*Ymp8f0K=Ux8A zdxDNecu(-*F8`XhwykS?{&4W0uMPV|(fhi5vq`hEcSUgSP$o1tDvmi}6s;`q4=75s zd;-ngvS9_9kBNZDz-i2vq`sH8_4o}p1q+J5J+#zseoNHT_{h(K$KSjG7CH`NCknF0 zUtApC-_>3*;9}opYJd}{JCZ<|%`%WjkgS|2H1#O5CsmNIGVods@&o5YKh~6%L5~(4 zx7a|_DuVk~`d42OeyjNK*x-Pmj&p0k#GOE-q09rpa?pqgoCYm*ltBz}mw+Kds6Yo- z!H__A1R?6ctZaJw&%7mUpE}31Lig&T?4a!19iHcZd`0+Rbw~DRGhYL_{Zm(>j(+0J z;SXQ_>$7}tW%%Wu7Q{m1y{<~b%^k9VA%AnutMC<4MTJ|fXjI*(wt75h3?EI*Fj zEfe`k38fPAJh-f7RjrF?{P)e_mU;e;_k?G7^SKMemeEJi+sz9o8Gc&$M>3eHP%ym*r0Kh(b( zqHAhEfLy5%5$a?jsN1W z!c{Af$EOq9w3VyGLmOQ$gzs8WF{;M>3!;yAEf}?Wyn1cMFIg3J`>R$*OI};n-OYC$ zrmN?Zm*LIMZS~ys<9jxq?D%43AzGQy>Xoy3ROdQCy(l z&anoKJfKi$@eFT3-M6yQc3Rsr9rLGOX=?`S*CuX!lw@^`Op z>yMy7Hgzb05*)>}BEUN(EOH!lZ}jp6>RhH^J-kODK;t22IPg-XQ-&8&AfA_fQZ4{M>rmUTKVFY!HyB44sBEj?Ij~j dF%MixQS1{DiH@Mn{LyQJRgK{r+djDbe*vB)Y)$|G diff --git a/core/Cargo.toml b/core/Cargo.toml index 036963745..18880366a 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -96,7 +96,6 @@ uuid = { workspace = true, features = ["serde", "std", "v4", "v7"] } webp = { workspace = true } # Specific Core dependencies -async-stream = "0.3.5" bytes = "1.6" ctor = "0.2.8" flate2 = "1.0" From 5604e33bccb44407a1da63884a842454f6271a29 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Wed, 14 Aug 2024 18:18:44 -0300 Subject: [PATCH 057/218] Fixing or commenting broken crap --- 
Cargo.lock | Bin 293672 -> 299924 bytes core/Cargo.toml | 57 +- core/src/api/cloud/libraries.rs | 26 +- core/src/api/cloud/mod.rs | 98 ++-- core/src/api/ephemeral_files.rs | 14 +- core/src/cloud/sync/mod.rs | 106 ++-- core/src/cloud/sync/receive.rs | 326 +++++------ core/src/cloud/sync/send.rs | 180 +++--- core/src/crypto/error.rs | 62 --- core/src/crypto/keymanager.rs | 939 -------------------------------- core/src/crypto/mod.rs | 64 --- core/src/lib.rs | 8 +- 12 files changed, 405 insertions(+), 1475 deletions(-) delete mode 100644 core/src/crypto/error.rs delete mode 100644 core/src/crypto/keymanager.rs delete mode 100644 core/src/crypto/mod.rs diff --git a/Cargo.lock b/Cargo.lock index abf6791b73679ed0d8f732b2ec2dd94a1942c8b7..21c3952de913e874b3ba45e1c1854baafa466968 100644 GIT binary patch delta 3094 zcmZ8jYlvLs8Rnd`$!<1lvYWld#3Zw860f^{_Y0C8Bds8|HQ5rf#Wmvjxb_`^b*U$zv)8^xC1-kpuH_Rnz6%$fJR z-}^qdAOCIWyXSViGC`}=Vn|`+7j%H#f(ZKg(rd|&t=dON9{RN zhIoyrbdFGLtapeDVzp#ix%TthcCHQ_o9s=GOk}fVW}LN1dmVxbgbJ;#78L4o2cgDCl!8JoYReIhSmBr^{#lH*e}DHa>8>ZT@zue*e{stdB{WT^@#KyDrODcI zGTVxgwB?oTiq-d?eXw_QX})S2ll}LG=2E3zD;+9L@@d|lca3Ss!PoGRWnW-J{!UG%h{c&;~nekmL6Tdcc<6Q)r~ zrynWyrs9tL;OgQF{}Nl8&3bdn&6`Sk|4=rZ&>Q)YGV$wWt#-y?jzn-ls~9NEFHq;S zV+^C|NdZ@x#Z{ z(_hNQ+Gii0Ek_Miq*Ktba8fHtnT5?WE|9}i5KIu)(lLNWz>MpNGyou~D4dGKc2~DC zvBAF%ra$8B+RnG{%qBM{{B|*#s%HQl1pDB$w-}?yqmm?e4G?iOxCn=cSVTrBM?}Kx z%sOP4Ab}`PF_&DXlPlTI&iP~6{Jx%6C9b6B&%-pL_gE;erO?PDs~p3`3o9H(R6r>i znU^6j%^kqcbI)TyiZe|p;oPMQ1KD`G<#*YR-b$_BE&HGAifb<49KJ3aNj2zx262ao z1r%u;Bek$E7vMaGNM)=O1Qx6?4g#?VuXT(YWejyuYU)u)XR@9>>FmX9yt92dJD2yu zLPKr2nhsTRGvgwR!vi#!mD)1SHQ+=7a+DX0Fi8|JD_CI=Rn{|WsiI0o3L|4orqg$4 z;{%mT6{jlIMjG0k?e6>Jn{;*tSo4KSK0D(h01lw3gyaS$XJ~XVA|AQ1FltRSa*E*) zDD#*D+Z{sE0(=nxvZSLQ=i^Tv=uf|`_1w@def>b^=l5lI?b&p4IV^`XcYkim68edt zCfG{L1?37bgka<_gkY490!6PChCLf8ly%r4A3Wv3Vo>@T$+ z|BWfHBZlx8K|CY@wDZ8Yi-;kO5aq^Ep)`?zf?yqGN~-`GKmcweFc?|-+3{@bsk8ZT z;-zeFXWLWR#8)>>RqG3;y|&4g9YDfIV+99|!jUTBg9RxANfV$~1anKHfj7w@8Bhh} 
zf_XTo(eR`~?K6*$wf8>Ip9Vh42HGPx?iejidb6QX>-x2{5E`wDOGAIphKI~tqt)!n zTB}*9R~z%~3$N|&-0*I8Ymr_a&HB;@S7z6y^q2gKR67N|zVd!{|GwdMfPO~r#B>^b zF*|t226m^KOE+y?P0v+YV0TreS$SA1B(2$ogNLUtQ3|1N}(HdJ~*1N+B)@(hJ07oaVj83R0uOCkz zzEzYuw+`nE-`dt#UR?C``)X6JYMKk_(IbVK;gW!SFa-FZo>=FpiC_ZYbFf1WzbQe0 zI67bpKvF?TsRE@SXH*N7UOJwQUq1YBX|fusZhm7?%e9I#OWi%a_)LDY-n`*Woj<>s zze%sOE3L3G5_rB|t9K{jTJ>gnwYRt`J(7x{G_Vb<<0pf~E<9gfYSm0NKnXUDy3fwH zT8$}N_bc7+#HZgiO>HiuQzt;EruvGp%hGYrY`*LI#fo*^N2`@ur3>ZmU9H}%HcV^& z(oI*`F5aFEckb^iWZpM554cn(8B^xXCWrFZ&^ zq3uAxN-b_2s%4s~^mH+i-rHa7PB(2Wwx=i7@}YF_`}s&Zy0sYTGmS>5`Or*52a2iA zH@6nQyt?<3H(q!RWCAS3fL>DvjtDEZiUwdCfDa^8BDizRv49Qu93&GeSdg$V4aQ)6 zKrEfEgKcg9%D#knvFY5*O~u}kb!vTv8fY4~8#`vp7$Oa|VEjS~r;$MsVF$Hr^iC?_ z2%=!Sk{V>dOrR|S(zwnKMl?ev9iGj0-LamUTHRqf)10fHsD_sDre$DPm`#)JFX3_& zsCDn>7VEZq@qaMA_*OBTt~r~PIyZl@KzOt}*#8YNXqxEuMLIjHf4;B68@pMfzF!m))M;}r#~ zjSfo;q=H2n?I~fNN&~qDCsXw7?5v(HuG`bQJ}5PfJAe2=@#_A((Mb4cKGOcvhqGnM zg$SV7L^+4RkOG71Iy!{{@Yn$_!51Ao5Q&7qh=Nt#IO3^i0STAhKbwuW_dT$sE4xWN zlntggT+gO7a-bOReB;-}+x;1DpKN`(Y%pg*K??KWp|EAlJO_tH5tD!nx86rdJe-n& zbwN;2hI7g}g1ieZtkQ+Y^YQfhKmh{32CVtVf&9v}f32A83pH2wmD*hAwzcAXwz(_k zp?%^=J TAe9CeKa%h4T>nAw???X&AM~$% delta 414 zcmV;P0b%}>rxK|45wH+dv(Z%^2bWL+0#dVGUPwT*tDdF@lRu~nvxTSdHnU#eqa3r~ z{DD`uTU7xe2Dd(30c0PyfPDd^0=FiD0a+clS)loD+3ajXCVVN zm!BL1510Nw0STATtpW;{(;Nd7m+cJ$50~I10};0z9Rs=}x41zA{~fpYU;`~Nmvoc^ z5tp_%0uPgbOj5VQlLOcSw^^41Cmpwkv;)B;m-^QO3YTOZ0}!`?+5@2q0W^~_$5TIJ zGBq?}F)=w~W;8isWMpMCWM*Y%GBIK?WMecrGGt>oFfuk`H#IdjHe+EjG+|*kH#s>o zWn*JxH)CRBF=CUku@;x#Dgz6b*5v~gm)HRU6PFa`0}_{buLBUbKIQ|O0k@gx13e+P InF<8^i2TZv{r~^~ diff --git a/core/Cargo.toml b/core/Cargo.toml index 14ef16efd..19e5249bc 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -86,37 +86,34 @@ tracing-subscriber = { workspace = true, features = ["env-filter"] } uuid = { workspace = true, features = ["serde", "std", "v4", "v7"] } webp = { workspace = true } + +# Specific Core dependencies +bytes = "1.6" +ctor = "0.2.8" +flate2 = "1.0" +hostname = "0.4.0" +http-body = 
"0.4.6" # Update blocked by http +http-range = "0.1.5" +int-enum = "0.5" # Update blocked due to API breaking changes +mini-moka = "0.10.3" +serde-hashkey = "0.4.5" +serde_repr = "0.1.19" +serde_with = "3.8" +slotmap = "1.0" +sysinfo = "0.29.11" # Update blocked due to API breaking changes +tar = "0.4.41" +tower-service = "0.3.2" +tracing-appender = "0.2.3" + [dependencies.tokio] features = ["io-util", "macros", "process", "rt-multi-thread", "sync", "time"] workspace = true -# Specific Core dependencies -async-recursion = "1.1" -async-stream = "0.3.5" -bytes = "1.6" -ctor = "0.2.8" -flate2 = "1.0" -hostname = "0.4.0" -http-body = "0.4.6" # Update blocked by http -http-range = "0.1.5" -int-enum = "0.5" # Update blocked due to API breaking changes -keyring = { version = "3.0.4", features = [ - "apple-native", - "sync-secret-service", - "windows-native" -] } -mini-moka = "0.10.3" -notify = { git = "https://github.com/notify-rs/notify.git", rev = "c3929ed114", default-features = false, features = [ - "macos_fsevent" -] } -serde-hashkey = "0.4.5" -serde_repr = "0.1.19" -serde_with = "3.8" -slotmap = "1.0" -sysinfo = "0.29.11" # Update blocked due to API breaking changes -tar = "0.4.41" -tower-service = "0.3.2" -tracing-appender = "0.2.3" +[dependencies.notify] +default-features = false +features = ["macos_fsevent"] +git = "https://github.com/notify-rs/notify.git" +rev = "c3929ed114" # Override features of transitive dependencies [dependencies.openssl] @@ -129,13 +126,13 @@ version = "0.9.103" # Platform-specific dependencies [target.'cfg(target_os = "macos")'.dependencies] plist = "1.6" -trash = "4.1" +trash = "5.1.0" [target.'cfg(target_os = "linux")'.dependencies] -trash = "4.1" +trash = "5.1.0" [target.'cfg(target_os = "windows")'.dependencies] -trash = "4.1" +trash = "5.1.0" [target.'cfg(target_os = "ios")'.dependencies] icrate = { version = "0.1.2", features = [ diff --git a/core/src/api/cloud/libraries.rs b/core/src/api/cloud/libraries.rs index 
9ee6fb69d..bbc7d3027 100644 --- a/core/src/api/cloud/libraries.rs +++ b/core/src/api/cloud/libraries.rs @@ -1,19 +1,19 @@ -use crate::{api::utils::library, invalidate_query}; -use rspc::alpha::AlphaRouter; +use crate::api::{utils::library, Ctx, R}; + use sd_cloud_schema::{auth::AccessToken, devices, libraries}; + +use rspc::alpha::AlphaRouter; use tracing::debug; -use crate::{ - api::{Ctx, R}, - try_get_cloud_services_client, -}; +use super::try_get_cloud_services_client; pub fn mount() -> AlphaRouter { R.router() .procedure("get", { R.query(|node, req: libraries::get::Request| async move { let libraries::get::Response(library) = super::handle_comm_error( - try_get_cloud_services_client!(node)? + try_get_cloud_services_client(&node) + .await? .libraries() .get(req) .await, @@ -28,7 +28,8 @@ pub fn mount() -> AlphaRouter { .procedure("list", { R.query(|node, req: libraries::list::Request| async move { let libraries::list::Response(libraries) = super::handle_comm_error( - try_get_cloud_services_client!(node)? + try_get_cloud_services_client(&node) + .await? .libraries() .list(req) .await, @@ -56,7 +57,8 @@ pub fn mount() -> AlphaRouter { device_pub_id: args.device_pub_id, }; super::handle_comm_error( - try_get_cloud_services_client!(node)? + try_get_cloud_services_client(&node) + .await? .libraries() .create(req) .await, @@ -69,7 +71,8 @@ pub fn mount() -> AlphaRouter { .procedure("delete", { R.mutation(|node, req: libraries::delete::Request| async move { super::handle_comm_error( - try_get_cloud_services_client!(node)? + try_get_cloud_services_client(&node) + .await? .libraries() .delete(req) .await, @@ -84,7 +87,8 @@ pub fn mount() -> AlphaRouter { .procedure("update", { R.mutation(|node, req: libraries::update::Request| async move { super::handle_comm_error( - try_get_cloud_services_client!(node)? + try_get_cloud_services_client(&node) + .await? 
.libraries() .update(req) .await, diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index ec3d0177b..d676e7323 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -35,64 +35,64 @@ pub(crate) fn mount() -> AlphaRouter { .merge("libraries.", libraries::mount()) .merge("locations.", locations::mount()) .merge("devices.", devices::mount()) - .procedure("bootstrap", { - R.mutation(|node, access_token: auth::AccessToken| async move { - use sd_cloud_schema::devices; + // .procedure("bootstrap", { + // R.mutation(|node, access_token: auth::AccessToken| async move { + // use sd_cloud_schema::devices; - let client = try_get_cloud_services_client(&node).await?; + // let client = try_get_cloud_services_client(&node).await?; - // create user route is idempotent, so we can safely keep creating the same user over and over - handle_comm_error( - client - .users() - .create(users::create::Request { - access_token: access_token.clone(), - }) - .await, - "Failed to create user;", - )??; + // // create user route is idempotent, so we can safely keep creating the same user over and over + // handle_comm_error( + // client + // .users() + // .create(users::create::Request { + // access_token: access_token.clone(), + // }) + // .await, + // "Failed to create user;", + // )??; - let device_pub_id = devices::PubId(node.config.get().await.id); - let mut hasher = blake3::Hasher::new(); - hasher.update(device_pub_id.0.as_bytes().as_slice()); - let hashed_pub_id = hasher.finalize(); + // let device_pub_id = devices::PubId(node.config.get().await.id); + // let mut hasher = blake3::Hasher::new(); + // hasher.update(device_pub_id.0.as_bytes().as_slice()); + // let hashed_pub_id = hasher.finalize(); - match handle_comm_error( - client - .devices() - .get(devices::get::Request { - access_token: access_token.clone(), - pub_id: device_pub_id, - }) - .await, - "Failed to get device on cloud bootstrap;", - )? 
{ - Ok(_) => { - // Device registered, we execute a device hello flow - self::devices::hello(&client, access_token, device_pub_id, hashed_pub_id) - .await - } - Err(Error::Client(ClientSideError::NotFound(_))) => { - // Device not registered, we execute a device register flow - todo!() - } - Err(e) => return Err(e.into()), - } + // match handle_comm_error( + // client + // .devices() + // .get(devices::get::Request { + // access_token: access_token.clone(), + // pub_id: device_pub_id, + // }) + // .await, + // "Failed to get device on cloud bootstrap;", + // )? { + // Ok(_) => { + // // Device registered, we execute a device hello flow + // self::devices::hello(&client, access_token, device_pub_id, hashed_pub_id) + // .await + // } + // Err(Error::Client(ClientSideError::NotFound(_))) => { + // // Device not registered, we execute a device register flow + // todo!() + // } + // Err(e) => return Err(e.into()), + // } - // TODO: figure out a way to know if we need to register the device or send a device hello request + // // TODO: figure out a way to know if we need to register the device or send a device hello request - // TODO: in case of a device register request, we use the OPAQUE key to encrypt iroh's secret key (NodeId) - // and save on data directory + // // TODO: in case of a device register request, we use the OPAQUE key to encrypt iroh's secret key (NodeId) + // // and save on data directory - // TODO: in case of a device hello request, we use the OPAQUE key to decrypt iroh's secret key (NodeId) - // and keep it in memory + // // TODO: in case of a device hello request, we use the OPAQUE key to decrypt iroh's secret key (NodeId) + // // and keep it in memory - // TODO: With this device iroh's secret key (NodeId) now known and we can start the iroh - // node for cloud p2p + // // TODO: With this device iroh's secret key (NodeId) now known and we can start the iroh + // // node for cloud p2p - Ok(()) - }) - }) + // Ok(()) + // }) + // }) } fn 
handle_comm_error( diff --git a/core/src/api/ephemeral_files.rs b/core/src/api/ephemeral_files.rs index c2cc85a52..3acd7c573 100644 --- a/core/src/api/ephemeral_files.rs +++ b/core/src/api/ephemeral_files.rs @@ -23,7 +23,6 @@ use sd_utils::error::FileIOError; use std::{ffi::OsStr, path::PathBuf, str::FromStr}; -use async_recursion::async_recursion; use futures_concurrency::future::TryJoin; use regex::Regex; use rspc::{alpha::AlphaRouter, ErrorCode}; @@ -481,7 +480,6 @@ impl EphemeralFileSystemOps { Ok(()) } - #[async_recursion] async fn copy(self, library: &Library) -> Result<(), rspc::Error> { self.check().await?; @@ -584,11 +582,13 @@ impl EphemeralFileSystemOps { .await?; if !more_files.is_empty() { - Self { - sources: more_files, - target_dir: target, - } - .copy(library) + Box::pin( + Self { + sources: more_files, + target_dir: target, + } + .copy(library), + ) .await } else { Ok(()) diff --git a/core/src/cloud/sync/mod.rs b/core/src/cloud/sync/mod.rs index 8a52025bb..a2ce3538e 100644 --- a/core/src/cloud/sync/mod.rs +++ b/core/src/cloud/sync/mod.rs @@ -33,64 +33,64 @@ pub async fn declare_actors( let autorun = node.cloud_sync_flag.load(atomic::Ordering::Relaxed); - actors - .declare( - "Cloud Sync Sender", - { - let sync = sync.clone(); - let node = node.clone(); - let active = state.send_active.clone(); - let active_notifier = state.notifier.clone(); + // actors + // .declare( + // "Cloud Sync Sender", + // { + // let sync = sync.clone(); + // let node = node.clone(); + // let active = state.send_active.clone(); + // let active_notifier = state.notifier.clone(); - move |stop| send::run_actor(library_id, sync, node, active, active_notifier, stop) - }, - autorun, - ) - .await; + // move |stop| send::run_actor(library_id, sync, node, active, active_notifier, stop) + // }, + // autorun, + // ) + // .await; - actors - .declare( - "Cloud Sync Receiver", - { - let sync = sync.clone(); - let node = node.clone(); - let ingest_notify = ingest_notify.clone(); - let 
active_notifier = state.notifier.clone(); - let active = state.receive_active.clone(); + // actors + // .declare( + // "Cloud Sync Receiver", + // { + // let sync = sync.clone(); + // let node = node.clone(); + // let ingest_notify = ingest_notify.clone(); + // let active_notifier = state.notifier.clone(); + // let active = state.receive_active.clone(); - move |stop| { - receive::run_actor( - node.libraries.clone(), - db.clone(), - library_id, - instance_uuid, - sync, - ingest_notify, - node, - active, - active_notifier, - stop, - ) - } - }, - autorun, - ) - .await; + // move |stop| { + // receive::run_actor( + // node.libraries.clone(), + // db.clone(), + // library_id, + // instance_uuid, + // sync, + // ingest_notify, + // node, + // active, + // active_notifier, + // stop, + // ) + // } + // }, + // autorun, + // ) + // .await; - actors - .declare( - "Cloud Sync Ingest", - { - let active = state.ingest_active.clone(); - let active_notifier = state.notifier.clone(); + // actors + // .declare( + // "Cloud Sync Ingest", + // { + // let active = state.ingest_active.clone(); + // let active_notifier = state.notifier.clone(); - move |stop| { - ingest::run_actor(sync.clone(), ingest_notify, active, active_notifier, stop) - } - }, - autorun, - ) - .await; + // move |stop| { + // ingest::run_actor(sync.clone(), ingest_notify, active, active_notifier, stop) + // } + // }, + // autorun, + // ) + // .await; state } diff --git a/core/src/cloud/sync/receive.rs b/core/src/cloud/sync/receive.rs index a0ec93abf..566db53d7 100644 --- a/core/src/cloud/sync/receive.rs +++ b/core/src/cloud/sync/receive.rs @@ -44,198 +44,198 @@ pub async fn run_actor( active_notify: Arc, stop: Stopper, ) { - enum Race { - Continue, - Stop, - } + // enum Race { + // Continue, + // Stop, + // } - loop { - active.store(true, Ordering::Relaxed); - active_notify.notify_waiters(); + // loop { + // active.store(true, Ordering::Relaxed); + // active_notify.notify_waiters(); - loop { - // We need to know the 
latest operations we should be retrieving - let mut cloud_timestamps = { - let timestamps = sync.timestamps.read().await; + // loop { + // // We need to know the latest operations we should be retrieving + // let mut cloud_timestamps = { + // let timestamps = sync.timestamps.read().await; - // looks up the most recent operation we've received (not ingested!) for each instance - let db_timestamps = err_break!( - db._batch( - timestamps - .keys() - .map(|id| { - db.cloud_crdt_operation() - .find_first(vec![cloud_crdt_operation::instance::is(vec![ - instance::pub_id::equals(uuid_to_bytes(id)), - ])]) - .order_by(cloud_crdt_operation::timestamp::order( - SortOrder::Desc, - )) - }) - .collect::>() - ) - .await - ); + // // looks up the most recent operation we've received (not ingested!) for each instance + // let db_timestamps = err_break!( + // db._batch( + // timestamps + // .keys() + // .map(|id| { + // db.cloud_crdt_operation() + // .find_first(vec![cloud_crdt_operation::instance::is(vec![ + // instance::pub_id::equals(uuid_to_bytes(id)), + // ])]) + // .order_by(cloud_crdt_operation::timestamp::order( + // SortOrder::Desc, + // )) + // }) + // .collect::>() + // ) + // .await + // ); - // compares the latest ingested timestamp with the latest received timestamp - // and picks the highest one for each instance - let mut cloud_timestamps = db_timestamps - .into_iter() - .zip(timestamps.iter()) - .map(|(d, (id, sync_timestamp))| { - let cloud_timestamp = d.map(|d| d.timestamp).unwrap_or_default() as u64; + // // compares the latest ingested timestamp with the latest received timestamp + // // and picks the highest one for each instance + // let mut cloud_timestamps = db_timestamps + // .into_iter() + // .zip(timestamps.iter()) + // .map(|(d, (id, sync_timestamp))| { + // let cloud_timestamp = d.map(|d| d.timestamp).unwrap_or_default() as u64; - debug!( - instance_id = %id, - sync_timestamp = sync_timestamp.as_u64(), - %cloud_timestamp, - "Comparing sync timestamps", 
- ); + // debug!( + // instance_id = %id, + // sync_timestamp = sync_timestamp.as_u64(), + // %cloud_timestamp, + // "Comparing sync timestamps", + // ); - let max_timestamp = Ord::max(cloud_timestamp, sync_timestamp.as_u64()); + // let max_timestamp = Ord::max(cloud_timestamp, sync_timestamp.as_u64()); - (*id, max_timestamp) - }) - .collect::>(); + // (*id, max_timestamp) + // }) + // .collect::>(); - cloud_timestamps.remove(&instance_uuid); + // cloud_timestamps.remove(&instance_uuid); - cloud_timestamps - }; + // cloud_timestamps + // }; - let instance_timestamps: Vec = sync - .timestamps - .read() - .await - .keys() - .map( - |uuid| sd_cloud_api::library::message_collections::get::InstanceTimestamp { - instance_uuid: *uuid, - from_time: cloud_timestamps - .get(uuid) - .copied() - .unwrap_or_default() - .to_string(), - }, - ) - .collect(); + // let instance_timestamps: Vec = sync + // .timestamps + // .read() + // .await + // .keys() + // .map( + // |uuid| sd_cloud_api::library::message_collections::get::InstanceTimestamp { + // instance_uuid: *uuid, + // from_time: cloud_timestamps + // .get(uuid) + // .copied() + // .unwrap_or_default() + // .to_string(), + // }, + // ) + // .collect(); - let collections = err_break!( - sd_cloud_api::library::message_collections::get( - node.get_request_config().await, - library_id, - instance_uuid, - instance_timestamps, - ) - .await - ); + // let collections = err_break!( + // sd_cloud_api::library::message_collections::get( + // node.get_request_config().await, + // library_id, + // instance_uuid, + // instance_timestamps, + // ) + // .await + // ); - info!( - collections_count = collections.len(), - "Received collections;", - ); + // info!( + // collections_count = collections.len(), + // "Received collections;", + // ); - if collections.is_empty() { - break; - } + // if collections.is_empty() { + // break; + // } - let mut cloud_library_data: Option> = None; + // let mut cloud_library_data: Option> = None; - for 
collection in collections { - if let Entry::Vacant(e) = cloud_timestamps.entry(collection.instance_uuid) { - let fetched_library = match &cloud_library_data { - None => { - let Some(fetched_library) = err_break!( - sd_cloud_api::library::get( - node.get_request_config().await, - library_id - ) - .await - ) else { - break; - }; + // for collection in collections { + // if let Entry::Vacant(e) = cloud_timestamps.entry(collection.instance_uuid) { + // let fetched_library = match &cloud_library_data { + // None => { + // let Some(fetched_library) = err_break!( + // sd_cloud_api::library::get( + // node.get_request_config().await, + // library_id + // ) + // .await + // ) else { + // break; + // }; - cloud_library_data - .insert(Some(fetched_library)) - .as_ref() - .expect("error inserting fetched library") - } - Some(None) => { - break; - } - Some(Some(fetched_library)) => fetched_library, - }; + // cloud_library_data + // .insert(Some(fetched_library)) + // .as_ref() + // .expect("error inserting fetched library") + // } + // Some(None) => { + // break; + // } + // Some(Some(fetched_library)) => fetched_library, + // }; - let Some(instance) = fetched_library - .instances - .iter() - .find(|i| i.uuid == collection.instance_uuid) - else { - break; - }; + // let Some(instance) = fetched_library + // .instances + // .iter() + // .find(|i| i.uuid == collection.instance_uuid) + // else { + // break; + // }; - err_break!( - upsert_instance( - library_id, - &db, - &sync, - &libraries, - &collection.instance_uuid, - instance.identity, - &instance.node_id, - RemoteIdentity::from_str(&instance.node_remote_identity) - .expect("malformed remote identity in the DB"), - node.p2p.peer_metadata(), - ) - .await - ); + // err_break!( + // upsert_instance( + // library_id, + // &db, + // &sync, + // &libraries, + // &collection.instance_uuid, + // instance.identity, + // &instance.node_id, + // RemoteIdentity::from_str(&instance.node_remote_identity) + // .expect("malformed remote 
identity in the DB"), + // node.p2p.peer_metadata(), + // ) + // .await + // ); - e.insert(0); - } + // e.insert(0); + // } - let compressed_operations: CompressedCRDTOperations = err_break!( - rmp_serde::from_slice(err_break!(&BASE64_STANDARD.decode(collection.contents))) - ); + // let compressed_operations: CompressedCRDTOperations = err_break!( + // rmp_serde::from_slice(err_break!(&BASE64_STANDARD.decode(collection.contents))) + // ); - let operations = compressed_operations.into_ops(); + // let operations = compressed_operations.into_ops(); - debug!( - instance_id = %collection.instance_uuid, - start = ?operations.first().map(|operation| operation.timestamp.as_u64()), - end = ?operations.last().map(|operation| operation.timestamp.as_u64()), - "Processing collection", - ); + // debug!( + // instance_id = %collection.instance_uuid, + // start = ?operations.first().map(|operation| operation.timestamp.as_u64()), + // end = ?operations.last().map(|operation| operation.timestamp.as_u64()), + // "Processing collection", + // ); - err_break!(write_cloud_ops_to_db(operations, &db).await); + // err_break!(write_cloud_ops_to_db(operations, &db).await); - let collection_timestamp: u64 = - collection.end_time.parse().expect("unable to parse time"); + // let collection_timestamp: u64 = + // collection.end_time.parse().expect("unable to parse time"); - let timestamp = cloud_timestamps - .entry(collection.instance_uuid) - .or_insert(collection_timestamp); + // let timestamp = cloud_timestamps + // .entry(collection.instance_uuid) + // .or_insert(collection_timestamp); - if *timestamp < collection_timestamp { - *timestamp = collection_timestamp; - } - } + // if *timestamp < collection_timestamp { + // *timestamp = collection_timestamp; + // } + // } - ingest_notify.notify_waiters(); - } + // ingest_notify.notify_waiters(); + // } - active.store(false, Ordering::Relaxed); - active_notify.notify_waiters(); + // active.store(false, Ordering::Relaxed); + // 
active_notify.notify_waiters(); - if let Race::Stop = ( - sleep(Duration::from_secs(60)).map(|()| Race::Continue), - stop.into_future().map(|()| Race::Stop), - ) - .race() - .await - { - break; - } - } + // if let Race::Stop = ( + // sleep(Duration::from_secs(60)).map(|()| Race::Continue), + // stop.into_future().map(|()| Race::Stop), + // ) + // .race() + // .await + // { + // break; + // } + // } } async fn write_cloud_ops_to_db( diff --git a/core/src/cloud/sync/send.rs b/core/src/cloud/sync/send.rs index 11bf68f28..aa2944f22 100644 --- a/core/src/cloud/sync/send.rs +++ b/core/src/cloud/sync/send.rs @@ -35,113 +35,113 @@ pub async fn run_actor( state_notify: Arc, stop: Stopper, ) { - loop { - state.store(true, Ordering::Relaxed); - state_notify.notify_waiters(); + // loop { + // state.store(true, Ordering::Relaxed); + // state_notify.notify_waiters(); - loop { - // all available instances will have a default timestamp from create_instance - let instances = sync - .timestamps - .read() - .await - .keys() - .cloned() - .collect::>(); + // loop { + // // all available instances will have a default timestamp from create_instance + // let instances = sync + // .timestamps + // .read() + // .await + // .keys() + // .cloned() + // .collect::>(); - // obtains a lock on the timestamp collections for the instances we have - let req_adds = err_break!( - sd_cloud_api::library::message_collections::request_add( - cloud_api_config_provider.get_request_config().await, - library_id, - instances, - ) - .await - ); + // // obtains a lock on the timestamp collections for the instances we have + // let req_adds = err_break!( + // sd_cloud_api::library::message_collections::request_add( + // cloud_api_config_provider.get_request_config().await, + // library_id, + // instances, + // ) + // .await + // ); - let mut instances = vec![]; + // let mut instances = vec![]; - use sd_cloud_api::library::message_collections::do_add; + // use sd_cloud_api::library::message_collections::do_add; - 
debug!( - total_operations = req_adds.len(), - "Preparing to send instance's operations to cloud;" - ); + // debug!( + // total_operations = req_adds.len(), + // "Preparing to send instance's operations to cloud;" + // ); - // gets new operations for each instance to send to cloud - for req_add in req_adds { - let ops = err_break!( - sync.get_instance_ops( - 1000, - req_add.instance_uuid, - NTP64( - req_add - .from_time - .unwrap_or_else(|| "0".to_string()) - .parse() - .expect("couldn't parse ntp64 value"), - ) - ) - .await - ); + // // gets new operations for each instance to send to cloud + // for req_add in req_adds { + // let ops = err_break!( + // sync.get_instance_ops( + // 1000, + // req_add.instance_uuid, + // NTP64( + // req_add + // .from_time + // .unwrap_or_else(|| "0".to_string()) + // .parse() + // .expect("couldn't parse ntp64 value"), + // ) + // ) + // .await + // ); - if ops.is_empty() { - continue; - } + // if ops.is_empty() { + // continue; + // } - let start_time = ops[0].timestamp.0.to_string(); - let end_time = ops[ops.len() - 1].timestamp.0.to_string(); + // let start_time = ops[0].timestamp.0.to_string(); + // let end_time = ops[ops.len() - 1].timestamp.0.to_string(); - let ops_len = ops.len(); + // let ops_len = ops.len(); - use base64::prelude::*; + // use base64::prelude::*; - debug!(instance_id = %req_add.instance_uuid, %start_time, %end_time); + // debug!(instance_id = %req_add.instance_uuid, %start_time, %end_time); - instances.push(do_add::Input { - uuid: req_add.instance_uuid, - key: req_add.key, - start_time, - end_time, - contents: BASE64_STANDARD.encode( - rmp_serde::to_vec_named(&CompressedCRDTOperations::new(ops)) - .expect("CompressedCRDTOperation should serialize!"), - ), - ops_count: ops_len, - }) - } + // instances.push(do_add::Input { + // uuid: req_add.instance_uuid, + // key: req_add.key, + // start_time, + // end_time, + // contents: BASE64_STANDARD.encode( + // 
rmp_serde::to_vec_named(&CompressedCRDTOperations::new(ops)) + // .expect("CompressedCRDTOperation should serialize!"), + // ), + // ops_count: ops_len, + // }) + // } - if instances.is_empty() { - break; - } + // if instances.is_empty() { + // break; + // } - // uses lock we acquired earlier to send the operations to the cloud - err_break!( - do_add( - cloud_api_config_provider.get_request_config().await, - library_id, - instances, - ) - .await - ); - } + // // uses lock we acquired earlier to send the operations to the cloud + // err_break!( + // do_add( + // cloud_api_config_provider.get_request_config().await, + // library_id, + // instances, + // ) + // .await + // ); + // } - state.store(false, Ordering::Relaxed); - state_notify.notify_waiters(); + // state.store(false, Ordering::Relaxed); + // state_notify.notify_waiters(); - if let RaceNotifiedOrStopped::Stopped = ( - // recreate subscription each time so that existing messages are dropped - wait_notification(sync.subscribe()), - stop.into_future().map(|()| RaceNotifiedOrStopped::Stopped), - ) - .race() - .await - { - break; - } + // if let RaceNotifiedOrStopped::Stopped = ( + // // recreate subscription each time so that existing messages are dropped + // wait_notification(sync.subscribe()), + // stop.into_future().map(|()| RaceNotifiedOrStopped::Stopped), + // ) + // .race() + // .await + // { + // break; + // } - sleep(Duration::from_millis(1000)).await; - } + // sleep(Duration::from_millis(1000)).await; + // } } async fn wait_notification(mut rx: broadcast::Receiver) -> RaceNotifiedOrStopped { diff --git a/core/src/crypto/error.rs b/core/src/crypto/error.rs deleted file mode 100644 index 787840918..000000000 --- a/core/src/crypto/error.rs +++ /dev/null @@ -1,62 +0,0 @@ -use std::num::TryFromIntError; - -use thiserror::Error; - -pub type Result = std::result::Result; - -impl From for rspc::Error { - fn from(value: KeyManagerError) -> Self { - Self::new(rspc::ErrorCode::InternalServerError, 
value.to_string()) - } -} - -#[derive(Debug, Error)] -pub enum KeyManagerError { - // #[error("crypto error: {0}")] - // Crypto(#[from] sd_crypto::Error), - #[error("the key specified was not found")] - KeyNotFound, - #[error("the key manager is locked")] - Locked, - #[error("the key is already mounted")] - AlreadyMounted, - #[error("key not mounted")] - NotMounted, - #[error("the key is already queued")] - AlreadyQueued, - - #[error("there was an error during a conversion")] - Conversion, - #[error("there was an error converting ints")] - IntConversion(#[from] TryFromIntError), - - #[error("the test vector failed (password is likely incorrect)")] - IncorrectPassword, - #[error("there was an issue while unlocking the key manager")] - Unlock, - - #[error("an unsupported operation was attempted")] - Unsupported, - - #[error("the word provided is too short")] - WordTooShort, - - #[error("the specified file already exists and would be overwritten")] - FileAlreadyExists, - #[error("the specified file doesn't exist")] - FileDoesntExist, - #[error("the specified file is too large")] - FileTooLarge, - - #[error("this action would delete the last root key (and make the key manager unusable)")] - LastRootKey, - - #[error("database error: {0}")] - Database(#[from] prisma_client_rust::QueryError), - - #[error("async IO error: {0}")] - IoAsync(#[from] tokio::io::Error), - - #[error("error while converting a UUID: {0}")] - Uuid(#[from] uuid::Error), -} diff --git a/core/src/crypto/keymanager.rs b/core/src/crypto/keymanager.rs deleted file mode 100644 index 17cf1ae97..000000000 --- a/core/src/crypto/keymanager.rs +++ /dev/null @@ -1,939 +0,0 @@ -use std::path::PathBuf; -use std::sync::Arc; - -use bincode::{Decode, Encode}; -use dashmap::DashSet; -use sd_crypto::crypto::{Decryptor, Encryptor}; -use sd_crypto::hashing::Hasher; -use sd_crypto::primitives::{BLOCK_LEN, SALT_LEN}; -use sd_crypto::types::{ - Aad, Algorithm, EncryptedKey, HashingAlgorithm, Key, Nonce, Salt, SecretKey, 
-}; -// use sd_crypto::utils::generate_passphrase; -use sd_crypto::{encoding, Protected}; -use serde::{Deserialize, Serialize}; -use specta::Type; -use tokio::fs::{self, File}; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::sync::Mutex; -use uuid::Uuid; - -use super::error::KeyManagerError; -use super::{Result, KEY_MOUNTING_CONTEXT, TEST_VECTOR_CONTEXT}; -use crate::crypto::ENCRYPTED_WORD_CONTEXT; -use crate::prisma::{key, mounted_key, PrismaClient}; - -pub struct KeyManager { - key: Mutex>, // the root key - queue: DashSet, - db: Arc, -} - -#[derive(Clone, bincode::Encode, bincode::Decode)] -pub struct MountedKey { - version: KeyVersion, - #[bincode(with_serde)] - uuid: Uuid, - algorithm: Algorithm, - salt: Salt, - key: EncryptedKey, -} - -impl MountedKey { - pub fn encrypt( - root_key: &Key, - key: &Key, - algorithm: Algorithm, - word: &Protected>, - ) -> Result { - let salt = Salt::generate(); - let nonce = Nonce::generate(algorithm); - - // TODO(brxken128): maybe give these separate contexts, or even remove the second derivation - let ek = Encryptor::encrypt_key( - &Hasher::derive_key(root_key, salt, KEY_MOUNTING_CONTEXT), - &nonce, - algorithm, - &Hasher::derive_key(key, word_to_salt(word)?, KEY_MOUNTING_CONTEXT), - Aad::Null, - )?; - - Ok(Self { - version: KeyVersion::V1, - uuid: Uuid::new_v4(), - algorithm, - salt, - key: ek, - }) - } - - pub fn decrypt(&self, root_key: &Key) -> Result { - Ok(Decryptor::decrypt_key( - &Hasher::derive_key(root_key, self.salt, KEY_MOUNTING_CONTEXT), - self.algorithm, - &self.key, - Aad::Null, - )?) 
- } -} - -impl TryFrom<&MountedKey> for mounted_key::CreateUnchecked { - type Error = KeyManagerError; - - fn try_from(value: &MountedKey) -> std::result::Result { - #[allow(clippy::as_conversions)] - let s = Self { - version: value.version as i32, - uuid: Uuid::new_v4().as_bytes().to_vec(), // random uuid to prevent conflicts - algorithm: encoding::encode(&value.algorithm)?, - key: encoding::encode(&value.key)?, - salt: encoding::encode(&value.salt)?, - _params: vec![], - }; - - Ok(s) - } -} - -impl TryFrom for mounted_key::CreateUnchecked { - type Error = KeyManagerError; - - fn try_from(value: MountedKey) -> std::result::Result { - (&value).try_into() - } -} - -impl TryFrom for MountedKey { - type Error = KeyManagerError; - - fn try_from(value: mounted_key::Data) -> std::result::Result { - let mk = Self { - version: KeyVersion::try_from(value.version)?, - uuid: Uuid::from_slice(&value.uuid)?, - algorithm: encoding::decode(&value.algorithm)?, - key: encoding::decode(&value.key)?, - salt: encoding::decode(&value.salt)?, - }; - - Ok(mk) - } -} - -#[derive(Clone, Encode, Decode)] -struct OnDiskBackup { - root_keys: Vec, - user_keys: Vec, -} - -#[derive(Clone, Encode, Decode)] -pub struct TestVector(Salt, EncryptedKey); - -#[derive(Clone, Copy, Encode, Decode, PartialEq, Eq, Hash)] -#[repr(i32)] -pub enum KeyType { - Root = 0, - User = 1, -} - -impl TryFrom for KeyType { - type Error = KeyManagerError; - - fn try_from(value: i32) -> std::result::Result { - match value { - 0 => Ok(Self::Root), - 1 => Ok(Self::User), - _ => Err(KeyManagerError::Conversion), - } - } -} - -#[derive(Clone, Copy, Encode, Decode, Serialize, Deserialize, Type)] -#[repr(i32)] -pub enum KeyVersion { - V1 = 0, -} - -impl TryFrom for KeyVersion { - type Error = KeyManagerError; - - fn try_from(value: i32) -> std::result::Result { - match value { - 0 => Ok(Self::V1), - _ => Err(KeyManagerError::Conversion), - } - } -} - -#[derive(Clone, Encode, Decode)] -pub struct EncryptedWord(Salt, Nonce, 
Vec); - -impl EncryptedWord { - pub fn decrypt(&self, root_key: &Key, algorithm: Algorithm) -> Result>> { - Decryptor::decrypt_tiny( - &Hasher::derive_key(root_key, self.0, ENCRYPTED_WORD_CONTEXT), - &self.1, - algorithm, - &self.2, - Aad::Null, - ) - .map_err(KeyManagerError::Crypto) - } - - pub fn encrypt( - root_key: &Key, - word: &Protected>, - algorithm: Algorithm, - ) -> Result { - let salt = Salt::generate(); - let nonce = Nonce::generate(algorithm); - let bytes = Encryptor::encrypt_tiny( - &Hasher::derive_key(root_key, salt, ENCRYPTED_WORD_CONTEXT), - &nonce, - algorithm, - word.expose(), - Aad::Null, - )?; - - Ok(Self(salt, nonce, bytes)) - } -} - -key::select!(key_info { - version - uuid - name - algorithm - hashing_algorithm - mounted_key: select { id } -}); - -#[derive(Serialize, Deserialize, Type, Clone)] -pub struct DisplayKey { - pub version: KeyVersion, - pub uuid: Uuid, - pub name: Option, - pub algorithm: Algorithm, - pub hashing_algorithm: HashingAlgorithm, - pub mounted: bool, -} - -impl TryFrom for DisplayKey { - type Error = KeyManagerError; - - fn try_from(value: key_info::Data) -> std::result::Result { - let dk = Self { - version: KeyVersion::try_from(value.version)?, - uuid: Uuid::from_slice(&value.uuid)?, - name: value.name, - algorithm: encoding::decode(&value.algorithm)?, - hashing_algorithm: encoding::decode(&value.hashing_algorithm)?, - mounted: value.mounted_key.is_some(), - }; - - Ok(dk) - } -} - -#[derive(Clone, Encode, Decode)] -pub struct UserKey { - pub version: KeyVersion, - #[bincode(with_serde)] - pub uuid: Uuid, - pub algorithm: Algorithm, - pub hashing_algorithm: HashingAlgorithm, - pub word: EncryptedWord, // word (once hashed with b3) acts like a salt - pub tv: TestVector, -} - -fn word_to_salt(word: &Protected>) -> Result { - Ok(Salt::try_from( - Hasher::blake3(word.expose()).expose()[..SALT_LEN].to_vec(), - )?) 
-} - -impl TryFrom<&UserKey> for key::CreateUnchecked { - type Error = KeyManagerError; - - fn try_from(value: &UserKey) -> std::result::Result { - #[allow(clippy::as_conversions)] - let s = Self { - uuid: value.uuid.as_bytes().to_vec(), - version: value.version as i32, - key_type: KeyType::User as i32, - algorithm: encoding::encode(&value.algorithm)?, - hashing_algorithm: encoding::encode(&value.hashing_algorithm)?, - key: encoding::encode(&value.tv)?, - salt: encoding::encode(&value.word)?, - _params: vec![], - }; - - Ok(s) - } -} - -impl TryFrom for key::CreateUnchecked { - type Error = KeyManagerError; - - fn try_from(value: UserKey) -> std::result::Result { - (&value).try_into() - } -} - -impl TryFrom for UserKey { - type Error = KeyManagerError; - - fn try_from(value: key::Data) -> std::result::Result { - if KeyType::try_from(value.key_type)? != KeyType::User { - return Err(KeyManagerError::Conversion); - } - - let uk = Self { - version: KeyVersion::try_from(value.version)?, - uuid: Uuid::from_slice(&value.uuid)?, - algorithm: encoding::decode(&value.algorithm)?, - hashing_algorithm: encoding::decode(&value.hashing_algorithm)?, - word: encoding::decode(&value.salt)?, - tv: encoding::decode(&value.key)?, - }; - - Ok(uk) - } -} - -impl TestVector { - pub fn validate(&self, algorithm: Algorithm, hashed_password: &Key) -> Result<()> { - Decryptor::decrypt_key( - &Hasher::derive_key(hashed_password, self.0, TEST_VECTOR_CONTEXT), - algorithm, - &self.1, - Aad::Null, - ) - .map_or(Err(KeyManagerError::IncorrectPassword), |_| Ok(())) - } -} - -impl KeyManager { - pub fn new(db: Arc) -> Self { - Self { - key: Mutex::new(None), - queue: DashSet::new(), - db, - } - } - - pub async fn is_unlocked(&self) -> bool { - self.key.lock().await.is_some() - } - - async fn get_root_key(&self) -> Result { - self.key.lock().await.clone().ok_or(KeyManagerError::Locked) - } - - async fn ensure_unlocked(&self) -> Result<()> { - self.key - .lock() - .await - .as_ref() - 
.map_or(Err(KeyManagerError::Locked), |_| Ok(())) - } - - fn ensure_not_queued(&self, uuid: Uuid) -> Result<()> { - (!self.queue.contains(&uuid)) - .then_some(()) - .ok_or(KeyManagerError::AlreadyQueued) - } - - pub async fn is_unlocking(&self) -> Result { - #[allow(clippy::as_conversions)] - Ok(self - .db - .key() - .find_many(vec![key::key_type::equals(KeyType::Root as i32)]) - .exec() - .await? - .into_iter() - .flat_map(|x| Uuid::from_slice(&x.uuid).map_err(KeyManagerError::Uuid)) - .any(|x| self.queue.contains(&x))) - } - - pub async fn unlock( - &self, - password: Protected, - secret_key: Option>, - ) -> Result<()> { - let password: Protected> = password.into_inner().into_bytes().into(); - - let secret_key: SecretKey = if let Some(secret_key) = secret_key { - secret_key.try_into()? - } else { - // TODO(brxken128): source from keyring here, or return error if that fails - SecretKey::generate() - }; - - #[allow(clippy::as_conversions)] - let root_keys = self - .db - .key() - .find_many(vec![key::key_type::equals(KeyType::Root as i32)]) - .exec() - .await?; - - let root_keys = root_keys - .into_iter() - .map(RootKey::try_from) - .collect::>>()?; - - let rk = root_keys - .into_iter() - .find_map(|k| { - self.ensure_not_queued(k.uuid).ok()?; - - self.queue.insert(k.uuid); - - let res = - Hasher::hash_password(k.hashing_algorithm, &password, k.salt, &secret_key); - - self.queue.remove(&k.uuid); - - let pw = res.ok()?; - - Decryptor::decrypt_key(&pw, k.algorithm, &k.key, Aad::Null).ok() - }) - .ok_or(KeyManagerError::Unlock)?; - - *self.key.lock().await = Some(rk); - Ok(()) - } - - pub async fn initial_setup( - &self, - algorithm: Algorithm, - hashing_algorithm: HashingAlgorithm, - password: Protected, - ) -> Result> { - let secret_key = SecretKey::generate(); - let salt = Salt::generate(); - let nonce = Nonce::generate(algorithm); - let password = password.into_inner().into_bytes().into(); - - let hashed_password = - Hasher::hash_password(hashing_algorithm, 
&password, salt, &secret_key)?; - - let root_key = Key::generate(); - let root_key_e = - Encryptor::encrypt_key(&hashed_password, &nonce, algorithm, &root_key, Aad::Null)?; - - let rk: key::CreateUnchecked = RootKey { - version: KeyVersion::V1, - uuid: Uuid::new_v4(), - algorithm, - hashing_algorithm, - salt, - key: root_key_e, - } - .try_into()?; - - rk.to_query(&self.db).exec().await?; - - *self.key.lock().await = Some(root_key); - - Ok(secret_key.to_string().into()) - } - - // This will become `add_root_key` at some point, and we'll have dedicated management for them - pub async fn add_root_key( - &self, - algorithm: Algorithm, - hashing_algorithm: HashingAlgorithm, - password: Protected, - ) -> Result> { - self.ensure_unlocked().await?; - - let secret_key = SecretKey::generate(); - let salt = Salt::generate(); - let nonce = Nonce::generate(algorithm); - let password = password.into_inner().into_bytes().into(); - - let hashed_password = - Hasher::hash_password(hashing_algorithm, &password, salt, &secret_key)?; - - let root_key = self.get_root_key().await?; - let root_key_e = - Encryptor::encrypt_key(&hashed_password, &nonce, algorithm, &root_key, Aad::Null)?; - - let rk: key::CreateUnchecked = RootKey { - version: KeyVersion::V1, - uuid: Uuid::new_v4(), - algorithm, - hashing_algorithm, - salt, - key: root_key_e, - } - .try_into()?; - - rk.to_query(&self.db).exec().await?; - - *self.key.lock().await = Some(root_key); - - Ok(secret_key.to_string().into()) - } - - pub async fn delete(&self, uuid: Uuid) -> Result<()> { - let key = self - .db - .key() - .find_unique(key::uuid::equals(uuid.as_bytes().to_vec())) - .exec() - .await? - .ok_or(KeyManagerError::KeyNotFound)?; - - #[allow(clippy::as_conversions)] - if KeyType::try_from(key.key_type)? == KeyType::Root - && self - .db - .key() - .find_many(vec![key::key_type::equals(KeyType::Root as i32)]) - .select(key::select!({ id })) - .exec() - .await? 
- .len() == 1 - { - return Err(KeyManagerError::LastRootKey); - } - - self.db - .key() - .delete(key::uuid::equals(uuid.as_bytes().to_vec())) - .exec() - .await - .map_err(|_| KeyManagerError::KeyNotFound)?; - - Ok(()) - } - - pub async fn reset(&self) -> Result<()> { - // this is for the sync system, it'll be used when we have sync delete - // let _key_uuids = self - // .db - // .key() - // .find_many(vec![]) - // .select(key::select!({ uuid })) - // .exec() - // .await? - // .into_iter() - // .map(|x| x.uuid) - // .collect::>(); - - self.db - ._batch(( - self.db.key().delete_many(vec![]), - self.db.mounted_key().delete_many(vec![]), - )) - .await?; - - *self.key.lock().await = None; - - Ok(()) - } - - pub async fn update_key_name(&self, uuid: Uuid, name: String) -> Result<()> { - self.db - .key() - .update( - key::uuid::equals(uuid.as_bytes().to_vec()), - vec![key::name::set(Some(name))], - ) - .exec() - .await - .map_or(Err(KeyManagerError::KeyNotFound), |_| Ok(())) - } - - pub async fn insert_new( - &self, - algorithm: Algorithm, - hashing_algorithm: HashingAlgorithm, - password: Protected, - word: Option>, - ) -> Result { - self.ensure_unlocked().await?; - - word.as_ref().map(|w| { - if w.expose().len() < 3 { - Err(KeyManagerError::WordTooShort) - } else { - Ok(()) - } - }); - - // let word: Protected> = word - // .map_or( - // // generate_passphrase(1, '_').into_inner(), - // Protected::into_inner, - // ) - // .into_bytes() - // .into(); - - // TODO(brxken128): remove this and replace with the above once mnemonic/word generation has been optimised - let word: Protected> = Protected::new(b"word".to_vec()); - - let uuid = Uuid::new_v4(); - let tv_key = Key::generate(); - let tv_nonce = Nonce::generate(algorithm); - let tv_salt = Salt::generate(); - - let hashed_password = Hasher::hash_password( - hashing_algorithm, - &password.into_inner().into_bytes().into(), - word_to_salt(&word)?, - &SecretKey::Null, - )?; - - let tv_key = Encryptor::encrypt_key( - 
&Hasher::derive_key(&hashed_password, tv_salt, TEST_VECTOR_CONTEXT), - &tv_nonce, - algorithm, - &tv_key, - Aad::Null, - )?; - - let ew = EncryptedWord::encrypt(&self.get_root_key().await?, &word, algorithm)?; - - let key: key::CreateUnchecked = UserKey { - version: KeyVersion::V1, - uuid, - algorithm, - hashing_algorithm, - tv: TestVector(tv_salt, tv_key), - word: ew, - } - .try_into()?; - - key.to_query(&self.db).exec().await?; - - let mk = MountedKey::encrypt( - &self.get_root_key().await?, - &hashed_password, - algorithm, - &word, - )?; - - let mkc: mounted_key::CreateUnchecked = mk.try_into()?; - let mk_uuid = mkc.uuid.clone(); - - mkc.to_query(&self.db).exec().await?; - - self.db - .mounted_key() - .update( - mounted_key::uuid::equals(mk_uuid), - vec![mounted_key::SetParam::ConnectAssociatedKey( - key::uuid::equals(uuid.as_bytes().to_vec()), - )], - ) - .exec() - .await?; - - Ok(uuid) - } - - pub async fn list(&self, key_type: KeyType) -> Result> { - self.ensure_unlocked().await?; - - #[allow(clippy::as_conversions)] - self.db - .key() - .find_many(vec![key::key_type::equals(key_type as i32)]) - .select(key_info::select()) - .exec() - .await? - .into_iter() - .map(DisplayKey::try_from) - .collect() - } - - pub async fn mount(&self, uuid: Uuid, password: Protected) -> Result<()> { - self.ensure_unlocked().await?; - - self.db - .key() - .find_unique(key::uuid::equals(uuid.as_bytes().to_vec())) - .select(key::select!({ mounted_key })) - .exec() - .await? - .ok_or(KeyManagerError::KeyNotFound)? - .mounted_key - .map_or(Ok(()), |_| Err(KeyManagerError::AlreadyMounted))?; - - let key = self - .db - .key() - .find_unique(key::uuid::equals(uuid.as_bytes().to_vec())) - .exec() - .await? 
- .ok_or(KeyManagerError::KeyNotFound)?; - - let key = UserKey::try_from(key)?; - - let word = key - .word - .decrypt(&self.get_root_key().await?, key.algorithm)?; - - let hashed_password = Hasher::hash_password( - key.hashing_algorithm, - &password.into_inner().into_bytes().into(), - word_to_salt(&word)?, - &SecretKey::Null, - )?; - - key.tv.validate(key.algorithm, &hashed_password)?; - - let mk = MountedKey::encrypt( - &self.get_root_key().await?, - &hashed_password, - key.algorithm, - &word, - )?; - - let mkc: mounted_key::CreateUnchecked = mk.try_into()?; - let mk_uuid = mkc.uuid.clone(); - - mkc.to_query(&self.db).exec().await?; - - self.db - .mounted_key() - .update( - mounted_key::uuid::equals(mk_uuid), - vec![mounted_key::SetParam::ConnectAssociatedKey( - key::uuid::equals(uuid.as_bytes().to_vec()), - )], - ) - .exec() - .await?; - - Ok(()) - } - - pub async fn unmount(&self, uuid: Uuid) -> Result<()> { - if self - .db - .mounted_key() - .delete_many(vec![mounted_key::associated_key::is(vec![ - key::uuid::equals(uuid.as_bytes().to_vec()), - ])]) - .exec() - .await? == 1 - { - Ok(()) - } else { - Err(KeyManagerError::KeyNotFound) - } - } - - pub async fn unmount_all(&self) -> Result { - Ok(self - .db - .mounted_key() - .delete_many(vec![]) - .exec() - .await? - .try_into()?) - } - - pub async fn lock(&self) -> Result<()> { - self.ensure_unlocked().await?; - *self.key.lock().await = None; - - Ok(()) - } - - pub async fn get_key(&self, uuid: Uuid) -> Result { - self.ensure_unlocked().await?; - - let key = self - .db - .key() - .find_unique(key::uuid::equals(uuid.as_bytes().to_vec())) - .select(key::select!({ mounted_key })) - .exec() - .await? - .ok_or(KeyManagerError::KeyNotFound)? - .mounted_key - .map_or(Err(KeyManagerError::NotMounted), MountedKey::try_from)?; - - key.decrypt(&self.get_root_key().await?) 
- } - - pub async fn enumerate_hashed_keys(&self) -> Result> { - self.ensure_unlocked().await?; - - let rk = self.get_root_key().await?; - - self.db - .mounted_key() - .find_many(vec![]) - .exec() - .await? - .into_iter() - .flat_map(MountedKey::try_from) - .map(|x| x.decrypt(&rk)) - .collect() - } - - pub async fn backup_to_file(&self, path: PathBuf) -> Result { - if fs::metadata(&path).await.is_ok() { - return Err(KeyManagerError::FileAlreadyExists); - } - - #[allow(clippy::as_conversions)] - let user_keys = self - .db - .key() - .find_many(vec![key::key_type::equals(KeyType::User as i32)]) - .exec() - .await? - .into_iter() - .map(UserKey::try_from) - .collect::>>()?; - - #[allow(clippy::as_conversions)] - let root_keys = self - .db - .key() - .find_many(vec![key::key_type::equals(KeyType::Root as i32)]) - .exec() - .await? - .into_iter() - .map(RootKey::try_from) - .collect::>>()?; - - let count = user_keys.len() + root_keys.len(); - - let backup = OnDiskBackup { - root_keys, - user_keys, - }; - - let mut file = File::create(&path).await?; - file.write_all(&encoding::encode(&backup)?).await?; - - Ok(count) - } - - pub async fn restore_from_file( - &self, - path: PathBuf, - password: Protected, - secret_key: Protected, - ) -> Result { - let file_len: usize = fs::metadata(&path).await.map_or( - Err(KeyManagerError::FileDoesntExist), - |x: std::fs::Metadata| x.len().try_into().map_err(KeyManagerError::IntConversion), - )?; - - if file_len > (BLOCK_LEN * 16) { - return Err(KeyManagerError::FileTooLarge); - } - - let mut bytes = vec![0u8; file_len]; - let mut file = File::open(&path).await?; - file.read_to_end(&mut bytes).await?; - - let backup: OnDiskBackup = encoding::decode(&bytes)?; - - let password: Protected> = password.into_inner().into_bytes().into(); - let secret_key = secret_key.try_into()?; - - let backup_rk = backup - .root_keys - .into_iter() - .find_map(|k| { - let pw = Hasher::hash_password(k.hashing_algorithm, &password, k.salt, &secret_key) - 
.ok()?; - - Decryptor::decrypt_key(&pw, k.algorithm, &k.key, Aad::Null).ok() - }) - .ok_or(KeyManagerError::IncorrectPassword)?; - - let rk = self.get_root_key().await?; - - let user_keys = backup - .user_keys - .into_iter() - .map(|mut key| { - let word = key.word.decrypt(&backup_rk, key.algorithm)?; - key.word = EncryptedWord::encrypt(&rk, &word, key.algorithm)?; - - key.try_into() - }) - .collect::>>()?; - - Ok(self - .db - .key() - .create_many(user_keys) - .skip_duplicates() - .exec() - .await? - .try_into()?) - } -} - -#[derive(Clone, Encode, Decode)] -pub struct RootKey { - pub version: KeyVersion, - #[bincode(with_serde)] - pub uuid: Uuid, - pub algorithm: Algorithm, - pub hashing_algorithm: HashingAlgorithm, - pub salt: Salt, - pub key: EncryptedKey, -} - -impl TryFrom for RootKey { - type Error = KeyManagerError; - - fn try_from(value: key::Data) -> std::result::Result { - if KeyType::try_from(value.key_type)? != KeyType::Root { - return Err(KeyManagerError::Conversion); - } - - let rk = Self { - version: KeyVersion::try_from(value.version)?, - uuid: Uuid::from_slice(&value.uuid)?, - algorithm: encoding::decode(&value.algorithm)?, - hashing_algorithm: encoding::decode(&value.hashing_algorithm)?, - key: encoding::decode(&value.key)?, - salt: encoding::decode(&value.salt)?, - }; - - Ok(rk) - } -} - -impl TryFrom<&RootKey> for key::CreateUnchecked { - type Error = KeyManagerError; - - fn try_from(value: &RootKey) -> std::result::Result { - #[allow(clippy::as_conversions)] - let s = Self { - uuid: value.uuid.as_bytes().to_vec(), - version: value.version as i32, - key_type: KeyType::Root as i32, - algorithm: encoding::encode(&value.algorithm)?, - hashing_algorithm: encoding::encode(&value.hashing_algorithm)?, - key: encoding::encode(&value.key)?, - salt: encoding::encode(&value.salt)?, - _params: vec![], - }; - - Ok(s) - } -} - -impl TryFrom for key::CreateUnchecked { - type Error = KeyManagerError; - - fn try_from(value: RootKey) -> std::result::Result { - 
(&value).try_into() - } -} diff --git a/core/src/crypto/mod.rs b/core/src/crypto/mod.rs deleted file mode 100644 index 7691eb0fc..000000000 --- a/core/src/crypto/mod.rs +++ /dev/null @@ -1,64 +0,0 @@ -#![warn( - clippy::all, - clippy::pedantic, - clippy::correctness, - clippy::perf, - clippy::style, - clippy::suspicious, - clippy::complexity, - clippy::nursery, - clippy::unwrap_used, - unused_qualifications, - clippy::expect_used, - trivial_casts, - trivial_numeric_casts, - unused_allocation, - clippy::as_conversions, - clippy::dbg_macro -)] -#![forbid(unsafe_code)] -#![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)] - -// use sd_crypto::types::{DerivationContext, MagicBytes}; - -// pub mod error; -// pub use error::{KeyManagerError, Result}; - -// pub mod keymanager; -// pub use keymanager::{DisplayKey, KeyManager, KeyType, KeyVersion, RootKey, UserKey}; - -/* -/// Used for OS keyrings to identify our items. -pub const KEYRING_APP_IDENTIFIER: &str = "Spacedrive"; - -/// Used for OS keyrings to identify our items. 
-pub const SECRET_KEY_IDENTIFIER: &str = "Secret key"; - -/// Defines the context string for BLAKE3-KDF in regards to root key derivation -pub const ROOT_KEY_CONTEXT: DerivationContext = - DerivationContext::new("spacedrive 2022-12-14 12:53:54 root key derivation"); - -/// Defines the context string for BLAKE3-KDF in regards to master password hash derivation -pub const MASTER_PASSWORD_CONTEXT: DerivationContext = - DerivationContext::new("spacedrive 2022-12-14 15:35:41 master password hash derivation"); - -/// Defines the context string for BLAKE3-KDF in regards to file key derivation (for file encryption) -pub const FILE_KEYSLOT_CONTEXT: DerivationContext = - DerivationContext::new("spacedrive 2022-12-14 12:54:12 file key derivation"); -*/ - -// /// Defines the context string for BLAKE3-KDF in regards to key derivation (for the key manager) -// pub const KEY_MOUNTING_CONTEXT: DerivationContext = -// DerivationContext::new("spacedrive 2023-05-24 11:43:07 key mounting derivation"); - -// /// Defines the context string for BLAKE3-KDF in regards to key derivation (for encrypted words) -// pub const ENCRYPTED_WORD_CONTEXT: DerivationContext = -// DerivationContext::new("spacedrive 2023-05-22 18:01:02 encrypted word derivation"); - -// /// Defines the context string for BLAKE3-KDF in regards to key derivation (for test vectors) -// pub const TEST_VECTOR_CONTEXT: DerivationContext = -// DerivationContext::new("spacedrive 2023-05-22 14:37:16 test vector derivation"); - -// /// Encrypted file magic bytes - "ballapp" and then a null byte. 
-// pub const FILE_MAGIC_BYTES: MagicBytes<8> = -// MagicBytes::new([0x62, 0x61, 0x6C, 0x6C, 0x61, 0x70, 0x70, 0x00]); diff --git a/core/src/lib.rs b/core/src/lib.rs index 72eeaf6f2..e11428628 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -9,12 +9,9 @@ use crate::{ use sd_core_cloud_services::CloudServices; use sd_core_heavy_lifting::{media_processor::ThumbnailKind, JobSystem}; use sd_core_prisma_helpers::CasId; - -#[cfg(feature = "ai")] -use sd_ai::old_image_labeler::{DownloadModelError, OldImageLabeler, YoloV8}; - use sd_task_system::TaskSystem; use sd_utils::error::FileIOError; + use volume::save_storage_statistics; use std::{ @@ -25,7 +22,6 @@ use std::{ use chrono::{DateTime, Utc}; use futures_concurrency::future::Join; -use reqwest::{RequestBuilder, Response}; use thiserror::Error; use tokio::{fs, io, sync::broadcast}; use tracing::{error, info, warn}; @@ -40,8 +36,6 @@ use tracing_subscriber::{ pub mod api; mod cloud; mod context; -#[cfg(feature = "crypto")] -pub(crate) mod crypto; pub mod custom_uri; pub mod library; pub(crate) mod location; From b14e4fce3a239fb8c06376dafbd38e068a527cec Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Thu, 15 Aug 2024 00:46:27 +0300 Subject: [PATCH 058/218] Deep linking wip Pop up shows but doesn't pull focus on the app and actually doesn't receive events --- Cargo.lock | Bin 299924 -> 302317 bytes apps/desktop/src-tauri/Cargo.toml | 1 + .../src-tauri/capabilities/default.json | 1 + apps/desktop/src-tauri/src/main.rs | 6 ++- apps/desktop/src-tauri/tauri.conf.json | 28 ++++++++++++-- apps/desktop/tsconfig.json | 3 +- core/src/node/config.rs | 2 +- .../Layout/Sidebar/DebugPopover.tsx | 2 +- packages/client/src/core.ts | 35 +++--------------- 9 files changed, 40 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 21c3952de913e874b3ba45e1c1854baafa466968..d8916307b76e14828d06234661b755beb44e6f2e 100644 GIT binary patch delta 1065 
zcmX|=OK6-`6vz4IPG-_Hl{A_eZAzI2)Pfng?}yNi;$xu=*7~3bF7D%89WpaXCl82= z(ndFdZuHaMji4wP3&lnS2Ny2vLeNcJxvP(rE5R;A7ec);t&0oa{Vx2^|NQ>vyt6(1 z`&ZLD?_FrNhjtK_`g0TOFkI;_4O+)rbNFGBc78>tJ6Zw8MAHb0J1nK7DoEzEh(QQ! zJvBy${NjR^E<)f+S!{uOh030Gf~W> zKMohqccKZJnMf_gF6)3GB6y*Rl|<1<870w3Z=I5!Sf(w{kK7W&jS$S>ba1_Bq;ua_ zX3CxVU1O`=!AASi8PqB-Zce8AUq{pBmz$@)7~6dF(Ry#a-RrKbmOnJ?3C=0bRR!US zYwRo|SSs%@w!xBoKx#uV)xa@u5xoeYoC;9}?};m?7MitG5Y+V~|T`M-X z#`|l%)o$M|r?YE??eHwaeDF3?Wtbc0Y#^B&)xpwesgPOTQYEb*PDbz=6C9`}fGI?k zaW-pv#?$qU;=c5gDw-p7vhw%kO1qm5ltt)p$nGIx$N~UP86u4(M3G>lvCccKECr%G zW)6UQ?i|6`3E`FC*pvqs8tIp}(f-!bFu)K(`%VXQ^AoMP)nT{a?he|^J)6jJbU2;= zt1>qF=g-RHbR1wgBONS`=8vJ5&;5T*r>>ybVZj*{xf486kdbn!iDxm!>_~y}`HS)l z#Tn|H4$N?FbD)Ykz3V;|ALd}6+D3QR*YezS2i^48jmnYq?lwA_Ub%`M+}9q$t2xi% zM!NNMwUMsBRIQcYe%(wTe2!}A*~_S&P8>t^(Z!F^M~7;Q;JsZ;H{P$<4)rE5Wmv9m zGkqiSOi@rQV5u3ALCKs%rYen6z@mo?jOC0d0gWk3ZyhO`wc*;pzqXOieurj94}F6^ zePH4aCJ$-;46+@@85k;~m(n<|eE=TwpwXPkE(*z{BF+NU+L1_b{sjOs{O1kxSYB}V zj(+?NJ=v?>Zu7aHk$WVY&im<{qS#8xd=@g8(WrstC5$+)Cl3H#aU3yLAy9`|9xKU9 oa_dlw-v1e gsT#MS$OEb-w-4|GLTs04UIY`9zoAmM* tauri::Result<()> { .setup(move |app| { // We need a the app handle to determine the data directory now. // This means all the setup code has to be within `setup`, however it doesn't support async so we `block_on`. 
+ app.listen("deep-link://new-url", |url| { + println!("Received deep link: {:?}", url); + }); + block_in_place(|| { block_on(async move { builder.mount_events(app); diff --git a/apps/desktop/src-tauri/tauri.conf.json b/apps/desktop/src-tauri/tauri.conf.json index c873befb1..14bfbaa72 100644 --- a/apps/desktop/src-tauri/tauri.conf.json +++ b/apps/desktop/src-tauri/tauri.conf.json @@ -29,7 +29,9 @@ "transparent": true, "center": true, "windowEffects": { - "effects": ["sidebar"], + "effects": [ + "sidebar" + ], "state": "followsWindowActiveState", "radius": 9 } @@ -41,7 +43,11 @@ }, "bundle": { "active": true, - "targets": ["deb", "msi", "dmg"], + "targets": [ + "deb", + "msi", + "dmg" + ], "publisher": "Spacedrive Technology Inc.", "copyright": "Spacedrive Technology Inc.", "category": "Productivity", @@ -59,14 +65,20 @@ "files": { "/usr/share/spacedrive/models/yolov8s.onnx": "../../.deps/models/yolov8s.onnx" }, - "depends": ["libc6", "libxdo3", "dbus"] + "depends": [ + "libc6", + "libxdo3", + "dbus" + ] } }, "macOS": { "minimumSystemVersion": "10.15", "exceptionDomain": null, "entitlements": null, - "frameworks": ["../../.deps/Spacedrive.framework"] + "frameworks": [ + "../../.deps/Spacedrive.framework" + ] }, "windows": { "certificateThumbprint": null, @@ -90,6 +102,14 @@ "endpoints": [ "https://spacedrive.com/api/releases/tauri/{{version}}/{{target}}/{{arch}}" ] + }, + "deep-link": { + "mobile": [], + "desktop": { + "schemes": [ + "spacedrive" + ] + } } } } diff --git a/apps/desktop/tsconfig.json b/apps/desktop/tsconfig.json index d3855c6fb..11d32a210 100644 --- a/apps/desktop/tsconfig.json +++ b/apps/desktop/tsconfig.json @@ -5,7 +5,8 @@ "declarationDir": "dist", "paths": { "~/*": ["./src/*"] - } + }, + "moduleResolution": "bundler" }, "include": ["src"], "references": [ diff --git a/core/src/node/config.rs b/core/src/node/config.rs index 75ec1a9ed..3dcb641ed 100644 --- a/core/src/node/config.rs +++ b/core/src/node/config.rs @@ -183,7 +183,7 @@ pub enum 
NodeConfigVersion { } impl ManagedVersion for NodeConfig { - const LATEST_VERSION: NodeConfigVersion = NodeConfigVersion::V4; + const LATEST_VERSION: NodeConfigVersion = NodeConfigVersion::V5; const KIND: Kind = Kind::Json("version"); type MigrationError = NodeConfigError; diff --git a/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx b/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx index 177a931cb..10cc65173 100644 --- a/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx +++ b/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx @@ -171,7 +171,7 @@ export default () => { > - + {/* */} {/* */} - +
+ + +
{/* {platform.showDevtools && ( { diff --git a/interface/app/$libraryId/index.tsx b/interface/app/$libraryId/index.tsx index 2f0df905c..6be3b0dd3 100644 --- a/interface/app/$libraryId/index.tsx +++ b/interface/app/$libraryId/index.tsx @@ -79,7 +79,7 @@ export default (platform: Platform) => }, { path: 'auth', - lazy: () => import('./auth'), + lazy: () => import('./Layout/auth'), children: [] }, { path: '*', lazy: () => import('./404') } diff --git a/interface/app/$libraryId/settings/client/account/Tabs.tsx b/interface/app/$libraryId/settings/client/account/Tabs.tsx index e04f90615..890565c64 100644 --- a/interface/app/$libraryId/settings/client/account/Tabs.tsx +++ b/interface/app/$libraryId/settings/client/account/Tabs.tsx @@ -56,7 +56,7 @@ const Tabs = () => { // This is where Google should redirect the user back after login or error. // This URL goes on the Google's dashboard as well. - frontendRedirectURI: 'http://localhost:9420/api/auth/callback/google' + frontendRedirectURI: 'spacedrive://-/auth' }); /* diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index 4bc80ad02..00b339a1a 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -231,7 +231,7 @@ export type CursorOrderItem = { order: SortOrder; data: T } export type DefaultLocations = { desktop: boolean; documents: boolean; downloads: boolean; pictures: boolean; music: boolean; videos: boolean } -export type Device = { pub_id: DevicePubId; name: string; os: DeviceOS; storage_size: bigint; connection_id: string; created_at: string; updated_at: string } +export type Device = { pub_id: DevicePubId; name: string; os: DeviceOS; storage_size: bigint; used_storage: bigint; connection_id: string; created_at: string; updated_at: string; hardware_model: HardwareModel } export type DeviceDeleteRequest = { access_token: AccessToken; pub_id: DevicePubId } @@ -505,7 +505,7 @@ export type MediaLocation = { latitude: number; longitude: number; pluscode: Plu export type Metadata 
= { album: string | null; album_artist: string | null; artist: string | null; comment: string | null; composer: string | null; copyright: string | null; creation_time: string | null; date: string | null; disc: number | null; encoder: string | null; encoded_by: string | null; filename: string | null; genre: string | null; language: string | null; performer: string | null; publisher: string | null; service_name: string | null; service_provider: string | null; title: string | null; track: number | null; variant_bit_rate: number | null; custom: { [key in string]: string } } -export type MockDevice = { pub_id: DevicePubId; name: string; os: DeviceOS; used_storage: bigint; storage_size: bigint; created_at: string; updated_at: string; device_model: HardwareModel } +export type MockDevice = { pub_id: DevicePubId; name: string; os: DeviceOS; used_storage: bigint; storage_size: bigint; created_at: string; updated_at: string; device_model: core_HardwareModel } export type NodeConfigP2P = { discovery?: P2PDiscoveryState; port: Port; disabled: boolean; disable_ipv6: boolean; disable_relay: boolean; enable_remote_access: boolean; /** @@ -602,7 +602,7 @@ export type P2PDiscoveryState = "Everyone" | "ContactsOnly" | "Disabled" export type P2PEvent = { type: "PeerChange"; identity: RemoteIdentity; connection: ConnectionMethod; discovery: DiscoveryMethod; metadata: PeerMetadata; addrs: string[] } | { type: "PeerDelete"; identity: RemoteIdentity } | { type: "SpacedropRequest"; id: string; identity: RemoteIdentity; peer_name: string; files: string[] } | { type: "SpacedropProgress"; id: string; percent: number } | { type: "SpacedropTimedOut"; id: string } | { type: "SpacedropRejected"; id: string } -export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; device_model: HardwareModel | null; version: string | null } +export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; device_model: core_HardwareModel | null; version: string | 
null } export type PlusCode = string @@ -702,3 +702,5 @@ export type UpdateThumbnailerPreferences = Record export type VideoProps = { pixel_format: string | null; color_range: string | null; bits_per_channel: number | null; color_space: string | null; color_primaries: string | null; color_transfer: string | null; field_order: string | null; chroma_location: string | null; width: number; height: number; aspect_ratio_num: number | null; aspect_ratio_den: number | null; properties: string[] } export type Volume = { name: string; mount_points: string[]; total_capacity: string; available_capacity: string; disk_type: DiskType; file_system: string | null; is_root_filesystem: boolean } + +export type core_HardwareModel = "Other" | "MacStudio" | "MacBookAir" | "MacBookPro" | "MacBook" | "MacMini" | "MacPro" | "IMac" | "IMacPro" | "IPad" | "IPhone" | "Simulator" | "Android" diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 4255f3198b53e825e3d8a5fd954347ebe8a0341b..ed3e6a5a7eef8c367c127deec3667ac2e15d0023 100644 GIT binary patch delta 422 zcmZo%>9BaM!-ft%mW+~;g2@Xx#5j!f4D<|ii;^dQR1=vj;LXYkWa}ADHw@ws_@P%FE%YVcGdPyHz{^1DKGM{FwMyFtt<>N2u^nl4NDBo zwu}ficXLWNPPetKo$epXBf4EEgLwfLFUX+|a7Qv8)(Fiq2~P=e^{y~*c1tuXwJ1%> zPBqBQH#aSCF3Kx4izq2_D=Et=&d{#PD)oy@_jT9LG0=82EYA;*@W?ecPBZdJEi2B> zDAx8)@=o>%bkxr)vdA`?ZdJ@IGhHMS=rF9%m&2lK+FNeoIuRA z{b>>RiwEGKU^7y;QmBTwc{PkNFsZm}&cCKIXQ_=@UYEl%{1f%WwC{WKQOqZWhfWGJQ@u zvwV9?2{RD005K~NvjH(X5OV-AClGUOZz Date: Tue, 20 Aug 2024 20:52:21 +0300 Subject: [PATCH 077/218] First autoformat of the PR --- apps/desktop/src-tauri/Cargo.toml | 14 +++--- .../src-tauri/capabilities/default.json | 4 +- apps/desktop/src-tauri/tauri.conf.json | 20 ++------ .../modules/sd-core/android/crate/src/lib.rs | 6 +-- .../client/AccountSettings/AccountProfile.tsx | 2 +- .../settings/client/AccountSettings/Login.tsx | 11 +++-- .../client/AccountSettings/Register.tsx | 8 ++-- core/crates/cloud-services/src/cloud_p2p.rs | 1 - core/src/api/utils/mod.rs | 2 - 
core/src/cloud/sync/mod.rs | 9 +--- core/src/cloud/sync/receive.rs | 20 ++------ core/src/cloud/sync/send.rs | 21 ++------- core/src/library/manager/mod.rs | 2 +- .../Layout/Sidebar/DebugPopover.tsx | 2 +- interface/app/$libraryId/TopBar/index.tsx | 4 +- .../settings/client/account/Login.tsx | 1 + .../settings/client/account/Register.tsx | 3 +- .../client/account/handlers/cookieHandler.ts | 24 +++++----- .../client/account/handlers/windowHandler.ts | 46 +++++++++---------- .../settings/node/libraries/ListItem.tsx | 2 +- interface/hooks/useDeeplinkEventHandler.ts | 8 ++-- 21 files changed, 83 insertions(+), 127 deletions(-) diff --git a/apps/desktop/src-tauri/Cargo.toml b/apps/desktop/src-tauri/Cargo.toml index aff69b5e3..f7ccf5b4a 100644 --- a/apps/desktop/src-tauri/Cargo.toml +++ b/apps/desktop/src-tauri/Cargo.toml @@ -34,14 +34,14 @@ uuid = { workspace = true, features = ["serde"] } # Specific Desktop dependencies # WARNING: Do NOT enable default features, as that vendors dbus (see below) -opener = { version = "0.7.1", features = ["reveal"], default-features = false } -specta-typescript = "=0.0.7" -tauri-plugin-dialog = "=2.0.0-rc.0" -tauri-plugin-os = "=2.0.0-rc.0" -tauri-plugin-shell = "=2.0.0-rc.0" -tauri-plugin-updater = "=2.0.0-rc.0" +opener = { version = "0.7.1", features = ["reveal"], default-features = false } +specta-typescript = "=0.0.7" tauri-plugin-deep-link = "=2.0.0-rc.0" -tauri-plugin-http = "2.0.0-rc.0" +tauri-plugin-dialog = "=2.0.0-rc.0" +tauri-plugin-http = "2.0.0-rc.0" +tauri-plugin-os = "=2.0.0-rc.0" +tauri-plugin-shell = "=2.0.0-rc.0" +tauri-plugin-updater = "=2.0.0-rc.0" [dependencies.tauri] features = ["linux-libxdo", "macos-private-api", "native-tls-vendored", "unstable"] diff --git a/apps/desktop/src-tauri/capabilities/default.json b/apps/desktop/src-tauri/capabilities/default.json index 20e0aa609..79b0ef88b 100644 --- a/apps/desktop/src-tauri/capabilities/default.json +++ b/apps/desktop/src-tauri/capabilities/default.json @@ -2,9 +2,7 
@@ "$schema": "../gen/schemas/desktop-schema.json", "identifier": "default", "description": "Capability for the main window", - "windows": [ - "main" - ], + "windows": ["main"], "permissions": [ "core:app:default", "core:event:default", diff --git a/apps/desktop/src-tauri/tauri.conf.json b/apps/desktop/src-tauri/tauri.conf.json index 1a4adbbf6..0dc0fe09c 100644 --- a/apps/desktop/src-tauri/tauri.conf.json +++ b/apps/desktop/src-tauri/tauri.conf.json @@ -29,9 +29,7 @@ "transparent": true, "center": true, "windowEffects": { - "effects": [ - "sidebar" - ], + "effects": ["sidebar"], "state": "followsWindowActiveState", "radius": 9 } @@ -43,11 +41,7 @@ }, "bundle": { "active": true, - "targets": [ - "deb", - "msi", - "dmg" - ], + "targets": ["deb", "msi", "dmg"], "publisher": "Spacedrive Technology Inc.", "copyright": "Spacedrive Technology Inc.", "category": "Productivity", @@ -66,11 +60,7 @@ "files": { "/usr/share/spacedrive/models/yolov8s.onnx": "../../.deps/models/yolov8s.onnx" }, - "depends": [ - "libc6", - "libxdo3", - "dbus" - ] + "depends": ["libc6", "libxdo3", "dbus"] } }, "macOS": { @@ -114,9 +104,7 @@ "deep-link": { "mobile": [], "desktop": { - "schemes": [ - "spacedrive" - ] + "schemes": ["spacedrive"] } } } diff --git a/apps/mobile/modules/sd-core/android/crate/src/lib.rs b/apps/mobile/modules/sd-core/android/crate/src/lib.rs index bc8fd2998..81a07b8a8 100644 --- a/apps/mobile/modules/sd-core/android/crate/src/lib.rs +++ b/apps/mobile/modules/sd-core/android/crate/src/lib.rs @@ -37,9 +37,7 @@ pub extern "system" fn Java_com_spacedrive_core_SDCoreModule_registerCoreEventLi if let Err(err) = result { // TODO: Send rspc error or something here so we can show this in the UI. // TODO: Maybe reinitialise the core cause it could be in an invalid state? 
- error!( - "Error in Java_com_spacedrive_core_SDCoreModule_registerCoreEventListener: {err:?}" - ); + error!("Error in Java_com_spacedrive_core_SDCoreModule_registerCoreEventListener: {err:?}"); } } @@ -109,4 +107,4 @@ pub extern "system" fn Java_com_spacedrive_core_SDCoreModule_handleCoreMsg( err ); } -} \ No newline at end of file +} diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx index d8bc06dd4..2a0599e3c 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx @@ -43,7 +43,7 @@ const AccountProfile = () => { style={tw`w-full items-center justify-start gap-1 bg-app-input !px-2`} > - + {userInfo ? userInfo.email : ''} diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx index 989fa4e39..b1788772f 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx @@ -1,3 +1,4 @@ +import { useNavigation } from '@react-navigation/native'; import { useState } from 'react'; import { Controller } from 'react-hook-form'; import { Text, View } from 'react-native'; @@ -7,11 +8,15 @@ import { Button } from '~/components/primitive/Button'; import { Input } from '~/components/primitive/Input'; import { toast } from '~/components/primitive/Toast'; import { tw } from '~/lib/tailwind'; -import { useNavigation } from '@react-navigation/native'; import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; + import ShowPassword from './ShowPassword'; -async function signInClicked(email: string, password: string, navigator: SettingsStackScreenProps<'AccountProfile'>['navigation']) { +async function signInClicked( + email: string, + password: string, + navigator: 
SettingsStackScreenProps<'AccountProfile'>['navigation'] +) { try { const req = await fetch('http://localhost:9420/api/auth/signin', { method: 'POST', @@ -63,7 +68,7 @@ async function signInClicked(email: string, password: string, navigator: Setting // the frontend SDK. toast.success('Sign in successful'); // Refresh the page to show the user is logged in - navigator.navigate('AccountProfile') + navigator.navigate('AccountProfile'); } } catch (err: any) { if (err.isSuperTokensGeneralError === true) { diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx index 277dde897..e43d355a3 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx @@ -145,10 +145,10 @@ const Register = () => { secureTextEntry={!showPassword} /> + showPassword={showPassword} + setShowPassword={setShowPassword} + plural={true} + /> )} /> diff --git a/core/crates/cloud-services/src/cloud_p2p.rs b/core/crates/cloud-services/src/cloud_p2p.rs index 139597f9c..8b1378917 100644 --- a/core/crates/cloud-services/src/cloud_p2p.rs +++ b/core/crates/cloud-services/src/cloud_p2p.rs @@ -1,2 +1 @@ - diff --git a/core/src/api/utils/mod.rs b/core/src/api/utils/mod.rs index a37888bbd..b12c1ec7e 100644 --- a/core/src/api/utils/mod.rs +++ b/core/src/api/utils/mod.rs @@ -3,9 +3,7 @@ use std::path::Path; // #[cfg(not(any(target_os = "ios", target_os = "android")))] // use keyring::Entry; -use regex::Regex; use tokio::{fs, io}; -use tracing::{debug, error}; mod invalidate; mod library; diff --git a/core/src/cloud/sync/mod.rs b/core/src/cloud/sync/mod.rs index 095b0a5e8..10422b0ff 100644 --- a/core/src/cloud/sync/mod.rs +++ b/core/src/cloud/sync/mod.rs @@ -1,8 +1,4 @@ -use sd_sync::*; -use std::sync::{ - atomic::{self, AtomicBool}, - Arc, -}; +use std::sync::{atomic::AtomicBool, Arc}; use tokio::sync::Notify; use 
uuid::Uuid; @@ -29,7 +25,6 @@ pub async fn declare_actors( db: Arc, ) -> State { let ingest_notify = Arc::new(Notify::new()); - let state = State::default(); // actors // .declare( @@ -90,7 +85,7 @@ pub async fn declare_actors( // ) // .await; - state + State::default() } macro_rules! err_break { diff --git a/core/src/cloud/sync/receive.rs b/core/src/cloud/sync/receive.rs index 566db53d7..6d8b3fda3 100644 --- a/core/src/cloud/sync/receive.rs +++ b/core/src/cloud/sync/receive.rs @@ -1,34 +1,22 @@ use crate::{library::Libraries, Node}; use futures::FutureExt; -use futures_concurrency::future::Race; use sd_actors::Stopper; -use sd_cloud_api::{library::message_collections::get::InstanceTimestamp, RequestConfigProvider}; use sd_p2p::RemoteIdentity; -use sd_prisma::prisma::{cloud_crdt_operation, instance, PrismaClient, SortOrder}; +use sd_prisma::prisma::{cloud_crdt_operation, instance, PrismaClient}; use sd_sync::CRDTOperation; use sd_utils::uuid_to_bytes; use std::{ - collections::{hash_map::Entry, HashMap}, - future::IntoFuture, - str::FromStr, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::Duration, + collections::HashMap, + sync::{atomic::AtomicBool, Arc}, }; -use base64::prelude::*; use chrono::Utc; use serde_json::to_vec; -use tokio::{sync::Notify, time::sleep}; -use tracing::{debug, info}; +use tokio::sync::Notify; use uuid::Uuid; -use super::{err_break, CompressedCRDTOperations}; - // Responsible for downloading sync operations from the cloud to be processed by the ingester #[allow(clippy::too_many_arguments)] diff --git a/core/src/cloud/sync/send.rs b/core/src/cloud/sync/send.rs index aa2944f22..a97958169 100644 --- a/core/src/cloud/sync/send.rs +++ b/core/src/cloud/sync/send.rs @@ -1,27 +1,12 @@ use sd_actors::Stopper; use sd_core_cloud_services::CloudServices; -use sd_core_sync::{SyncMessage, NTP64}; +use sd_core_sync::SyncMessage; -use std::{ - future::IntoFuture, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::Duration, -}; 
+use std::sync::{atomic::AtomicBool, Arc}; -use futures::FutureExt; -use futures_concurrency::future::Race; -use tokio::{ - sync::{broadcast, Notify}, - time::sleep, -}; -use tracing::debug; +use tokio::sync::{broadcast, Notify}; use uuid::Uuid; -use super::{err_break, CompressedCRDTOperations}; - enum RaceNotifiedOrStopped { Notified, Stopped, diff --git a/core/src/library/manager/mod.rs b/core/src/library/manager/mod.rs index a8e187f39..b4dddb3db 100644 --- a/core/src/library/manager/mod.rs +++ b/core/src/library/manager/mod.rs @@ -1,6 +1,6 @@ use crate::{ api::{utils::InvalidateOperationEvent, CoreEvent}, - cloud, invalidate_query, + invalidate_query, location::metadata::{LocationMetadataError, SpacedriveLocationMetadataFile}, object::tag, p2p, sync, diff --git a/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx b/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx index 60a46fb70..e1e569b58 100644 --- a/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx +++ b/interface/app/$libraryId/Layout/Sidebar/DebugPopover.tsx @@ -174,7 +174,7 @@ export default () => { {/* */} {/* */} -
+
diff --git a/interface/app/$libraryId/TopBar/index.tsx b/interface/app/$libraryId/TopBar/index.tsx index 3e6afd7a3..d84646775 100644 --- a/interface/app/$libraryId/TopBar/index.tsx +++ b/interface/app/$libraryId/TopBar/index.tsx @@ -138,7 +138,7 @@ function Tabs() { else if (e.button === 1) removeTab(index); }} className={clsx( - 'duration-[50ms] group relative flex h-full min-w-40 shrink-0 flex-row items-center justify-center px-8 text-center', + 'group relative flex h-full min-w-40 shrink-0 flex-row items-center justify-center px-8 text-center duration-[50ms]', ctx.tabIndex === index ? 'text-ink' : 'top-bar-blur border-t border-sidebar-divider bg-sidebar/30 text-ink-faint/60 transition-colors hover:bg-app/50' @@ -166,7 +166,7 @@ function Tabs() { diff --git a/interface/app/$libraryId/settings/client/account/Login.tsx b/interface/app/$libraryId/settings/client/account/Login.tsx index e89837a0e..b64ec4b6c 100644 --- a/interface/app/$libraryId/settings/client/account/Login.tsx +++ b/interface/app/$libraryId/settings/client/account/Login.tsx @@ -3,6 +3,7 @@ import { Controller } from 'react-hook-form'; import { signIn } from 'supertokens-web-js/recipe/emailpassword'; import { nonLibraryClient, useZodForm } from '@sd/client'; import { Button, Form, Input, toast, z } from '@sd/ui'; + import ShowPassword from './ShowPassword'; async function signInClicked(email: string, password: string) { diff --git a/interface/app/$libraryId/settings/client/account/Register.tsx b/interface/app/$libraryId/settings/client/account/Register.tsx index 7e42dbd25..6dbc4ef15 100644 --- a/interface/app/$libraryId/settings/client/account/Register.tsx +++ b/interface/app/$libraryId/settings/client/account/Register.tsx @@ -1,8 +1,9 @@ import { zodResolver } from '@hookform/resolvers/zod'; -import { Button, Form, Input, toast, z } from '@sd/ui'; import { useState } from 'react'; import { Controller, useForm } from 'react-hook-form'; import { signUp } from 'supertokens-web-js/recipe/emailpassword'; 
+import { Button, Form, Input, toast, z } from '@sd/ui'; + import ShowPassword from './ShowPassword'; const RegisterSchema = z diff --git a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts index a0ca2abe9..4a7362d8e 100644 --- a/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts +++ b/interface/app/$libraryId/settings/client/account/handlers/cookieHandler.ts @@ -1,6 +1,6 @@ -import { CookieHandlerInterface } from "supertokens-website/utils/cookieHandler/types"; +import { CookieHandlerInterface } from 'supertokens-website/utils/cookieHandler/types'; -const frontendCookiesKey = "frontendCookies"; +const frontendCookiesKey = 'frontendCookies'; /** * Tauri handles cookies differently than in browser environments. The SuperTokens @@ -12,8 +12,8 @@ function getCookiesFromStorage(): string { const cookiesFromStorage = window.localStorage.getItem(frontendCookiesKey); if (cookiesFromStorage === null) { - window.localStorage.setItem(frontendCookiesKey, "[]"); - return ""; + window.localStorage.setItem(frontendCookiesKey, '[]'); + return ''; } /** @@ -25,20 +25,20 @@ function getCookiesFromStorage(): string { for (let cookieIndex = 0; cookieIndex < cookieArrayInStorage.length; cookieIndex++) { const currentCookieString = cookieArrayInStorage[cookieIndex]; - const parts = currentCookieString?.split(";"); - let expirationString: string = ""; + const parts = currentCookieString?.split(';'); + let expirationString: string = ''; for (let partIndex = 0; partIndex < parts!.length; partIndex++) { const currentPart = parts![partIndex]; - if (currentPart!.toLocaleLowerCase().includes("expires=")) { + if (currentPart!.toLocaleLowerCase().includes('expires=')) { expirationString = currentPart!; break; } } - if (expirationString !== "") { - const expirationValueString = expirationString.split("=")[1]; + if (expirationString !== '') { + const expirationValueString 
= expirationString.split('=')[1]; const expirationDate = new Date(expirationValueString!); const currentTimeInMillis = Date.now(); @@ -57,11 +57,11 @@ function getCookiesFromStorage(): string { */ window.localStorage.setItem(frontendCookiesKey, JSON.stringify(cookieArrayToReturn)); - return cookieArrayToReturn.join("; "); + return cookieArrayToReturn.join('; '); } function setCookieToStorage(cookieString: string) { - const cookieName = cookieString.split(";")[0]!.split("=")[0]; + const cookieName = cookieString.split(';')[0]!.split('=')[0]; const cookiesFromStorage = window.localStorage.getItem(frontendCookiesKey); let cookiesArray: string[] = []; @@ -105,6 +105,6 @@ export default function getCookieHandler(original: CookieHandlerInterface): Cook }, setCookie: async function (cookieString: string) { setCookieToStorage(cookieString); - }, + } }; } diff --git a/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts b/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts index c5f095733..f4d16996d 100644 --- a/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts +++ b/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts @@ -1,4 +1,4 @@ -import { WindowHandlerInterface } from "supertokens-website/utils/windowHandler/types"; +import { WindowHandlerInterface } from 'supertokens-website/utils/windowHandler/types'; /** * This example app uses HashRouter from react-router-dom. 
The SuperTokens SDK relies on @@ -13,76 +13,76 @@ export default function getWindowHandler(original: WindowHandlerInterface): Wind ...original.location, getSearch: function () { const currentURL = window.location.href; - const firstQuestionMarkIndex = currentURL.indexOf("?"); + const firstQuestionMarkIndex = currentURL.indexOf('?'); if (firstQuestionMarkIndex !== -1) { // Return the query string from the url let queryString = currentURL.substring(firstQuestionMarkIndex); // Remove any hash - if (queryString.includes("#")) { - queryString = queryString.split("#")[0] ?? ""; + if (queryString.includes('#')) { + queryString = queryString.split('#')[0] ?? ''; } return queryString; } - return ""; + return ''; }, getHash: function () { // Location hash always starts with a #, when returning we prepend it let locationHash = window.location.hash; - if (locationHash === "") { - return "#"; + if (locationHash === '') { + return '#'; } - if (locationHash.startsWith("#")) { + if (locationHash.startsWith('#')) { // Remove the starting pound symbol locationHash = locationHash.substring(1); } - if (!locationHash.includes("#")) { + if (!locationHash.includes('#')) { // The remaining string did not have any "#" character - return "#"; + return '#'; } - const locationSplit = locationHash.split("#"); + const locationSplit = locationHash.split('#'); if (locationSplit.length < 2) { // The string contains a "#" but is followed by nothing - return "#"; + return '#'; } - return "#" + locationSplit[1]; + return '#' + locationSplit[1]; }, getOrigin: function () { - return "http://localhost:8001"; + return 'http://localhost:8001'; }, getHostName: function () { - return "localhost"; + return 'localhost'; }, getPathName: function () { let locationHash = window.location.hash; - if (locationHash === "") { - return ""; + if (locationHash === '') { + return ''; } - if (locationHash.startsWith("#")) { + if (locationHash.startsWith('#')) { // Remove the starting pound symbol locationHash = 
locationHash.substring(1); } - locationHash = locationHash.split("?")[0] ?? ""; + locationHash = locationHash.split('?')[0] ?? ''; - if (locationHash.includes("#")) { + if (locationHash.includes('#')) { // Remove location hash - locationHash = locationHash.split("#")[0] ?? ""; + locationHash = locationHash.split('#')[0] ?? ''; } return locationHash; - }, - }, + } + } }; } diff --git a/interface/app/$libraryId/settings/node/libraries/ListItem.tsx b/interface/app/$libraryId/settings/node/libraries/ListItem.tsx index f847355e1..e789c041a 100644 --- a/interface/app/$libraryId/settings/node/libraries/ListItem.tsx +++ b/interface/app/$libraryId/settings/node/libraries/ListItem.tsx @@ -88,7 +88,7 @@ export default (props: Props) => { exit={{ height: 0, opacity: 0 }} className="relative mt-2 flex origin-top flex-col gap-1 pl-8" > -
+
{cloudDevicesList.data?.map( ( diff --git a/interface/hooks/useDeeplinkEventHandler.ts b/interface/hooks/useDeeplinkEventHandler.ts index 728241ee0..16813b05c 100644 --- a/interface/hooks/useDeeplinkEventHandler.ts +++ b/interface/hooks/useDeeplinkEventHandler.ts @@ -1,6 +1,6 @@ -import { useEffect } from "react"; -import { useNavigate } from "react-router"; -import { DeeplinkEvent } from "~/util/events"; +import { useEffect } from 'react'; +import { useNavigate } from 'react-router'; +import { DeeplinkEvent } from '~/util/events'; export const useDeeplinkEventHandler = () => { const navigate = useNavigate(); @@ -17,4 +17,4 @@ export const useDeeplinkEventHandler = () => { document.addEventListener('deeplink', handler); return () => document.removeEventListener('deeplink', handler); }, [navigate]); -} +}; From e48c33dfbf58eade7073ae6abdc67b4660db58b1 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 20 Aug 2024 23:05:57 -0300 Subject: [PATCH 078/218] Update deps and configure TLS with new stuff --- Cargo.lock | Bin 306716 -> 307847 bytes core/crates/cloud-services/Cargo.toml | 20 +++---- .../crates/cloud-services/src/cloud_client.rs | 56 +++++++++++++----- 3 files changed, 50 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6c68fb83c1d9bf374dc550fc7a358b9b69208658..36284387496cd07d78cd56b393d075af5955f9ee 100644 GIT binary patch delta 1304 zcmaJ=ZD>|y80MVw%5=l#Hr+?&dRL&fde`&)5$M1uic;tL5TqYIpYhh**1LE1D^ZY) zeuVjwcSK-6gA!~gry!xQ6cT6t&WgS$0%?MOLaZJ~4l3x^InVu^b6xj+U)S~awkm)c{Cfo3BC?GujR{E zO_*k!YV4`)4UKxNhX%YfpFet&w*!tt)(zJ#CUTDi)|P7~arB;QYMe5{Ats|>2uqBy zcGxM)aFm{CBE8jEO5vTij>FZnd2eg~nH8G}jW@Z(X?_>|c ziI0lDLsyPv<&|UkhP&a$ooNUqomE^Ug=~^W1>-|f-3R71VJs{>Ui6j+Kg)mI2KSBSjP+1F~JXxtg|8&+1TOsSJHfCp<6En4cbrc#k zGwB;sbr09Oiz*x($h%$S$zRI~Nn+a^7MkaSeOE5_RG16Kpl1EyfUhZEcDu?@u3v1R>>}kZ# z@strw9O2Sa>(cgdaxqE3$rm!TUjMBRwr@)UkJgHfaOz0bT{dgQcpD5(XI=kEO8EsV zMpw0^T@gHj%M}k53mxr~vwLc`{%n1+d2{+f`1#>VUwO}t;@4fbK7<`R^EKth_lxVx 
zmP%$QZTGQ-d_}osu6U|vseb>*#ZdX-=f%$KR;lbfSMZnNVkKV=Q`;-U<=(B8tJ&hY aOy`sN($&4?)8mz2HZA?Q`Ecc%b$UV@ysF_E`KhP^_}?IaXLT8WlBMs)`Dg^KvG(Got1=WK}4Rx zD#?XsK0$O`Gq0o3)`91OvB(FICdyN9kW&q=zC-5c9Mje)r*v+EQAs2T$uOIe&vgQj zQvendQ<;l4hZw+U#+Wdz1TY3)f37ou-SuV^&%8wr;!?lfPOmkG@ZPgz3cDxi{DG)L zc3c{3nQFDEM4Uf2tEgcutxiv3e&jnC3+LygJjDM zneF)D4tg7g_2z+1!7=<2$vAeWn)$glMdLsP;RR%7+m?mMcs3%-q`|ACLJLxRU>4eJ zvSFHY51_fpNdT_APyRP{>HFr<^26KY{$bpNOyKTiG& DrPK{I diff --git a/core/crates/cloud-services/Cargo.toml b/core/crates/cloud-services/Cargo.toml index d45a4604d..9773daec9 100644 --- a/core/crates/cloud-services/Cargo.toml +++ b/core/crates/cloud-services/Cargo.toml @@ -27,18 +27,14 @@ tracing = { workspace = true } zeroize = { workspace = true } # External dependencies -iroh-base = { version = "0.22.0", features = ["key"] } -postcard = { version = "1.0.8", features = ["use-std"] } -quic-rpc = { version = "0.11.0", features = ["quinn-transport"] } -quinn = { package = "iroh-quinn", version = "=0.10.5" } -reqwest-middleware = { version = "0.3", features = ["json"] } -reqwest-retry = "0.6" - -[dependencies.rustls-old] -default-features = false -features = ["dangerous_configuration", "logging", "quic"] -package = "rustls" -version = "0.21.12" # Update blocked by quic-rpc +iroh-base = { version = "0.23.0", features = ["key"] } +postcard = { version = "1.0.8", features = ["use-std"] } +quic-rpc = { version = "0.12.0", features = ["quinn-transport"] } +quinn = { package = "iroh-quinn", version = "=0.11.3" } +reqwest-middleware = { version = "0.3", features = ["json"] } +reqwest-retry = "0.6" +rustls = { version = "0.23", default-features = false, features = ["ring"] } +rustls-platform-verifier = "0.3.3" [dev-dependencies] diff --git a/core/crates/cloud-services/src/cloud_client.rs b/core/crates/cloud-services/src/cloud_client.rs index b9154f979..822e6d0c6 100644 --- a/core/crates/cloud-services/src/cloud_client.rs +++ b/core/crates/cloud-services/src/cloud_client.rs @@ -3,7 +3,7 @@ use 
sd_cloud_schema::{Client, Service}; use std::{net::SocketAddr, sync::Arc, time::Duration}; use quic_rpc::{transport::quinn::QuinnConnection, RpcClient}; -use quinn::{ClientConfig, Endpoint}; +use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Endpoint}; use reqwest::{IntoUrl, Url}; use reqwest_middleware::{reqwest, ClientBuilder, ClientWithMiddleware}; use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; @@ -114,36 +114,64 @@ impl CloudServices { let crypto_config = { #[cfg(debug_assertions)] { + #[derive(Debug)] struct SkipServerVerification; - impl rustls_old::client::ServerCertVerifier for SkipServerVerification { + impl rustls::client::danger::ServerCertVerifier for SkipServerVerification { fn verify_server_cert( &self, - _end_entity: &rustls_old::Certificate, - _intermediates: &[rustls_old::Certificate], - _server_name: &rustls_old::ServerName, - _scts: &mut dyn Iterator, + _end_entity: &rustls::pki_types::CertificateDer<'_>, + _intermediates: &[rustls::pki_types::CertificateDer<'_>], + _server_name: &rustls::pki_types::ServerName<'_>, _ocsp_response: &[u8], - _now: std::time::SystemTime, - ) -> Result { - Ok(rustls_old::client::ServerCertVerified::assertion()) + _now: rustls::pki_types::UnixTime, + ) -> Result { + Ok(rustls::client::danger::ServerCertVerified::assertion()) + } + + fn verify_tls12_signature( + &self, + _message: &[u8], + _cert: &rustls::pki_types::CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + + fn verify_tls13_signature( + &self, + _message: &[u8], + _cert: &rustls::pki_types::CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + + fn supported_verify_schemes(&self) -> Vec { + vec![] } } - rustls_old::ClientConfig::builder() - .with_safe_defaults() + 
rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13]) + .dangerous() .with_custom_certificate_verifier(Arc::new(SkipServerVerification)) .with_no_client_auth() } #[cfg(not(debug_assertions))] { - rustls_old::ClientConfig::builder() - .with_safe_defaults() + rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13]) + .dangerous() + .with_custom_certificate_verifier(Arc::new( + rustls_platform_verifier::Verifier::new(), + )) .with_no_client_auth() } }; - let client_config = ClientConfig::new(Arc::new(crypto_config)); + let client_config = ClientConfig::new(Arc::new( + QuicClientConfig::try_from(crypto_config) + .expect("misconfigured TLS client config, this is a bug and should crash"), + )); let mut endpoint = Endpoint::client("[::]:0".parse().expect("hardcoded address")) .map_err(Error::FailedToCreateEndpoint)?; From cbb1e92b9ec84788615796dd46a2ab031cdd710c Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 21 Aug 2024 17:25:39 +0300 Subject: [PATCH 079/218] Working Supertokens Callback --- interface/app/$libraryId/Layout/auth.tsx | 5 +++- .../client/account/handlers/windowHandler.ts | 27 +++++++++++-------- interface/hooks/useDeeplinkEventHandler.ts | 9 +++++-- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/interface/app/$libraryId/Layout/auth.tsx b/interface/app/$libraryId/Layout/auth.tsx index 559b12760..2376eebcc 100644 --- a/interface/app/$libraryId/Layout/auth.tsx +++ b/interface/app/$libraryId/Layout/auth.tsx @@ -1,6 +1,7 @@ +/* eslint-disable no-restricted-syntax */ /* eslint-disable react-hooks/exhaustive-deps */ import { useEffect } from 'react'; -import { NavigateFunction, useNavigate } from 'react-router-dom'; +import { NavigateFunction, useNavigate, useSearchParams } from 'react-router-dom'; import { signInAndUp } from 'supertokens-web-js/recipe/thirdparty'; import { toast } from '@sd/ui'; @@ -44,8 +45,10 @@ async function 
handleGoogleCallback(navigate: NavigateFunction) { export const Component = () => { const navigate = useNavigate(); + const [query] = useSearchParams(); useEffect(() => { + (window.location as any).__TEMP_URL_PARAMS = query; handleGoogleCallback(navigate); }, []); diff --git a/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts b/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts index f4d16996d..4fa988732 100644 --- a/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts +++ b/interface/app/$libraryId/settings/client/account/handlers/windowHandler.ts @@ -1,3 +1,4 @@ +import { useSearchParams } from 'react-router-dom'; import { WindowHandlerInterface } from 'supertokens-website/utils/windowHandler/types'; /** @@ -12,20 +13,24 @@ export default function getWindowHandler(original: WindowHandlerInterface): Wind location: { ...original.location, getSearch: function () { - const currentURL = window.location.href; - const firstQuestionMarkIndex = currentURL.indexOf('?'); + // First try with react-router-dom's useUrlSearchParams + // eslint-disable-next-line no-restricted-syntax - if (firstQuestionMarkIndex !== -1) { - // Return the query string from the url - let queryString = currentURL.substring(firstQuestionMarkIndex); + const params: URLSearchParams | string = (window.location as any).__TEMP_URL_PARAMS ?? ''; + return params.toString(); + // const firstQuestionMarkIndex = currentURL.indexOf('?'); - // Remove any hash - if (queryString.includes('#')) { - queryString = queryString.split('#')[0] ?? ''; - } + // if (firstQuestionMarkIndex !== -1) { + // // Return the query string from the url + // let queryString = currentURL.substring(firstQuestionMarkIndex); - return queryString; - } + // // Remove any hash + // if (queryString.includes('#')) { + // queryString = queryString.split('#')[0] ?? 
''; + // } + + // // Return the query string from the url + // } return ''; }, diff --git a/interface/hooks/useDeeplinkEventHandler.ts b/interface/hooks/useDeeplinkEventHandler.ts index 16813b05c..c6c879694 100644 --- a/interface/hooks/useDeeplinkEventHandler.ts +++ b/interface/hooks/useDeeplinkEventHandler.ts @@ -10,8 +10,13 @@ export const useDeeplinkEventHandler = () => { const url = e.detail.url; if (!url) return; - - navigate(url); + // If the URL has search params, we need to navigate to the URL with the search params + const [path, search] = url.split('?'); + if (search) { + navigate({ pathname: path, search }); + } else { + navigate(url); + } }; document.addEventListener('deeplink', handler); From 28dfa442d793aeed6ce5db9ebcc5c711bdd66308 Mon Sep 17 00:00:00 2001 From: lynx <141365347+iLynxcat@users.noreply.github.com> Date: Wed, 21 Aug 2024 13:22:50 -0500 Subject: [PATCH 080/218] Use consistent versioning for tauri-plugin-http --- apps/desktop/src-tauri/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/desktop/src-tauri/Cargo.toml b/apps/desktop/src-tauri/Cargo.toml index f7ccf5b4a..888b4045c 100644 --- a/apps/desktop/src-tauri/Cargo.toml +++ b/apps/desktop/src-tauri/Cargo.toml @@ -38,7 +38,7 @@ opener = { version = "0.7.1", features = ["reveal"], default-fea specta-typescript = "=0.0.7" tauri-plugin-deep-link = "=2.0.0-rc.0" tauri-plugin-dialog = "=2.0.0-rc.0" -tauri-plugin-http = "2.0.0-rc.0" +tauri-plugin-http = "=2.0.0-rc.0" tauri-plugin-os = "=2.0.0-rc.0" tauri-plugin-shell = "=2.0.0-rc.0" tauri-plugin-updater = "=2.0.0-rc.0" From 014fcb5b62ec7707e11327aea4d03fc4aca03e9b Mon Sep 17 00:00:00 2001 From: lynx <141365347+iLynxcat@users.noreply.github.com> Date: Wed, 21 Aug 2024 13:23:08 -0500 Subject: [PATCH 081/218] TEMPFIX: remove global fetch reassignment to Tauri fetch --- apps/desktop/src/App.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx index 
db633ccc8..746926714 100644 --- a/apps/desktop/src/App.tsx +++ b/apps/desktop/src/App.tsx @@ -60,7 +60,7 @@ SuperTokens.init({ const startupError = (window as any).__SD_ERROR__ as string | undefined; //Set global fetch to use tauri fetch -globalThis.fetch = fetch; +// globalThis.fetch = fetch; export default function App() { useEffect(() => { From de3fc6b5dd5f8228bfe70bb5124bd1a30255ef29 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Wed, 21 Aug 2024 15:24:42 -0300 Subject: [PATCH 082/218] Setting Cloud Services ALPN --- Cargo.lock | Bin 307847 -> 330747 bytes core/crates/cloud-services/Cargo.toml | 3 ++- .../crates/cloud-services/src/cloud_client.rs | 8 ++++++-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 36284387496cd07d78cd56b393d075af5955f9ee..b684cc553364a63442805814420add6376efc99b 100644 GIT binary patch delta 10480 zcmbVydypN)eQwRn+12jqvAbIBLlTmX772sQuDg3?dU_1nBSKIh4z?bcLdIm((+_E3 zchBnVE)U;Ch&Z-y5fdcFrb&D$Bq#Ru4TMK@os&yd5ULo*b@4^`5vr~$HgFBdHpC#u zaW0M%=bJgZ5|8+w6vgVCGv`cCcmICh@B6-Af9mCx|Ke6X`sSMUYCIZ_()`r=jmOul z{c^DI{`jf&zvnH!-dX+MOO{m0C?ohH-Lf{{wz+cr{TDvgx%lDnSLv6$pl)q0KP77! 
zXIf{e%BWF9l2k|?WtmOGDAFXMp_McZrJ+&~Y2({-C?-V6dx}2+7lsDIqM?OtCZ$Lm6g5#x|DBibyG% zsxS=GED9-45{17+#fllOq;MzuyrJWJKlB%vCL4`0vt!ZPe8Y~F?)eA3YidbklQf|^ z(-?#6SWuBfM955plqOn8MKjEiCK(a2)lzAyEVFSc5}Ib*$uD{X`K8NN=CAFzpuHN6 z7E9L}pe<)R{|y5b|E_V9U#MJDOJx*inwf~Cu@qTM1qs89GC>&8I+AfLWk#`>p`e+J zA`;0cVLT)@%2deRW0!k_`POB<7_9gB(5^==TQS^qvuVsu)TgUktHYr?GwAKE@zih= z=_JECrI96RN)_V?OVfmBl$cB#s>4Wy#t4SF+AIz^klqzs1YUSq&xDN zVBJ!+@NTlc&&mH6$Qn&`m_!Wsi-^rchFcm;$=F0CR&v~sa-$L+#l?6$%7oU$Fr$f1 zOk!Jm@|C=++Nn)r9RE+;?5ewi_Z)xy_^9VD+v*Rv|N9Yt*BI6ZYsEs5@+78-uyI17 zVty*j@FLcs4oMcp1iPI^lE*R8S(Mli8zyy5CI;Obclu+;*L?XCz3%n9yj?YdD6z4n z1iPD9OK2vHii}c%8iN3)TnZ^8CTJ>|K;TiDg%KlI92!~c?wAS&@lJt;lEo z_C3cBfAuGwzM6A)-5O*y6Ui)2sL+(LDB~fEnbJgYi#X$vAU3tg1B@MoLNY^atSr}v zizF2(OLO^^s=NQb%JRYb?D)*Yj2XVTt2#WpUp9;$SNkU>-H}t?+T(|w-Pjj|qu8oF z{?0K)u|Bwes$;CJ36s!ZaWwWqnpB7|M0iOPVN}H;sS*-mYE;J(QN?(Ea{EAjYHvq= zYR_Q)$?yNTL(XEXSu>X({;I4cTp}h}6ekH0p~b2YEE1s@1-l#zfelwW#Wjo-YY{4A zRIHgWG7*%uHoEcBPd)dAEBtH5c#<(1t0>J_#v-8vU?EGwI7zW6HUc`RP*|%KWhth? zh#^UqQk@7L^T-|j0OG&<^q%8;Uite))N(I$d+$50fAI54{@K?C?|sticH0Ijzdk0F z!e&rnDbtGCj7FHWh()R+OD$&xdrvcl%~4Sn0>rXR3X?`!(g;Icmia^Zr(f$l{@ed> zMK~yLkrQ*K@wRLuqiKG0|L(1VXPO{^GKDMv2+=S~Vq{>*73Qcy@(8~!;DNx)u@+S|CSGD8ghm<9ABc|Q0Lj-xaWTv z^!sU=)9pLQNER|}4Yk@TQY0s}jLBF_7Kd>h(xA^@WziHS7Yx+i-o ztGnMZ^5XrScT<48jDWiafF5#gXv{E`ND387Y`J3z!XcV!6SzXsWTJ@H#YOhiNY2>_*`{%o(AFNc}qoLQ<{AuJp z)wZM##G9@+-1Dyih`5l6jU#3Z&q73W0$kuSQ$$MuH9*veV);#EF!c;O9g8BMRLa9d z@T7!qcla)E=@Knx_fN?gN5WvW`}#?*>-_2Z$n2aRQT3@AIoEo~a!;n~(;qX9`nb7u z&VA!6-n!3T?)P?7hY!rlX#i&ZdT*^8Kjp1hIx{gnJu$s+e6Bv@UZmdI=Kp%X_usnS zJ-1(M_j;TE;|lK|2RpFwBgl5@?)Y_Ij`56O&*CgKu|^#rnay|#n97hn2xF!tGKVA) zzLZcSm5|iNS(cbMMICY8Ro>uo%td#WTJ4K(M!Zo0d~%u=%}dZ_X@IbW{gZ!QJ6?e!G_O18H3pVUnYqS|A4B&5aiTB(q859{E}@<-Cnu(F8kxaDm^nZ|!&I*%qsb`wk1jXr^K+&cD;cG4yFPo{Z0r5})VP_RpTc(% zRPCMDdt2O-$0|K-GdIab!>!1@cv(YX35%OBtOGWGwY%V8(9=D)@F>NVVugPCU5`2T z6n1Lzl((!}#Ku%P#_Zhu^hl#M-t2s1pBVxE>~qJy7F^&)uk_k`8uPPrle4%F1KQ#z 
z?#St&ud8?uuw8uLHgb!copL9h^4H%`9mXy9&CZ!=gxTG192l=gLvy(Wv$L#I&y8QhXHCh4r~*YQnwmdtSfyv3%#5^W6LI^}5`t&v-rV z@jnJdO9@9QN@EVDloS*%0s$akkg~;{tOkn(tBNfPjYZj|2`32j#6$(Db4PBj47i?W z{4RI+vtCaX10z6wbr9s+=z4eCpx^86e%k-7vk&u)@`rSkmy17ZV01Prr@O$Y}ipcHb<>X5o)4|ofU_SI+otBV;eZ9d_= z&D#qcrF1lGe*e4P>Av=fX(Wbm-?^_MYpmcf7V;qpNr=Lpfp7{NrNp2dlL&eS@&KgT zLWg0uY?R;ygo8;kXfBd}V|8oD0Q$n6yazG;FTWXdx%fNaaP2$%HSXEJ@!K|MW?I$t z0uwe3ZN{_P=FCE%qgIa3PSq#ffv#YcJNiwp)6L9zgUzp<@qXFv#U+Qu1A*fK3!~7c zQ36f?x(B_Y7}QrRG-NE^LO4)HNg|UdGqDQs8>GQc%JG_9+yqW#CpMjwoaa#4!SySe0UlG$b<#jmqWES2zFeP4D?7 z#dfCH&Xbpc20nQ`!sv;L-_iWJ=YPUqxYoSA;(r=!xAnz8toXEfX}kY-1I5OmRMBSd zI{yW~uYj(%ia`LXd+|Y}6ZVKpRH=w7#XuEMRgF$G&r(US4k^fChE3E23L#E8jYO6r zwM3CjsS^2P&kPLf$@+|$X61-22t13LQ)a4M7H`;J&GCi*1V$Es*yo;mFaYr@qMPLO zW!n7l2LD3O{p$w5FMr^xyT%fhFi2DcZUnKUp(H{f3bM{n_P9+cr$RsongpQ_xeKKN zTL86M2Au1tC1dd2ZAvPt5n~gb09JiTdumF?BNosB3|-V>R<5O4gzfj8YPa5*h;jIEVhx z5UNRn?E$Og6zUTS!eH%eO5ilWhrw{vCUFdJr*Mv%om>1_zhjZ{yEjgF8{7+*c$e2g zYy+f4tOT@W%&|%tK`6=y(n2e#pf(s-Ikc`p8K9_6Rtv%rMi9-Gm9X-$i~TS3EGDwM zW3#{3?YgvOe(WAI93Gm20g^EW0|L}i3Y5h}Fa-?GjNgVa%JNTH(zaQ;ymKt^lQG_Ri<)L5+QI(|x(F{H-D2Y;IVX?r+ zQ#>?LNFdNHWI0PgB^d%dr@EAKz3!gKTjhE_j%_Xl+m7virFp}cziCGRij#N$CAW08|Txd1ew809?ed;HL{EYHoLBpi|#A z4X#{AF3BUY2N;a{+EKl%#FJjC;dJzblwe>T{UYW*fti$79fNO(x~Ja zJQj)Q0bfCxXIUh;ure~JvncQ^wUh&Usfuk>ST3DsW7VCU4Axd=5VWrMg4D05k?G1QSd!Gk}Z;QOxkyTA9M5%#yxc<=aq(FXv`XS-hc6;WiyTPI>e5haC^4~tMYF~JI8EX%r^oD zgVhXU0)9A$-vm=i7km*l2F{#eSbb2n5CBV)n1_@hw1B!!cEJJr;VnJqE!@3OCapzi zjaEKQIeyo|H}Hu&wlnC<@Bh(`8YzMhUJtMdT0Y_lI18Qv(Q9l(mZh(9fR{*c!^oelEk=URp4M@61lEN{H_8cOOpte&`3V~ z%0=$RNBkal)x-YNZ!uKPUKuW*VJmtB8H_eCT!gYhn?fRAl29XKVQCi02)mXtj{2Qi zp1~uBBdK9U3b@Du(= z+)KP&vVcvGZ^`Ic!O&8ll~p zdn!Zu>j!(?k^3sEdm8eV5j8VEp{t6b4RM{sD2)nMc}z8Ahz06%Ja7t|+6WkF zv4Mk8bXHYFm%E3a$J}4ct{Fr9Fb3rg zbs_|-jA5?AL#7CLYybd+nSv&hP+@#*IXo4vu^|XMEQrqOOM~~0LW@0I1B*ZWf`3Vy zd*&q+mH+cmP}xYJwrR9}6a^p2z!5Nq7!D(zIE14P;stLh1mU(tt%@P5isE6R4^dr= z>)i`^b?3`|cOkDjnxB8!XYKip>-zI`SM8|9A!;@3L8%D`frR@6_Gls&YQSC5wTL0- 
z2yg=bY1p1bC=Q|ww^!RVX1NEM@XV|J&Fw|K1u=IUuJrnwdw$~2q1krs6NgXwm!A{k zg=q!3-S2Mwi7!{0w`*u9z2?E6`QPj-#VC8H{ZRNQxHV#sxU4JqH8;^24CVivfb^r6 z#7riT(y0yM%tGPl(8BYC8x^9#f&Pzy0#LBAGO$QcW#lG80P7^Ua=q=|I`_V=;JhuR z_wg>Oxaf?$)%qyd^LcS~c!A8GvtSURB}Oe?IBSbc+={1Xd^Bo8zEw*WjyX1uEDdhz zxH!Y?CMT*`pM`0+J{ZxaaAX^1b{5ug*>D@a*gg4kxVD=QLIW;e7xcPhRj}Xv_%*Os zlpOHCBq{=&1He^E;qs%uPxzvAb4C@);=+S0iYrJgN#X6GJ(M448*tnEgNb@r++`hd!Fa<(kDsXqkz0K}Kf6(DR;=G>b&1-@)>sDjx zj4{VB3NuDnkE2L&$N8 z@`+f4J&alf0~JNmBFDfH;VqQ0Sh+6PSt%kf8jYJD{%CNa*WOan?yo=X%bEnTTLtt% z-egcXuqVI(pkG6dkU)`wS&pU!_=ZtHe+~5t;6~L53qz*G#?Uu{$#P@R?)J3>8{D%t zXm9?`4Z+@?_LeHoH*QCp7;2?3CbULN!l1=KEFKjTgwO`xM0Wx$3D^bjfq5v33BdLN z2%{{aF%jn*e>&uTF&*?ZUz-Xp>!^UX%{uQs%mBrVU=`5jr6P?Yj(mhXLh7P$$|CcD zKSaQ4kfBb|aDjge{{t1B!i-5{ckDrQQD$C*?KJiWu#_*nIjF7bJZIUTIpM8uF@2qK z_`HB>3eFc3xV?A>h3(5|rWY9w^x)H!xjXebtLZuY11m_Sr+XviL9}Sijaac4gFMF)A zaqt~aFfk2hdshHfqd zVJa*RFc_6a1*u4rOh6Dz{(f_y6XUZrR^65@v1r&0oO| zOD;OO;GMmjBft{*-XZ@Y_qk)iiOzEt@_+qXBpZxYR5`R%(eFhvL!B|S8Q?U6DuEe5 z_n@JQvj(tftc-Ce07Vmdfq;o-Vv;Goeu?l?#(|7y7QxI_tscspuL3%Esh8%lzWh1 z00_=Hq`*MreuO!LB!?-4JX)O;9aA*kAz&k!U-rvE@RmOJuh*ak!O9vc#^L2SO^nm! 
zx$cL8Dh5LH6vr>Xf{6lvqXC0*4C03q5{PNYA_1i3g--x&7sco%6%-wiWf915m!&(l zvNGtltq!_f^}|7r8-Ex?Y1y9#oh8dC+@=0UgFiu=F$4)A&A)m)$YHCMZ`{?l!FS0{ z1t5#Bw^ep+MNtVEm>QaWm`)@x4iO1<8VQX|PcjWUSe$6U#2L~9#SEurII9F{1qpzjC3^E{g2ARO z+#4Jbkes1$hod+EuOYYNGzKH>@FySM19SPcS+8$tq4CNzyK%NNo7qrfA}dB%}@A3VT2F`%ZP-=SqXGhSqRy!EN~Up0sKq| z6`adKs}997L3}XSC8dvd{}B3PGaZ%l-K%xD^bd4YzVB5E;pwigR8&n7&|g7u00mKn zn6(jNmt%6VO$=6-LxDqiBcdZ1WGb}?V7QCuMWBwPS&QE9{U>xD&UAb0n+LlpN7kGr zH!rTQtZi=DT)Dj3vDo}^uilTYF50va!*MK}a)7Z0E(H&YA(gO!EaSK}#6}U$IAaA5 z5fT^aloaPt045oW%Te=J*Q{urp#r_w1GDG3ufUJ_*^|Nf&B-e&D+@pW#mdea+!R=w zIGY6*8$}&Q!C>Sen7Blrya4gyTpBvl2~Y|yQ;J~}y0-Am;F<|j8gJPLD%;!LFTaJA z{MPQumgaYMR?c|k`zGH1Pyg(t_ckBDw(_$s_qlzQ<>f@K>5!myPDzGbEnmP8>gOZ)8bbmcdiCvL24 z3Ceqp!O_z=Hex_DVvr%6O~R3_0^^VqX$;cA0Y@3EK>z}pJH=@x_!O3dZJ<87lV66& z!9B;_>$0+|dC7F8ZL^Pt+t-g)vYJJAf}r8fnUXOYKsd|;j)jDXge{Ci9Ci`_Td2dr z3q>ghYs0Y$%o0>9NsHr8gRXuhx-`E#8Z2{nAFFh@$_^;Yqdk77yKk$vv02M2f6?uS ztkvx|8Z!sUE{=#Wftm`1i>4EtEs35@pYOf!cHF@o#P101fQU@EC1u$bO#&~ZDPBmaD`86$wF3?){lgGK zgAFwL=^imBY^+zx9s31ORw`_0t0@XjRvWI|0<#iOX1vHtw{vU#_u2FL@AEv*eX_nk z8s(o=v?XO2)g{HL%_o$ATKjOkR^vEjpyYT^v~_7cAzIjq&p_*b^0Hma-{1$rynt{O zO3+po(Q3`xYH+Ay9Zv1z(;1)*ephEv%hEhr>p)A0mhnqswCcKjRqG$W3)=ZRmw~#h z;L?7anv+`7Y;Gnmxgf-Mq-t_7>%E>8Fp^+|Vrmqa>;itZa z6rkA90AA>UzFaaJ0{*wV5Xn3bXQTNKi&)Tedz`QUSJ=gffz$@H)4AP{OpZih;A6?+ zpx~5L(FEM{sL;of#U$f+Buo4tXwogh$?q1DpMOEvfc$HOjrQh>3EH16tUTZrDS~G5 z#9FS&7mU2TKzswd>t#_PxUEQZnCa(mRJdS=Z~!0LDbfnK_IvRO@R=k=TlA`*YWFVSgt4MCb03X46MQ+QUGn}tFENU4BSiRObiPPmVT$y z>)ZCG+A`s!!!9)N#!PIBjtVO8wyNEw-d*YCzOt$^vbr&w#$LqR7?5i1Yy!{a;aVdV zuLC1ZKwEz@YF;u)0GK8umG{KqE>j~1{alyi(ywet0Nsn}?-5K`dZIQLWH(!&@%7Y6&x<}?Xc)nFy^gPrqm+8rzDvT^Ym39$C3g9x@ z?hh2-{<*CF-;g7|l+&S9@U3+5TP$acw2!5S&-BS&@c^%#m%2xI>Mz+9$y?{83#cy! 
T66l-_465s~Xuh~0U8a8lP<4(+ diff --git a/core/crates/cloud-services/Cargo.toml b/core/crates/cloud-services/Cargo.toml index 9773daec9..8ccb63218 100644 --- a/core/crates/cloud-services/Cargo.toml +++ b/core/crates/cloud-services/Cargo.toml @@ -28,12 +28,13 @@ zeroize = { workspace = true } # External dependencies iroh-base = { version = "0.23.0", features = ["key"] } +iroh-net = "0.23.0" postcard = { version = "1.0.8", features = ["use-std"] } quic-rpc = { version = "0.12.0", features = ["quinn-transport"] } quinn = { package = "iroh-quinn", version = "=0.11.3" } reqwest-middleware = { version = "0.3", features = ["json"] } reqwest-retry = "0.6" -rustls = { version = "0.23", default-features = false, features = ["ring"] } +rustls = { version = "0.23", default-features = false, features = ["brotli", "ring", "std"] } rustls-platform-verifier = "0.3.3" diff --git a/core/crates/cloud-services/src/cloud_client.rs b/core/crates/cloud-services/src/cloud_client.rs index 822e6d0c6..ec29de90f 100644 --- a/core/crates/cloud-services/src/cloud_client.rs +++ b/core/crates/cloud-services/src/cloud_client.rs @@ -1,4 +1,4 @@ -use sd_cloud_schema::{Client, Service}; +use sd_cloud_schema::{Client, Service, ServicesALPN}; use std::{net::SocketAddr, sync::Arc, time::Duration}; @@ -111,7 +111,7 @@ impl CloudServices { .map_err(Error::FailedToExtractApiAddress)? 
.parse::()?; - let crypto_config = { + let mut crypto_config = { #[cfg(debug_assertions)] { #[derive(Debug)] @@ -168,6 +168,10 @@ impl CloudServices { } }; + crypto_config + .alpn_protocols + .extend([ServicesALPN::LATEST.to_vec()]); + let client_config = ClientConfig::new(Arc::new( QuicClientConfig::try_from(crypto_config) .expect("misconfigured TLS client config, this is a bug and should crash"), From ad810290c3ffdf00f67ea45319b143b7dadf3ef6 Mon Sep 17 00:00:00 2001 From: myung03 Date: Wed, 21 Aug 2024 12:44:56 -0700 Subject: [PATCH 083/218] populate overview and library settings with mock devices --- interface/app/$libraryId/overview/index.tsx | 54 +++++++++++-------- .../settings/node/libraries/DeviceItem.tsx | 14 +++-- .../settings/node/libraries/ListItem.tsx | 7 ++- 3 files changed, 47 insertions(+), 28 deletions(-) diff --git a/interface/app/$libraryId/overview/index.tsx b/interface/app/$libraryId/overview/index.tsx index de52b7f68..d920e3f7f 100644 --- a/interface/app/$libraryId/overview/index.tsx +++ b/interface/app/$libraryId/overview/index.tsx @@ -1,5 +1,6 @@ +import { Key } from 'react'; import { Link } from 'react-router-dom'; -import { useBridgeQuery, useLibraryQuery } from '@sd/client'; +import { HardwareModel, useBridgeQuery, useLibraryQuery } from '@sd/client'; import { useLocale, useOperatingSystem } from '~/hooks'; import { useRouteTitle } from '~/hooks/useRouteTitle'; import { hardwareModelToIcon } from '~/util/hardware'; @@ -31,12 +32,15 @@ export const Component = () => { const locationsQuery = useLibraryQuery(['locations.list'], { keepPreviousData: true }); const locations = locationsQuery.data ?? 
[]; - const { data: node } = useBridgeQuery(['nodeState']); + // not sure if we'll need the node state in the future, as it should be returned with the cloud.devices.list query + // const { data: node } = useBridgeQuery(['nodeState']); + const cloudDevicesList = useBridgeQuery(['cloud.devices.list'], { + suspense: true, + retry: false + }); const search = useSearchFromSearchParams({ defaultTarget: 'paths' }); - const stats = useLibraryQuery(['library.statistics']); - return (
@@ -87,25 +91,31 @@ export const Component = () => { - {node && ( - + {cloudDevicesList.data?.map( + ( + device: { + pub_id: Key | null | undefined; + name: string; + os: string; + storage_size: bigint; + used_storage: bigint; + created_at: string; + device_model: string; + }, + index: number + ) => ( + + ) )} - - {/**/} diff --git a/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx b/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx index 2b861dd6d..1fc9fca0e 100644 --- a/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx +++ b/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx @@ -1,27 +1,31 @@ import { Trash } from '@phosphor-icons/react'; +import { iconNames } from '@sd/assets/util'; import { Key } from 'react'; -import { humanizeSize } from '@sd/client'; +import { HardwareModel, humanizeSize } from '@sd/client'; import { Button, Card, Tooltip } from '@sd/ui'; import { Icon } from '~/components'; import { useLocale } from '~/hooks'; +import { hardwareModelToIcon } from '~/util/hardware'; interface DeviceItemProps { pub_id: Key | null | undefined; name: string; os: string; - storage_size: number; + device_model: string; + storage_size: bigint; + used_storage: bigint; created_at: string; } -// unsure where to put pub_id/if this information is important for a user? +// unsure where to put pub_id/if this information is important for a user? 
also have not included used_storage export default (props: DeviceItemProps) => { const { t } = useLocale(); return ( { suspense: true, retry: false }); + console.log(cloudDevicesList); const toggleExpansion = () => { setIsExpanded((prev) => !prev); @@ -96,8 +97,10 @@ export default (props: Props) => { pub_id: Key | null | undefined; name: string; os: string; - storage_size: number; + storage_size: bigint; + used_storage: bigint; created_at: string; + device_model: string; }, index: number ) => ( @@ -115,7 +118,9 @@ export default (props: Props) => { name={device.name} os={device.os} storage_size={device.storage_size} + used_storage={device.used_storage} created_at={device.created_at} + device_model={device.device_model} />
From 5f4d630914a6753755f5ad2aa4b76df90704a46d Mon Sep 17 00:00:00 2001 From: myung03 Date: Wed, 21 Aug 2024 12:48:10 -0700 Subject: [PATCH 084/218] fix devices count on overview --- interface/app/$libraryId/overview/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/interface/app/$libraryId/overview/index.tsx b/interface/app/$libraryId/overview/index.tsx index d920e3f7f..092397095 100644 --- a/interface/app/$libraryId/overview/index.tsx +++ b/interface/app/$libraryId/overview/index.tsx @@ -90,7 +90,7 @@ export const Component = () => { - + {cloudDevicesList.data?.map( ( device: { From ca7b4af13278581c5333975421711feacb29f8dd Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 21 Aug 2024 22:56:53 +0300 Subject: [PATCH 085/218] Only use Tauri Fetch in prod Regular fetch will work fine in dev. We need to use the Tauri fetch in production due to CORS in the API server. --- apps/desktop/src/App.tsx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx index 746926714..b47bb3b81 100644 --- a/apps/desktop/src/App.tsx +++ b/apps/desktop/src/App.tsx @@ -60,7 +60,8 @@ SuperTokens.init({ const startupError = (window as any).__SD_ERROR__ as string | undefined; //Set global fetch to use tauri fetch -// globalThis.fetch = fetch; +// If the build in in production mode, we need to set the global fetch to use the tauri fetch +if (import.meta.env.DEV === false) globalThis.fetch = fetch; export default function App() { useEffect(() => { From fc8d1b0dc4c2f38b431980d35c108c2787c25b23 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Thu, 22 Aug 2024 16:37:20 +0300 Subject: [PATCH 086/218] Fix plausible --- apps/desktop/src-tauri/capabilities/default.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/apps/desktop/src-tauri/capabilities/default.json 
b/apps/desktop/src-tauri/capabilities/default.json index 79b0ef88b..a06855f57 100644 --- a/apps/desktop/src-tauri/capabilities/default.json +++ b/apps/desktop/src-tauri/capabilities/default.json @@ -37,6 +37,9 @@ }, { "url": "http://localhost:9420/" + }, + { + "url": "https://plausible.io/" } ] } From feac668c017a5fb1e57c705f9f2c14e5db61b144 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Fri, 23 Aug 2024 15:43:10 -0300 Subject: [PATCH 087/218] Cloud P2P abstraction --- Cargo.lock | Bin 330747 -> 329735 bytes core/crates/cloud-services/Cargo.toml | 18 +- .../crates/cloud-services/src/cloud_client.rs | 152 +++-- core/crates/cloud-services/src/cloud_p2p.rs | 1 - .../cloud-services/src/cloud_p2p/mod.rs | 146 +++++ .../cloud-services/src/cloud_p2p/runner.rs | 524 ++++++++++++++++++ core/crates/cloud-services/src/error.rs | 11 + .../src/key_manager/key_store.rs | 54 +- .../cloud-services/src/key_manager/mod.rs | 32 +- core/crates/cloud-services/src/lib.rs | 1 + .../cloud-services/src/token_refresher.rs | 76 ++- crates/crypto/src/cloud/secret_key.rs | 30 + crates/crypto/src/error.rs | 2 + 13 files changed, 976 insertions(+), 71 deletions(-) delete mode 100644 core/crates/cloud-services/src/cloud_p2p.rs create mode 100644 core/crates/cloud-services/src/cloud_p2p/mod.rs create mode 100644 core/crates/cloud-services/src/cloud_p2p/runner.rs diff --git a/Cargo.lock b/Cargo.lock index b684cc553364a63442805814420add6376efc99b..4dfaf9c523cf5de7383d3ad2b0cf4b67f963dd4b 100644 GIT binary patch delta 411 zcmZ9HKS&!<9LIU@cMWoB6HQV>+a)?Eh=F^Td%1U$P-)#-2jVo|3)Wq`+mNEhVK<-8rzwTeKOmI(McT-x+I;N z)N`Xt(eh$`rMgru=c_VQtv2}%>3-vRf+HHd#j`yU|91CY$4~t|Halb*dtEY&pZB1o z{{1FtqGtY(A=0QRqH2nEKH&cpEln|I1V-R_fxTe4MaOas(`QTy$6(wxEiQ})aG8+8 zcRV2-X}W=QSb+<-=-PO4PO=y#H62fHXcJG0bXfBv>8sW)9goI5E^LDbj_@p73X@rd zK5@e=Se%(sSVhMUxQ{DY$g0Z_{>Cu(k;ZYJL+nLmHS%37NALy@Yp|$4eh=Nss*h|9 zmhsm$N#f2LM0ora;`lOzBo4+v$J{1OU~e4~c(eui##|lLTwVK-$H6f=ptPUbJNhWG WFDEeQkk=~M)v8ZS^{TC1e7Oge-GqDq delta 779 
zcmaix%}Z557{-}1S2F{%G8I4KCP5^6J9Fma%wT9DR;xa4w1^OyGan?F*X-&d3W`8i znGp0wK?D(GD0($Gk*gN%TJ#@C1nyc@i)v9vgNO)PzVB~%-sgE{X0G8oG~9TB&Mh3t zhXoQAct=-lzGGyfbg}#L8c2-)z#3XM`1^^!fsNi1Yt@29*ZX3_Ck< zH9V-G18`{>4Z!gwR1a;(f?^-@ma9-xr3o|M5Fwq{%u2$3dSP|-36o)dVniFLibwVj_gp;u*oJ&qYzr-!~ zrcpJl1=)$&OdW0`+zxYPybp3152TraHUzy(XghSSWcY0rF810`5S4JnV&bi(ifSXZ zFd>V*U?GVKA<7UV6GnV&BNfFqQ6!FqH8wGK-@j{u`oT=`S~ccy_-xQV)3b`wp4Ff- z2)+G5@u-YVBDu(lPqN0P)7B}?tz%X(&bU^_IWIy%RYGYTdhUphq~{TLNtR5(>rX)* z9(@gFt2Y0iz^QfA1V6%D4YZy|t?7kt!S0EJlvwJCF+3|tMJD30rHS!Mkt|H4MU+{H zT>>{da*gTpQT(bJ?yLp7;muT5*8CM*+_oiNpiouDC3t)b?SlJdJOrbkgF2Y&3L4?+ x2HHO}Jc-lc$=rN5oLtXRv!}U3u(62h(i2Z|AJEn^&6cadZ{C@h=Ht diff --git a/core/crates/cloud-services/Cargo.toml b/core/crates/cloud-services/Cargo.toml index 8ccb63218..cb41b00f6 100644 --- a/core/crates/cloud-services/Cargo.toml +++ b/core/crates/cloud-services/Cargo.toml @@ -5,12 +5,13 @@ version = "0.1.0" edition = "2021" [dependencies] -# First party dependencies +# Spacedrive Sub-crates sd-cloud-schema = { workspace = true } sd-crypto = { path = "../../../crates/crypto" } sd-utils = { path = "../../../crates/utils" } # Workspace dependencies +async-stream = { workspace = true } base64 = { workspace = true } blake3 = { workspace = true } chrono = { workspace = true, features = ["serde"] } @@ -21,22 +22,33 @@ reqwest = { workspace = true, features = ["json", "native-tls-vendor rspc = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } +specta = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["sync", "time"] } +tokio-stream = { workspace = true } tracing = { workspace = true } +uuid = { workspace = true, features = ["serde"] } zeroize = { workspace = true } # External dependencies +anyhow = "1.0.86" iroh-base = { version = "0.23.0", features = ["key"] } iroh-net = "0.23.0" +paste = "=1.0.15" postcard = { version = "1.0.8", features = ["use-std"] } -quic-rpc = { version = 
"0.12.0", features = ["quinn-transport"] } -quinn = { package = "iroh-quinn", version = "=0.11.3" } +quic-rpc = { version = "0.11.0", features = ["quinn-transport"] } +quinn = { package = "iroh-quinn", version = "=0.10.5" } reqwest-middleware = { version = "0.3", features = ["json"] } reqwest-retry = "0.6" rustls = { version = "0.23", default-features = false, features = ["brotli", "ring", "std"] } rustls-platform-verifier = "0.3.3" +[dependencies.rustls-old] +default-features = false +features = ["dangerous_configuration", "logging", "quic"] +package = "rustls" +version = "0.21.12" # Update blocked by quic-rpc + [dev-dependencies] tokio = { workspace = true, features = ["rt", "sync", "time"] } diff --git a/core/crates/cloud-services/src/cloud_client.rs b/core/crates/cloud-services/src/cloud_client.rs index ec29de90f..848facd76 100644 --- a/core/crates/cloud-services/src/cloud_client.rs +++ b/core/crates/cloud-services/src/cloud_client.rs @@ -1,16 +1,21 @@ +use crate::cloud_p2p::{NotifyUser, UserResponse}; + use sd_cloud_schema::{Client, Service, ServicesALPN}; use std::{net::SocketAddr, sync::Arc, time::Duration}; +use futures::Stream; use quic_rpc::{transport::quinn::QuinnConnection, RpcClient}; -use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Endpoint}; +use quinn::{ClientConfig, Endpoint}; use reqwest::{IntoUrl, Url}; use reqwest_middleware::{reqwest, ClientBuilder, ClientWithMiddleware}; use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; use tokio::sync::RwLock; use tracing::warn; -use super::{error::Error, key_manager::KeyManager, token_refresher::TokenRefresher}; +use super::{ + cloud_p2p::CloudP2P, error::Error, key_manager::KeyManager, token_refresher::TokenRefresher, +}; #[derive(Debug, Default)] enum ClientState { @@ -35,6 +40,11 @@ pub struct CloudServices { domain_name: String, pub token_refresher: TokenRefresher, key_manager: Arc>>>, + cloud_p2p: Arc>>>, + pub(crate) notify_user_tx: flume::Sender, + 
notify_user_rx: flume::Receiver, + user_response_tx: flume::Sender, + pub(crate) user_response_rx: flume::Receiver, } impl CloudServices { @@ -81,6 +91,9 @@ impl CloudServices { } }; + let (notify_user_tx, notify_user_rx) = flume::bounded(16); + let (user_response_tx, user_response_rx) = flume::bounded(16); + Ok(Self { client_state, token_refresher: TokenRefresher::new( @@ -91,9 +104,29 @@ impl CloudServices { http_client, domain_name, key_manager: Arc::default(), + cloud_p2p: Arc::default(), + notify_user_tx, + notify_user_rx, + user_response_tx, + user_response_rx, }) } + pub fn stream_user_notifications(&self) -> impl Stream + '_ { + self.notify_user_rx.stream() + } + + /// Send back a user response to the Cloud P2P actor + /// + /// # Panics + /// Will panic if the channel is closed, which should never happen + pub async fn send_user_response(&self, response: UserResponse) { + self.user_response_tx + .send_async(response) + .await + .expect("user response channel must never close"); + } + async fn init_client( http_client: &ClientWithMiddleware, get_cloud_api_address: Url, @@ -114,56 +147,82 @@ impl CloudServices { let mut crypto_config = { #[cfg(debug_assertions)] { - #[derive(Debug)] + // FIXME(@fogodev): use this commented code when we can update to quic-rpc 0.12.0 or newer + // #[derive(Debug)] + // struct SkipServerVerification; + // impl rustls::client::danger::ServerCertVerifier for SkipServerVerification { + // fn verify_server_cert( + // &self, + // _end_entity: &rustls::pki_types::CertificateDer<'_>, + // _intermediates: &[rustls::pki_types::CertificateDer<'_>], + // _server_name: &rustls::pki_types::ServerName<'_>, + // _ocsp_response: &[u8], + // _now: rustls::pki_types::UnixTime, + // ) -> Result { + // Ok(rustls::client::danger::ServerCertVerified::assertion()) + // } + + // fn verify_tls12_signature( + // &self, + // _message: &[u8], + // _cert: &rustls::pki_types::CertificateDer<'_>, + // _dss: &rustls::DigitallySignedStruct, + // ) -> Result { 
+ // Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + // } + + // fn verify_tls13_signature( + // &self, + // _message: &[u8], + // _cert: &rustls::pki_types::CertificateDer<'_>, + // _dss: &rustls::DigitallySignedStruct, + // ) -> Result { + // Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + // } + + // fn supported_verify_schemes(&self) -> Vec { + // vec![] + // } + // } + + // rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13]) + // .dangerous() + // .with_custom_certificate_verifier(Arc::new(SkipServerVerification)) + // .with_no_client_auth() + struct SkipServerVerification; - impl rustls::client::danger::ServerCertVerifier for SkipServerVerification { + impl rustls_old::client::ServerCertVerifier for SkipServerVerification { fn verify_server_cert( &self, - _end_entity: &rustls::pki_types::CertificateDer<'_>, - _intermediates: &[rustls::pki_types::CertificateDer<'_>], - _server_name: &rustls::pki_types::ServerName<'_>, + _end_entity: &rustls_old::Certificate, + _intermediates: &[rustls_old::Certificate], + _server_name: &rustls_old::ServerName, + _scts: &mut dyn Iterator, _ocsp_response: &[u8], - _now: rustls::pki_types::UnixTime, - ) -> Result { - Ok(rustls::client::danger::ServerCertVerified::assertion()) - } - - fn verify_tls12_signature( - &self, - _message: &[u8], - _cert: &rustls::pki_types::CertificateDer<'_>, - _dss: &rustls::DigitallySignedStruct, - ) -> Result { - Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) - } - - fn verify_tls13_signature( - &self, - _message: &[u8], - _cert: &rustls::pki_types::CertificateDer<'_>, - _dss: &rustls::DigitallySignedStruct, - ) -> Result { - Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) - } - - fn supported_verify_schemes(&self) -> Vec { - vec![] + _now: std::time::SystemTime, + ) -> Result { + Ok(rustls_old::client::ServerCertVerified::assertion()) } } - 
rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13]) - .dangerous() + rustls_old::ClientConfig::builder() + .with_safe_defaults() .with_custom_certificate_verifier(Arc::new(SkipServerVerification)) .with_no_client_auth() } #[cfg(not(debug_assertions))] { - rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13]) - .dangerous() - .with_custom_certificate_verifier(Arc::new( - rustls_platform_verifier::Verifier::new(), - )) + // FIXME(@fogodev): use this commented code when we can update to quic-rpc 0.12.0 or newer + // rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13]) + // .dangerous() + // .with_custom_certificate_verifier(Arc::new( + // rustls_platform_verifier::Verifier::new(), + // )) + // .with_no_client_auth() + + rustls_old::ClientConfig::builder() + .with_safe_defaults() .with_no_client_auth() } }; @@ -172,10 +231,13 @@ impl CloudServices { .alpn_protocols .extend([ServicesALPN::LATEST.to_vec()]); - let client_config = ClientConfig::new(Arc::new( - QuicClientConfig::try_from(crypto_config) - .expect("misconfigured TLS client config, this is a bug and should crash"), - )); + // FIXME(@fogodev): use this commented code when we can update to quic-rpc 0.12.0 or newer + // let client_config = ClientConfig::new(Arc::new( + // QuicClientConfig::try_from(crypto_config) + // .expect("misconfigured TLS client config, this is a bug and should crash"), + // )); + + let client_config = ClientConfig::new(Arc::new(crypto_config)); let mut endpoint = Endpoint::client("[::]:0".parse().expect("hardcoded address")) .map_err(Error::FailedToCreateEndpoint)?; @@ -229,6 +291,10 @@ impl CloudServices { Ok(Arc::clone(key_manager)) }) } + + pub async fn set_cloud_p2p(&self, cloud_p2p: CloudP2P) { + self.cloud_p2p.write().await.replace(Arc::new(cloud_p2p)); + } } #[cfg(test)] diff --git a/core/crates/cloud-services/src/cloud_p2p.rs b/core/crates/cloud-services/src/cloud_p2p.rs deleted file mode 100644 
index 8b1378917..000000000 --- a/core/crates/cloud-services/src/cloud_p2p.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/core/crates/cloud-services/src/cloud_p2p/mod.rs b/core/crates/cloud-services/src/cloud_p2p/mod.rs new file mode 100644 index 000000000..edf3502c3 --- /dev/null +++ b/core/crates/cloud-services/src/cloud_p2p/mod.rs @@ -0,0 +1,146 @@ +use crate::{CloudServices, Error}; + +use sd_cloud_schema::{ + cloud_p2p::{authorize_new_device_in_sync_group, CloudP2PALPN, CloudP2PError}, + devices::{self, Device}, + sync::groups::GroupWithLibraryAndDevices, +}; +use sd_crypto::{CryptoRng, SeedableRng}; + +use iroh_base::key::SecretKey as IrohSecretKey; +use iroh_net::{ + discovery::dns::DnsDiscovery, + relay::{RelayMap, RelayMode, RelayUrl}, + Endpoint, NodeId, +}; +use serde::{Deserialize, Serialize}; +use tokio::spawn; +use tracing::error; + +mod runner; + +use runner::Runner; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, specta::Type)] +#[serde(transparent)] +#[repr(transparent)] +#[specta(rename = "CloudP2PTicket")] +pub struct Ticket(u64); + +#[derive(Debug, Serialize, specta::Type)] +#[serde(tag = "kind", content = "data")] +pub enum NotifyUser { + ReceivedJoinSyncGroupRequest { + ticket: Ticket, + asking_device: Device, + sync_group: GroupWithLibraryAndDevices, + }, + ReceivedJoinSyncGroupResponse { + response: JoinSyncGroupResponse, + sync_group: GroupWithLibraryAndDevices, + }, + SendingJoinSyncGroupResponseError { + error: JoinSyncGroupError, + sync_group: GroupWithLibraryAndDevices, + }, + TimedOutJoinRequest { + device: Device, + succeeded: bool, + }, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, specta::Type)] +pub enum JoinSyncGroupError { + Communication, + InternalServer, + Auth, +} + +#[derive(Debug, Serialize, specta::Type)] +pub enum JoinSyncGroupResponse { + Accepted { authorizor_device: Device }, + Failed(CloudP2PError), + CriticalError, +} + +#[derive(Debug, Deserialize, 
specta::Type)] +#[serde(tag = "kind", content = "data")] +pub enum UserResponse { + AcceptDeviceInSyncGroup { ticket: Ticket, accepted: bool }, +} +#[derive(Debug, Clone)] +pub struct CloudP2P { + msgs_tx: flume::Sender, +} + +impl CloudP2P { + pub async fn new( + current_device_pub_id: devices::PubId, + cloud_services: CloudServices, + mut rng: CryptoRng, + iroh_secret_key: IrohSecretKey, + dns_origin_domain: String, + relay_url: RelayUrl, + ) -> Result { + let endpoint = Endpoint::builder() + .alpns(vec![CloudP2PALPN::LATEST.to_vec()]) + .secret_key(iroh_secret_key) + .relay_mode(RelayMode::Custom(RelayMap::from_url(relay_url))) + .discovery(Box::new(DnsDiscovery::new(dns_origin_domain))) + // Using 0 as port will bind to a random available port chosen by the OS. + .bind(0) + .await + .map_err(Error::CreateCloudP2PEndpoint)?; + + let (msgs_tx, msgs_rx) = flume::bounded(16); + + spawn({ + let runner = Runner::new(current_device_pub_id, &cloud_services, endpoint).await?; + let user_response_rx = cloud_services.user_response_rx.clone(); + + async move { + // All cloned runners share a single state with internal mutability + while let Err(e) = spawn(runner.clone().run( + msgs_rx.clone(), + user_response_rx.clone(), + CryptoRng::from_seed(rng.generate_fixed()), + )) + .await + { + if e.is_panic() { + error!("Cloud P2P runner panicked"); + } else { + break; + } + } + } + }); + + Ok(Self { msgs_tx }) + } + + /// Requests the device with the given connection ID asking for permission to the current device + /// to join the sync group + /// + /// # Panics + /// Will panic if the actor channel is closed, which should never happen + pub async fn request_join_sync_group( + &self, + devices_connection_ids: Vec, + req: authorize_new_device_in_sync_group::Request, + ) { + self.msgs_tx + .send_async(runner::Message::Request(runner::Request::JoinSyncGroup { + req, + devices_connection_ids, + })) + .await + .expect("Channel closed"); + } +} + +impl Drop for CloudP2P { + fn 
drop(&mut self) { + self.msgs_tx.send(runner::Message::Stop).ok(); + } +} diff --git a/core/crates/cloud-services/src/cloud_p2p/runner.rs b/core/crates/cloud-services/src/cloud_p2p/runner.rs new file mode 100644 index 000000000..df2be63d5 --- /dev/null +++ b/core/crates/cloud-services/src/cloud_p2p/runner.rs @@ -0,0 +1,524 @@ +use crate::{ + cloud_p2p::JoinSyncGroupError, token_refresher::TokenRefresher, CloudServices, Error, + KeyManager, +}; + +use sd_cloud_schema::{ + cloud_p2p::{ + self, authorize_new_device_in_sync_group, Client, CloudP2PALPN, CloudP2PError, Service, + }, + devices::{self, Device}, + sync::groups, +}; +use sd_crypto::{CryptoRng, SeedableRng}; + +use std::{ + collections::HashMap, + pin::pin, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; + +use flume::SendError; +use futures::StreamExt; +use futures_concurrency::stream::Merge; +use iroh_net::{Endpoint, NodeId}; +use quic_rpc::{ + server::{Accepting, RpcChannel, RpcServerError}, + transport::quinn::{QuinnConnection, QuinnServerEndpoint}, + RpcClient, RpcServer, +}; +use tokio::{ + spawn, + sync::Mutex, + task::JoinHandle, + time::{interval, Instant, MissedTickBehavior}, +}; +use tokio_stream::wrappers::IntervalStream; +use tracing::{error, warn}; + +use super::{JoinSyncGroupResponse, NotifyUser, Ticket, UserResponse}; + +const TEN_SECONDS: Duration = Duration::from_secs(10); +const FIVE_MINUTES: Duration = Duration::from_secs(60 * 5); + +#[allow(clippy::large_enum_variant)] // Ignoring because the enum Stop variant will only happen a single time ever +pub enum Message { + Request(Request), + Stop, +} + +pub enum Request { + JoinSyncGroup { + req: authorize_new_device_in_sync_group::Request, + devices_connection_ids: Vec, + }, +} + +/// We use internal mutability here, but don't worry because there will always be a single +/// [`Runner`] running at a time, so the lock is never contended +pub struct Runner { + current_device_pub_id: devices::PubId, + token_refresher: 
TokenRefresher, + cloud_services: sd_cloud_schema::Client< + QuinnConnection, + sd_cloud_schema::Service, + >, + endpoint: Endpoint, + key_manager: Arc, + ticketer: Arc, + notify_user_tx: flume::Sender, + pending_sync_group_join_requests: Arc>>, + timeout_checker_buffer: Vec<(Ticket, PendingSyncGroupJoin)>, +} + +impl Clone for Runner { + fn clone(&self) -> Self { + Self { + current_device_pub_id: self.current_device_pub_id, + token_refresher: self.token_refresher.clone(), + cloud_services: self.cloud_services.clone(), + endpoint: self.endpoint.clone(), + key_manager: Arc::clone(&self.key_manager), + ticketer: Arc::clone(&self.ticketer), + notify_user_tx: self.notify_user_tx.clone(), + pending_sync_group_join_requests: Arc::clone(&self.pending_sync_group_join_requests), + // This one is a temporary buffer only used for timeout checker + timeout_checker_buffer: vec![], + } + } +} + +struct PendingSyncGroupJoin { + channel: RpcChannel>, + request: authorize_new_device_in_sync_group::Request, + this_device: Device, + since: Instant, +} + +impl Runner { + pub async fn new( + current_device_pub_id: devices::PubId, + cloud_services: &CloudServices, + endpoint: Endpoint, + ) -> Result { + Ok(Self { + current_device_pub_id, + token_refresher: cloud_services.token_refresher.clone(), + cloud_services: cloud_services.client().await?, + endpoint, + key_manager: cloud_services.key_manager().await?, + ticketer: Arc::default(), + notify_user_tx: cloud_services.notify_user_tx.clone(), + pending_sync_group_join_requests: Arc::default(), + timeout_checker_buffer: vec![], + }) + } + + pub async fn run( + mut self, + msgs_rx: flume::Receiver, + user_response_rx: flume::Receiver, + mut rng: CryptoRng, + ) { + // Ignoring because this is only used internally and I think that boxing will be more expensive than wasting + // some extra bytes for smaller variants + #[allow(clippy::large_enum_variant)] + enum StreamMessage { + AcceptResult( + Result< + Accepting>, + RpcServerError>, + >, + 
), + Message(Message), + UserResponse(UserResponse), + Tick, + } + + let mut ticker = interval(TEN_SECONDS); + ticker.set_missed_tick_behavior(MissedTickBehavior::Skip); + + // FIXME(@fogodev): Update this function to use iroh-net transport instead of quinn + // when it's implemented + let (server, server_handle) = setup_server_endpoint(self.endpoint.clone()); + + let mut msg_stream = pin!(( + async_stream::stream! { + loop { + yield StreamMessage::AcceptResult(server.accept().await); + } + }, + msgs_rx.stream().map(StreamMessage::Message), + user_response_rx.stream().map(StreamMessage::UserResponse), + IntervalStream::new(ticker).map(|_| StreamMessage::Tick), + ) + .merge()); + + while let Some(msg) = msg_stream.next().await { + match msg { + StreamMessage::AcceptResult(Ok(accepting)) => { + let Ok((request, channel)) = accepting.read_first().await.map_err(|e| { + error!(?e, "Failed to read first request from a new connection;"); + }) else { + continue; + }; + + self.handle_request(request, channel).await; + } + + StreamMessage::AcceptResult(Err(e)) => { + // TODO(@fogodev): Maybe report this error to the user on a toast? 
+ error!(?e, "Error accepting connection;"); + } + + StreamMessage::Message(Message::Request(Request::JoinSyncGroup { + req, + devices_connection_ids, + })) => self.dispatch_join_requests(req, devices_connection_ids, &mut rng), + + StreamMessage::UserResponse(UserResponse::AcceptDeviceInSyncGroup { + ticket, + accepted, + }) => { + self.handle_join_response(ticket, accepted).await; + } + + StreamMessage::Tick => self.tick().await, + + StreamMessage::Message(Message::Stop) => { + server_handle.abort(); + break; + } + } + } + } + + fn dispatch_join_requests( + &self, + req: authorize_new_device_in_sync_group::Request, + devices_connection_ids: Vec, + rng: &mut CryptoRng, + ) { + async fn inner( + key_manager: Arc, + endpoint: Endpoint, + mut rng: CryptoRng, + req: authorize_new_device_in_sync_group::Request, + devices_connection_ids: Vec, + ) -> Result { + let group_pub_id = req.sync_group.pub_id; + loop { + let client = + match connect_to_first_available_client(&endpoint, &devices_connection_ids) + .await + { + Ok(client) => client, + Err(e) => { + return Ok(JoinSyncGroupResponse::Failed(e)); + } + }; + + match client + .authorize_new_device_in_sync_group(req.clone()) + .await? 
+ { + Ok(authorize_new_device_in_sync_group::Response { + authorizor_device, + keys, + }) => { + key_manager + .add_many_keys( + group_pub_id, + keys.into_iter().map(|key| { + key.as_slice() + .try_into() + .expect("critical error, backend has invalid secret keys") + }), + &mut rng, + ) + .await?; + return Ok(JoinSyncGroupResponse::Accepted { authorizor_device }); + } + // In case of timeout, we will try again + Err(CloudP2PError::TimedOut) => continue, + Err(e) => return Ok(JoinSyncGroupResponse::Failed(e)), + } + } + } + + spawn({ + let endpoint = self.endpoint.clone(); + let notify_user_tx = self.notify_user_tx.clone(); + let key_manager = Arc::clone(&self.key_manager); + let rng = CryptoRng::from_seed(rng.generate_fixed()); + async move { + let sync_group = req.sync_group.clone(); + + if let Err(SendError(response)) = notify_user_tx + .send_async(NotifyUser::ReceivedJoinSyncGroupResponse { + response: inner(key_manager, endpoint, rng, req, devices_connection_ids) + .await + .unwrap_or_else(|e| { + error!( + ?e, + "Failed to issue authorize new device in sync group request;" + ); + JoinSyncGroupResponse::CriticalError + }), + sync_group, + }) + .await + { + error!(?response, "Failed to send response to user;"); + } + } + }); + } + + async fn handle_request( + &self, + request: cloud_p2p::Request, + channel: RpcChannel>, + ) { + match request { + cloud_p2p::Request::AuthorizeNewDeviceInSyncGroup( + authorize_new_device_in_sync_group::Request { + sync_group, + asking_device, + }, + ) => { + let ticket = Ticket(self.ticketer.fetch_add(1, Ordering::Relaxed)); + let this_device = sync_group + .devices + .iter() + .find(|device| device.pub_id == self.current_device_pub_id) + .expect( + "current device must be in the sync group, otherwise we wouldn't be here", + ) + .clone(); + + self.notify_user_tx + .send_async(NotifyUser::ReceivedJoinSyncGroupRequest { + ticket, + asking_device: asking_device.clone(), + sync_group: sync_group.clone(), + }) + .await + 
.expect("notify_user_tx must never closes!"); + + self.pending_sync_group_join_requests.lock().await.insert( + ticket, + PendingSyncGroupJoin { + channel, + request: authorize_new_device_in_sync_group::Request { + sync_group, + asking_device, + }, + this_device, + since: Instant::now(), + }, + ); + } + } + } + + async fn handle_join_response(&self, ticket: Ticket, accepted: bool) { + let Some(PendingSyncGroupJoin { + channel, + request, + this_device, + .. + }) = self + .pending_sync_group_join_requests + .lock() + .await + .remove(&ticket) + else { + warn!("Received join response for unknown ticket; We probably timed out this request already"); + return; + }; + + let sync_group = request.sync_group.clone(); + let asking_device_pub_id = request.asking_device.pub_id; + + let response = if accepted { + Ok(authorize_new_device_in_sync_group::Response { + authorizor_device: this_device, + keys: self + .key_manager + .get_group_keys(request.sync_group.pub_id) + .await + .into_iter() + .map(Into::into) + .collect(), + }) + } else { + Err(CloudP2PError::Rejected) + }; + + if let Err(e) = channel + .rpc(request, (), |(), _req| async move { response }) + .await + { + error!(?e, "Failed to send response to user;"); + self.notify_join_error(sync_group, JoinSyncGroupError::Communication) + .await; + + return; + } + + if accepted { + let Ok(access_token) = self + .token_refresher + .get_access_token() + .await + .map_err(|e| error!(?e, "Failed to get access token;")) + else { + self.notify_join_error(sync_group, JoinSyncGroupError::Auth) + .await; + return; + }; + + match self + .cloud_services + .sync() + .groups() + .reply_join_request(groups::reply_join_request::Request { + access_token, + group_pub_id: sync_group.pub_id, + authorized_device_pub_id: asking_device_pub_id, + authorizor_device_pub_id: self.current_device_pub_id, + }) + .await + { + Ok(Ok(groups::reply_join_request::Response)) => { + // Everything is Awesome! 
+ } + Ok(Err(e)) => { + error!(?e, "Failed to reply to join request"); + self.notify_join_error(sync_group, JoinSyncGroupError::InternalServer) + .await; + } + Err(e) => { + error!(?e, "Failed to send reply to join request"); + self.notify_join_error(sync_group, JoinSyncGroupError::Communication) + .await; + } + } + } + } + + async fn notify_join_error( + &self, + sync_group: groups::GroupWithLibraryAndDevices, + error: JoinSyncGroupError, + ) { + self.notify_user_tx + .send_async(NotifyUser::SendingJoinSyncGroupResponseError { error, sync_group }) + .await + .expect("notify_user_tx must never closes!"); + } + + async fn tick(&mut self) { + self.timeout_checker_buffer.clear(); + + let mut pending_sync_group_join_requests = + self.pending_sync_group_join_requests.lock().await; + + for (ticket, pending_sync_group_join) in pending_sync_group_join_requests.drain() { + if pending_sync_group_join.since.elapsed() > FIVE_MINUTES { + let PendingSyncGroupJoin { + channel, request, .. + } = pending_sync_group_join; + + let asking_device = request.asking_device.clone(); + + let notify_message = match channel + .rpc(request, (), |(), _req| async move { + Err(CloudP2PError::TimedOut) + }) + .await + { + Ok(()) => NotifyUser::TimedOutJoinRequest { + device: asking_device, + succeeded: true, + }, + Err(e) => { + error!(?e, "Failed to send timed out response to user;"); + NotifyUser::TimedOutJoinRequest { + device: asking_device, + succeeded: false, + } + } + }; + + self.notify_user_tx + .send_async(notify_message) + .await + .expect("notify_user_tx must never closes!"); + } else { + self.timeout_checker_buffer + .push((ticket, pending_sync_group_join)); + } + } + + pending_sync_group_join_requests.extend(self.timeout_checker_buffer.drain(..)); + } +} + +async fn connect_to_first_available_client( + endpoint: &Endpoint, + devices_connection_ids: &[NodeId], +) -> Result, Service>, CloudP2PError> { + for device_connection_id in devices_connection_ids { + if let Ok(connection) = 
endpoint + .connect_by_node_id(*device_connection_id, CloudP2PALPN::LATEST) + .await + .map_err(|e| error!(?e, "Failed to connect to authorizor device candidate")) + { + return Ok(Client::new(RpcClient::new( + QuinnConnection::::from_connection(connection), + ))); + } + } + + Err(CloudP2PError::UnableToConnect) +} + +fn setup_server_endpoint( + endpoint: Endpoint, +) -> ( + RpcServer>, + JoinHandle<()>, +) { + let local_addr = { + let (ipv4_addr, maybe_ipv6_addr) = endpoint.bound_sockets(); + // Trying to give preference to IPv6 addresses because it's 2024 + maybe_ipv6_addr.unwrap_or(ipv4_addr) + }; + + let (connections_tx, connections_rx) = flume::bounded(16); + + ( + RpcServer::new(QuinnServerEndpoint::::handle_connections( + connections_rx, + local_addr, + )), + spawn(async move { + while let Some(connecting) = endpoint.accept().await { + if let Ok(connection) = connecting.await.map_err(|e| { + warn!(?e, "Cloud P2P failed to accept connection"); + }) { + if connections_tx.send_async(connection).await.is_err() { + warn!("Connection receiver dropped"); + break; + } + } + } + }), + ) +} diff --git a/core/crates/cloud-services/src/error.rs b/core/crates/cloud-services/src/error.rs index d76d3f349..07eefc0db 100644 --- a/core/crates/cloud-services/src/error.rs +++ b/core/crates/cloud-services/src/error.rs @@ -1,7 +1,10 @@ +use sd_cloud_schema::cloud_p2p::Service; use sd_utils::error::FileIOError; use std::{io, net::AddrParseError}; +use quic_rpc::transport::quinn::QuinnConnection; + #[derive(thiserror::Error, Debug)] pub enum Error { #[error("Couldn't parse Cloud Services API address URL: {0}")] @@ -48,6 +51,14 @@ pub enum Error { }, #[error("Key manager not initialized")] KeyManagerNotInitialized, + + // Cloud P2P errors + #[error("Failed to create Cloud P2P endpoint: {0}")] + CreateCloudP2PEndpoint(anyhow::Error), + #[error("Failed to connect to Cloud P2P node: {0}")] + ConnectToCloudP2PNode(anyhow::Error), + #[error("Communication error with Cloud P2P node: {0}")] 
+ CloudP2PRpcCommunication(#[from] quic_rpc::pattern::rpc::Error>), } #[derive(thiserror::Error, Debug)] diff --git a/core/crates/cloud-services/src/key_manager/key_store.rs b/core/crates/cloud-services/src/key_manager/key_store.rs index 1d80f42c5..8f9d16c1b 100644 --- a/core/crates/cloud-services/src/key_manager/key_store.rs +++ b/core/crates/cloud-services/src/key_manager/key_store.rs @@ -1,6 +1,6 @@ use crate::Error; -use sd_cloud_schema::sync::KeyHash; +use sd_cloud_schema::sync::{groups, KeyHash}; use sd_crypto::{ cloud::{decrypt, encrypt, secret_key::SecretKey}, primitives::{EncryptedBlock, OneShotNonce, StreamNonce}, @@ -8,7 +8,12 @@ use sd_crypto::{ }; use sd_utils::error::FileIOError; -use std::{collections::HashMap, fs::Metadata, path::PathBuf, pin::pin}; +use std::{ + collections::{BTreeMap, HashMap}, + fs::Metadata, + path::PathBuf, + pin::pin, +}; use futures::StreamExt; use iroh_base::key::{NodeId, SecretKey as IrohSecretKey}; @@ -22,26 +27,44 @@ use zeroize::{Zeroize, ZeroizeOnDrop}; #[derive(Serialize, Deserialize)] pub struct KeyStore { iroh_secret_key: IrohSecretKey, - keys_by_hash: HashMap, + keys: BTreeMap>, } impl KeyStore { pub fn new(iroh_secret_key: IrohSecretKey) -> Self { Self { iroh_secret_key, - keys_by_hash: HashMap::new(), + keys: BTreeMap::new(), } } - pub fn add_key(&mut self, key: SecretKey) { + pub fn add_key(&mut self, group_pub_id: groups::PubId, key: SecretKey) { let mut hasher = blake3::Hasher::new(); hasher.update(key.as_ref()); let hash = hasher.finalize(); - self.keys_by_hash + self.keys + .entry(group_pub_id) + .or_default() .insert(KeyHash(hash.to_hex().to_string()), key); } + pub fn add_many_keys( + &mut self, + group_pub_id: groups::PubId, + keys: impl IntoIterator, + ) { + let group_entry = self.keys.entry(group_pub_id).or_default(); + + for key in keys { + let mut hasher = blake3::Hasher::new(); + hasher.update(key.as_ref()); + let hash = hasher.finalize(); + + group_entry.insert(KeyHash(hash.to_hex().to_string()), 
key); + } + } + pub fn iroh_secret_key(&self) -> IrohSecretKey { self.iroh_secret_key.clone() } @@ -50,8 +73,17 @@ impl KeyStore { self.iroh_secret_key.public() } - pub fn get_key(&self, hash: &KeyHash) -> Option { - self.keys_by_hash.get(hash).cloned() + pub fn get_key(&self, group_pub_id: groups::PubId, hash: &KeyHash) -> Option { + self.keys + .get(&group_pub_id) + .and_then(|group| group.get(hash).cloned()) + } + + pub fn get_group_keys(&self, group_pub_id: groups::PubId) -> Vec { + self.keys + .get(&group_pub_id) + .map(|group| group.values().cloned().collect()) + .unwrap_or_default() } pub async fn encrypt( @@ -219,8 +251,10 @@ impl KeyStore { impl Zeroize for KeyStore { fn zeroize(&mut self) { self.iroh_secret_key = IrohSecretKey::generate(); - self.keys_by_hash.values_mut().for_each(Zeroize::zeroize); - self.keys_by_hash = HashMap::new(); + self.keys + .values_mut() + .for_each(|group| group.values_mut().for_each(Zeroize::zeroize)); + self.keys = BTreeMap::new(); } } diff --git a/core/crates/cloud-services/src/key_manager/mod.rs b/core/crates/cloud-services/src/key_manager/mod.rs index 74386e0c8..411712c2b 100644 --- a/core/crates/cloud-services/src/key_manager/mod.rs +++ b/core/crates/cloud-services/src/key_manager/mod.rs @@ -1,6 +1,6 @@ use crate::Error; -use sd_cloud_schema::sync::KeyHash; +use sd_cloud_schema::sync::{groups, KeyHash}; use sd_crypto::{cloud::secret_key::SecretKey, CryptoRng}; use sd_utils::error::FileIOError; @@ -95,17 +95,39 @@ impl KeyManager { self.store.read().await.node_id() } - pub async fn add_key(&self, key: SecretKey, rng: &mut CryptoRng) -> Result<(), Error> { + pub async fn add_key( + &self, + group_pub_id: groups::PubId, + key: SecretKey, + rng: &mut CryptoRng, + ) -> Result<(), Error> { let mut store = self.store.write().await; - store.add_key(key); + store.add_key(group_pub_id, key); // Keeping the write lock here, this way we ensure that we can't corrupt the file store .encrypt(&self.master_key, rng, &self.keys_file_path) 
.await } - pub async fn get_key(&self, hash: &KeyHash) -> Option { - self.store.read().await.get_key(hash) + pub async fn add_many_keys( + &self, + group_pub_id: groups::PubId, + keys: impl IntoIterator + Send, + rng: &mut CryptoRng, + ) -> Result<(), Error> { + let mut store = self.store.write().await; + store.add_many_keys(group_pub_id, keys); + // Keeping the write lock here, this way we ensure that we can't corrupt the file + store + .encrypt(&self.master_key, rng, &self.keys_file_path) + .await + } + pub async fn get_key(&self, group_pub_id: groups::PubId, hash: &KeyHash) -> Option { + self.store.read().await.get_key(group_pub_id, hash) + } + + pub async fn get_group_keys(&self, group_pub_id: groups::PubId) -> Vec { + self.store.read().await.get_group_keys(group_pub_id) } } diff --git a/core/crates/cloud-services/src/lib.rs b/core/crates/cloud-services/src/lib.rs index 470550715..1e84e9ef3 100644 --- a/core/crates/cloud-services/src/lib.rs +++ b/core/crates/cloud-services/src/lib.rs @@ -36,6 +36,7 @@ mod key_manager; mod token_refresher; pub use cloud_client::CloudServices; +pub use cloud_p2p::{CloudP2P, JoinSyncGroupResponse, NotifyUser, Ticket, UserResponse}; pub use error::{Error, GetTokenError}; pub use key_manager::KeyManager; diff --git a/core/crates/cloud-services/src/token_refresher.rs b/core/crates/cloud-services/src/token_refresher.rs index a9e4a63a2..f0e23515f 100644 --- a/core/crates/cloud-services/src/token_refresher.rs +++ b/core/crates/cloud-services/src/token_refresher.rs @@ -8,12 +8,18 @@ use futures::StreamExt; use futures_concurrency::stream::Merge; use reqwest::Url; use reqwest_middleware::{reqwest::header, ClientWithMiddleware}; -use tokio::{spawn, sync::oneshot, time::sleep}; +use tokio::{ + spawn, + sync::oneshot, + time::{interval, sleep, MissedTickBehavior}, +}; +use tokio_stream::wrappers::IntervalStream; use tracing::{error, warn}; use super::{Error, GetTokenError}; const ONE_MINUTE: Duration = Duration::from_secs(60); +const 
TEN_SECONDS: Duration = Duration::from_secs(10); enum Message { Init( @@ -23,8 +29,10 @@ enum Message { oneshot::Sender>, ), ), + CheckInitialization(oneshot::Sender>), RequestToken(oneshot::Sender>), RefreshTime, + Tick, } #[derive(Debug, Clone)] @@ -75,6 +83,16 @@ impl TokenRefresher { rx.await.expect("Token refresher channel closed") } + pub async fn check_initialization(&self) -> Result<(), GetTokenError> { + let (tx, rx) = oneshot::channel(); + self.tx + .send_async(Message::CheckInitialization(tx)) + .await + .expect("Token refresher channel closed"); + + rx.await.expect("Token refresher channel closed") + } + pub async fn get_access_token(&self) -> Result { let (tx, rx) = oneshot::channel(); self.tx @@ -104,7 +122,15 @@ impl Runner { ) { let (refresh_tx, refresh_rx) = flume::bounded(1); - let mut msg_stream = pin!((msgs_rx.into_stream(), refresh_rx.into_stream()).merge()); + let mut ticker = interval(TEN_SECONDS); + ticker.set_missed_tick_behavior(MissedTickBehavior::Skip); + + let mut msg_stream = pin!(( + msgs_rx.into_stream(), + refresh_rx.into_stream(), + IntervalStream::new(ticker).map(|_| Message::Tick) + ) + .merge()); let mut runner = Self { initialized: false, @@ -127,6 +153,8 @@ impl Runner { } } + Message::CheckInitialization(ack) => runner.check_initialization(ack), + Message::RequestToken(ack) => runner.reply_token(ack), Message::RefreshTime => { @@ -134,6 +162,8 @@ impl Runner { error!(?e, "Failed to refresh token: {e}"); } } + + Message::Tick => runner.tick().await, } } } @@ -143,7 +173,8 @@ impl Runner { access_token: AccessToken, refresh_token: RefreshToken, ) -> Result<(), Error> { - let access_token_duration = self.extract_access_token_duration(&access_token)?; + let access_token_duration = + Self::extract_access_token_duration(&mut self.token_decoding_buffer, &access_token)?; self.initialized = true; self.current_token = Some(access_token); @@ -182,10 +213,9 @@ impl Runner { } async fn refresh(&mut self) -> Result<(), Error> { - 
self.current_token = None; let RefreshToken(refresh_token) = self .current_refresh_token - .take() + .clone() .expect("refresh token is set otherwise we wouldn't be here"); let response = self @@ -219,7 +249,7 @@ impl Runner { } fn extract_access_token_duration( - &mut self, + token_decoding_buffer: &mut Vec, AccessToken(token): &AccessToken, ) -> Result { #[derive(serde::Deserialize)] @@ -228,10 +258,10 @@ impl Runner { exp: DateTime, } - BASE64_URL_SAFE_NO_PAD.decode_vec(token, &mut self.token_decoding_buffer)?; - self.token_decoding_buffer.clear(); + BASE64_URL_SAFE_NO_PAD.decode_vec(token, token_decoding_buffer)?; + token_decoding_buffer.clear(); - let token = serde_json::from_slice::(&self.token_decoding_buffer)?; + let token = serde_json::from_slice::(token_decoding_buffer)?; token .exp @@ -251,6 +281,34 @@ impl Runner { fn token_header_value_to_string(token: &header::HeaderValue) -> Result { token.to_str().map(str::to_string).map_err(Into::into) } + + fn check_initialization(&self, ack: oneshot::Sender>) { + if ack + .send(if self.initialized { + Ok(()) + } else { + Err(GetTokenError::RefresherNotInitialized) + }) + .is_err() + { + warn!("Failed to send access token response, receiver dropped;"); + } + } + + /// This method is a safeguard to make sure we try to keep refreshing tokens even if they + /// already expired, as the refresh token has a bigger expiration than the access token. + async fn tick(&mut self) { + if let Some(access_token) = &self.current_token { + if matches!( + Self::extract_access_token_duration(&mut self.token_decoding_buffer, access_token), + Err(Error::TokenExpired) + ) { + if let Err(e) = self.refresh().await { + error!(?e, "Failed to refresh expired token on tick method;"); + } + } + } + } } /// This test is here for documentation purposes only, they are not meant to be run. 
diff --git a/crates/crypto/src/cloud/secret_key.rs b/crates/crypto/src/cloud/secret_key.rs index 34b703c31..d097389f6 100644 --- a/crates/crypto/src/cloud/secret_key.rs +++ b/crates/crypto/src/cloud/secret_key.rs @@ -1,6 +1,7 @@ use crate::{ ct::{Choice, ConstantTimeEq, ConstantTimeEqNull}, rng::CryptoRng, + Error, }; use std::fmt; @@ -98,6 +99,35 @@ impl From<&SecretKey> for Array { } } +impl From<&SecretKey> for Vec { + fn from(SecretKey(key): &SecretKey) -> Self { + key.to_vec() + } +} + +impl From for Vec { + fn from(SecretKey(key): SecretKey) -> Self { + key.to_vec() + } +} + +impl TryFrom<&[u8]> for SecretKey { + type Error = Error; + + fn try_from(key: &[u8]) -> Result { + if key.len() != 32 { + return Err(Error::InvalidKeySize(key.len())); + } + + Ok(Self(Array([ + key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7], key[8], key[9], + key[10], key[11], key[12], key[13], key[14], key[15], key[16], key[17], key[18], + key[19], key[20], key[21], key[22], key[23], key[24], key[25], key[26], key[27], + key[28], key[29], key[30], key[31], + ]))) + } +} + impl From> for SecretKey { fn from(key: GenericArray) -> Self { Self(Array([ diff --git a/crates/crypto/src/error.rs b/crates/crypto/src/error.rs index f7444b20f..c8371d1ae 100644 --- a/crates/crypto/src/error.rs +++ b/crates/crypto/src/error.rs @@ -7,6 +7,8 @@ use tokio::io; pub enum Error { #[error("Block too big for oneshot encryption: size in bytes = {0}")] BlockTooBig(usize), + #[error("Invalid key size: expected 32 bytes, got {0}")] + InvalidKeySize(usize), /// Encrypt and decrypt errors, AEAD crate doesn't provide any error context for these /// as it can be a security hazard to leak information about the error. 
From cbf06e80c4c33340081c42c6e1e1b72332ff44c8 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Fri, 23 Aug 2024 17:38:34 -0300 Subject: [PATCH 088/218] Integrate cloud p2p into node and rspc routes --- .../crates/cloud-services/src/cloud_client.rs | 15 +++++++ .../cloud-services/src/cloud_p2p/mod.rs | 4 +- core/src/api/cloud/mod.rs | 43 +++++++++++++++++-- core/src/lib.rs | 21 ++++++++- 4 files changed, 75 insertions(+), 8 deletions(-) diff --git a/core/crates/cloud-services/src/cloud_client.rs b/core/crates/cloud-services/src/cloud_client.rs index 848facd76..986765b5b 100644 --- a/core/crates/cloud-services/src/cloud_client.rs +++ b/core/crates/cloud-services/src/cloud_client.rs @@ -5,6 +5,7 @@ use sd_cloud_schema::{Client, Service, ServicesALPN}; use std::{net::SocketAddr, sync::Arc, time::Duration}; use futures::Stream; +use iroh_net::relay::RelayUrl; use quic_rpc::{transport::quinn::QuinnConnection, RpcClient}; use quinn::{ClientConfig, Endpoint}; use reqwest::{IntoUrl, Url}; @@ -38,6 +39,8 @@ pub struct CloudServices { get_cloud_api_address: Url, http_client: ClientWithMiddleware, domain_name: String, + pub cloud_p2p_dns_origin_name: String, + pub cloud_p2p_relay_url: RelayUrl, pub token_refresher: TokenRefresher, key_manager: Arc>>>, cloud_p2p: Arc>>>, @@ -54,6 +57,8 @@ impl CloudServices { /// If the client fails to connect, it will try again the next time it's used. pub async fn new( get_cloud_api_address: impl IntoUrl + Send, + cloud_p2p_relay_url: impl IntoUrl + Send, + cloud_p2p_dns_origin_name: String, domain_name: String, ) -> Result { let http_client_builder = reqwest::Client::builder().timeout(Duration::from_secs(3)); @@ -63,6 +68,11 @@ impl CloudServices { builder = builder.https_only(true); } + let cloud_p2p_relay_url = cloud_p2p_relay_url + .into_url() + .map_err(Error::InvalidUrl)? + .into(); + let http_client = ClientBuilder::new(http_client_builder.build().map_err(Error::HttpClientInit)?) 
.with(RetryTransientMiddleware::new_with_policy( @@ -102,6 +112,8 @@ impl CloudServices { ), get_cloud_api_address, http_client, + cloud_p2p_dns_origin_name, + cloud_p2p_relay_url, domain_name, key_manager: Arc::default(), cloud_p2p: Arc::default(), @@ -303,10 +315,13 @@ mod tests { use super::*; + #[ignore] #[tokio::test] async fn test_client() { let response = CloudServices::new( "http://localhost:9420/cloud-api-address", + "http://relay.localhost:9999/", + "dns.localhost:9999".to_string(), "localhost".to_string(), ) .await diff --git a/core/crates/cloud-services/src/cloud_p2p/mod.rs b/core/crates/cloud-services/src/cloud_p2p/mod.rs index edf3502c3..49a11807d 100644 --- a/core/crates/cloud-services/src/cloud_p2p/mod.rs +++ b/core/crates/cloud-services/src/cloud_p2p/mod.rs @@ -76,7 +76,7 @@ pub struct CloudP2P { impl CloudP2P { pub async fn new( current_device_pub_id: devices::PubId, - cloud_services: CloudServices, + cloud_services: &CloudServices, mut rng: CryptoRng, iroh_secret_key: IrohSecretKey, dns_origin_domain: String, @@ -95,7 +95,7 @@ impl CloudP2P { let (msgs_tx, msgs_rx) = flume::bounded(16); spawn({ - let runner = Runner::new(current_device_pub_id, &cloud_services, endpoint).await?; + let runner = Runner::new(current_device_pub_id, cloud_services, endpoint).await?; let user_response_rx = cloud_services.user_response_rx.clone(); async move { diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index 2fd6e31b6..bc39cae49 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -5,9 +5,13 @@ use sd_cloud_schema::{ error::{ClientSideError, Error}, users, Client, Service, }; -use sd_core_cloud_services::{IrohSecretKey, KeyManager, QuinnConnection}; +use sd_core_cloud_services::{CloudP2P, IrohSecretKey, KeyManager, QuinnConnection, UserResponse}; use sd_crypto::{CryptoRng, SeedableRng}; +use std::pin::pin; + +use async_stream::stream; +use futures::StreamExt; use rspc::alpha::AlphaRouter; use tracing::error; use uuid::Uuid; @@ 
-128,14 +132,45 @@ pub(crate) fn mount() -> AlphaRouter { node.cloud_services.set_key_manager(key_manager).await; - // TODO: With this device iroh's secret key (NodeId) now known and we can start the iroh - // node for cloud p2p - todo!("Start iroh node for cloud p2p"); + node.cloud_services + .set_cloud_p2p( + CloudP2P::new( + device_pub_id, + &node.cloud_services, + rng, + iroh_secret_key, + node.cloud_services.cloud_p2p_dns_origin_name.clone(), + node.cloud_services.cloud_p2p_relay_url.clone(), + ) + .await?, + ) + .await; Ok(()) }, ) }) + .procedure( + "listenCloudServicesNotifications", + R.subscription(|node, _: ()| async move { + stream! { + let mut notifications_stream = + pin!(node.cloud_services.stream_user_notifications()); + + while let Some(notification) = notifications_stream.next().await { + yield notification; + } + } + }), + ) + .procedure( + "userResponse", + R.mutation(|node, response: UserResponse| async move { + node.cloud_services.send_user_response(response).await; + + Ok(()) + }), + ) } fn handle_comm_error( diff --git a/core/src/lib.rs b/core/src/lib.rs index ea7d02930..44686e23e 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -108,12 +108,21 @@ impl Node { let (old_jobs, jobs_actor) = old_job::OldJobs::new(); let libraries = library::Libraries::new(data_dir.join("libraries")).await?; - let (get_cloud_api_address, cloud_services_domain_name) = { + let ( + get_cloud_api_address, + cloud_p2p_relay_url, + cloud_p2p_dns_origin_name, + cloud_services_domain_name, + ) = { #[cfg(debug_assertions)] { ( std::env::var("SD_CLOUD_API_ADDRESS_URL") .unwrap_or_else(|_| "http://localhost:9420/cloud-api-address".to_string()), + std::env::var("SD_CLOUD_P2P_RELAY_URL") + .unwrap_or_else(|_| "http://relay.localhost:9999/".to_string()), + std::env::var("SD_CLOUD_P2P_DNS_ORIGIN_NAME") + .unwrap_or_else(|_| "dnf.localhost:9999".to_string()), std::env::var("SD_CLOUD_API_DOMAIN_NAME") .unwrap_or_else(|_| "localhost".to_string()), ) @@ -122,6 +131,8 @@ impl 
Node { { ( "https://auth.spacedrive.com/cloud-api-address".to_string(), + "https://relay.spacedrive.com/".to_string(), + "dns.spacedrive.com".to_string(), "api.spacedrive.com".to_string(), ) } @@ -144,7 +155,13 @@ impl Node { event_bus, libraries, cloud_services: Arc::new( - CloudServices::new(&get_cloud_api_address, cloud_services_domain_name).await?, + CloudServices::new( + &get_cloud_api_address, + cloud_p2p_relay_url, + cloud_p2p_dns_origin_name, + cloud_services_domain_name, + ) + .await?, ), master_rng: Arc::new(Mutex::new(CryptoRng::new()?)), }); From 752bdbd2194ec8c20a251c0c9680839292adafb5 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Fri, 23 Aug 2024 23:42:51 -0300 Subject: [PATCH 089/218] Some tweks to simplify sync subcrate Initial work to port cloud sync system to new cloud services --- Cargo.lock | Bin 329735 -> 329700 bytes core/crates/sync/src/db_operation.rs | 12 +- core/crates/sync/src/ingest.rs | 42 +++--- core/crates/sync/src/lib.rs | 11 +- core/crates/sync/src/manager.rs | 18 +-- core/crates/sync/tests/lib.rs | 4 +- core/crates/sync/tests/mock_instance.rs | 8 +- core/prisma/schema.prisma | 55 ++++--- core/src/api/sync.rs | 22 +-- core/src/cloud/sync/ingest.rs | 6 +- core/src/cloud/sync/receive.rs | 10 +- core/src/cloud/sync/send.rs | 187 ++++++++++++------------ core/src/location/non_indexed.rs | 4 +- core/src/p2p/sync/mod.rs | 65 ++++---- crates/sync/Cargo.toml | 4 +- crates/sync/src/compressed.rs | 103 ++++++------- crates/sync/src/crdt.rs | 24 ++- crates/sync/src/factory.rs | 71 ++++----- crates/sync/src/lib.rs | 4 + crates/sync/src/model_traits.rs | 4 +- 20 files changed, 344 insertions(+), 310 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4dfaf9c523cf5de7383d3ad2b0cf4b67f963dd4b..d27b0c74293c5687107f23344775406cba48708f 100644 GIT binary patch delta 36 scmZplAo8SNq@jheg=q`3W%u^rZsrb)>E>0;a_vkVPSOv0R5Q^lmGw# delta 51 zcmaFTFVa3iq@jheg=q`3W%u+A$;<-N`RkdvrYqJkYi&>NW{$I%u2jRU-hTfu^Y;6P HSy){F;U5xs diff --git 
a/core/crates/sync/src/db_operation.rs b/core/crates/sync/src/db_operation.rs index ff49d32b3..858788f18 100644 --- a/core/crates/sync/src/db_operation.rs +++ b/core/crates/sync/src/db_operation.rs @@ -28,11 +28,11 @@ impl crdt_with_instance::Data { pub fn into_operation(self) -> Result { Ok(CRDTOperation { - instance: self.instance(), + device_pub_id: self.instance(), timestamp: self.timestamp(), record_id: rmp_serde::from_slice(&self.record_id)?, - model: { + model_id: { #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] // SAFETY: we will not have more than 2^16 models and we had to store using signed // integers due to SQLite limitations @@ -60,10 +60,10 @@ impl cloud_crdt_with_instance::Data { Ok(( self.id, CRDTOperation { - instance: self.instance(), + device_pub_id: self.instance(), timestamp: self.timestamp(), record_id: rmp_serde::from_slice(&self.record_id)?, - model: { + model_id: { #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] // SAFETY: we will not have more than 2^16 models and we had to store using signed // integers due to SQLite limitations @@ -87,10 +87,10 @@ pub async fn write_crdt_op_to_db(op: &CRDTOperation, db: &PrismaClient) -> Resul op.timestamp.0 as i64 } }, - instance: instance::pub_id::equals(op.instance.as_bytes().to_vec()), + instance: instance::pub_id::equals(op.device_pub_id.as_bytes().to_vec()), kind: op.kind().to_string(), data: rmp_serde::to_vec(&op.data)?, - model: i32::from(op.model), + model: i32::from(op.model_id), record_id: rmp_serde::to_vec(&op.record_id)?, _params: vec![], } diff --git a/core/crates/sync/src/ingest.rs b/core/crates/sync/src/ingest.rs index d868f685d..08a06a75d 100644 --- a/core/crates/sync/src/ingest.rs +++ b/core/crates/sync/src/ingest.rs @@ -3,8 +3,8 @@ use sd_prisma::{ prisma_sync::ModelSyncData, }; use sd_sync::{ - CRDTOperation, CRDTOperationData, CompressedCRDTOperation, CompressedCRDTOperations, - OperationKind, + CRDTOperation, CRDTOperationData, 
CompressedCRDTOperation, + CompressedCRDTOperationsPerModelPerDevice, OperationKind, }; use std::{ @@ -133,7 +133,7 @@ impl Actor { let (tx, rx) = oneshot::channel::<()>(); let timestamps = self - .timestamps + .timestamp_per_device .read() .await .iter() @@ -289,7 +289,12 @@ impl Actor { .expect("timestamp has too much drift!"); // read the timestamp for the operation's instance, or insert one if it doesn't exist - let timestamp = self.timestamps.read().await.get(&instance).copied(); + let timestamp = self + .timestamp_per_device + .read() + .await + .get(&instance) + .copied(); // Delete - ignores all other messages if let Some(delete_op) = ops @@ -388,7 +393,10 @@ impl Actor { // update the stored timestamp for this instance - will be derived from the crdt operations table on restart let new_ts = NTP64::max(timestamp.unwrap_or_default(), new_timestamp); - self.timestamps.write().await.insert(instance, new_ts); + self.timestamp_per_device + .write() + .await + .insert(instance, new_ts); Ok(()) } @@ -416,8 +424,8 @@ async fn handle_crdt_updates( .run(|db| async move { // fake operation to batch them all at once ModelSyncData::from_op(CRDTOperation { - instance, - model, + device_pub_id: instance, + model_id: model, record_id: record_id.clone(), timestamp: NTP64(0), data: CRDTOperationData::Create( @@ -439,8 +447,8 @@ async fn handle_crdt_updates( async move { write_crdt_op_to_db( &CRDTOperation { - instance, - model, + device_pub_id: instance, + model_id: model, record_id, timestamp, data: CRDTOperationData::Update { field, value }, @@ -495,8 +503,8 @@ async fn handle_crdt_create_and_updates( .run(|db| async move { // fake a create with a bunch of data rather than individual insert ModelSyncData::from_op(CRDTOperation { - instance, - model, + device_pub_id: instance, + model_id: model, record_id: record_id.clone(), timestamp, data: CRDTOperationData::Create( @@ -516,8 +524,8 @@ async fn handle_crdt_create_and_updates( let db = &db; async move { let operation = 
CRDTOperation { - instance, - model, + device_pub_id: instance, + model_id: model, record_id, timestamp: op.timestamp, data: op.data.clone(), @@ -543,8 +551,8 @@ async fn handle_crdt_deletion( ) -> Result<(), Error> { // deletes are the be all and end all, no need to check anything let op = CRDTOperation { - instance, - model, + device_pub_id: instance, + model_id: model, record_id, timestamp: delete_op.timestamp, data: CRDTOperationData::Delete, @@ -579,7 +587,7 @@ pub struct Handler { #[derive(Debug)] pub struct MessagesEvent { pub instance_id: Uuid, - pub messages: CompressedCRDTOperations, + pub messages: CompressedCRDTOperationsPerModelPerDevice, pub has_more: bool, pub wait_tx: Option>, } @@ -609,7 +617,7 @@ mod test { NonZeroU128::new(instance.to_u128_le()).expect("Non zero id"), )) .build(), - timestamps: Arc::default(), + timestamp_per_device: Arc::default(), emit_messages_flag: Arc::new(AtomicBool::new(true)), active: AtomicBool::default(), active_notify: Notify::default(), diff --git a/core/crates/sync/src/lib.rs b/core/crates/sync/src/lib.rs index d5c208668..118a343b6 100644 --- a/core/crates/sync/src/lib.rs +++ b/core/crates/sync/src/lib.rs @@ -54,13 +54,14 @@ pub enum SyncMessage { Created, } -pub type Timestamps = Arc>>; +pub type DevicePubId = Uuid; +pub type TimestampPerDevice = Arc>>; pub struct SharedState { pub db: Arc, pub emit_messages_flag: Arc, pub instance: Uuid, - pub timestamps: Timestamps, + pub timestamp_per_device: TimestampPerDevice, pub clock: uhlc::HLC, pub active: AtomicBool, pub active_notify: Notify, @@ -105,10 +106,10 @@ pub fn crdt_op_db(op: &CRDTOperation) -> Result { op.timestamp.as_u64() as i64 } }, - instance: instance::pub_id::equals(op.instance.as_bytes().to_vec()), + instance: instance::pub_id::equals(op.device_pub_id.as_bytes().to_vec()), kind: op.kind().to_string(), data: rmp_serde::to_vec(&op.data)?, - model: i32::from(op.model), + model: i32::from(op.model_id), record_id: rmp_serde::to_vec(&op.record_id)?, _params: 
vec![], }) @@ -129,7 +130,7 @@ pub fn crdt_op_unchecked_db( instance_id, kind: op.kind().to_string(), data: rmp_serde::to_vec(&op.data)?, - model: i32::from(op.model), + model: i32::from(op.model_id), record_id: rmp_serde::to_vec(&op.record_id)?, _params: vec![], }) diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index d7f9562d9..0ab5b76ed 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -40,7 +40,7 @@ impl fmt::Debug for Manager { #[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq)] pub struct GetOpsArgs { - pub clocks: Vec<(Uuid, NTP64)>, + pub timestamp_per_device: Vec<(Uuid, NTP64)>, pub count: u32, } @@ -115,7 +115,7 @@ impl Manager { db, instance: current_instance_uuid, clock, - timestamps: Arc::new(RwLock::new(timestamps)), + timestamp_per_device: Arc::new(RwLock::new(timestamps)), emit_messages_flag, active: AtomicBool::default(), active_notify: Notify::default(), @@ -165,7 +165,7 @@ impl Manager { if let Some(last) = ops.last() { self.shared - .timestamps + .timestamp_per_device .write() .await .insert(self.instance, last.timestamp); @@ -213,7 +213,7 @@ impl Manager { }; self.shared - .timestamps + .timestamp_per_device .write() .await .insert(self.instance, op.timestamp); @@ -251,7 +251,7 @@ impl Manager { .db .crdt_operation() .find_many(vec![or(args - .clocks + .timestamp_per_device .iter() .map(|(instance_id, timestamp)| { and![ @@ -269,7 +269,7 @@ impl Manager { }) .chain([crdt_operation::instance::is_not(vec![ instance::pub_id::in_vec( - args.clocks + args.timestamp_per_device .iter() .map(|(instance_id, _)| uuid_to_bytes(instance_id)) .collect(), @@ -301,7 +301,7 @@ impl Manager { .db .cloud_crdt_operation() .find_many(vec![or(args - .clocks + .timestamp_per_device .iter() .map(|(instance_id, timestamp)| { and![ @@ -319,7 +319,7 @@ impl Manager { }) .chain([cloud_crdt_operation::instance::is_not(vec![ instance::pub_id::in_vec( - args.clocks + 
args.timestamp_per_device .iter() .map(|(instance_id, _)| uuid_to_bytes(instance_id)) .collect(), @@ -349,7 +349,7 @@ impl OperationFactory for Manager { &self.clock } - fn get_instance(&self) -> Uuid { + fn get_device_pub_id(&self) -> Uuid { self.instance } } diff --git a/core/crates/sync/tests/lib.rs b/core/crates/sync/tests/lib.rs index 604739ac8..45c602c51 100644 --- a/core/crates/sync/tests/lib.rs +++ b/core/crates/sync/tests/lib.rs @@ -99,7 +99,7 @@ async fn writes_operations_and_rows_together() -> Result<(), Box Result<(), Box> let out = instance2 .sync .get_ops(GetOpsArgs { - clocks: vec![], + timestamp_per_device: vec![], count: 100, }) .await?; diff --git a/core/crates/sync/tests/mock_instance.rs b/core/crates/sync/tests/mock_instance.rs index 807ccd4f6..591b52a8a 100644 --- a/core/crates/sync/tests/mock_instance.rs +++ b/core/crates/sync/tests/mock_instance.rs @@ -1,7 +1,7 @@ use sd_core_sync::*; use sd_prisma::prisma; -use sd_sync::CompressedCRDTOperations; +use sd_sync::CompressedCRDTOperationsPerModelPerDevice; use sd_utils::uuid_to_bytes; use std::sync::{atomic::AtomicBool, Arc}; @@ -122,7 +122,7 @@ impl Instance { let messages = left .sync .get_ops(GetOpsArgs { - clocks: timestamps, + timestamp_per_device: timestamps, count: 100, }) .await @@ -133,7 +133,9 @@ impl Instance { ingest .event_tx .send(ingest::Event::Messages(ingest::MessagesEvent { - messages: CompressedCRDTOperations::new(messages), + messages: CompressedCRDTOperationsPerModelPerDevice::new( + messages, + ), has_more: false, instance_id: left.id, wait_tx: None, diff --git a/core/prisma/schema.prisma b/core/prisma/schema.prisma index 62da3559d..d1a0d679d 100644 --- a/core/prisma/schema.prisma +++ b/core/prisma/schema.prisma @@ -28,9 +28,10 @@ model CRDTOperation { kind String data Bytes - instance_id Int - instance Instance @relation(fields: [instance_id], references: [id]) + device_pub_id Bytes + device Device @relation(fields: [device_pub_id], references: [pub_id]) + 
@@index([timestamp]) @@map("crdt_operation") } @@ -46,22 +47,36 @@ model CloudCRDTOperation { kind String data Bytes - instance_id Int - instance Instance @relation(fields: [instance_id], references: [id]) + device_pub_id Bytes + device Device @relation(fields: [device_pub_id], references: [pub_id]) + @@index([timestamp]) @@map("cloud_crdt_operation") } -/// @deprecated: This model has to exist solely for backwards compatibility. -/// @local -model Node { - id Int @id @default(autoincrement()) - pub_id Bytes @unique - name String - // Enum: sd_core::node::Platform - platform Int +/// Devices are the owner machines connected to this library +/// @shared(id: pub_id, modelId: 12) +model Device { + id Int @id @default(autoincrement()) + // uuid v7 + pub_id Bytes @unique + name String + + // Enum: sd_cloud_schema::device::DeviceOS + os Int + // Enum: sd_cloud_schema::device::HardwareModel + hardware_model Int + + // clock timestamp for sync + timestamp BigInt? + date_created DateTime - identity Bytes? // TODO: Change to required field in future + date_deleted DateTime? + + CRDTOperation CRDTOperation[] + CloudCRDTOperation CloudCRDTOperation[] + StorageStatistics StorageStatistics? + Location Location[] @@map("node") } @@ -89,11 +104,6 @@ model Instance { // clock timestamp for sync timestamp BigInt? - locations Location[] - CRDTOperation CRDTOperation[] - CloudCRDTOperation CloudCRDTOperation[] - storage_statistics StorageStatistics? - @@map("instance") } @@ -158,9 +168,8 @@ model Location { scan_state Int @default(0) // Enum: sd_core::location::ScanState - // this should just be a local-only cache but it's too much effort to broadcast online locations rn (@brendan) - instance_id Int? - instance Instance? 
@relation(fields: [instance_id], references: [id], onDelete: SetNull) + device_pub_id Bytes + device Device @relation(fields: [device_pub_id], references: [pub_id], onDelete: Cascade) file_paths FilePath[] indexer_rules IndexerRulesInLocation[] @@ -576,8 +585,8 @@ model StorageStatistics { total_capacity BigInt @default(0) available_capacity BigInt @default(0) - instance_pub_id Bytes? @unique - instance Instance? @relation(fields: [instance_pub_id], references: [pub_id], onDelete: Cascade) + device_pub_id Bytes @unique + device Device @relation(fields: [device_pub_id], references: [pub_id], onDelete: Cascade) @@map("storage_statistics") } diff --git a/core/src/api/sync.rs b/core/src/api/sync.rs index 50935a249..00db6277b 100644 --- a/core/src/api/sync.rs +++ b/core/src/api/sync.rs @@ -23,17 +23,17 @@ pub(crate) fn mount() -> AlphaRouter { } }) }) - .procedure("messages", { - R.with2(library()).query(|(_, library), _: ()| async move { - Ok(library - .sync - .get_ops(GetOpsArgs { - clocks: vec![], - count: 1000, - }) - .await?) - }) - }) + // .procedure("messages", { + // R.with2(library()).query(|(_, library), _: ()| async move { + // Ok(library + // .sync + // .get_ops(GetOpsArgs { + // timestamp_per_device: vec![], + // count: 1000, + // }) + // .await?) 
+ // }) + // }) .procedure("backfill", { R.with2(library()) .mutation(|(node, library), _: ()| async move { diff --git a/core/src/cloud/sync/ingest.rs b/core/src/cloud/sync/ingest.rs index d41331dff..dc44ee9e6 100644 --- a/core/src/cloud/sync/ingest.rs +++ b/core/src/cloud/sync/ingest.rs @@ -2,7 +2,7 @@ use crate::cloud::sync::err_break; use sd_actors::Stopper; use sd_prisma::prisma::cloud_crdt_operation; -use sd_sync::CompressedCRDTOperations; +use sd_sync::CompressedCRDTOperationsPerModelPerDevice; use std::{ future::IntoFuture, @@ -65,7 +65,7 @@ pub async fn run_actor( let (ops_ids, ops): (Vec<_>, Vec<_>) = err_break!( sync.get_cloud_ops(GetOpsArgs { - clocks: timestamps, + timestamp_per_device: timestamps, count: OPS_PER_REQUEST, }) .await @@ -92,7 +92,7 @@ pub async fn run_actor( .send(sd_core_sync::Event::Messages(MessagesEvent { instance_id: sync.instance, has_more: ops.len() == OPS_PER_REQUEST as usize, - messages: CompressedCRDTOperations::new(ops), + messages: CompressedCRDTOperationsPerModelPerDevice::new(ops), wait_tx: Some(wait_tx) })) .await diff --git a/core/src/cloud/sync/receive.rs b/core/src/cloud/sync/receive.rs index 6d8b3fda3..fd2bc7d8f 100644 --- a/core/src/cloud/sync/receive.rs +++ b/core/src/cloud/sync/receive.rs @@ -239,10 +239,10 @@ async fn write_cloud_ops_to_db( fn crdt_op_db(op: &CRDTOperation) -> cloud_crdt_operation::Create { cloud_crdt_operation::Create { timestamp: op.timestamp.0 as i64, - instance: instance::pub_id::equals(op.instance.as_bytes().to_vec()), + instance: instance::pub_id::equals(op.device_pub_id.as_bytes().to_vec()), kind: op.data.as_kind().to_string(), data: to_vec(&op.data).expect("unable to serialize data"), - model: op.model as i32, + model: op.model_id as i32, record_id: rmp_serde::to_vec(&op.record_id).expect("unable to serialize record id"), _params: vec![], } @@ -283,7 +283,11 @@ pub async fn upsert_instance( .exec() .await?; - sync.timestamps.write().await.entry(*uuid).or_default(); + 
sync.timestamp_per_device + .write() + .await + .entry(*uuid) + .or_default(); // Called again so the new instances are picked up libraries.update_instances_by_id(library_id).await; diff --git a/core/src/cloud/sync/send.rs b/core/src/cloud/sync/send.rs index a97958169..f566aa26c 100644 --- a/core/src/cloud/sync/send.rs +++ b/core/src/cloud/sync/send.rs @@ -2,9 +2,18 @@ use sd_actors::Stopper; use sd_core_cloud_services::CloudServices; use sd_core_sync::SyncMessage; -use std::sync::{atomic::AtomicBool, Arc}; +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; -use tokio::sync::{broadcast, Notify}; +use tokio::{ + sync::{broadcast, Notify}, + time::sleep, +}; use uuid::Uuid; enum RaceNotifiedOrStopped { @@ -16,117 +25,105 @@ pub async fn run_actor( library_id: Uuid, sync: Arc, cloud_services: CloudServices, - state: Arc, + is_active: Arc, state_notify: Arc, stop: Stopper, ) { - // loop { - // state.store(true, Ordering::Relaxed); - // state_notify.notify_waiters(); + loop { + is_active.store(true, Ordering::Relaxed); + state_notify.notify_waiters(); - // loop { - // // all available instances will have a default timestamp from create_instance - // let instances = sync - // .timestamps - // .read() - // .await - // .keys() - // .cloned() - // .collect::>(); + // loop { + // // all available instances will have a default timestamp from create_instance + // let instances = sync + // .timestamp_per_device + // .read() + // .await + // .keys() + // .cloned() + // .collect::>(); - // // obtains a lock on the timestamp collections for the instances we have - // let req_adds = err_break!( - // sd_cloud_api::library::message_collections::request_add( - // cloud_api_config_provider.get_request_config().await, - // library_id, - // instances, - // ) - // .await - // ); + // // obtains a lock on the timestamp collections for the instances we have - // let mut instances = vec![]; + // debug!( + // total_operations = req_adds.len(), + // 
"Preparing to send instance's operations to cloud;" + // ); - // use sd_cloud_api::library::message_collections::do_add; + // // gets new operations for each instance to send to cloud + // for req_add in req_adds { + // let ops = err_break!( + // sync.get_instance_ops( + // 1000, + // req_add.instance_uuid, + // NTP64( + // req_add + // .from_time + // .unwrap_or_else(|| "0".to_string()) + // .parse() + // .expect("couldn't parse ntp64 value"), + // ) + // ) + // .await + // ); - // debug!( - // total_operations = req_adds.len(), - // "Preparing to send instance's operations to cloud;" - // ); + // if ops.is_empty() { + // continue; + // } - // // gets new operations for each instance to send to cloud - // for req_add in req_adds { - // let ops = err_break!( - // sync.get_instance_ops( - // 1000, - // req_add.instance_uuid, - // NTP64( - // req_add - // .from_time - // .unwrap_or_else(|| "0".to_string()) - // .parse() - // .expect("couldn't parse ntp64 value"), - // ) - // ) - // .await - // ); + // let start_time = ops[0].timestamp.0.to_string(); + // let end_time = ops[ops.len() - 1].timestamp.0.to_string(); - // if ops.is_empty() { - // continue; - // } + // let ops_len = ops.len(); - // let start_time = ops[0].timestamp.0.to_string(); - // let end_time = ops[ops.len() - 1].timestamp.0.to_string(); + // use base64::prelude::*; - // let ops_len = ops.len(); + // debug!(instance_id = %req_add.instance_uuid, %start_time, %end_time); - // use base64::prelude::*; + // instances.push(do_add::Input { + // uuid: req_add.instance_uuid, + // key: req_add.key, + // start_time, + // end_time, + // contents: BASE64_STANDARD.encode( + // rmp_serde::to_vec_named(&CompressedCRDTOperations::new(ops)) + // .expect("CompressedCRDTOperation should serialize!"), + // ), + // ops_count: ops_len, + // }) + // } - // debug!(instance_id = %req_add.instance_uuid, %start_time, %end_time); + // if instances.is_empty() { + // break; + // } - // instances.push(do_add::Input { - // uuid: 
req_add.instance_uuid, - // key: req_add.key, - // start_time, - // end_time, - // contents: BASE64_STANDARD.encode( - // rmp_serde::to_vec_named(&CompressedCRDTOperations::new(ops)) - // .expect("CompressedCRDTOperation should serialize!"), - // ), - // ops_count: ops_len, - // }) - // } + // // uses lock we acquired earlier to send the operations to the cloud + // err_break!( + // do_add( + // cloud_api_config_provider.get_request_config().await, + // library_id, + // instances, + // ) + // .await + // ); + // } - // if instances.is_empty() { - // break; - // } + // is_active.store(false, Ordering::Relaxed); + // state_notify.notify_waiters(); - // // uses lock we acquired earlier to send the operations to the cloud - // err_break!( - // do_add( - // cloud_api_config_provider.get_request_config().await, - // library_id, - // instances, - // ) - // .await - // ); - // } + // if let RaceNotifiedOrStopped::Stopped = ( + // // recreate subscription each time so that existing messages are dropped + // wait_notification(sync.subscribe()), + // stop.into_future().map(|()| RaceNotifiedOrStopped::Stopped), + // ) + // .race() + // .await + // { + // break; + // } - // state.store(false, Ordering::Relaxed); - // state_notify.notify_waiters(); - - // if let RaceNotifiedOrStopped::Stopped = ( - // // recreate subscription each time so that existing messages are dropped - // wait_notification(sync.subscribe()), - // stop.into_future().map(|()| RaceNotifiedOrStopped::Stopped), - // ) - // .race() - // .await - // { - // break; - // } - - // sleep(Duration::from_millis(1000)).await; - // } + sleep(Duration::from_millis(1000)).await; + } } async fn wait_notification(mut rx: broadcast::Receiver) -> RaceNotifiedOrStopped { diff --git a/core/src/location/non_indexed.rs b/core/src/location/non_indexed.rs index 04d040f1c..5ad43989e 100644 --- a/core/src/location/non_indexed.rs +++ b/core/src/location/non_indexed.rs @@ -376,7 +376,9 @@ impl Entry { /// /// From my M1 Macbook Pro this: 
/// - takes 11ms per 10 000 files -/// and +/// +/// and +/// /// - consumes 0.16MB of RAM per 10 000 entries. /// /// The reason we collect these all up is so we can apply ordering, and then begin streaming the data as it's processed to the frontend. diff --git a/core/src/p2p/sync/mod.rs b/core/src/p2p/sync/mod.rs index 8ec7c29c0..8233a01c5 100644 --- a/core/src/p2p/sync/mod.rs +++ b/core/src/p2p/sync/mod.rs @@ -6,7 +6,7 @@ use crate::{ }; use sd_p2p_proto::{decode, encode}; -use sd_sync::CompressedCRDTOperations; +use sd_sync::CompressedCRDTOperationsPerModelPerDevice; use std::sync::Arc; @@ -30,8 +30,8 @@ mod originator { use super::*; - #[derive(Debug, PartialEq)] - pub struct Operations(pub CompressedCRDTOperations); + #[derive(Debug)] + pub struct Operations(pub CompressedCRDTOperationsPerModelPerDevice); impl Operations { // TODO: Per field errors for better error handling @@ -53,34 +53,36 @@ mod originator { } } - #[cfg(test)] - #[tokio::test] - async fn test() { - use sd_sync::CRDTOperation; - use uuid::Uuid; + // #[cfg(test)] + // #[tokio::test] + // async fn test() { + // use sd_sync::CRDTOperation; + // use uuid::Uuid; - { - let original = Operations(CompressedCRDTOperations::new(vec![])); + // { + // let original = Operations(CompressedCRDTOperationsPerModelPerDevice::new(vec![])); - let mut cursor = std::io::Cursor::new(original.to_bytes()); - let result = Operations::from_stream(&mut cursor).await.unwrap(); - assert_eq!(original, result); - } + // let mut cursor = std::io::Cursor::new(original.to_bytes()); + // let result = Operations::from_stream(&mut cursor).await.unwrap(); + // assert_eq!(original, result); + // } - { - let original = Operations(CompressedCRDTOperations::new(vec![CRDTOperation { - instance: Uuid::new_v4(), - timestamp: sync::NTP64(0), - record_id: rmpv::Value::Nil, - model: 0, - data: sd_sync::CRDTOperationData::create(), - }])); + // { + // let original = Operations(CompressedCRDTOperationsPerModelPerDevice::new(vec![ + // 
CRDTOperation { + // device_pub_id: Uuid::new_v4(), + // timestamp: sync::NTP64(0), + // record_id: rmpv::Value::Nil, + // model_id: 0, + // data: sd_sync::CRDTOperationData::create(), + // }, + // ])); - let mut cursor = std::io::Cursor::new(original.to_bytes()); - let result = Operations::from_stream(&mut cursor).await.unwrap(); - assert_eq!(original, result); - } - } + // let mut cursor = std::io::Cursor::new(original.to_bytes()); + // let result = Operations::from_stream(&mut cursor).await.unwrap(); + // assert_eq!(original, result); + // } + // } } #[instrument(skip(sync, p2p))] @@ -123,7 +125,10 @@ mod originator { let ops = sync.get_ops(args).await.unwrap(); tunnel - .write_all(&tx::Operations(CompressedCRDTOperations::new(ops)).to_bytes()) + .write_all( + &tx::Operations(CompressedCRDTOperationsPerModelPerDevice::new(ops)) + .to_bytes(), + ) .await .unwrap(); tunnel.flush().await.unwrap(); @@ -176,7 +181,7 @@ mod responder { async fn test() { { let original = MainRequest::GetOperations(GetOpsArgs { - clocks: vec![], + timestamp_per_device: vec![], count: 0, }); @@ -220,7 +225,7 @@ mod responder { stream .write_all( &tx::MainRequest::GetOperations(sync::GetOpsArgs { - clocks: timestamps, + timestamp_per_device: timestamps, count: OPS_PER_REQUEST, }) .to_bytes(), diff --git a/crates/sync/Cargo.toml b/crates/sync/Cargo.toml index 8b15355ca..302b37a53 100644 --- a/crates/sync/Cargo.toml +++ b/crates/sync/Cargo.toml @@ -12,7 +12,5 @@ rmp = { workspace = true } rmp-serde = { workspace = true } rmpv = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } -specta = { workspace = true, features = ["serde_json", "uhlc", "uuid"] } uhlc = { workspace = true } -uuid = { workspace = true, features = ["serde", "v4"] } +uuid = { workspace = true, features = ["serde", "v7"] } diff --git a/crates/sync/src/compressed.rs b/crates/sync/src/compressed.rs index 0db151330..52de0db7a 100644 --- a/crates/sync/src/compressed.rs +++ 
b/crates/sync/src/compressed.rs @@ -1,18 +1,21 @@ +use crate::{CRDTOperation, CRDTOperationData, DevicePubId, ModelId, RecordId}; + use std::mem; use serde::{Deserialize, Serialize}; use uhlc::NTP64; use uuid::Uuid; -use crate::{CRDTOperation, CRDTOperationData}; - -pub type CompressedCRDTOperationsForModel = Vec<(rmpv::Value, Vec)>; +pub type CompressedCRDTOperationsPerModel = + Vec<(ModelId, Vec<(RecordId, Vec)>)>; /// Stores a bunch of [`CRDTOperation`]s in a more memory-efficient form for sending to the cloud. -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct CompressedCRDTOperations(pub Vec<(Uuid, Vec<(u16, CompressedCRDTOperationsForModel)>)>); +#[derive(Serialize, Deserialize, Debug)] +pub struct CompressedCRDTOperationsPerModelPerDevice( + pub Vec<(DevicePubId, CompressedCRDTOperationsPerModel)>, +); -impl CompressedCRDTOperations { +impl CompressedCRDTOperationsPerModelPerDevice { #[must_use] pub fn new(ops: Vec) -> Self { let mut compressed = vec![]; @@ -23,36 +26,36 @@ impl CompressedCRDTOperations { return Self(vec![]); }; - let mut instance_id = first.instance; - let mut instance = vec![]; + let mut device_pub_id = first.device_pub_id; + let mut device_messages = vec![]; - let mut model_str = first.model; + let mut model_id = first.model_id; let mut model = vec![]; let mut record_id = first.record_id.clone(); let mut record = vec![first.into()]; for op in ops_iter { - if instance_id != op.instance { + if device_pub_id != op.device_pub_id { model.push(( mem::replace(&mut record_id, op.record_id.clone()), mem::take(&mut record), )); - instance.push(( - mem::replace(&mut model_str, op.model), + device_messages.push(( + mem::replace(&mut model_id, op.model_id), mem::take(&mut model), )); compressed.push(( - mem::replace(&mut instance_id, op.instance), - mem::take(&mut instance), + mem::replace(&mut device_pub_id, op.device_pub_id), + mem::take(&mut device_messages), )); - } else if model_str != op.model { + } else if model_id != op.model_id 
{ model.push(( mem::replace(&mut record_id, op.record_id.clone()), mem::take(&mut record), )); - instance.push(( - mem::replace(&mut model_str, op.model), + device_messages.push(( + mem::replace(&mut model_id, op.model_id), mem::take(&mut model), )); } else if record_id != op.record_id { @@ -66,8 +69,8 @@ impl CompressedCRDTOperations { } model.push((record_id, record)); - instance.push((model_str, model)); - compressed.push((instance_id, instance)); + device_messages.push((model_id, model)); + compressed.push((device_pub_id, device_messages)); Self(compressed) } @@ -113,13 +116,13 @@ impl CompressedCRDTOperations { pub fn into_ops(self) -> Vec { let mut ops = vec![]; - for (instance_id, instance) in self.0 { - for (model_str, model) in instance { - for (record_id, record) in model { + for (device_pub_id, device_messages) in self.0 { + for (model_id, model_messages) in device_messages { + for (record_id, record) in model_messages { for op in record { ops.push(CRDTOperation { - instance: instance_id, - model: model_str, + device_pub_id, + model_id, record_id: record_id.clone(), timestamp: op.timestamp, data: op.data, @@ -140,11 +143,12 @@ pub struct CompressedCRDTOperation { } impl From for CompressedCRDTOperation { - fn from(value: CRDTOperation) -> Self { - Self { - timestamp: value.timestamp, - data: value.data, - } + fn from( + CRDTOperation { + timestamp, data, .. 
+ }: CRDTOperation, + ) -> Self { + Self { timestamp, data } } } @@ -154,61 +158,62 @@ mod test { #[test] fn compress() { - let instance = Uuid::new_v4(); + let device_pub_id = Uuid::now_v7(); let uncompressed = vec![ CRDTOperation { - instance, + device_pub_id, timestamp: NTP64(0), - model: 0, + model_id: 0, record_id: rmpv::Value::Nil, data: CRDTOperationData::create(), }, CRDTOperation { - instance, + device_pub_id, timestamp: NTP64(0), - model: 0, + model_id: 0, record_id: rmpv::Value::Nil, data: CRDTOperationData::create(), }, CRDTOperation { - instance, + device_pub_id, timestamp: NTP64(0), - model: 0, + model_id: 0, record_id: rmpv::Value::Nil, data: CRDTOperationData::create(), }, CRDTOperation { - instance, + device_pub_id, timestamp: NTP64(0), - model: 1, + model_id: 1, record_id: rmpv::Value::Nil, data: CRDTOperationData::create(), }, CRDTOperation { - instance, + device_pub_id, timestamp: NTP64(0), - model: 1, + model_id: 1, record_id: rmpv::Value::Nil, data: CRDTOperationData::create(), }, CRDTOperation { - instance, + device_pub_id, timestamp: NTP64(0), - model: 0, + model_id: 0, record_id: rmpv::Value::Nil, data: CRDTOperationData::create(), }, CRDTOperation { - instance, + device_pub_id, timestamp: NTP64(0), - model: 0, + model_id: 0, record_id: rmpv::Value::Nil, data: CRDTOperationData::create(), }, ]; - let CompressedCRDTOperations(compressed) = CompressedCRDTOperations::new(uncompressed); + let CompressedCRDTOperationsPerModelPerDevice(compressed) = + CompressedCRDTOperationsPerModelPerDevice::new(uncompressed); assert_eq!(compressed[0].1[0].0, 0); assert_eq!(compressed[0].1[1].0, 1); @@ -221,7 +226,7 @@ mod test { #[test] fn into_ops() { - let compressed = CompressedCRDTOperations(vec![( + let compressed = CompressedCRDTOperationsPerModelPerDevice(vec![( Uuid::new_v4(), vec![ ( @@ -282,8 +287,8 @@ mod test { let uncompressed = compressed.into_ops(); assert_eq!(uncompressed.len(), 7); - assert_eq!(uncompressed[2].model, 0); - 
assert_eq!(uncompressed[4].model, 1); - assert_eq!(uncompressed[6].model, 0); + assert_eq!(uncompressed[2].model_id, 0); + assert_eq!(uncompressed[4].model_id, 1); + assert_eq!(uncompressed[6].model_id, 0); } } diff --git a/crates/sync/src/crdt.rs b/crates/sync/src/crdt.rs index 2a3872c92..13eda3ffa 100644 --- a/crates/sync/src/crdt.rs +++ b/crates/sync/src/crdt.rs @@ -1,9 +1,9 @@ +use crate::{DevicePubId, ModelId}; + use std::{collections::BTreeMap, fmt}; use serde::{Deserialize, Serialize}; -use specta::Type; use uhlc::NTP64; -use uuid::Uuid; pub enum OperationKind<'a> { Create, @@ -21,16 +21,12 @@ impl fmt::Display for OperationKind<'_> { } } -#[derive(PartialEq, Serialize, Deserialize, Clone, Debug, Type)] +#[derive(PartialEq, Serialize, Deserialize, Clone, Debug)] pub enum CRDTOperationData { #[serde(rename = "c")] - Create(#[specta(type = BTreeMap)] BTreeMap), + Create(BTreeMap), #[serde(rename = "u")] - Update { - field: String, - #[specta(type = serde_json::Value)] - value: rmpv::Value, - }, + Update { field: String, value: rmpv::Value }, #[serde(rename = "d")] Delete, } @@ -51,13 +47,11 @@ impl CRDTOperationData { } } -#[derive(PartialEq, Serialize, Deserialize, Clone, Type)] +#[derive(PartialEq, Serialize, Deserialize, Clone)] pub struct CRDTOperation { - pub instance: Uuid, - #[specta(type = u32)] + pub device_pub_id: DevicePubId, pub timestamp: NTP64, - pub model: u16, - #[specta(type = serde_json::Value)] + pub model_id: ModelId, pub record_id: rmpv::Value, pub data: CRDTOperationData, } @@ -73,7 +67,7 @@ impl fmt::Debug for CRDTOperation { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("CRDTOperation") .field("data", &self.data) - .field("model", &self.model) + .field("model", &self.model_id) .field("record_id", &self.record_id.to_string()) .finish_non_exhaustive() } diff --git a/crates/sync/src/factory.rs b/crates/sync/src/factory.rs index dd553173c..8973ff14d 100644 --- a/crates/sync/src/factory.rs +++ 
b/crates/sync/src/factory.rs @@ -1,10 +1,10 @@ -use uhlc::HLC; -use uuid::Uuid; - use crate::{ - CRDTOperation, CRDTOperationData, RelationSyncId, RelationSyncModel, SharedSyncModel, SyncId, + CRDTOperation, CRDTOperationData, DevicePubId, RelationSyncId, RelationSyncModel, + SharedSyncModel, SyncId, SyncModel, }; +use uhlc::HLC; + macro_rules! msgpack { (nil) => { ::rmpv::Value::Nil @@ -19,26 +19,27 @@ macro_rules! msgpack { pub trait OperationFactory { fn get_clock(&self) -> &HLC; - fn get_instance(&self) -> Uuid; + fn get_device_pub_id(&self) -> DevicePubId; - fn new_op(&self, id: &TSyncId, data: CRDTOperationData) -> CRDTOperation - where - TSyncId::Model: crate::SyncModel, - { + fn new_op>( + &self, + id: &SId, + data: CRDTOperationData, + ) -> CRDTOperation { let timestamp = self.get_clock().new_timestamp(); CRDTOperation { - instance: self.get_instance(), + device_pub_id: self.get_device_pub_id(), timestamp: *timestamp.get_time(), - model: ::MODEL_ID, + model_id: ::MODEL_ID, record_id: msgpack!(id), data, } } - fn shared_create, TModel: SharedSyncModel>( + fn shared_create( &self, - id: TSyncId, + id: impl SyncId, values: impl IntoIterator + 'static, ) -> Vec { vec![self.new_op( @@ -51,9 +52,10 @@ pub trait OperationFactory { ), )] } - fn shared_update, TModel: SharedSyncModel>( + + fn shared_update( &self, - id: TSyncId, + id: impl SyncId, field: impl Into, value: rmpv::Value, ) -> CRDTOperation { @@ -65,16 +67,14 @@ pub trait OperationFactory { }, ) } - fn shared_delete, TModel: SharedSyncModel>( - &self, - id: TSyncId, - ) -> CRDTOperation { + + fn shared_delete(&self, id: impl SyncId) -> CRDTOperation { self.new_op(&id, CRDTOperationData::Delete) } - fn relation_create, TModel: RelationSyncModel>( + fn relation_create( &self, - id: TSyncId, + id: impl RelationSyncId, values: impl IntoIterator + 'static, ) -> Vec { vec![self.new_op( @@ -87,9 +87,9 @@ pub trait OperationFactory { ), )] } - fn relation_update, TModel: RelationSyncModel>( + fn 
relation_update( &self, - id: TSyncId, + id: impl RelationSyncId, field: impl Into, value: rmpv::Value, ) -> CRDTOperation { @@ -101,9 +101,9 @@ pub trait OperationFactory { }, ) } - fn relation_delete, TModel: RelationSyncModel>( + fn relation_delete( &self, - id: TSyncId, + id: impl RelationSyncId, ) -> CRDTOperation { self.new_op(&id, CRDTOperationData::Delete) } @@ -111,29 +111,32 @@ pub trait OperationFactory { #[macro_export] macro_rules! sync_entry { - ($v:expr, $($m:tt)*) => { - ($($m)*::NAME, ::sd_utils::msgpack!($v)) + ($value:expr, $($prisma_column_module:tt)+) => { + ($($prisma_column_module)+::NAME, ::sd_utils::msgpack!($value)) } } #[macro_export] macro_rules! option_sync_entry { - ($v:expr, $($m:tt)*) => { - $v.map(|v| $crate::sync_entry!(v, $($m)*)) + ($value:expr, $($prisma_column_module:tt)+) => { + $value.map(|value| $crate::sync_entry!(value, $($prisma_column_module)+)) } } #[macro_export] macro_rules! sync_db_entry { - ($v:expr, $($m:tt)*) => {{ - let v = $v.into(); - ($crate::sync_entry!(&v, $($m)*), $($m)*::set(Some(v))) + ($value:expr, $($prisma_column_module:tt)+) => {{ + let value = $value.into(); + ( + $crate::sync_entry!(&value, $($prisma_column_module)+), + $($prisma_column_module)+::set(Some(value)) + ) }} } #[macro_export] macro_rules! 
option_sync_db_entry { - ($v:expr, $($m:tt)*) => { - $v.map(|v| $crate::sync_db_entry!(v, $($m)*)) + ($value:expr, $($prisma_column_module:tt)+) => { + $value.map(|value| $crate::sync_db_entry!(value, $($prisma_column_module)+)) }; } diff --git a/crates/sync/src/lib.rs b/crates/sync/src/lib.rs index 3d5eac56f..239a1298d 100644 --- a/crates/sync/src/lib.rs +++ b/crates/sync/src/lib.rs @@ -38,3 +38,7 @@ pub use factory::*; pub use model_traits::*; pub use uhlc::NTP64; + +pub type DevicePubId = uuid::Uuid; +pub type ModelId = u16; +pub type RecordId = rmpv::Value; diff --git a/crates/sync/src/model_traits.rs b/crates/sync/src/model_traits.rs index b0a063f2e..48a4efacd 100644 --- a/crates/sync/src/model_traits.rs +++ b/crates/sync/src/model_traits.rs @@ -1,3 +1,5 @@ +use crate::ModelId; + use prisma_client_rust::ModelTypes; use serde::{de::DeserializeOwned, Serialize}; @@ -6,7 +8,7 @@ pub trait SyncId: Serialize + DeserializeOwned { } pub trait SyncModel: ModelTypes { - const MODEL_ID: u16; + const MODEL_ID: ModelId; } pub trait SharedSyncModel: SyncModel { From 9a6f5f8493513edb00c55ed4487f63e7aeb2f1a8 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Mon, 26 Aug 2024 17:36:04 -0300 Subject: [PATCH 090/218] Properly stack keys to be able to fetch the latest one when needed --- .../src/key_manager/key_store.rs | 37 +++++++++++++------ .../cloud-services/src/key_manager/mod.rs | 13 ++++++- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/core/crates/cloud-services/src/key_manager/key_store.rs b/core/crates/cloud-services/src/key_manager/key_store.rs index 8f9d16c1b..6a3ac7497 100644 --- a/core/crates/cloud-services/src/key_manager/key_store.rs +++ b/core/crates/cloud-services/src/key_manager/key_store.rs @@ -9,7 +9,7 @@ use sd_crypto::{ use sd_utils::error::FileIOError; use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, VecDeque}, fs::Metadata, path::PathBuf, pin::pin, @@ -24,10 +24,12 @@ use tokio::{ }; use zeroize::{Zeroize, 
ZeroizeOnDrop}; +type KeyStack = VecDeque<(KeyHash, SecretKey)>; + #[derive(Serialize, Deserialize)] pub struct KeyStore { iroh_secret_key: IrohSecretKey, - keys: BTreeMap>, + keys: BTreeMap, } impl KeyStore { @@ -46,22 +48,24 @@ impl KeyStore { self.keys .entry(group_pub_id) .or_default() - .insert(KeyHash(hash.to_hex().to_string()), key); + .push_front((KeyHash(hash.to_hex().to_string()), key)); } pub fn add_many_keys( &mut self, group_pub_id: groups::PubId, - keys: impl IntoIterator, + keys: impl IntoIterator>, ) { let group_entry = self.keys.entry(group_pub_id).or_default(); - for key in keys { + // We reverse the secret keys as a implementation detail to + // keep the keys in the same order as they were added as a stack + for key in keys.into_iter().rev() { let mut hasher = blake3::Hasher::new(); hasher.update(key.as_ref()); let hash = hasher.finalize(); - group_entry.insert(KeyHash(hash.to_hex().to_string()), key); + group_entry.push_front((KeyHash(hash.to_hex().to_string()), key)); } } @@ -74,15 +78,23 @@ impl KeyStore { } pub fn get_key(&self, group_pub_id: groups::PubId, hash: &KeyHash) -> Option { + self.keys.get(&group_pub_id).and_then(|group| { + group + .iter() + .find_map(|(key_hash, key)| (key_hash == hash).then(|| key.clone())) + }) + } + + pub fn get_latest_key(&self, group_pub_id: groups::PubId) -> Option<(KeyHash, SecretKey)> { self.keys .get(&group_pub_id) - .and_then(|group| group.get(hash).cloned()) + .and_then(|group| group.front().cloned()) } pub fn get_group_keys(&self, group_pub_id: groups::PubId) -> Vec { self.keys .get(&group_pub_id) - .map(|group| group.values().cloned().collect()) + .map(|group| group.iter().map(|(_key_hash, key)| key.clone()).collect()) .unwrap_or_default() } @@ -251,9 +263,12 @@ impl KeyStore { impl Zeroize for KeyStore { fn zeroize(&mut self) { self.iroh_secret_key = IrohSecretKey::generate(); - self.keys - .values_mut() - .for_each(|group| group.values_mut().for_each(Zeroize::zeroize)); + 
self.keys.values_mut().for_each(|group| { + group + .iter_mut() + .map(|(_key_hash, key)| key) + .for_each(Zeroize::zeroize); + }); self.keys = BTreeMap::new(); } } diff --git a/core/crates/cloud-services/src/key_manager/mod.rs b/core/crates/cloud-services/src/key_manager/mod.rs index 411712c2b..6264cfa52 100644 --- a/core/crates/cloud-services/src/key_manager/mod.rs +++ b/core/crates/cloud-services/src/key_manager/mod.rs @@ -112,7 +112,10 @@ impl KeyManager { pub async fn add_many_keys( &self, group_pub_id: groups::PubId, - keys: impl IntoIterator + Send, + keys: impl IntoIterator< + Item = SecretKey, + IntoIter = impl DoubleEndedIterator + Send, + > + Send, rng: &mut CryptoRng, ) -> Result<(), Error> { let mut store = self.store.write().await; @@ -122,6 +125,14 @@ impl KeyManager { .encrypt(&self.master_key, rng, &self.keys_file_path) .await } + + pub async fn get_latest_key( + &self, + group_pub_id: groups::PubId, + ) -> Option<(KeyHash, SecretKey)> { + self.store.read().await.get_latest_key(group_pub_id) + } + pub async fn get_key(&self, group_pub_id: groups::PubId, hash: &KeyHash) -> Option { self.store.read().await.get_key(group_pub_id, hash) } From 3cd7f8de6ad74c54950f5c244009cdbb7dd3397e Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 27 Aug 2024 17:01:42 -0300 Subject: [PATCH 091/218] Make core compile again --- Cargo.lock | Bin 329700 -> 329727 bytes .../heavy-lifting/src/job_system/job.rs | 4 +- core/crates/prisma-helpers/src/lib.rs | 43 ++- core/crates/sync/Cargo.toml | 2 + core/crates/sync/src/backfill.rs | 110 +++---- core/crates/sync/src/db_operation.rs | 145 +++++---- core/crates/sync/src/ingest.rs | 93 +++--- core/crates/sync/src/lib.rs | 14 +- core/crates/sync/src/manager.rs | 120 +++---- core/crates/sync/tests/lib.rs | 18 +- core/crates/sync/tests/mock_instance.rs | 43 +-- core/prisma/schema.prisma | 25 +- core/src/api/cloud/libraries.rs | 106 +++--- core/src/api/cloud/library.rs | 134 -------- core/src/api/cloud/mod.rs | 7 +- 
core/src/api/mod.rs | 20 +- core/src/api/nodes.rs | 26 +- core/src/api/search/saved.rs | 2 +- core/src/api/sync.rs | 7 +- core/src/api/tags.rs | 2 +- core/src/cloud/sync/ingest.rs | 2 +- core/src/cloud/sync/receive.rs | 52 +-- core/src/library/config.rs | 84 +---- core/src/library/manager/error.rs | 8 +- core/src/library/manager/mod.rs | 31 +- core/src/location/manager/runner.rs | 34 +- core/src/location/mod.rs | 6 +- core/src/node/config.rs | 23 +- core/src/node/hardware.rs | 304 +++++++++++------- core/src/object/tag/mod.rs | 2 +- core/src/p2p/manager.rs | 4 +- core/src/p2p/metadata.rs | 2 +- core/src/p2p/sync/mod.rs | 2 +- core/src/volume/mod.rs | 24 +- core/src/volume/watcher.rs | 1 - crates/sync-generator/src/sync_data.rs | 2 +- crates/sync/src/factory.rs | 4 +- packages/client/src/core.ts | 55 ++-- 38 files changed, 715 insertions(+), 846 deletions(-) delete mode 100644 core/src/api/cloud/library.rs diff --git a/Cargo.lock b/Cargo.lock index d27b0c74293c5687107f23344775406cba48708f..aaee6b64cbb4bdae34599a7fe2cc4657418ce281 100644 GIT binary patch delta 65 zcmV-H0KWg^j}-rp6o7;QgaWh!r+b&Ta045c`E>&hmw<5t441ij0~D9cfC3qpAZq~& Xm(KPA2$!xb0u6@_d;_-*d<52uV0{?F delta 30 mcmez0FY=^cq@jheg=q`(^6Ke7OPTrFFH|#czfjGh-va>E6%9H7 diff --git a/core/crates/heavy-lifting/src/job_system/job.rs b/core/crates/heavy-lifting/src/job_system/job.rs index 0a3642797..785e33263 100644 --- a/core/crates/heavy-lifting/src/job_system/job.rs +++ b/core/crates/heavy-lifting/src/job_system/job.rs @@ -158,7 +158,7 @@ where JobCtx: JobContext, { fn into_job(self) -> Box> { - let id = JobId::new_v4(); + let id = JobId::now_v7(); Box::new(JobHolder { id, @@ -333,7 +333,7 @@ where } pub fn new(job: J) -> Self { - let id = JobId::new_v4(); + let id = JobId::now_v7(); Self { id, job, diff --git a/core/crates/prisma-helpers/src/lib.rs b/core/crates/prisma-helpers/src/lib.rs index 2d5abddd9..c9310aa44 100644 --- a/core/crates/prisma-helpers/src/lib.rs +++ b/core/crates/prisma-helpers/src/lib.rs @@ -34,7 
+34,6 @@ use sd_utils::{from_bytes_to_uuid, uuid_to_bytes}; use std::{borrow::Cow, fmt}; use serde::{Deserialize, Serialize}; -use specta::Type; use uuid::Uuid; // File Path selectables! @@ -244,7 +243,7 @@ job::select!(job_without_data { location::select!(location_ids_and_path { id pub_id - instance_id + device_pub_id path }); @@ -259,6 +258,7 @@ impl From for location::Data { id: data.id, pub_id: data.pub_id, path: data.path, + device_pub_id: data.device_pub_id, instance_id: data.instance_id, name: data.name, total_capacity: data.total_capacity, @@ -272,6 +272,7 @@ impl From for location::Data { scan_state: data.scan_state, file_paths: None, indexer_rules: None, + device: None, instance: None, } } @@ -283,6 +284,7 @@ impl From<&location_with_indexer_rules::Data> for location::Data { id: data.id, pub_id: data.pub_id.clone(), path: data.path.clone(), + device_pub_id: data.device_pub_id.clone(), instance_id: data.instance_id, name: data.name.clone(), total_capacity: data.total_capacity, @@ -296,6 +298,7 @@ impl From<&location_with_indexer_rules::Data> for location::Data { scan_state: data.scan_state, file_paths: None, indexer_rules: None, + device: None, instance: None, } } @@ -311,7 +314,7 @@ label::include!((take: i64) => label_with_objects { } }); -#[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Type)] +#[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, specta::Type)] #[serde(transparent)] pub struct CasId<'cas_id>(Cow<'cas_id, str>); @@ -374,17 +377,26 @@ impl From<&CasId<'_>> for String { } } -#[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Clone)] +#[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Clone, specta::Type)] #[serde(transparent)] #[repr(transparent)] +#[specta(rename = "CoreDevicePubId")] +pub struct DevicePubId(PubId); + +#[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Clone, specta::Type)] +#[serde(transparent)] +#[repr(transparent)] +#[specta(rename = "CoreFilePathPubId")] pub struct 
FilePathPubId(PubId); -#[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Clone)] +#[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Clone, specta::Type)] #[serde(transparent)] #[repr(transparent)] +#[specta(rename = "CoreObjectPubId")] pub struct ObjectPubId(PubId); -#[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Clone)] +#[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Clone, specta::Type)] +#[specta(rename = "CorePubId")] enum PubId { Uuid(Uuid), Vec(Vec), @@ -392,7 +404,7 @@ enum PubId { impl PubId { fn new() -> Self { - Self::Uuid(Uuid::new_v4()) + Self::Uuid(Uuid::now_v7()) } fn to_db(&self) -> Vec { @@ -451,6 +463,15 @@ impl From for Uuid { } } +impl From<&PubId> for Uuid { + fn from(pub_id: &PubId) -> Self { + match pub_id { + PubId::Uuid(uuid) => *uuid, + PubId::Vec(bytes) => from_bytes_to_uuid(bytes), + } + } +} + impl fmt::Display for PubId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -499,6 +520,12 @@ macro_rules! delegate_pub_id { } } + impl From<&$type_name> for ::uuid::Uuid { + fn from(pub_id: &$type_name) -> Self { + (&pub_id.0).into() + } + } + impl ::std::fmt::Display for $type_name { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { write!(f, "{}", self.0) @@ -526,4 +553,4 @@ macro_rules! 
delegate_pub_id { }; } -delegate_pub_id!(FilePathPubId, ObjectPubId); +delegate_pub_id!(FilePathPubId, ObjectPubId, DevicePubId); diff --git a/core/crates/sync/Cargo.toml b/core/crates/sync/Cargo.toml index 930c3cdd0..e4f6a80fe 100644 --- a/core/crates/sync/Cargo.toml +++ b/core/crates/sync/Cargo.toml @@ -9,6 +9,8 @@ default = [] [dependencies] # Spacedrive Sub-crates +sd-core-prisma-helpers = { path = "../prisma-helpers" } + sd-actors = { path = "../../../crates/actors" } sd-prisma = { path = "../../../crates/prisma" } sd-sync = { path = "../../../crates/sync" } diff --git a/core/crates/sync/src/backfill.rs b/core/crates/sync/src/backfill.rs index 77f16a575..6a363cbf2 100644 --- a/core/crates/sync/src/backfill.rs +++ b/core/crates/sync/src/backfill.rs @@ -1,7 +1,7 @@ use sd_prisma::{ prisma::{ - crdt_operation, exif_data, file_path, instance, label, label_on_object, location, object, - tag, tag_on_object, PrismaClient, SortOrder, + crdt_operation, exif_data, file_path, label, label_on_object, location, object, tag, + tag_on_object, PrismaClient, SortOrder, }, prisma_sync, }; @@ -17,11 +17,7 @@ use super::{crdt_op_unchecked_db, Error}; /// Takes all the syncable data in the database and generates [`CRDTOperations`] for it. /// This is a requirement before the library can sync. 
-pub async fn backfill_operations( - db: &PrismaClient, - sync: &crate::Manager, - instance_id: instance::id::Type, -) -> Result<(), Error> { +pub async fn backfill_operations(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { let lock = sync.timestamp_lock.lock().await; let res = db @@ -31,18 +27,20 @@ pub async fn backfill_operations( debug!("backfill started"); let start = Instant::now(); db.crdt_operation() - .delete_many(vec![crdt_operation::instance_id::equals(instance_id)]) + .delete_many(vec![crdt_operation::device_pub_id::equals( + sync.device_pub_id.to_db(), + )]) .exec() .await?; - paginate_tags(&db, sync, instance_id).await?; - paginate_locations(&db, sync, instance_id).await?; - paginate_objects(&db, sync, instance_id).await?; - paginate_exif_datas(&db, sync, instance_id).await?; - paginate_file_paths(&db, sync, instance_id).await?; - paginate_tags_on_objects(&db, sync, instance_id).await?; - paginate_labels(&db, sync, instance_id).await?; - paginate_labels_on_objects(&db, sync, instance_id).await?; + paginate_tags(&db, sync).await?; + paginate_locations(&db, sync).await?; + paginate_objects(&db, sync).await?; + paginate_exif_datas(&db, sync).await?; + paginate_file_paths(&db, sync).await?; + paginate_tags_on_objects(&db, sync).await?; + paginate_labels(&db, sync).await?; + paginate_labels_on_objects(&db, sync).await?; debug!(elapsed = ?start.elapsed(), "backfill ended"); @@ -112,13 +110,11 @@ where } #[instrument(skip(db, sync), err)] -async fn paginate_tags( - db: &PrismaClient, - sync: &crate::Manager, - instance_id: instance::id::Type, -) -> Result<(), Error> { +async fn paginate_tags(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { use tag::{color, date_created, date_modified, id, name}; + let device_pub_id = &sync.device_pub_id; + paginate( |cursor| { db.tag() @@ -143,7 +139,7 @@ async fn paginate_tags( ), ) }) - .map(|o| crdt_op_unchecked_db(&o, instance_id)) + .map(|o| crdt_op_unchecked_db(&o, device_pub_id)) 
.collect::, _>>() .map(|creates| db.crdt_operation().create_many(creates).exec()) }, @@ -152,16 +148,14 @@ async fn paginate_tags( } #[instrument(skip(db, sync), err)] -async fn paginate_locations( - db: &PrismaClient, - sync: &crate::Manager, - instance_id: instance::id::Type, -) -> Result<(), Error> { +async fn paginate_locations(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { use location::{ available_capacity, date_created, generate_preview_media, hidden, id, include, instance, is_archived, name, path, size_in_bytes, sync_preview_media, total_capacity, }; + let device_pub_id = &sync.device_pub_id; + paginate( |cursor| { db.location() @@ -209,7 +203,7 @@ async fn paginate_locations( ), ) }) - .map(|o| crdt_op_unchecked_db(&o, instance_id)) + .map(|o| crdt_op_unchecked_db(&o, device_pub_id)) .collect::, _>>() .map(|creates| db.crdt_operation().create_many(creates).exec()) }, @@ -218,13 +212,11 @@ async fn paginate_locations( } #[instrument(skip(db, sync), err)] -async fn paginate_objects( - db: &PrismaClient, - sync: &crate::Manager, - instance_id: instance::id::Type, -) -> Result<(), Error> { +async fn paginate_objects(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { use object::{date_accessed, date_created, favorite, hidden, id, important, kind, note}; + let device_pub_id = &sync.device_pub_id; + paginate( |cursor| { db.object() @@ -254,7 +246,7 @@ async fn paginate_objects( ), ) }) - .map(|o| crdt_op_unchecked_db(&o, instance_id)) + .map(|o| crdt_op_unchecked_db(&o, device_pub_id)) .collect::, _>>() .map(|creates| db.crdt_operation().create_many(creates).exec()) }, @@ -263,16 +255,14 @@ async fn paginate_objects( } #[instrument(skip(db, sync), err)] -async fn paginate_exif_datas( - db: &PrismaClient, - sync: &crate::Manager, - instance_id: instance::id::Type, -) -> Result<(), Error> { +async fn paginate_exif_datas(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { use exif_data::{ artist, camera_data, copyright, 
description, epoch_time, exif_version, id, include, media_date, media_location, resolution, }; + let device_pub_id = &sync.device_pub_id; + paginate( |cursor| { db.exif_data() @@ -311,7 +301,7 @@ async fn paginate_exif_datas( ), ) }) - .map(|o| crdt_op_unchecked_db(&o, instance_id)) + .map(|o| crdt_op_unchecked_db(&o, device_pub_id)) .collect::, _>>() .map(|creates| db.crdt_operation().create_many(creates).exec()) }, @@ -320,16 +310,14 @@ async fn paginate_exif_datas( } #[instrument(skip(db, sync), err)] -async fn paginate_file_paths( - db: &PrismaClient, - sync: &crate::Manager, - instance_id: instance::id::Type, -) -> Result<(), Error> { +async fn paginate_file_paths(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { use file_path::{ cas_id, date_created, date_indexed, date_modified, extension, hidden, id, include, inode, integrity_checksum, is_dir, location, materialized_path, name, object, size_in_bytes_bytes, }; + let device_pub_id = &sync.device_pub_id; + paginate( |cursor| { db.file_path() @@ -379,7 +367,7 @@ async fn paginate_file_paths( ), ) }) - .map(|o| crdt_op_unchecked_db(&o, instance_id)) + .map(|o| crdt_op_unchecked_db(&o, device_pub_id)) .collect::, _>>() .map(|creates| db.crdt_operation().create_many(creates).exec()) }, @@ -388,13 +376,11 @@ async fn paginate_file_paths( } #[instrument(skip(db, sync), err)] -async fn paginate_tags_on_objects( - db: &PrismaClient, - sync: &crate::Manager, - instance_id: instance::id::Type, -) -> Result<(), Error> { +async fn paginate_tags_on_objects(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { use tag_on_object::{date_created, include, object_id, tag_id}; + let device_pub_id = &sync.device_pub_id; + paginate_relation( |group_id, item_id| { db.tag_on_object() @@ -427,7 +413,7 @@ async fn paginate_tags_on_objects( ), ) }) - .map(|o| crdt_op_unchecked_db(&o, instance_id)) + .map(|o| crdt_op_unchecked_db(&o, device_pub_id)) .collect::, _>>() .map(|creates| 
db.crdt_operation().create_many(creates).exec()) }, @@ -436,13 +422,11 @@ async fn paginate_tags_on_objects( } #[instrument(skip(db, sync), err)] -async fn paginate_labels( - db: &PrismaClient, - sync: &crate::Manager, - instance_id: instance::id::Type, -) -> Result<(), Error> { +async fn paginate_labels(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { use label::{date_created, date_modified, id}; + let device_pub_id = &sync.device_pub_id; + paginate( |cursor| { db.label() @@ -466,7 +450,7 @@ async fn paginate_labels( ), ) }) - .map(|o| crdt_op_unchecked_db(&o, instance_id)) + .map(|o| crdt_op_unchecked_db(&o, device_pub_id)) .collect::, _>>() .map(|creates| db.crdt_operation().create_many(creates).exec()) }, @@ -475,13 +459,11 @@ async fn paginate_labels( } #[instrument(skip(db, sync), err)] -async fn paginate_labels_on_objects( - db: &PrismaClient, - sync: &crate::Manager, - instance_id: instance::id::Type, -) -> Result<(), Error> { +async fn paginate_labels_on_objects(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { use label_on_object::{date_created, include, label_id, object_id}; + let device_pub_id = &sync.device_pub_id; + paginate_relation( |group_id, item_id| { db.label_on_object() @@ -511,7 +493,7 @@ async fn paginate_labels_on_objects( [sync_entry!(l_o.date_created, date_created)], ) }) - .map(|o| crdt_op_unchecked_db(&o, instance_id)) + .map(|o| crdt_op_unchecked_db(&o, device_pub_id)) .collect::, _>>() .map(|creates| db.crdt_operation().create_many(creates).exec()) }, diff --git a/core/crates/sync/src/db_operation.rs b/core/crates/sync/src/db_operation.rs index 858788f18..2e5b883b2 100644 --- a/core/crates/sync/src/db_operation.rs +++ b/core/crates/sync/src/db_operation.rs @@ -1,82 +1,13 @@ -use sd_prisma::prisma::{cloud_crdt_operation, crdt_operation, instance, PrismaClient}; +use sd_core_prisma_helpers::DevicePubId; + +use sd_prisma::prisma::{cloud_crdt_operation, crdt_operation, device, PrismaClient}; use 
sd_sync::CRDTOperation; -use sd_utils::from_bytes_to_uuid; use tracing::instrument; use uhlc::NTP64; -use uuid::Uuid; use super::Error; -crdt_operation::include!(crdt_with_instance { - instance: select { pub_id } -}); - -cloud_crdt_operation::include!(cloud_crdt_with_instance { - instance: select { pub_id } -}); - -impl crdt_with_instance::Data { - #[allow(clippy::cast_sign_loss)] // SAFETY: we had to store using i64 due to SQLite limitations - pub const fn timestamp(&self) -> NTP64 { - NTP64(self.timestamp as u64) - } - - pub fn instance(&self) -> Uuid { - from_bytes_to_uuid(&self.instance.pub_id) - } - - pub fn into_operation(self) -> Result { - Ok(CRDTOperation { - device_pub_id: self.instance(), - timestamp: self.timestamp(), - record_id: rmp_serde::from_slice(&self.record_id)?, - - model_id: { - #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] - // SAFETY: we will not have more than 2^16 models and we had to store using signed - // integers due to SQLite limitations - { - self.model as u16 - } - }, - data: rmp_serde::from_slice(&self.data)?, - }) - } -} - -impl cloud_crdt_with_instance::Data { - #[allow(clippy::cast_sign_loss)] // SAFETY: we had to store using i64 due to SQLite limitations - pub const fn timestamp(&self) -> NTP64 { - NTP64(self.timestamp as u64) - } - - pub fn instance(&self) -> Uuid { - from_bytes_to_uuid(&self.instance.pub_id) - } - - #[instrument(skip(self), err)] - pub fn into_operation(self) -> Result<(i32, CRDTOperation), Error> { - Ok(( - self.id, - CRDTOperation { - device_pub_id: self.instance(), - timestamp: self.timestamp(), - record_id: rmp_serde::from_slice(&self.record_id)?, - model_id: { - #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] - // SAFETY: we will not have more than 2^16 models and we had to store using signed - // integers due to SQLite limitations - { - self.model as u16 - } - }, - data: rmp_serde::from_slice(&self.data)?, - }, - )) - } -} - #[instrument(skip(op, db), err)] pub 
async fn write_crdt_op_to_db(op: &CRDTOperation, db: &PrismaClient) -> Result<(), Error> { crdt_operation::Create { @@ -87,7 +18,7 @@ pub async fn write_crdt_op_to_db(op: &CRDTOperation, db: &PrismaClient) -> Resul op.timestamp.0 as i64 } }, - instance: instance::pub_id::equals(op.device_pub_id.as_bytes().to_vec()), + device: device::pub_id::equals(op.device_pub_id.as_bytes().to_vec()), kind: op.kind().to_string(), data: rmp_serde::to_vec(&op.data)?, model: i32::from(op.model_id), @@ -100,3 +31,71 @@ pub async fn write_crdt_op_to_db(op: &CRDTOperation, db: &PrismaClient) -> Resul .await .map_or_else(|e| Err(e.into()), |_| Ok(())) } + +pub fn into_ops( + crdt_operation::Data { + timestamp, + model, + record_id, + data, + device_pub_id, + .. + }: crdt_operation::Data, +) -> Result { + Ok(CRDTOperation { + device_pub_id: DevicePubId::from(device_pub_id).into(), + timestamp: { + #[allow(clippy::cast_sign_loss)] + { + // SAFETY: we had to store using i64 due to SQLite limitations + NTP64(timestamp as u64) + } + }, + model_id: { + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] + { + // SAFETY: we will not have more than 2^16 models and we had to store using signed + // integers due to SQLite limitations + model as u16 + } + }, + record_id: rmp_serde::from_slice(&record_id)?, + data: rmp_serde::from_slice(&data)?, + }) +} + +pub fn into_cloud_ops( + cloud_crdt_operation::Data { + id, + timestamp, + model, + record_id, + data, + device_pub_id, + .. 
+ }: cloud_crdt_operation::Data, +) -> Result<(cloud_crdt_operation::id::Type, CRDTOperation), Error> { + Ok(( + id, + CRDTOperation { + device_pub_id: DevicePubId::from(device_pub_id).into(), + timestamp: { + #[allow(clippy::cast_sign_loss)] + { + // SAFETY: we had to store using i64 due to SQLite limitations + NTP64(timestamp as u64) + } + }, + model_id: { + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] + { + // SAFETY: we will not have more than 2^16 models and we had to store using signed + // integers due to SQLite limitations + model as u16 + } + }, + record_id: rmp_serde::from_slice(&record_id)?, + data: rmp_serde::from_slice(&data)?, + }, + )) +} diff --git a/core/crates/sync/src/ingest.rs b/core/crates/sync/src/ingest.rs index 08a06a75d..e5fa60050 100644 --- a/core/crates/sync/src/ingest.rs +++ b/core/crates/sync/src/ingest.rs @@ -1,10 +1,12 @@ +use sd_core_prisma_helpers::DevicePubId; + use sd_prisma::{ prisma::{crdt_operation, PrismaClient, SortOrder}, prisma_sync::ModelSyncData, }; use sd_sync::{ CRDTOperation, CRDTOperationData, CompressedCRDTOperation, - CompressedCRDTOperationsPerModelPerDevice, OperationKind, + CompressedCRDTOperationsPerModelPerDevice, ModelId, OperationKind, }; use std::{ @@ -40,7 +42,7 @@ use super::{ /// Stuff that can be handled outside the actor pub enum Request { Messages { - timestamps: Vec<(Uuid, NTP64)>, + timestamps: Vec<(DevicePubId, NTP64)>, tx: oneshot::Sender<()>, }, FinishedIngesting, @@ -137,7 +139,7 @@ impl Actor { .read() .await .iter() - .map(|(&uid, ×tamp)| (uid, timestamp)) + .map(|(uid, ×tamp)| (uid.clone(), timestamp)) .collect(); if self @@ -193,11 +195,11 @@ impl Actor { "Ingesting operations;", ); - for (instance, data) in event.messages.0 { + for (device_pub_id, data) in event.messages.0 { for (model, data) in data { for (record, ops) in data { if let Err(e) = self - .process_crdt_operations(instance, model, record, ops) + .process_crdt_operations(device_pub_id.into(), model, record, 
ops) .await { error!(?e, "Failed to ingest CRDT operations;"); @@ -268,7 +270,7 @@ impl Actor { #[instrument(skip(self, ops), fields(operations_count = %ops.len()), err)] async fn process_crdt_operations( &mut self, - instance: Uuid, + device_pub_id: DevicePubId, model: u16, record_id: rmpv::Value, mut ops: Vec, @@ -284,7 +286,9 @@ impl Actor { self.clock .update_with_timestamp(&Timestamp::new( new_timestamp, - uhlc::ID::from(NonZeroU128::new(instance.to_u128_le()).expect("Non zero id")), + uhlc::ID::from( + NonZeroU128::new(Uuid::from(&device_pub_id).to_u128_le()).expect("Non zero id"), + ), )) .expect("timestamp has too much drift!"); @@ -293,7 +297,7 @@ impl Actor { .timestamp_per_device .read() .await - .get(&instance) + .get(&device_pub_id) .copied(); // Delete - ignores all other messages @@ -303,7 +307,7 @@ impl Actor { .find(|op| matches!(op.data, CRDTOperationData::Delete)) { trace!("Deleting operation"); - handle_crdt_deletion(db, instance, model, record_id, delete_op).await?; + handle_crdt_deletion(db, &device_pub_id, model, record_id, delete_op).await?; } // Create + > 0 Update - overwrites the create's data with the updates else if let Some(timestamp) = ops @@ -330,7 +334,8 @@ impl Actor { return Ok(()); } - handle_crdt_create_and_updates(db, instance, model, record_id, ops, timestamp).await?; + handle_crdt_create_and_updates(db, &device_pub_id, model, record_id, ops, timestamp) + .await?; } // > 0 Update - batches updates with a fake Create op else { @@ -387,7 +392,7 @@ impl Actor { return Ok(()); } - handle_crdt_updates(db, instance, model, record_id, data, updates).await?; + handle_crdt_updates(db, &device_pub_id, model, record_id, data, updates).await?; } // update the stored timestamp for this instance - will be derived from the crdt operations table on restart @@ -396,7 +401,7 @@ impl Actor { self.timestamp_per_device .write() .await - .insert(instance, new_ts); + .insert(device_pub_id, new_ts); Ok(()) } @@ -404,13 +409,14 @@ impl Actor { async 
fn handle_crdt_updates( db: &PrismaClient, - instance: Uuid, + device_pub_id: &DevicePubId, model: u16, record_id: rmpv::Value, mut data: BTreeMap, updates: Vec>, ) -> Result<(), Error> { let keys = data.keys().cloned().collect::>(); + let device_pub_id = sd_sync::DevicePubId::from(device_pub_id); // does the same thing as processing ops one-by-one and returning early if a newer op was found for (update, key) in updates.into_iter().zip(keys) { @@ -424,7 +430,7 @@ async fn handle_crdt_updates( .run(|db| async move { // fake operation to batch them all at once ModelSyncData::from_op(CRDTOperation { - device_pub_id: instance, + device_pub_id, model_id: model, record_id: record_id.clone(), timestamp: NTP64(0), @@ -447,7 +453,7 @@ async fn handle_crdt_updates( async move { write_crdt_op_to_db( &CRDTOperation { - device_pub_id: instance, + device_pub_id, model_id: model, record_id, timestamp, @@ -468,23 +474,24 @@ async fn handle_crdt_updates( async fn handle_crdt_create_and_updates( db: &PrismaClient, - instance: Uuid, - model: u16, + device_pub_id: &DevicePubId, + model_id: ModelId, record_id: rmpv::Value, ops: Vec, timestamp: NTP64, ) -> Result<(), Error> { let mut data = BTreeMap::new(); + let device_pub_id = sd_sync::DevicePubId::from(device_pub_id); let mut applied_ops = vec![]; // search for all Updates until a Create is found - for op in ops.iter().rev() { + for op in ops.into_iter().rev() { match &op.data { CRDTOperationData::Delete => unreachable!("Delete can't exist here!"), CRDTOperationData::Create(create_data) => { for (k, v) in create_data { - data.entry(k).or_insert(v); + data.entry(k.clone()).or_insert_with(|| v.clone()); } applied_ops.push(op); @@ -492,8 +499,8 @@ async fn handle_crdt_create_and_updates( break; } CRDTOperationData::Update { field, value } => { + data.insert(field.clone(), value.clone()); applied_ops.push(op); - data.insert(field, value); } } } @@ -503,35 +510,33 @@ async fn handle_crdt_create_and_updates( .run(|db| async move { // fake 
a create with a bunch of data rather than individual insert ModelSyncData::from_op(CRDTOperation { - device_pub_id: instance, - model_id: model, + device_pub_id, + model_id, record_id: record_id.clone(), timestamp, - data: CRDTOperationData::Create( - data.into_iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(), - ), + data: CRDTOperationData::Create(data), }) - .ok_or(Error::InvalidModelId(model))? + .ok_or(Error::InvalidModelId(model_id))? .exec(&db) .await?; applied_ops .into_iter() - .map(|op| { + .map(|CompressedCRDTOperation { timestamp, data }| { let record_id = record_id.clone(); let db = &db; async move { - let operation = CRDTOperation { - device_pub_id: instance, - model_id: model, - record_id, - timestamp: op.timestamp, - data: op.data.clone(), - }; - - write_crdt_op_to_db(&operation, db).await + write_crdt_op_to_db( + &CRDTOperation { + device_pub_id, + timestamp, + model_id, + record_id, + data, + }, + db, + ) + .await } }) .collect::>() @@ -544,14 +549,14 @@ async fn handle_crdt_create_and_updates( async fn handle_crdt_deletion( db: &PrismaClient, - instance: Uuid, + device_pub_id: &DevicePubId, model: u16, record_id: rmpv::Value, delete_op: &CompressedCRDTOperation, ) -> Result<(), Error> { // deletes are the be all and end all, no need to check anything let op = CRDTOperation { - device_pub_id: instance, + device_pub_id: device_pub_id.into(), model_id: model, record_id, timestamp: delete_op.timestamp, @@ -586,7 +591,7 @@ pub struct Handler { #[derive(Debug)] pub struct MessagesEvent { - pub instance_id: Uuid, + pub device_pub_id: DevicePubId, pub messages: CompressedCRDTOperationsPerModelPerDevice, pub has_more: bool, pub wait_tx: Option>, @@ -608,13 +613,13 @@ mod test { use super::*; async fn new_actor() -> (Handler, Arc) { - let instance = Uuid::new_v4(); + let device_pub_id = Uuid::now_v7(); let shared = Arc::new(SharedState { db: sd_prisma::test_db().await, - instance, + device_pub_id: device_pub_id.into(), clock: HLCBuilder::new() 
.with_id(uhlc::ID::from( - NonZeroU128::new(instance.to_u128_le()).expect("Non zero id"), + NonZeroU128::new(device_pub_id.to_u128_le()).expect("Non zero id"), )) .build(), timestamp_per_device: Arc::default(), diff --git a/core/crates/sync/src/lib.rs b/core/crates/sync/src/lib.rs index 118a343b6..1297e48cd 100644 --- a/core/crates/sync/src/lib.rs +++ b/core/crates/sync/src/lib.rs @@ -27,7 +27,7 @@ #![forbid(deprecated_in_future)] #![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)] -use sd_prisma::prisma::{crdt_operation, instance, PrismaClient}; +use sd_prisma::prisma::{crdt_operation, device, PrismaClient}; use sd_sync::CRDTOperation; use std::{ @@ -36,7 +36,6 @@ use std::{ }; use tokio::sync::{Notify, RwLock}; -use uuid::Uuid; mod actor; pub mod backfill; @@ -54,13 +53,14 @@ pub enum SyncMessage { Created, } -pub type DevicePubId = Uuid; +pub use sd_core_prisma_helpers::DevicePubId; + pub type TimestampPerDevice = Arc>>; pub struct SharedState { pub db: Arc, pub emit_messages_flag: Arc, - pub instance: Uuid, + pub device_pub_id: DevicePubId, pub timestamp_per_device: TimestampPerDevice, pub clock: uhlc::HLC, pub active: AtomicBool, @@ -106,7 +106,7 @@ pub fn crdt_op_db(op: &CRDTOperation) -> Result { op.timestamp.as_u64() as i64 } }, - instance: instance::pub_id::equals(op.device_pub_id.as_bytes().to_vec()), + device: device::pub_id::equals(op.device_pub_id.as_bytes().to_vec()), kind: op.kind().to_string(), data: rmp_serde::to_vec(&op.data)?, model: i32::from(op.model_id), @@ -117,7 +117,7 @@ pub fn crdt_op_db(op: &CRDTOperation) -> Result { pub fn crdt_op_unchecked_db( op: &CRDTOperation, - instance_id: i32, + device_pub_id: &DevicePubId, ) -> Result { Ok(crdt_operation::CreateUnchecked { timestamp: { @@ -127,7 +127,7 @@ pub fn crdt_op_unchecked_db( op.timestamp.as_u64() as i64 } }, - instance_id, + device_pub_id: device_pub_id.to_db(), kind: op.kind().to_string(), data: rmp_serde::to_vec(&op.data)?, model: i32::from(op.model_id), diff --git 
a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index 0ab5b76ed..1419d810e 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -1,7 +1,8 @@ -use sd_prisma::prisma::{cloud_crdt_operation, crdt_operation, instance, PrismaClient, SortOrder}; +use sd_core_prisma_helpers::DevicePubId; + +use sd_prisma::prisma::{cloud_crdt_operation, crdt_operation, device, PrismaClient, SortOrder}; use sd_sync::{CRDTOperation, OperationFactory}; -use sd_utils::{from_bytes_to_uuid, uuid_to_bytes}; -use tracing::warn; +use sd_utils::from_bytes_to_uuid; use std::{ cmp, fmt, @@ -15,12 +16,13 @@ use std::{ use prisma_client_rust::{and, operator::or}; use tokio::sync::{broadcast, Mutex, Notify, RwLock}; +use tracing::warn; use uhlc::{HLCBuilder, HLC}; use uuid::Uuid; use super::{ crdt_op_db, - db_operation::{cloud_crdt_with_instance, crdt_with_instance}, + db_operation::{into_cloud_ops, into_ops}, ingest, Error, SharedState, SyncMessage, NTP64, }; @@ -40,7 +42,7 @@ impl fmt::Debug for Manager { #[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq)] pub struct GetOpsArgs { - pub timestamp_per_device: Vec<(Uuid, NTP64)>, + pub timestamp_per_device: Vec<(DevicePubId, NTP64)>, pub count: u32, } @@ -49,17 +51,17 @@ impl Manager { /// Sync messages are received on the returned [`broadcast::Receiver`]. pub async fn new( db: Arc, - current_instance_uuid: Uuid, + current_device_pub_id: &DevicePubId, emit_messages_flag: Arc, actors: Arc, ) -> Result<(Self, broadcast::Receiver), Error> { - let existing_instances = db.instance().find_many(vec![]).exec().await?; + let existing_devices = db.device().find_many(vec![]).exec().await?; - Self::with_existing_instances( + Self::with_existing_devices( db, - current_instance_uuid, + current_device_pub_id, emit_messages_flag, - &existing_instances, + &existing_devices, actors, ) .await @@ -69,33 +71,35 @@ impl Manager { /// Sync messages are received on the returned [`broadcast::Receiver`]. 
/// /// # Panics - /// Panics if the `current_instance_id` UUID is zeroed. - pub async fn with_existing_instances( + /// Panics if the `current_device_pub_id` UUID is zeroed, which will never happen as we use `UUIDv7` for the + /// device pub id. As this version have a timestamp part, instead of being totally random. So the only + /// possible way to get zero from a `UUIDv7` is to go back in time to 1970 + pub async fn with_existing_devices( db: Arc, - current_instance_uuid: Uuid, + current_device_pub_id: &DevicePubId, emit_messages_flag: Arc, - existing_instances: &[instance::Data], + existing_devices: &[device::Data], actors: Arc, ) -> Result<(Self, broadcast::Receiver), Error> { - let timestamps = db + let latest_timestamp_per_device = db ._batch( - existing_instances + existing_devices .iter() - .map(|i| { + .map(|device| { db.crdt_operation() - .find_first(vec![crdt_operation::instance::is(vec![ - instance::id::equals(i.id), - ])]) + .find_first(vec![crdt_operation::device_pub_id::equals( + device.pub_id.clone(), + )]) .order_by(crdt_operation::timestamp::order(SortOrder::Desc)) }) .collect::>(), ) .await? 
.into_iter() - .zip(existing_instances) - .map(|(op, i)| { + .zip(existing_devices) + .map(|(op, device)| { ( - from_bytes_to_uuid(&i.pub_id), + DevicePubId::from(&device.pub_id), #[allow(clippy::cast_sign_loss)] // SAFETY: we had to store using i64 due to SQLite limitations NTP64(op.map(|o| o.timestamp).unwrap_or_default() as u64), @@ -107,15 +111,16 @@ impl Manager { let clock = HLCBuilder::new() .with_id(uhlc::ID::from( - NonZeroU128::new(current_instance_uuid.to_u128_le()).expect("Non zero id"), + NonZeroU128::new(Uuid::from(current_device_pub_id).to_u128_le()) + .expect("Non zero id"), )) .build(); let shared = Arc::new(SharedState { db, - instance: current_instance_uuid, + device_pub_id: current_device_pub_id.clone(), clock, - timestamp_per_device: Arc::new(RwLock::new(timestamps)), + timestamp_per_device: Arc::new(RwLock::new(latest_timestamp_per_device)), emit_messages_flag, active: AtomicBool::default(), active_notify: Notify::default(), @@ -168,7 +173,7 @@ impl Manager { .timestamp_per_device .write() .await - .insert(self.instance, last.timestamp); + .insert(self.device_pub_id.clone(), last.timestamp); } if self.tx.send(SyncMessage::Created).is_err() { @@ -216,33 +221,30 @@ impl Manager { .timestamp_per_device .write() .await - .insert(self.instance, op.timestamp); + .insert(self.device_pub_id.clone(), op.timestamp); Ok(ret) } - pub async fn get_instance_ops( + pub async fn get_device_ops( &self, count: u32, - instance_uuid: Uuid, + device_pub_id: DevicePubId, timestamp: NTP64, ) -> Result, Error> { self.db .crdt_operation() .find_many(vec![ - crdt_operation::instance::is(vec![instance::pub_id::equals(uuid_to_bytes( - &instance_uuid, - ))]), + crdt_operation::device::is(vec![device::pub_id::equals(device_pub_id.into())]), #[allow(clippy::cast_possible_wrap)] crdt_operation::timestamp::gt(timestamp.as_u64() as i64), ]) .take(i64::from(count)) .order_by(crdt_operation::timestamp::order(SortOrder::Asc)) - .include(crdt_with_instance::include()) .exec() 
.await? .into_iter() - .map(crdt_with_instance::Data::into_operation) + .map(into_ops) .collect() } @@ -253,10 +255,10 @@ impl Manager { .find_many(vec![or(args .timestamp_per_device .iter() - .map(|(instance_id, timestamp)| { + .map(|(device_pub_id, timestamp)| { and![ - crdt_operation::instance::is(vec![instance::pub_id::equals( - uuid_to_bytes(instance_id) + crdt_operation::device::is(vec![device::pub_id::equals( + device_pub_id.to_db() )]), crdt_operation::timestamp::gt({ #[allow(clippy::cast_possible_wrap)] @@ -267,46 +269,47 @@ impl Manager { }) ] }) - .chain([crdt_operation::instance::is_not(vec![ - instance::pub_id::in_vec( + .chain([crdt_operation::device::is_not(vec![ + device::pub_id::in_vec( args.timestamp_per_device .iter() - .map(|(instance_id, _)| uuid_to_bytes(instance_id)) + .map(|(device_pub_id, _)| device_pub_id.to_db()) .collect(), ), ])]) .collect())]) .take(i64::from(args.count)) .order_by(crdt_operation::timestamp::order(SortOrder::Asc)) - .include(crdt_with_instance::include()) .exec() .await?; - ops.sort_by(|a, b| match a.timestamp().cmp(&b.timestamp()) { - cmp::Ordering::Equal => a.instance().cmp(&b.instance()), + ops.sort_by(|a, b| match a.timestamp.cmp(&b.timestamp) { + cmp::Ordering::Equal => { + from_bytes_to_uuid(&a.device_pub_id).cmp(&from_bytes_to_uuid(&b.device_pub_id)) + } o => o, }); ops.into_iter() .take(args.count as usize) - .map(crdt_with_instance::Data::into_operation) + .map(into_ops) .collect() } pub async fn get_cloud_ops( &self, args: GetOpsArgs, - ) -> Result, Error> { + ) -> Result, Error> { let mut ops = self .db .cloud_crdt_operation() .find_many(vec![or(args .timestamp_per_device .iter() - .map(|(instance_id, timestamp)| { + .map(|(device_pub_id, timestamp)| { and![ - cloud_crdt_operation::instance::is(vec![instance::pub_id::equals( - uuid_to_bytes(instance_id) + cloud_crdt_operation::device::is(vec![device::pub_id::equals( + device_pub_id.to_db() )]), cloud_crdt_operation::timestamp::gt({ 
#[allow(clippy::cast_possible_wrap)] @@ -317,29 +320,30 @@ impl Manager { }) ] }) - .chain([cloud_crdt_operation::instance::is_not(vec![ - instance::pub_id::in_vec( + .chain([cloud_crdt_operation::device::is_not(vec![ + device::pub_id::in_vec( args.timestamp_per_device .iter() - .map(|(instance_id, _)| uuid_to_bytes(instance_id)) + .map(|(device_pub_id, _)| device_pub_id.to_db()) .collect(), ), ])]) .collect())]) .take(i64::from(args.count)) .order_by(cloud_crdt_operation::timestamp::order(SortOrder::Asc)) - .include(cloud_crdt_with_instance::include()) .exec() .await?; - ops.sort_by(|a, b| match a.timestamp().cmp(&b.timestamp()) { - cmp::Ordering::Equal => a.instance().cmp(&b.instance()), + ops.sort_by(|a, b| match a.timestamp.cmp(&b.timestamp) { + cmp::Ordering::Equal => { + from_bytes_to_uuid(&a.device_pub_id).cmp(&from_bytes_to_uuid(&b.device_pub_id)) + } o => o, }); ops.into_iter() .take(args.count as usize) - .map(cloud_crdt_with_instance::Data::into_operation) + .map(into_cloud_ops) .collect() } } @@ -349,8 +353,8 @@ impl OperationFactory for Manager { &self.clock } - fn get_device_pub_id(&self) -> Uuid { - self.instance + fn get_device_pub_id(&self) -> sd_sync::DevicePubId { + sd_sync::DevicePubId::from(&self.device_pub_id) } } diff --git a/core/crates/sync/tests/lib.rs b/core/crates/sync/tests/lib.rs index 45c602c51..b2a27b516 100644 --- a/core/crates/sync/tests/lib.rs +++ b/core/crates/sync/tests/lib.rs @@ -6,7 +6,7 @@ use sd_prisma::{prisma::location, prisma_sync}; use sd_sync::*; use sd_utils::{msgpack, uuid_to_bytes}; -use mock_instance::Instance; +use mock_instance::Device; use tracing::info; use tracing_test::traced_test; use uuid::Uuid; @@ -14,7 +14,7 @@ use uuid::Uuid; const MOCK_LOCATION_NAME: &str = "Location 0"; const MOCK_LOCATION_PATH: &str = "/User/Anon/Documents"; -async fn write_test_location(instance: &Instance) -> location::Data { +async fn write_test_location(instance: &Device) -> location::Data { let location_pub_id = Uuid::new_v4(); 
let location = instance @@ -81,7 +81,7 @@ async fn write_test_location(instance: &Instance) -> location::Data { #[tokio::test] #[traced_test] async fn writes_operations_and_rows_together() -> Result<(), Box> { - let instance = Instance::new(Uuid::new_v4()).await; + let instance = Device::new(Uuid::new_v4()).await; write_test_location(&instance).await; @@ -119,14 +119,14 @@ async fn writes_operations_and_rows_together() -> Result<(), Box Result<(), Box> { - let instance1 = Instance::new(Uuid::new_v4()).await; - let instance2 = Instance::new(Uuid::new_v4()).await; + let instance1 = Device::new(Uuid::new_v4()).await; + let instance2 = Device::new(Uuid::new_v4()).await; let mut instance2_sync_rx = instance2.sync_rx.resubscribe(); info!("Created instances!"); - Instance::pair(&instance1, &instance2).await; + Device::pair(&instance1, &instance2).await; info!("Paired instances!"); @@ -162,12 +162,12 @@ async fn operations_send_and_ingest() -> Result<(), Box> #[tokio::test] async fn no_update_after_delete() -> Result<(), Box> { - let instance1 = Instance::new(Uuid::new_v4()).await; - let instance2 = Instance::new(Uuid::new_v4()).await; + let instance1 = Device::new(Uuid::new_v4()).await; + let instance2 = Device::new(Uuid::new_v4()).await; let mut instance2_sync_rx = instance2.sync_rx.resubscribe(); - Instance::pair(&instance1, &instance2).await; + Device::pair(&instance1, &instance2).await; let location = write_test_location(&instance1).await; diff --git a/core/crates/sync/tests/mock_instance.rs b/core/crates/sync/tests/mock_instance.rs index 591b52a8a..fd1c13700 100644 --- a/core/crates/sync/tests/mock_instance.rs +++ b/core/crates/sync/tests/mock_instance.rs @@ -2,11 +2,9 @@ use sd_core_sync::*; use sd_prisma::prisma; use sd_sync::CompressedCRDTOperationsPerModelPerDevice; -use sd_utils::uuid_to_bytes; use std::sync::{atomic::AtomicBool, Arc}; -use prisma_client_rust::chrono::Utc; use tokio::{fs, spawn, sync::broadcast}; use tracing::{info, instrument, warn, 
Instrument}; use uuid::Uuid; @@ -16,16 +14,17 @@ fn db_path(id: Uuid) -> String { } #[derive(Clone)] -pub struct Instance { - pub id: Uuid, +pub struct Device { + pub pub_id: DevicePubId, pub db: Arc, pub sync: Arc, pub sync_rx: Arc>, } -impl Instance { +impl Device { pub async fn new(id: Uuid) -> Arc { let url = format!("file:{}", db_path(id)); + let device_pub_id = DevicePubId::from(id); let db = Arc::new( prisma::PrismaClient::_builder() @@ -37,22 +36,15 @@ impl Instance { db._db_push().await.unwrap(); - db.instance() - .create( - uuid_to_bytes(&id), - vec![], - vec![], - Utc::now().into(), - Utc::now().into(), - vec![], - ) + db.device() + .create(device_pub_id.to_db(), vec![]) .exec() .await .unwrap(); let (sync, sync_rx) = sd_core_sync::Manager::new( Arc::clone(&db), - id, + &device_pub_id, Arc::new(AtomicBool::new(true)), Default::default(), ) @@ -60,7 +52,7 @@ impl Instance { .expect("failed to create sync manager"); Arc::new(Self { - id, + pub_id: device_pub_id, db, sync: Arc::new(sync), sync_rx: Arc::new(sync_rx), @@ -68,22 +60,17 @@ impl Instance { } pub async fn teardown(&self) { - fs::remove_file(db_path(self.id)).await.unwrap(); + fs::remove_file(db_path(Uuid::from(&self.pub_id))) + .await + .unwrap(); } pub async fn pair(instance1: &Arc, instance2: &Arc) { #[instrument(skip(left, right))] - async fn half(left: &Arc, right: &Arc, context: &'static str) { + async fn half(left: &Arc, right: &Arc, context: &'static str) { left.db - .instance() - .create( - uuid_to_bytes(&right.id), - vec![], - vec![], - Utc::now().into(), - Utc::now().into(), - vec![], - ) + .device() + .create(right.pub_id.to_db(), vec![]) .exec() .await .unwrap(); @@ -137,7 +124,7 @@ impl Instance { messages, ), has_more: false, - instance_id: left.id, + device_pub_id: left.pub_id.clone(), wait_tx: None, })) .await diff --git a/core/prisma/schema.prisma b/core/prisma/schema.prisma index d1a0d679d..41d698f63 100644 --- a/core/prisma/schema.prisma +++ b/core/prisma/schema.prisma @@ 
-57,20 +57,20 @@ model CloudCRDTOperation { /// Devices are the owner machines connected to this library /// @shared(id: pub_id, modelId: 12) model Device { - id Int @id @default(autoincrement()) + id Int @id @default(autoincrement()) // uuid v7 - pub_id Bytes @unique - name String + pub_id Bytes @unique + name String? // Not actually NULLABLE, but we have to comply with current sync implementation BS // Enum: sd_cloud_schema::device::DeviceOS - os Int + os Int? // Not actually NULLABLE, but we have to comply with current sync implementation BS // Enum: sd_cloud_schema::device::HardwareModel - hardware_model Int + hardware_model Int? // Not actually NULLABLE, but we have to comply with current sync implementation BS // clock timestamp for sync timestamp BigInt? - date_created DateTime + date_created DateTime? // Not actually NULLABLE, but we have to comply with current sync implementation BS date_deleted DateTime? CRDTOperation CRDTOperation[] @@ -103,6 +103,7 @@ model Instance { // clock timestamp for sync timestamp BigInt? + Location Location[] @@map("instance") } @@ -168,8 +169,12 @@ model Location { scan_state Int @default(0) // Enum: sd_core::location::ScanState - device_pub_id Bytes - device Device @relation(fields: [device_pub_id], references: [pub_id], onDelete: Cascade) + device_pub_id Bytes? + device Device? @relation(fields: [device_pub_id], references: [pub_id], onDelete: Cascade) + + // this should just be a local-only cache but it's too much effort to broadcast online locations rn (@brendan) + instance_id Int? + instance Instance? @relation(fields: [instance_id], references: [id], onDelete: SetNull) file_paths FilePath[] indexer_rules IndexerRulesInLocation[] @@ -585,8 +590,8 @@ model StorageStatistics { total_capacity BigInt @default(0) available_capacity BigInt @default(0) - device_pub_id Bytes @unique - device Device @relation(fields: [device_pub_id], references: [pub_id], onDelete: Cascade) + device_pub_id Bytes? @unique + device Device? 
@relation(fields: [device_pub_id], references: [pub_id], onDelete: Cascade) @@map("storage_statistics") } diff --git a/core/src/api/cloud/libraries.rs b/core/src/api/cloud/libraries.rs index bbc7d3027..9848de43c 100644 --- a/core/src/api/cloud/libraries.rs +++ b/core/src/api/cloud/libraries.rs @@ -2,7 +2,9 @@ use crate::api::{utils::library, Ctx, R}; use sd_cloud_schema::{auth::AccessToken, devices, libraries}; +use futures_concurrency::future::TryJoin; use rspc::alpha::AlphaRouter; +use serde::Deserialize; use tracing::debug; use super::try_get_cloud_services_client; @@ -42,25 +44,25 @@ pub fn mount() -> AlphaRouter { }) }) .procedure("create", { - #[derive(Debug, serde::Serialize, serde::Deserialize, specta::Type)] - struct LibrariesCreateArgs { - access_token: AccessToken, - device_pub_id: devices::PubId, - } - R.with2(library()) - .mutation(|(node, library), args: LibrariesCreateArgs| async move { - let req = libraries::create::Request { - name: library.config().await.name.to_string(), - access_token: args.access_token, - pub_id: libraries::PubId(library.id), - device_pub_id: args.device_pub_id, - }; + .mutation(|(node, library), access_token: AccessToken| async move { + let (client, name, device_pub_id) = ( + try_get_cloud_services_client(&node), + async { Ok(library.config().await.name.to_string()) }, + async { Ok(devices::PubId(node.config.get().await.id.into())) }, + ) + .try_join() + .await?; + super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? + client .libraries() - .create(req) + .create(libraries::create::Request { + name, + access_token, + pub_id: libraries::PubId(library.id), + device_pub_id, + }) .await, "Failed to create library;", )??; @@ -69,35 +71,59 @@ pub fn mount() -> AlphaRouter { }) }) .procedure("delete", { - R.mutation(|node, req: libraries::delete::Request| async move { - super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? 
- .libraries() - .delete(req) - .await, - "Failed to delete library;", - )??; + R.with2(library()) + .mutation(|(node, library), access_token: AccessToken| async move { + super::handle_comm_error( + try_get_cloud_services_client(&node) + .await? + .libraries() + .delete(libraries::delete::Request { + access_token, + pub_id: libraries::PubId(library.id), + }) + .await, + "Failed to delete library;", + )??; - debug!("Deleted library"); + debug!("Deleted library"); - Ok(()) - }) + Ok(()) + }) }) .procedure("update", { - R.mutation(|node, req: libraries::update::Request| async move { - super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? - .libraries() - .update(req) - .await, - "Failed to update library;", - )??; + #[derive(Deserialize, specta::Type)] + struct LibrariesUpdateArgs { + access_token: AccessToken, + name: String, + } - debug!("Updated library"); + R.with2(library()).mutation( + |(node, library), + LibrariesUpdateArgs { access_token, name }: LibrariesUpdateArgs| async move { + super::handle_comm_error( + try_get_cloud_services_client(&node) + .await? 
+ .libraries() + .update(libraries::update::Request { + access_token, + pub_id: libraries::PubId(library.id), + name, + }) + .await, + "Failed to update library;", + )??; - Ok(()) - }) + debug!("Updated library"); + + Ok(()) + }, + ) + }) + .procedure("sync", { + R.with2(library()) + .mutation(|(_, library), _: ()| async move { + library.do_cloud_sync(); + Ok(()) + }) }) } diff --git a/core/src/api/cloud/library.rs b/core/src/api/cloud/library.rs deleted file mode 100644 index ca110c397..000000000 --- a/core/src/api/cloud/library.rs +++ /dev/null @@ -1,134 +0,0 @@ -// This file is being deprecated in favor of libraries.rs -// This is due to the migration to the new API system, but the frontend is still using this file - -use crate::{api::utils::library, invalidate_query}; - -use super::*; - -pub fn mount() -> AlphaRouter { - R.router() - .procedure("get", { - R.with2(library()) - .query(|(node, library), _: ()| async move { - // Ok( - // sd_cloud_api::library::get(node.cloud_api_config().await, library.id) - // .await?, - // ) - - Ok(()) - }) - }) - .procedure("list", { - R.query(|node, _: ()| async move { - // Ok(sd_cloud_api::library::list(node.cloud_api_config().await).await?) 
- Ok(()) - }) - }) - .procedure("create", { - R.with2(library()) - .mutation(|(node, library), _: ()| async move { - // let node_config = node.config.get().await; - // let cloud_library = sd_cloud_api::library::create( - // node.cloud_api_config().await, - // library.id, - // &library.config().await.name, - // library.instance_uuid, - // library.identity.to_remote_identity(), - // node_config.id, - // node_config.identity.to_remote_identity(), - // &node.p2p.peer_metadata(), - // ) - // .await?; - // node.libraries - // .edit( - // library.id, - // None, - // MaybeUndefined::Undefined, - // MaybeUndefined::Value(cloud_library.id), - // None, - // ) - // .await?; - - invalidate_query!(library, "cloud.library.get"); - - Ok(()) - }) - }) - .procedure("join", { - R.mutation(|node, library_id: Uuid| async move { - // let Some(cloud_library) = - // sd_cloud_api::library::get(node.cloud_api_config().await, library_id).await? - // else { - // return Err(rspc::Error::new( - // rspc::ErrorCode::NotFound, - // "Library not found".to_string(), - // )); - // }; - - // let library = node - // .libraries - // .create_with_uuid( - // library_id, - // LibraryName::new(cloud_library.name).map_err(|e| { - // rspc::Error::new(rspc::ErrorCode::InternalServerError, e.to_string()) - // })?, - // None, - // false, - // None, - // &node, - // true, - // ) - // .await?; - // node.libraries - // .edit( - // library.id, - // None, - // MaybeUndefined::Undefined, - // MaybeUndefined::Value(cloud_library.id), - // None, - // ) - // .await?; - - // let node_config = node.config.get().await; - // let instances = sd_cloud_api::library::join( - // node.cloud_api_config().await, - // library_id, - // library.instance_uuid, - // library.identity.to_remote_identity(), - // node_config.id, - // node_config.identity.to_remote_identity(), - // node.p2p.peer_metadata(), - // ) - // .await?; - - // for instance in instances { - // crate::cloud::sync::receive::upsert_instance( - // library.id, - // 
&library.db, - // &library.sync, - // &node.libraries, - // &instance.uuid, - // instance.identity, - // &instance.node_id, - // RemoteIdentity::from_str(&instance.node_remote_identity) - // .expect("malformed remote identity in the DB"), - // instance.metadata, - // ) - // .await?; - // } - - // invalidate_query!(library, "cloud.library.get"); - // invalidate_query!(library, "cloud.library.list"); - - // Ok(LibraryConfigWrapped::from_library(&library).await) - Ok(()) - }) - }) - .procedure("sync", { - R.with2(library()) - .mutation(|(_, library), _: ()| async move { - library.do_cloud_sync(); - Ok(()) - }) - }) -} diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index bc39cae49..49c5cad36 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -1,11 +1,12 @@ use crate::{node::config::NodeConfig, volume::get_volumes, Node}; +use sd_core_cloud_services::{CloudP2P, IrohSecretKey, KeyManager, QuinnConnection, UserResponse}; + use sd_cloud_schema::{ auth, error::{ClientSideError, Error}, users, Client, Service, }; -use sd_core_cloud_services::{CloudP2P, IrohSecretKey, KeyManager, QuinnConnection, UserResponse}; use sd_crypto::{CryptoRng, SeedableRng}; use std::pin::pin; @@ -14,13 +15,11 @@ use async_stream::stream; use futures::StreamExt; use rspc::alpha::AlphaRouter; use tracing::error; -use uuid::Uuid; use super::{Ctx, R}; mod devices; mod libraries; -mod library; mod locations; async fn try_get_cloud_services_client( @@ -69,7 +68,7 @@ pub(crate) fn mount() -> AlphaRouter { let (device_pub_id, name, os) = { let NodeConfig { id, name, os, .. 
} = node.config.get().await; - (devices::PubId(id), name, os) + (devices::PubId(id.into()), name, os) }; let mut hasher = blake3::Hasher::new(); hasher.update(device_pub_id.0.as_bytes().as_slice()); diff --git a/core/src/api/mod.rs b/core/src/api/mod.rs index 30e55aee7..7a1dd1597 100644 --- a/core/src/api/mod.rs +++ b/core/src/api/mod.rs @@ -3,13 +3,16 @@ use crate::{ library::LibraryId, node::{ config::{is_in_docker, NodeConfig, NodeConfigP2P, NodePreferences}, - get_hardware_model_name, HardwareModel, + HardwareModel, }, old_job::JobProgressEvent, Node, }; use sd_core_heavy_lifting::media_processor::ThumbKey; +use sd_core_sync::DevicePubId; + +use sd_cloud_schema::devices::DeviceOS; use sd_p2p::RemoteIdentity; use sd_prisma::prisma::file_path; @@ -20,7 +23,6 @@ use rspc::{alpha::Rspc, Config, ErrorCode}; use serde::{Deserialize, Serialize}; use specta::Type; use tracing::warn; -use uuid::Uuid; mod backups; mod cloud; @@ -86,13 +88,15 @@ pub enum BackendFeature {} #[derive(Debug, Serialize, Deserialize, Clone, Type)] pub struct SanitizedNodeConfig { /// id is a unique identifier for the current node. Each node has a public identifier (this one) and is given a local id for each library (done within the library code). - pub id: Uuid, + pub id: DevicePubId, /// name is the display name of the current node. This is set by the user and is shown in the UI. 
// TODO: Length validation so it can fit in DNS record pub name: String, pub identity: RemoteIdentity, pub p2p: NodeConfigP2P, pub features: Vec, pub preferences: NodePreferences, + pub os: DeviceOS, + pub hardware_model: HardwareModel, } impl From for SanitizedNodeConfig { @@ -104,6 +108,8 @@ impl From for SanitizedNodeConfig { p2p: value.p2p, features: value.features, preferences: value.preferences, + os: value.os, + hardware_model: value.hardware_model, } } } @@ -136,12 +142,11 @@ pub(crate) fn mount() -> Arc { }) .procedure("nodeState", { R.query(|node, _: ()| async move { - let device_model = get_hardware_model_name() - .unwrap_or(HardwareModel::Other) - .to_string(); + let config = SanitizedNodeConfig::from(node.config.get().await); Ok(NodeState { - config: node.config.get().await.into(), + device_model: Some(config.hardware_model.to_string()), + config, // We are taking the assumption here that this value is only used on the frontend for display purposes data_path: node .config @@ -149,7 +154,6 @@ pub(crate) fn mount() -> Arc { .to_str() .expect("Found non-UTF-8 path") .to_string(), - device_model: Some(device_model), is_in_docker: is_in_docker(), }) }) diff --git a/core/src/api/nodes.rs b/core/src/api/nodes.rs index 09fb102fb..4ade1b8a8 100644 --- a/core/src/api/nodes.rs +++ b/core/src/api/nodes.rs @@ -5,9 +5,10 @@ use crate::{ node::config::{P2PDiscoveryState, Port}, }; -use sd_prisma::prisma::{instance, location}; +use sd_prisma::prisma::location; use rspc::{alpha::AlphaRouter, ErrorCode}; +use sd_utils::uuid_to_bytes; use serde::Deserialize; use specta::Type; use tracing::error; @@ -88,27 +89,16 @@ pub(crate) fn mount() -> AlphaRouter { .procedure("listLocations", { R.with2(library()) // TODO: I don't like this. `node_id` should probs be a machine hash or something cause `node_id` is dynamic in the context of P2P and what does it mean for removable media to be owned by a node? 
- .query(|(_, library), node_id: Option| async move { - // Be aware multiple instances can exist on a single node. This is generally an edge case but it's possible. - let instances = library - .db - .instance() - .find_many(vec![node_id - .map(|id| instance::node_id::equals(id.as_bytes().to_vec())) - .unwrap_or(instance::id::equals( - library.config().await.instance_id, - ))]) - .exec() - .await?; - + .query(|(_, library), device_pub_id: Option| async move { Ok(library .db .location() .find_many( - instances - .into_iter() - .map(|i| location::instance_id::equals(Some(i.id))) - .collect(), + device_pub_id + .map(|id| { + vec![location::device_pub_id::equals(Some(uuid_to_bytes(&id)))] + }) + .unwrap_or_default(), ) .exec() .await? diff --git a/core/src/api/search/saved.rs b/core/src/api/search/saved.rs index e2e797765..2cedd712a 100644 --- a/core/src/api/search/saved.rs +++ b/core/src/api/search/saved.rs @@ -66,7 +66,7 @@ pub(crate) fn mount() -> AlphaRouter { |(_, library), args: Args| async move { let Library { db, sync, .. 
} = library.as_ref(); - let pub_id = Uuid::new_v4().as_bytes().to_vec(); + let pub_id = Uuid::now_v7().as_bytes().to_vec(); let date_created: DateTime = Utc::now().into(); let (sync_params, db_params): (Vec<_>, Vec<_>) = chain_optional_iter( diff --git a/core/src/api/sync.rs b/core/src/api/sync.rs index 00db6277b..42725d0dd 100644 --- a/core/src/api/sync.rs +++ b/core/src/api/sync.rs @@ -46,12 +46,7 @@ pub(crate) fn mount() -> AlphaRouter { return Ok(()); } - sd_core_sync::backfill::backfill_operations( - &library.db, - &library.sync, - library.config().await.instance_id, - ) - .await?; + sd_core_sync::backfill::backfill_operations(&library.db, &library.sync).await?; node.libraries .edit( diff --git a/core/src/api/tags.rs b/core/src/api/tags.rs index b951368f2..98b06ef4c 100644 --- a/core/src/api/tags.rs +++ b/core/src/api/tags.rs @@ -221,7 +221,7 @@ pub(crate) fn mount() -> AlphaRouter { .iter() .filter(|fp| fp.is_dir.unwrap_or_default() && fp.object.is_none()) .map(|fp| { - let id = uuid_to_bytes(&Uuid::new_v4()); + let id = uuid_to_bytes(&Uuid::now_v7()); sync_params.extend(sync.shared_create( prisma_sync::object::SyncId { pub_id: id.clone() }, diff --git a/core/src/cloud/sync/ingest.rs b/core/src/cloud/sync/ingest.rs index dc44ee9e6..78e5c9ae2 100644 --- a/core/src/cloud/sync/ingest.rs +++ b/core/src/cloud/sync/ingest.rs @@ -90,7 +90,7 @@ pub async fn run_actor( sync.ingest .event_tx .send(sd_core_sync::Event::Messages(MessagesEvent { - instance_id: sync.instance, + device_pub_id: sync.device_pub_id.clone(), has_more: ops.len() == OPS_PER_REQUEST as usize, messages: CompressedCRDTOperationsPerModelPerDevice::new(ops), wait_tx: Some(wait_tx) diff --git a/core/src/cloud/sync/receive.rs b/core/src/cloud/sync/receive.rs index fd2bc7d8f..3a8925a37 100644 --- a/core/src/cloud/sync/receive.rs +++ b/core/src/cloud/sync/receive.rs @@ -2,8 +2,9 @@ use crate::{library::Libraries, Node}; use futures::FutureExt; use sd_actors::Stopper; +use sd_core_sync::DevicePubId; use 
sd_p2p::RemoteIdentity; -use sd_prisma::prisma::{cloud_crdt_operation, instance, PrismaClient}; +use sd_prisma::prisma::{cloud_crdt_operation, device, instance, PrismaClient}; use sd_sync::CRDTOperation; use sd_utils::uuid_to_bytes; @@ -239,7 +240,7 @@ async fn write_cloud_ops_to_db( fn crdt_op_db(op: &CRDTOperation) -> cloud_crdt_operation::Create { cloud_crdt_operation::Create { timestamp: op.timestamp.0 as i64, - instance: instance::pub_id::equals(op.device_pub_id.as_bytes().to_vec()), + device: device::pub_id::equals(uuid_to_bytes(&op.device_pub_id)), kind: op.data.as_kind().to_string(), data: to_vec(&op.data).expect("unable to serialize data"), model: op.model_id as i32, @@ -247,50 +248,3 @@ fn crdt_op_db(op: &CRDTOperation) -> cloud_crdt_operation::Create { _params: vec![], } } - -#[allow(clippy::too_many_arguments)] -pub async fn upsert_instance( - library_id: Uuid, - db: &PrismaClient, - sync: &sd_core_sync::Manager, - libraries: &Libraries, - uuid: &Uuid, - identity: RemoteIdentity, - node_id: &Uuid, - node_remote_identity: RemoteIdentity, - metadata: HashMap, -) -> prisma_client_rust::Result<()> { - db.instance() - .upsert( - instance::pub_id::equals(uuid_to_bytes(uuid)), - instance::create( - uuid_to_bytes(uuid), - identity.get_bytes().to_vec(), - node_id.as_bytes().to_vec(), - Utc::now().into(), - Utc::now().into(), - vec![ - instance::node_remote_identity::set(Some( - node_remote_identity.get_bytes().to_vec(), - )), - instance::metadata::set(Some( - serde_json::to_vec(&metadata).expect("unable to serialize metadata"), - )), - ], - ), - vec![], - ) - .exec() - .await?; - - sync.timestamp_per_device - .write() - .await - .entry(*uuid) - .or_default(); - - // Called again so the new instances are picked up - libraries.update_instances_by_id(library_id).await; - - Ok(()) -} diff --git a/core/src/library/config.rs b/core/src/library/config.rs index 863f744f9..c8cb9db7d 100644 --- a/core/src/library/config.rs +++ b/core/src/library/config.rs @@ -4,7 +4,7 @@ 
use crate::{ }; use sd_p2p::{Identity, RemoteIdentity}; -use sd_prisma::prisma::{file_path, indexer_rule, instance, location, node, PrismaClient}; +use sd_prisma::prisma::{file_path, indexer_rule, instance, location, PrismaClient}; use sd_utils::{db::maybe_missing, error::FileIOError}; use std::{ @@ -12,7 +12,6 @@ use std::{ sync::{atomic::AtomicBool, Arc}, }; -use chrono::Utc; use int_enum::IntEnum; use prisma_client_rust::not; use serde::{Deserialize, Serialize}; @@ -167,34 +166,8 @@ impl LibraryConfig { } (LibraryConfigVersion::V2, LibraryConfigVersion::V3) => { - // The fact I have to migrate this hurts my soul - if db.node().count(vec![]).exec().await? != 1 { - return Err(LibraryConfigError::TooManyNodes); - } - - db.node() - .update_many( - vec![], - vec![node::pub_id::set(node_config.id.as_bytes().to_vec())], - ) - .exec() - .await?; - - let mut config = serde_json::from_slice::>( - &fs::read(path).await.map_err(|e| { - VersionManagerError::FileIO(FileIOError::from((path, e))) - })?, - ) - .map_err(VersionManagerError::SerdeJson)?; - - config.insert(String::from("node_id"), json!(node_config.id.to_string())); - - fs::write( - path, - &serde_json::to_vec(&config).map_err(VersionManagerError::SerdeJson)?, - ) - .await - .map_err(|e| VersionManagerError::FileIO(FileIOError::from((path, e))))?; + // Removed, can't be automatically updated + return Err(LibraryConfigError::CriticalUpdateError); } (LibraryConfigVersion::V3, LibraryConfigVersion::V4) => { @@ -255,51 +228,8 @@ impl LibraryConfig { }, (LibraryConfigVersion::V5, LibraryConfigVersion::V6) => { - let nodes = db.node().find_many(vec![]).exec().await?; - if nodes.is_empty() { - error!("6 - No nodes found... How did you even get this far? but this is fine we can fix it."); - } else if nodes.len() > 1 { - error!("6 - More than one node found in the DB... 
This can't be automatically reconciled!"); - return Err(LibraryConfigError::TooManyNodes); - } - - let node = nodes.first(); - let now = Utc::now().fixed_offset(); - let instance_id = Uuid::new_v4(); - - instance::Create { - pub_id: instance_id.as_bytes().to_vec(), - // WARNING: At this stage in the migration this field *should* be an `Identity` not a `RemoteIdentityOrIdentity` (as that was introduced later on). - remote_identity: node - .and_then(|n| n.identity.clone()) - .unwrap_or_else(|| Identity::new().to_bytes()), - node_id: node_config.id.as_bytes().to_vec(), - last_seen: now, - date_created: node.map(|n| n.date_created).unwrap_or_else(|| now), - _params: vec![], - } - .to_query(db) - .exec() - .await?; - - let mut config = serde_json::from_slice::>( - &fs::read(path).await.map_err(|e| { - VersionManagerError::FileIO(FileIOError::from((path, e))) - })?, - ) - .map_err(VersionManagerError::SerdeJson)?; - - config.remove("node_id"); - config.remove("identity"); - - config.insert(String::from("instance_id"), json!(instance_id.to_string())); - - fs::write( - path, - &serde_json::to_vec(&config).map_err(VersionManagerError::SerdeJson)?, - ) - .await - .map_err(|e| VersionManagerError::FileIO(FileIOError::from((path, e))))?; + // Removed, can't be automatically updated + return Err(LibraryConfigError::CriticalUpdateError); } (LibraryConfigVersion::V6, LibraryConfigVersion::V7) => { @@ -344,7 +274,7 @@ impl LibraryConfig { } (LibraryConfigVersion::V7, LibraryConfigVersion::V8) => { - let instances = db.instance().find_many(vec![]).exec().await?; + let instances = db.device().find_many(vec![]).exec().await?; let Some(instance) = instances.first() else { error!("8 - No nodes found... 
How did you even get this far?!"); return Err(LibraryConfigError::MissingInstance); @@ -498,6 +428,8 @@ pub enum LibraryConfigError { TooManyInstances, #[error("missing instances")] MissingInstance, + #[error("your library version can't be automatically updated, please recreate your library")] + CriticalUpdateError, #[error(transparent)] SerdeJson(#[from] serde_json::Error), diff --git a/core/src/library/manager/error.rs b/core/src/library/manager/error.rs index 3541eabfd..5a12ff221 100644 --- a/core/src/library/manager/error.rs +++ b/core/src/library/manager/error.rs @@ -1,6 +1,7 @@ use crate::{library::LibraryConfigError, location::LocationManagerError}; use sd_core_indexer_rules::seed::SeederError; +use sd_core_sync::DevicePubId; use sd_p2p::IdentityErr; use sd_utils::{ @@ -8,10 +9,9 @@ use sd_utils::{ error::{FileIOError, NonUtf8PathError}, }; -use thiserror::Error; use tracing::error; -#[derive(Error, Debug)] +#[derive(thiserror::Error, Debug)] pub enum LibraryManagerError { #[error("error serializing or deserializing the JSON in the config file: {0}")] Json(#[from] serde_json::Error), @@ -23,8 +23,6 @@ pub enum LibraryManagerError { Uuid(#[from] uuid::Error), #[error("failed to run indexer rules seeder: {0}")] IndexerRulesSeeder(#[from] SeederError), - // #[error("failed to initialize the key manager: {0}")] - // KeyManager(#[from] sd_crypto::Error), #[error("error migrating the library: {0}")] MigrationError(#[from] db::MigrationError), #[error("invalid library configuration: {0}")] @@ -39,6 +37,8 @@ pub enum LibraryManagerError { InvalidIdentity, #[error("current instance with id '{0}' was not found in the database")] CurrentInstanceNotFound(String), + #[error("current device with pub id '{0}' was not found in the database")] + CurrentDeviceNotFound(DevicePubId), #[error("missing-field: {0}")] MissingField(#[from] MissingFieldError), diff --git a/core/src/library/manager/mod.rs b/core/src/library/manager/mod.rs index b4dddb3db..476926e78 100644 --- 
a/core/src/library/manager/mod.rs +++ b/core/src/library/manager/mod.rs @@ -156,7 +156,7 @@ impl Libraries { description: Option, node: &Arc, ) -> Result, LibraryManagerError> { - self.create_with_uuid(Uuid::new_v4(), name, description, true, None, node, false) + self.create_with_uuid(Uuid::now_v7(), name, description, true, None, node, false) .await } @@ -206,9 +206,9 @@ impl Libraries { Some({ let identity = Identity::new(); let mut create = instance.unwrap_or_else(|| instance::Create { - pub_id: Uuid::new_v4().as_bytes().to_vec(), + pub_id: Uuid::now_v7().as_bytes().to_vec(), remote_identity: identity.to_remote_identity().get_bytes().to_vec(), - node_id: node_cfg.id.as_bytes().to_vec(), + node_id: node_cfg.id.to_db(), last_seen: now, date_created: now, _params: vec![ @@ -458,6 +458,7 @@ impl Libraries { } let node_config = node.config.get().await; + let device_pub_id = node_config.id.clone(); let config = LibraryConfig::load(config_path, &node_config, &db).await?; let instances = db.instance().find_many(vec![]).exec().await?; @@ -470,6 +471,17 @@ impl Libraries { })? .clone(); + let devices = db.device().find_many(vec![]).exec().await?; + + let device_pub_id_to_db = device_pub_id.to_db(); + if devices + .iter() + .find(|device| device.pub_id == device_pub_id_to_db) + .is_none() + { + return Err(LibraryManagerError::CurrentDeviceNotFound(device_pub_id)); + } + let identity = match instance.identity.as_ref() { Some(b) => Arc::new(Identity::from_bytes(b)?), // We are not this instance, so we don't have the private key. 
@@ -486,7 +498,7 @@ impl Libraries { .node_remote_identity .as_ref() .and_then(|v| RemoteIdentity::from_bytes(v).ok()); - if instance_node_id != node_config.id + if instance_node_id != Uuid::from(&node_config.id) || instance_node_remote_identity != Some(node_config.identity.to_remote_identity()) || curr_metadata != Some(node.p2p.peer_metadata()) { @@ -502,7 +514,7 @@ impl Libraries { .update( instance::id::equals(instance.id), vec![ - instance::node_id::set(node_config.id.as_bytes().to_vec()), + instance::node_id::set(node_config.id.to_db()), instance::node_remote_identity::set(Some( node_config .identity @@ -522,16 +534,13 @@ impl Libraries { // TODO: Move this reconciliation into P2P and do reconciliation of both local and remote nodes. - // let key_manager = Arc::new(KeyManager::new(vec![]).await?); - // seed_keymanager(&db, &key_manager).await?; - let actors = Default::default(); - let (sync, sync_rx) = sync::Manager::with_existing_instances( + let (sync, sync_rx) = sync::Manager::with_existing_devices( Arc::clone(&db), - instance_id, + &device_pub_id, Arc::clone(&config.generate_sync_operations), - &instances, + &devices, Arc::clone(&actors), ) .await?; diff --git a/core/src/location/manager/runner.rs b/core/src/location/manager/runner.rs index 1daa383ce..735d4b6f2 100644 --- a/core/src/location/manager/runner.rs +++ b/core/src/location/manager/runner.rs @@ -3,7 +3,7 @@ use crate::{ Node, }; -use sd_core_prisma_helpers::location_ids_and_path; +use sd_core_prisma_helpers::{location_ids_and_path, DevicePubId}; use sd_prisma::prisma::location; use sd_utils::db::maybe_missing; @@ -38,14 +38,16 @@ type LocationIdAndLibraryId = (location::id::Type, LibraryId); struct Runner { node: Arc, + device_pub_id_to_db: Option>, locations_to_check: HashMap>, locations_watched: HashMap, locations_unwatched: HashMap, forced_unwatch: HashSet, } impl Runner { - fn new(node: Arc) -> Self { + async fn new(node: Arc) -> Self { Self { + device_pub_id_to_db: 
Some(DevicePubId::from(node.config.get().await.id).to_db()), node, locations_to_check: HashMap::new(), locations_watched: HashMap::new(), @@ -54,13 +56,17 @@ impl Runner { } } + fn check_same_device(&self, location: &location_ids_and_path::Data) -> bool { + location.device_pub_id == self.device_pub_id_to_db + } + async fn add_location( &mut self, location_id: i32, library: Arc, ) -> Result<(), LocationManagerError> { if let Some(location) = get_location(location_id, &library).await? { - check_online(&location, &self.node, &library) + check_online(&location, &self.node, &library, &self.device_pub_id_to_db) .await .and_then(|is_online| { LocationWatcher::new(location, Arc::clone(&library), Arc::clone(&self.node)) @@ -92,8 +98,7 @@ impl Runner { let key = (location_id, library.id); if let Some(location) = get_location(location_id, &library).await? { - // TODO(N): This isn't gonna work with removable media and this will likely permanently break if the DB is restored from a backup. - if location.instance_id == Some(library.config().await.instance_id) { + if self.check_same_device(&location) { self.unwatch_location(location, library.id); self.locations_unwatched.remove(&key); self.forced_unwatch.remove(&key); @@ -101,7 +106,7 @@ impl Runner { self.drop_location( location_id, library.id, - "Dropping location from location manager, because we don't have a `local_path` anymore", + "Dropping location from location manager, because it isn't from this device", ); } } else { @@ -298,9 +303,8 @@ impl Runner { let key = (location_id, library.id); if let Some(location) = get_location(location_id, &library).await? { - // TODO(N): This isn't gonna work with removable media and this will likely permanently break if the DB is restored from a backup. - if location.instance_id == Some(library.config().await.instance_id) { - if check_online(&location, &self.node, &library).await? 
+ if self.check_same_device(&location) { + if check_online(&location, &self.node, &library, &self.device_pub_id_to_db).await? && !self.forced_unwatch.contains(&key) { self.watch_location(location, library.id); @@ -314,7 +318,7 @@ impl Runner { location_id, library.id, "Dropping location from location manager, because \ - it isn't a location in the current node", + it isn't a location in the current device", ); self.forced_unwatch.remove(&key); } @@ -344,7 +348,7 @@ pub(super) async fn run( let mut check_locations_interval = interval(Duration::from_secs(2)); check_locations_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); - let mut runner = Runner::new(node); + let mut runner = Runner::new(node).await; let mut msg_stream = pin!(( location_management_rx.map(StreamMessage::LocationManagementMessage), @@ -410,20 +414,20 @@ async fn get_location( fields(%location_id, library_id = %library.id), err, )] -pub(super) async fn check_online( +async fn check_online( location_ids_and_path::Data { id: location_id, pub_id, - instance_id, + device_pub_id, path, }: &location_ids_and_path::Data, node: &Node, library: &Library, + device_pub_id_to_db: &Option>, ) -> Result { let pub_id = Uuid::from_slice(pub_id)?; - // TODO(N): This isn't gonna work with removable media and this will likely permanently break if the DB is restored from a backup. 
- if *instance_id == Some(library.config().await.instance_id) { + if *device_pub_id == *device_pub_id_to_db { match fs::metadata(maybe_missing(path, "location.path")?).await { Ok(_) => { node.locations.add_online(pub_id).await; diff --git a/core/src/location/mod.rs b/core/src/location/mod.rs index d613aca4e..0e378dc7c 100644 --- a/core/src/location/mod.rs +++ b/core/src/location/mod.rs @@ -163,7 +163,7 @@ impl LocationCreateArgs { } ); - let uuid = Uuid::new_v4(); + let uuid = Uuid::now_v7(); let location = create_location( library, @@ -246,7 +246,7 @@ impl LocationCreateArgs { }, ); - let uuid = Uuid::new_v4(); + let uuid = Uuid::now_v7(); let location = create_location( library, @@ -1160,7 +1160,7 @@ pub async fn create_file_path( .unzip() }; - let pub_id = sd_utils::uuid_to_bytes(&Uuid::new_v4()); + let pub_id = sd_utils::uuid_to_bytes(&Uuid::now_v7()); let created_path = sync .write_ops( diff --git a/core/src/node/config.rs b/core/src/node/config.rs index 528938770..6f25a1f62 100644 --- a/core/src/node/config.rs +++ b/core/src/node/config.rs @@ -5,6 +5,7 @@ use crate::{ }; use sd_cloud_schema::devices::DeviceOS; +use sd_core_sync::DevicePubId; use sd_p2p::Identity; use sd_utils::error::FileIOError; @@ -27,6 +28,8 @@ use tokio::{ use tracing::error; use uuid::Uuid; +use super::HardwareModel; + /// NODE_STATE_CONFIG_NAME is the name of the file which stores the NodeState pub const NODE_STATE_CONFIG_NAME: &str = "node_state.sdconfig"; @@ -117,7 +120,7 @@ impl Default for NodeConfigP2P { #[derive(Debug, Clone, Serialize, Deserialize)] // If you are adding `specta::Type` on this your probably about to leak the P2P private key pub struct NodeConfig { /// id is a unique identifier for the current node. Each node has a public identifier (this one) and is given a local id for each library (done within the library code). - pub id: Uuid, + pub id: DevicePubId, /// name is the display name of the current node. This is set by the user and is shown in the UI. 
// TODO: Length validation so it can fit in DNS record pub name: String, /// core level notifications @@ -137,8 +140,10 @@ pub struct NodeConfig { pub features: Vec, /// The aggregation of many different preferences for the node pub preferences: NodePreferences, - // Operating System of the node + /// Operating System of the node pub os: DeviceOS, + /// Hardware model of the node + pub hardware_model: HardwareModel, version: NodeConfigVersion, } @@ -196,9 +201,13 @@ impl ManagedVersion for NodeConfig { name.truncate(255); let os = DeviceOS::from_env(); + let hardware_model = HardwareModel::try_get().unwrap_or_else(|e| { + error!(?e, "Failed to get hardware model"); + HardwareModel::Other + }); Some(Self { - id: Uuid::now_v7(), + id: Uuid::now_v7().into(), name, identity: Identity::default(), p2p: NodeConfigP2P::default(), @@ -207,6 +216,7 @@ impl ManagedVersion for NodeConfig { notifications: vec![], preferences: NodePreferences::default(), os, + hardware_model, }) } } @@ -342,6 +352,13 @@ impl NodeConfig { serde_json::to_value(DeviceOS::from_env()) .map_err(VersionManagerError::SerdeJson)?, ); + config.insert( + String::from("hardware_model"), + serde_json::to_value( + HardwareModel::try_get().unwrap_or(HardwareModel::Other), + ) + .map_err(VersionManagerError::SerdeJson)?, + ); config.remove("features"); config.remove("auth_token"); diff --git a/core/src/node/hardware.rs b/core/src/node/hardware.rs index 1af50b530..04873e51c 100644 --- a/core/src/node/hardware.rs +++ b/core/src/node/hardware.rs @@ -1,151 +1,209 @@ -use std::io::Error; -use std::str; +use std::io; use serde::{Deserialize, Serialize}; use specta::Type; +use strum::IntoEnumIterator; use strum_macros::{Display, EnumIter}; #[repr(i32)] #[derive(Debug, Clone, Display, Copy, EnumIter, Type, Serialize, Deserialize, Eq, PartialEq)] -#[specta(rename = "core_HardwareModel")] +#[specta(rename = "CoreHardwareModel")] pub enum HardwareModel { - Other, - MacStudio, - MacBookAir, - MacBookPro, - MacBook, - 
MacMini, - MacPro, - IMac, - IMacPro, - IPad, - IPhone, - Simulator, - Android, + Other = 0, + MacStudio = 1, + MacBookAir = 2, + MacBookPro = 3, + MacBook = 4, + MacMini = 5, + MacPro = 6, + IMac = 7, + IMacPro = 8, + IPad = 9, + IPhone = 10, + Simulator = 11, + Android = 12, } -impl HardwareModel { - pub fn from_display_name(name: &str) -> Self { - use strum::IntoEnumIterator; - HardwareModel::iter() +impl From for HardwareModel { + fn from(value: i32) -> Self { + match value { + 1 => Self::MacStudio, + 2 => Self::MacBookAir, + 3 => Self::MacBookPro, + 4 => Self::MacBook, + 5 => Self::MacMini, + 6 => Self::MacPro, + 7 => Self::IMac, + 8 => Self::IMacPro, + 9 => Self::IPad, + 10 => Self::IPhone, + 11 => Self::Simulator, + 12 => Self::Android, + _ => Self::Other, + } + } +} + +impl From for sd_cloud_schema::devices::HardwareModel { + fn from(model: HardwareModel) -> Self { + match model { + HardwareModel::MacStudio => Self::MacStudio, + HardwareModel::MacBookAir => Self::MacBookAir, + HardwareModel::MacBookPro => Self::MacBookPro, + HardwareModel::MacBook => Self::MacBook, + HardwareModel::MacMini => Self::MacMini, + HardwareModel::MacPro => Self::MacPro, + HardwareModel::IMac => Self::IMac, + HardwareModel::IMacPro => Self::IMacPro, + HardwareModel::IPad => Self::IPad, + HardwareModel::IPhone => Self::IPhone, + HardwareModel::Simulator => Self::Simulator, + HardwareModel::Android => Self::Android, + HardwareModel::Other => Self::Other, + } + } +} + +impl From for HardwareModel { + fn from(model: sd_cloud_schema::devices::HardwareModel) -> Self { + match model { + sd_cloud_schema::devices::HardwareModel::MacStudio => Self::MacStudio, + sd_cloud_schema::devices::HardwareModel::MacBookAir => Self::MacBookAir, + sd_cloud_schema::devices::HardwareModel::MacBookPro => Self::MacBookPro, + sd_cloud_schema::devices::HardwareModel::MacBook => Self::MacBook, + sd_cloud_schema::devices::HardwareModel::MacMini => Self::MacMini, + sd_cloud_schema::devices::HardwareModel::MacPro 
=> Self::MacPro, + sd_cloud_schema::devices::HardwareModel::IMac => Self::IMac, + sd_cloud_schema::devices::HardwareModel::IMacPro => Self::IMacPro, + sd_cloud_schema::devices::HardwareModel::IPad => Self::IPad, + sd_cloud_schema::devices::HardwareModel::IPhone => Self::IPhone, + sd_cloud_schema::devices::HardwareModel::Simulator => Self::Simulator, + sd_cloud_schema::devices::HardwareModel::Android => Self::Android, + sd_cloud_schema::devices::HardwareModel::Other => Self::Other, + } + } +} + +impl From<&str> for HardwareModel { + fn from(name: &str) -> Self { + Self::iter() .find(|&model| { model.to_string().to_lowercase().replace(' ', "") == name.to_lowercase().replace(' ', "") }) - .unwrap_or(HardwareModel::Other) + .unwrap_or(Self::Other) } } -pub fn get_hardware_model_name() -> Result { - #[cfg(target_os = "macos")] - { - use std::process::Command; +impl HardwareModel { + pub fn try_get() -> Result { + #[cfg(target_os = "macos")] + { + use std::process::Command; - let output = Command::new("system_profiler") - .arg("SPHardwareDataType") - .output()?; + let output = Command::new("system_profiler") + .arg("SPHardwareDataType") + .output()?; - if output.status.success() { - let output_str = std::str::from_utf8(&output.stdout).unwrap_or_default(); - let hardware_model = output_str - .lines() - .find(|line| line.to_lowercase().contains("model name")) - .and_then(|line| line.split_once(':')) - .map(|(_, model_name)| HardwareModel::from_display_name(model_name.trim())) - .unwrap_or(HardwareModel::Other); + if output.status.success() { + let output_str = std::str::from_utf8(&output.stdout).unwrap_or_default(); + let hardware_model = output_str + .lines() + .find(|line| line.to_lowercase().contains("model name")) + .and_then(|line| line.split_once(':')) + .map(|(_, model_name)| model_name.trim().into()) + .unwrap_or(Self::Other); - Ok(hardware_model) - } else { - Err(Error::new( - std::io::ErrorKind::Other, - format!( - "Failed to get hardware model name: {}", - 
String::from_utf8_lossy(&output.stderr) - ), - )) - } - } - #[cfg(target_os = "ios")] - { - use std::ffi::CString; - use std::ptr; - - extern "C" { - fn sysctlbyname( - name: *const libc::c_char, - oldp: *mut libc::c_void, - oldlenp: *mut usize, - newp: *mut libc::c_void, - newlen: usize, - ) -> libc::c_int; - } - - fn get_device_type() -> Option { - let mut size: usize = 0; - let name = CString::new("hw.machine").expect("CString::new failed"); - - // First, get the size of the buffer needed - unsafe { - sysctlbyname( - name.as_ptr(), - ptr::null_mut(), - &mut size, - ptr::null_mut(), - 0, - ); - } - - // Allocate a buffer with the correct size - let mut buffer: Vec = vec![0; size]; - - // Get the actual machine type - unsafe { - sysctlbyname( - name.as_ptr(), - buffer.as_mut_ptr() as *mut libc::c_void, - &mut size, - ptr::null_mut(), - 0, - ); - } - - // Convert the buffer to a String - let machine_type = String::from_utf8_lossy(&buffer).trim().to_string(); - - // Check if the device is an iPad or iPhone - if machine_type.starts_with("iPad") { - Some("iPad".to_string()) - } else if machine_type.starts_with("iPhone") { - Some("iPhone".to_string()) - } else if machine_type.starts_with("arm") { - Some("Simulator".to_string()) + Ok(hardware_model) } else { - None + Err(io::Error::new( + io::ErrorKind::Other, + format!( + "Failed to get hardware model name: {}", + String::from_utf8_lossy(&output.stderr) + ), + )) + } + } + #[cfg(target_os = "ios")] + { + use std::ffi::CString; + use std::ptr; + + extern "C" { + fn sysctlbyname( + name: *const libc::c_char, + oldp: *mut libc::c_void, + oldlenp: *mut usize, + newp: *mut libc::c_void, + newlen: usize, + ) -> libc::c_int; + } + + fn get_device_type() -> Option { + let mut size: usize = 0; + let name = CString::new("hw.machine").expect("CString::new failed"); + + // First, get the size of the buffer needed + unsafe { + sysctlbyname( + name.as_ptr(), + ptr::null_mut(), + &mut size, + ptr::null_mut(), + 0, + ); + } + + // 
Allocate a buffer with the correct size + let mut buffer: Vec = vec![0; size]; + + // Get the actual machine type + unsafe { + sysctlbyname( + name.as_ptr(), + buffer.as_mut_ptr() as *mut libc::c_void, + &mut size, + ptr::null_mut(), + 0, + ); + } + + // Convert the buffer to a String + let machine_type = String::from_utf8_lossy(&buffer).trim().to_string(); + + // Check if the device is an iPad or iPhone + if machine_type.starts_with("iPad") { + Some("iPad".to_string()) + } else if machine_type.starts_with("iPhone") { + Some("iPhone".to_string()) + } else if machine_type.starts_with("arm") { + Some("Simulator".to_string()) + } else { + None + } + } + + if let Some(device_type) = get_device_type() { + let hardware_model = Self::from_display_name(&device_type.as_str()); + + Ok(hardware_model) + } else { + Err(Error::new( + std::io::ErrorKind::Other, + "Failed to get hardware model name", + )) } } - if let Some(device_type) = get_device_type() { - let hardware_model = HardwareModel::from_display_name(&device_type.as_str()); + #[cfg(target_os = "android")] + { + Ok(Self::Android) + } - Ok(hardware_model) - } else { - Err(Error::new( - std::io::ErrorKind::Other, - "Failed to get hardware model name", - )) + #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android")))] + { + Ok(Self::Other) } } - - #[cfg(target_os = "android")] - { - Ok(HardwareModel::Android) - } - - #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android")))] - { - Err(Error::new( - std::io::ErrorKind::Unsupported, - "Unsupported operating system", - )) - } } diff --git a/core/src/object/tag/mod.rs b/core/src/object/tag/mod.rs index 41b4e88bd..a8d232cf6 100644 --- a/core/src/object/tag/mod.rs +++ b/core/src/object/tag/mod.rs @@ -21,7 +21,7 @@ impl TagCreateArgs { self, Library { db, sync, .. 
}: &Library, ) -> Result { - let pub_id = Uuid::new_v4().as_bytes().to_vec(); + let pub_id = Uuid::now_v7().as_bytes().to_vec(); let (sync_params, db_params): (Vec<_>, Vec<_>) = [ sync_db_entry!(self.name, tag::name), diff --git a/core/src/p2p/manager.rs b/core/src/p2p/manager.rs index b986e5f3b..96ee7a264 100644 --- a/core/src/p2p/manager.rs +++ b/core/src/p2p/manager.rs @@ -1,7 +1,7 @@ use crate::{ node::{ config::{self, P2PDiscoveryState}, - get_hardware_model_name, HardwareModel, + HardwareModel, }, p2p::{ libraries::libraries_hook, operations, sync::SyncMessage, Header, OperatingSystem, @@ -208,7 +208,7 @@ impl P2PManager { PeerMetadata { name: config.name.clone(), operating_system: Some(OperatingSystem::get_os()), - device_model: Some(get_hardware_model_name().unwrap_or(HardwareModel::Other)), + device_model: Some(HardwareModel::try_get().unwrap_or(HardwareModel::Other)), version: Some(env!("CARGO_PKG_VERSION").to_string()), } .update(&mut self.p2p.metadata_mut()); diff --git a/core/src/p2p/metadata.rs b/core/src/p2p/metadata.rs index 5e03e9c7d..054eea0ca 100644 --- a/core/src/p2p/metadata.rs +++ b/core/src/p2p/metadata.rs @@ -47,7 +47,7 @@ impl PeerMetadata { .get("os") .map(|os| os.parse().map_err(|_| "Unable to parse 'OperationSystem'!")) .transpose()?, - device_model: Some(HardwareModel::from_display_name( + device_model: Some(HardwareModel::from( data.get("device_model") .map(|s| s.as_str()) .unwrap_or("Other"), diff --git a/core/src/p2p/sync/mod.rs b/core/src/p2p/sync/mod.rs index 8233a01c5..ca2ea73a5 100644 --- a/core/src/p2p/sync/mod.rs +++ b/core/src/p2p/sync/mod.rs @@ -245,7 +245,7 @@ mod responder { ingest .event_tx .send(Event::Messages(MessagesEvent { - instance_id: library.sync.instance, + device_pub_id: library.sync.device_pub_id.clone(), has_more: ops.len() == OPS_PER_REQUEST as usize, messages: ops, wait_tx: Some(wait_tx), diff --git a/core/src/volume/mod.rs b/core/src/volume/mod.rs index ada4d4ae3..d746d4d1e 100644 --- 
a/core/src/volume/mod.rs +++ b/core/src/volume/mod.rs @@ -515,16 +515,15 @@ fn compute_stats<'v>(volumes: impl IntoIterator) -> (u64, u64 async fn update_storage_statistics( db: &PrismaClient, sync: &SyncManager, - instance_pub_id: &Uuid, total_capacity: u64, available_capacity: u64, ) -> Result<(), VolumeError> { - let instance_pub_id = uuid_to_bytes(instance_pub_id); + let device_pub_id = sync.device_pub_id.to_db(); let storage_statistics_pub_id = db .storage_statistics() - .find_unique(storage_statistics::instance_pub_id::equals( - instance_pub_id.clone(), + .find_unique(storage_statistics::device_pub_id::equals( + device_pub_id.clone(), )) .select(storage_statistics::select!({ pub_id })) .exec() @@ -571,7 +570,7 @@ async fn update_storage_statistics( ) .await?; } else { - let new_storage_statistics_id = uuid_to_bytes(&Uuid::new_v4()); + let new_storage_statistics_id = uuid_to_bytes(&Uuid::now_v7()); sync.write_ops( db, @@ -590,8 +589,8 @@ async fn update_storage_statistics( msgpack!(available_capacity), ), ( - storage_statistics::instance_pub_id::NAME, - msgpack!(instance_pub_id), + storage_statistics::device_pub_id::NAME, + msgpack!(device_pub_id), ), ], ), @@ -601,7 +600,7 @@ async fn update_storage_statistics( vec![ storage_statistics::total_capacity::set(total_capacity as i64), storage_statistics::available_capacity::set(available_capacity as i64), - storage_statistics::instance_pub_id::set(Some(instance_pub_id.clone())), + storage_statistics::device_pub_id::set(Some(device_pub_id.clone())), ], ) // We don't need any data here, just the id avoids receiving the entire object @@ -633,14 +632,7 @@ pub fn save_storage_statistics(node: &Node) { .. 
} = &*library; - update_storage_statistics( - db, - sync, - instance_uuid, - total_capacity, - available_capacity, - ) - .await + update_storage_statistics(db, sync, total_capacity, available_capacity).await }) .collect::>() .join() diff --git a/core/src/volume/watcher.rs b/core/src/volume/watcher.rs index 4c71bade3..efc0556a1 100644 --- a/core/src/volume/watcher.rs +++ b/core/src/volume/watcher.rs @@ -29,7 +29,6 @@ pub fn spawn_volume_watcher(library: Arc) { if let Err(e) = super::update_storage_statistics( &library.db, &library.sync, - &library.instance_uuid, total_capacity, available_capacity, ) diff --git a/crates/sync-generator/src/sync_data.rs b/crates/sync-generator/src/sync_data.rs index 66556e752..9e9fdd937 100644 --- a/crates/sync-generator/src/sync_data.rs +++ b/crates/sync-generator/src/sync_data.rs @@ -61,7 +61,7 @@ pub fn enumerate(models: &[ModelWithSyncType<'_>]) -> TokenStream { impl ModelSyncData { pub fn from_op(op: sd_sync::CRDTOperation) -> Option { - Some(match op.model { + Some(match op.model_id { #(#matches),*, _ => return None }) diff --git a/crates/sync/src/factory.rs b/crates/sync/src/factory.rs index 8973ff14d..b029dd8aa 100644 --- a/crates/sync/src/factory.rs +++ b/crates/sync/src/factory.rs @@ -26,11 +26,9 @@ pub trait OperationFactory { id: &SId, data: CRDTOperationData, ) -> CRDTOperation { - let timestamp = self.get_clock().new_timestamp(); - CRDTOperation { device_pub_id: self.get_device_pub_id(), - timestamp: *timestamp.get_time(), + timestamp: *self.get_clock().new_timestamp().get_time(), model_id: ::MODEL_ID, record_id: msgpack!(id), data, diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index 00b339a1a..a3aa89a56 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -52,7 +52,6 @@ export type Procedures = { { key: "search.saved.get", input: LibraryArgs, result: SavedSearch | null } | { key: "search.saved.list", input: LibraryArgs, result: SavedSearch[] } | { key: "sync.enabled", 
input: LibraryArgs, result: boolean } | - { key: "sync.messages", input: LibraryArgs, result: CRDTOperation[] } | { key: "tags.get", input: LibraryArgs, result: Tag | null } | { key: "tags.getForObject", input: LibraryArgs, result: Tag[] } | { key: "tags.getWithObjects", input: LibraryArgs, result: { [key in number]: ({ object: { id: number }; date_created: string | null })[] } } | @@ -66,14 +65,15 @@ export type Procedures = { { key: "cloud.bootstrap", input: [AccessToken, RefreshToken], result: null } | { key: "cloud.devices.delete", input: DeviceDeleteRequest, result: null } | { key: "cloud.devices.update", input: DeviceUpdateRequest, result: null } | - { key: "cloud.libraries.create", input: LibraryArgs, result: null } | - { key: "cloud.libraries.delete", input: LibraryDeleteRequest, result: null } | - { key: "cloud.libraries.update", input: LibraryUpdateRequest, result: null } | + { key: "cloud.libraries.create", input: LibraryArgs, result: null } | + { key: "cloud.libraries.delete", input: LibraryArgs, result: null } | + { key: "cloud.libraries.update", input: LibraryArgs, result: null } | { key: "cloud.library.create", input: LibraryArgs, result: null } | { key: "cloud.library.join", input: string, result: null } | { key: "cloud.library.sync", input: LibraryArgs, result: null } | { key: "cloud.locations.create", input: LocationCreateRequest, result: null } | { key: "cloud.locations.delete", input: LocationDeleteRequest, result: null } | + { key: "cloud.userResponse", input: UserResponse, result: null } | { key: "ephemeralFiles.copyFiles", input: LibraryArgs, result: null } | { key: "ephemeralFiles.createFile", input: LibraryArgs, result: string } | { key: "ephemeralFiles.createFolder", input: LibraryArgs, result: string } | @@ -136,6 +136,7 @@ export type Procedures = { { key: "tags.update", input: LibraryArgs, result: null } | { key: "toggleFeatureFlag", input: BackendFeature, result: null }, subscriptions: + { key: "cloud.listenCloudServicesNotifications", 
input: never, result: NotifyUser } | { key: "invalidation.listen", input: never, result: InvalidateOperationEvent[] } | { key: "jobs.newFilePathIdentified", input: LibraryArgs, result: number[] } | { key: "jobs.newThumbnail", input: LibraryArgs, result: ThumbKey } | @@ -171,10 +172,6 @@ export type Backup = ({ id: string; timestamp: string; library_id: string; libra export type BuildInfo = { version: string; commit: string } -export type CRDTOperation = { instance: string; timestamp: number; model: number; record_id: JsonValue; data: CRDTOperationData } - -export type CRDTOperationData = { c: { [key in string]: JsonValue } } | { u: { field: string; value: JsonValue } } | "d" - export type CameraData = { device_make: string | null; device_model: string | null; color_space: string | null; color_profile: ColorProfile | null; focal_length: number | null; shutter_speed: number | null; flash: Flash | null; orientation: Orientation; lens_make: string | null; lens_model: string | null; bit_depth: number | null; zoom: number | null; iso: number | null; software: string | null; serial_number: string | null; lens_serial_number: string | null; contrast: number | null; saturation: number | null; sharpness: number | null; composite: Composite | null } export type CasId = string @@ -185,6 +182,10 @@ export type Chapter = { id: number; start: [number, number]; end: [number, numbe export type CloudLocation = { pub_id: LocationPubId; name: string; device: Device | null; library: Library | null; created_at: string; updated_at: string } +export type CloudP2PError = "Rejected" | "UnableToConnect" | "TimedOut" + +export type CloudP2PTicket = bigint + export type Codec = { kind: string | null; sub_kind: string | null; tag: string | null; name: string | null; profile: string | null; bit_rate: number; props: Props | null } export type ColorProfile = "Normal" | "Custom" | "HDRNoOriginal" | "HDRWithOriginal" | "OriginalForHDR" | "Panorama" | "PortraitHDR" | "Portrait" @@ -217,6 +218,12 @@ 
export type ConvertImageArgs = { location_id: number; file_path_id: number; dele export type ConvertibleExtension = "bmp" | "dib" | "ff" | "gif" | "ico" | "jpg" | "jpeg" | "png" | "pnm" | "qoi" | "tga" | "icb" | "vda" | "vst" | "tiff" | "tif" | "hif" | "heif" | "heifs" | "heic" | "heics" | "avif" | "avci" | "avcs" | "svg" | "svgz" | "pdf" | "webp" +export type CoreDevicePubId = CorePubId + +export type CoreHardwareModel = "Other" | "MacStudio" | "MacBookAir" | "MacBookPro" | "MacBook" | "MacMini" | "MacPro" | "IMac" | "IMacPro" | "IPad" | "IPhone" | "Simulator" | "Android" + +export type CorePubId = { Uuid: string } | { Vec: number[] } + export type CreateEphemeralFileArgs = { path: string; context: EphemeralFileCreateContextTypes; name: string | null } export type CreateEphemeralFolderArgs = { path: string; name: string | null } @@ -395,8 +402,14 @@ export type JobName = "Indexer" | "FileIdentifier" | "MediaProcessor" | "Copy" | export type JobProgressEvent = { id: string; library_id: string; task_count: number; completed_task_count: number; phase: string; message: string; info: string; estimated_completion: string } +export type JoinSyncGroupError = "Communication" | "InternalServer" | "Auth" + +export type JoinSyncGroupResponse = { Accepted: { authorizor_device: Device } } | { Failed: CloudP2PError } | "CriticalError" + export type JsonValue = null | boolean | number | string | JsonValue[] | { [key in string]: JsonValue } +export type KeyHash = string + export type KindStatistic = { kind: number; name: string; count: [number, number]; total_bytes: [number, number] } export type KindStatistics = { statistics: { [key in number]: KindStatistic }; total_identified_files: number; total_unidentified_files: number } @@ -405,7 +418,7 @@ export type Label = { id: number; name: string; date_created: string | null; dat export type LabelWithObjects = { id: number; name: string; date_created: string | null; date_modified: string | null; label_objects: { object: { id: number; 
file_paths: FilePath[] } }[] } -export type LibrariesCreateArgs = { access_token: AccessToken; device_pub_id: DevicePubId } +export type LibrariesUpdateArgs = { access_token: AccessToken; name: string } export type Library = { pub_id: LibraryPubId; name: string; original_device: Device | null; created_at: string; updated_at: string } @@ -440,8 +453,6 @@ export type LibraryConfigVersion = "V0" | "V1" | "V2" | "V3" | "V4" | "V5" | "V6 export type LibraryConfigWrapped = { uuid: string; instance_id: string; instance_public_key: RemoteIdentity; config: LibraryConfig } -export type LibraryDeleteRequest = { access_token: AccessToken; pub_id: LibraryPubId } - export type LibraryGetRequest = { access_token: AccessToken; pub_id: LibraryPubId; with_device: boolean } export type LibraryListRequest = { access_token: AccessToken; with_device: boolean } @@ -452,15 +463,13 @@ export type LibraryPreferences = { location?: { [key in string]: LocationSetting export type LibraryPubId = string -export type LibraryUpdateRequest = { access_token: AccessToken; pub_id: LibraryPubId; name: string } - export type LightScanArgs = { location_id: number; sub_path: string } export type ListenerState = { type: "Listening" } | { type: "Error"; error: string } | { type: "NotListening" } export type Listeners = { ipv4: ListenerState; ipv6: ListenerState; relay: ListenerState } -export type Location = { id: number; pub_id: number[]; name: string | null; path: string | null; total_capacity: number | null; available_capacity: number | null; size_in_bytes: number[] | null; is_archived: boolean | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; date_created: string | null; scan_state: number; instance_id: number | null } +export type Location = { id: number; pub_id: number[]; name: string | null; path: string | null; total_capacity: number | null; available_capacity: number | null; size_in_bytes: number[] | null; is_archived: boolean | null; 
generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; date_created: string | null; scan_state: number; device_pub_id: number[] | null; instance_id: number | null } /** * `LocationCreateArgs` is the argument received from the client using `rspc` to create a new location. @@ -505,7 +514,7 @@ export type MediaLocation = { latitude: number; longitude: number; pluscode: Plu export type Metadata = { album: string | null; album_artist: string | null; artist: string | null; comment: string | null; composer: string | null; copyright: string | null; creation_time: string | null; date: string | null; disc: number | null; encoder: string | null; encoded_by: string | null; filename: string | null; genre: string | null; language: string | null; performer: string | null; publisher: string | null; service_name: string | null; service_provider: string | null; title: string | null; track: number | null; variant_bit_rate: number | null; custom: { [key in string]: string } } -export type MockDevice = { pub_id: DevicePubId; name: string; os: DeviceOS; used_storage: bigint; storage_size: bigint; created_at: string; updated_at: string; device_model: core_HardwareModel } +export type MockDevice = { pub_id: DevicePubId; name: string; os: DeviceOS; used_storage: bigint; storage_size: bigint; created_at: string; updated_at: string; device_model: CoreHardwareModel } export type NodeConfigP2P = { discovery?: P2PDiscoveryState; port: Port; disabled: boolean; disable_ipv6: boolean; disable_relay: boolean; enable_remote_access: boolean; /** @@ -527,11 +536,11 @@ export type NodeState = ({ /** * id is a unique identifier for the current node. Each node has a public identifier (this one) and is given a local id for each library (done within the library code). */ -id: string; +id: CoreDevicePubId; /** * name is the display name of the current node. This is set by the user and is shown in the UI. 
// TODO: Length validation so it can fit in DNS record */ -name: string; identity: RemoteIdentity; p2p: NodeConfigP2P; features: BackendFeature[]; preferences: NodePreferences }) & { data_path: string; device_model: string | null; is_in_docker: boolean } +name: string; identity: RemoteIdentity; p2p: NodeConfigP2P; features: BackendFeature[]; preferences: NodePreferences; os: DeviceOS; hardware_model: CoreHardwareModel }) & { data_path: string; device_model: string | null; is_in_docker: boolean } export type NonCriticalError = { indexer: NonCriticalIndexerError } | { file_identifier: NonCriticalFileIdentifierError } | { media_processor: NonCriticalMediaProcessorError } @@ -562,6 +571,8 @@ export type NotificationId = { type: "library"; id: [string, number] } | { type: export type NotificationKind = "info" | "success" | "error" | "warning" +export type NotifyUser = { kind: "ReceivedJoinSyncGroupRequest"; data: { ticket: CloudP2PTicket; asking_device: Device; sync_group: SyncGroup } } | { kind: "ReceivedJoinSyncGroupResponse"; data: { response: JoinSyncGroupResponse; sync_group: SyncGroup } } | { kind: "SendingJoinSyncGroupResponseError"; data: { error: JoinSyncGroupError; sync_group: SyncGroup } } | { kind: "TimedOutJoinRequest"; data: { device: Device; succeeded: boolean } } + export type Object = { id: number; pub_id: number[]; kind: number | null; key_id: number | null; hidden: boolean | null; favorite: boolean | null; important: boolean | null; note: string | null; date_created: string | null; date_accessed: string | null } export type ObjectCursor = "none" | { dateAccessed: CursorOrderItem } | { kind: CursorOrderItem } @@ -602,7 +613,7 @@ export type P2PDiscoveryState = "Everyone" | "ContactsOnly" | "Disabled" export type P2PEvent = { type: "PeerChange"; identity: RemoteIdentity; connection: ConnectionMethod; discovery: DiscoveryMethod; metadata: PeerMetadata; addrs: string[] } | { type: "PeerDelete"; identity: RemoteIdentity } | { type: "SpacedropRequest"; id: 
string; identity: RemoteIdentity; peer_name: string; files: string[] } | { type: "SpacedropProgress"; id: string; percent: number } | { type: "SpacedropTimedOut"; id: string } | { type: "SpacedropRejected"; id: string } -export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; device_model: core_HardwareModel | null; version: string | null } +export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; device_model: CoreHardwareModel | null; version: string | null } export type PlusCode = string @@ -675,6 +686,10 @@ export type Stream = { id: number; name: string | null; codec: Codec | null; asp export type SubtitleProps = { width: number; height: number } +export type SyncGroup = { pub_id: SyncGroupPubId; name: string; latest_key_hash: KeyHash; library: Library; devices: Device[]; created_at: string; updated_at: string } + +export type SyncGroupPubId = string + export type SyncStatus = { ingest: boolean; cloud_send: boolean; cloud_receive: boolean; cloud_ingest: boolean } export type SystemLocations = { desktop: string | null; documents: string | null; downloads: string | null; pictures: string | null; music: string | null; videos: string | null } @@ -699,8 +714,8 @@ export type ThumbKey = { shard_hex: string; cas_id: CasId; base_directory_str: s export type UpdateThumbnailerPreferences = Record +export type UserResponse = { kind: "AcceptDeviceInSyncGroup"; data: { ticket: CloudP2PTicket; accepted: boolean } } + export type VideoProps = { pixel_format: string | null; color_range: string | null; bits_per_channel: number | null; color_space: string | null; color_primaries: string | null; color_transfer: string | null; field_order: string | null; chroma_location: string | null; width: number; height: number; aspect_ratio_num: number | null; aspect_ratio_den: number | null; properties: string[] } export type Volume = { name: string; mount_points: string[]; total_capacity: string; available_capacity: string; 
disk_type: DiskType; file_system: string | null; is_root_filesystem: boolean } - -export type core_HardwareModel = "Other" | "MacStudio" | "MacBookAir" | "MacBookPro" | "MacBook" | "MacMini" | "MacPro" | "IMac" | "IMacPro" | "IPad" | "IPhone" | "Simulator" | "Android" From f6ed7b9e6ae507cd6dc28b9d494fefb25f062251 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 27 Aug 2024 17:05:37 -0300 Subject: [PATCH 092/218] bruh --- core/src/api/cloud/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index 49c5cad36..da13ca1ab 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -35,7 +35,6 @@ async fn try_get_cloud_services_client( pub(crate) fn mount() -> AlphaRouter { R.router() - .merge("library.", library::mount()) .merge("libraries.", libraries::mount()) .merge("locations.", locations::mount()) .merge("devices.", devices::mount()) From 4ffb4aa7a858e6030df94eca2a5b032ec8073c36 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Tue, 27 Aug 2024 21:51:16 -0400 Subject: [PATCH 093/218] Fix compile again --- core/src/node/config.rs | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/core/src/node/config.rs b/core/src/node/config.rs index 6f25a1f62..f8c38f207 100644 --- a/core/src/node/config.rs +++ b/core/src/node/config.rs @@ -321,12 +321,24 @@ impl NodeConfig { .map_err(VersionManagerError::SerdeJson)?; config.remove("id"); - config.insert(String::from("id"), json!(Uuid::now_v7())); + config.insert( + String::from("id"), + serde_json::to_value(Uuid::now_v7()) + .map_err(VersionManagerError::SerdeJson)?, + ); config.remove("name"); - config.insert(String::from("name"), json!(generate_device_name())); + config.insert( + String::from("name"), + serde_json::to_value(generate_device_name()) + .map_err(VersionManagerError::SerdeJson)?, + ); - config.insert(String::from("os"), 
json!(std::env::consts::OS)); + config.insert( + String::from("os"), + serde_json::to_value(std::env::consts::OS) + .map_err(VersionManagerError::SerdeJson)?, + ); let a = serde_json::to_vec(&config).map_err(VersionManagerError::SerdeJson)?; @@ -365,6 +377,13 @@ impl NodeConfig { config.remove("sd_api_origin"); config.remove("image_labeler_version"); + config.remove("id"); + config.insert( + String::from("id"), + serde_json::to_value(DevicePubId::from(Uuid::now_v7())) + .map_err(VersionManagerError::SerdeJson)?, + ); + fs::write( path, serde_json::to_vec(&config).map_err(VersionManagerError::SerdeJson)?, From cbef19f756d73e1eee202b2e3a241f71277f0d27 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 27 Aug 2024 23:44:36 -0300 Subject: [PATCH 094/218] More tweaks on sync subcrate and other fixes --- Cargo.lock | Bin 329727 -> 329403 bytes Cargo.toml | 2 +- apps/desktop/src-tauri/Cargo.toml | 6 +- .../src/file_identifier/tasks/identifier.rs | 2 +- .../src/file_identifier/tasks/mod.rs | 7 +- .../file_identifier/tasks/object_processor.rs | 2 +- core/crates/heavy-lifting/src/indexer/mod.rs | 4 +- .../heavy-lifting/src/indexer/shallow.rs | 2 +- .../heavy-lifting/src/indexer/tasks/saver.rs | 6 +- .../src/indexer/tasks/updater.rs | 2 +- .../heavy-lifting/src/job_system/job.rs | 2 +- .../heavy-lifting/src/job_system/report.rs | 2 +- .../heavy-lifting/src/job_system/runner.rs | 2 +- .../helpers/exif_media_data.rs | 24 +++-- .../src/media_processor/shallow.rs | 2 +- .../tasks/media_data_extractor.rs | 2 +- core/crates/sync/src/backfill.rs | 45 +++++---- core/crates/sync/src/lib.rs | 12 ++- core/crates/sync/src/manager.rs | 88 ++++++++---------- core/crates/sync/tests/lib.rs | 63 +++++-------- core/crates/sync/tests/mock_instance.rs | 19 ++-- core/src/api/backups.rs | 1 + core/src/api/search/saved.rs | 16 ++-- core/src/api/sync.rs | 27 ------ core/src/api/tags.rs | 4 +- core/src/cloud/mod.rs | 8 +- core/src/cloud/sync/ingest.rs | 17 ++-- core/src/cloud/sync/mod.rs | 8 +- 
core/src/cloud/sync/receive.rs | 7 +- core/src/cloud/sync/send.rs | 11 ++- core/src/context.rs | 7 +- core/src/library/config.rs | 2 +- core/src/library/library.rs | 7 +- core/src/library/manager/mod.rs | 40 +++++--- core/src/location/manager/runner.rs | 4 +- core/src/location/manager/watcher/utils.rs | 72 +++++++------- core/src/location/mod.rs | 73 +++++++-------- core/src/object/tag/mod.rs | 16 ++-- core/src/p2p/sync/mod.rs | 21 ++--- core/src/volume/mod.rs | 73 +++++++-------- crates/ai/src/old_image_labeler/old_actor.rs | 15 +-- crates/ai/src/old_image_labeler/process.rs | 9 +- crates/ffmpeg/src/frame_decoder.rs | 2 +- crates/sync/src/factory.rs | 13 +-- 44 files changed, 349 insertions(+), 398 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aaee6b64cbb4bdae34599a7fe2cc4657418ce281..9a651846b729e3fbc21cb80b615e581cbb0bb2a3 100644 GIT binary patch delta 3606 zcmbtXYm6Ob8P1&fx?8sQ?sj*(rQ7W#A}u}r?iVS$MTl}yET<*WNPzDuEu{;+(1MVZ zXbnWsrGtDwRHPCKk`UU|GA59;)rf{*EJjUG12kfSH7Lj>QdE4;Zkw)6O^p87IkS_Q zd2i4AJkNaawVtQ%>)AKZs+#MLkmA7&59)*L%2zrw0dDj&muTA zO}^xUuAwDZFYX6iCSxtGR$bou$7T| ztdsCYNh}GDP8!Ocpw^nGjDS0ck(fH`tT4el7o4N{*kJjgd}w2%Gw&K`boTgByE2V# z-yH0=4WpaeahtfE+E6=7+{9pK^4`}s(+$3R?XUZWi@+dYd8{) zxkyek7qN1}`a~dU%^f8gb4er>un43R8(3n;I zc1Ks|$+X(RP&?ad#!j3M^2VG4U%dVC{_4t|v-8knjn;@0f+`Y)*PbhF14|5!l*h!l zf%X{^vEUlcEtN75e2Tg_IUgnFhhS1l(Tk`z-v9hPS?YWuxp5K+N1J$03&QkEi zpgy*IusXhHVGe^uYdBdWEaQ?=CZRatT;kYK?34)3>X5AVhI_@hjaFN4tne0cgxVRA z`At#u<)PI@cmDZUIcsv+?H>x}9qJ*c4Ew zt$zE?!cWh$_7sD;|DIw-9=f;atnaamq`;Lz$I;pHymRfB9 zKRVlUioWT76ey()Q3)6w+|^i+|M?o4nh&mQY|4k zjU{!usBwH*9`TK*@~=KX1GR{azwggq`F`1|KhaL^P)ZFIrWd%myh+GXB06( zdZ#_LFj7H)2eITJHL#ruKyG0|K*@KnLcR6ln@Yayj2F$W9vGfeci&&$(QLETDfzmm zUU0bVWG8RVdxsnI^TY3ys2+K%oPLT)Ctber?Q$W?)4o-XQ0$^(80R#@X)%i#;vxF*RDIp$+yt;MqN#YuLubeWT6U_I&)3$Nk?tZU)a{44l zu>7gDfZsJQ!%%me0E&9!DnQ+LPLvnsU3Zrq`HH7dSAP9OIW=GU6q;R6dB6NYnN=T} zk+1(Hx@5!{1+p9>aK0tX!FmH%c}+04mT+lB(5g*|EDBg|paAN;NE)`=>tJ18?141y 
zy0kHK?$(o{JZaJH{Gfd4;0BK#T6Y6o_kY1HC%=y@?ZGU=|}NT(Yp49)ufMLTK1x z!=(Z369I>i64a0gR0sPVJm&CZ-q$M6?{Hg2w{6-`UxrXeQGN8qwZmXmjG$ODP{+V9 zCnR}jfsMof!5Lsel_ZOV!~pgtTBuC`ZBTGH)QW2VU4401FPfF#4UNHir~^$yHR(dT zr=HGxM1cokmPaz2kiWwzi6!!WH0D;eT zqmc@2{qa>Dy7?fUm`qUHf9F8z56TBBR zRA>!-)dKijRr?2f+ub_xe>^+U>1yqthT5>}$n*Nqg44ZTlfVmLkO&HJGC^=241k|& zg_SYZCeS$T0U@?+N}NqVI#iN2>niNDp;d8y@01CQ9P3BhWOen?)=0!Q#z-u<2tfdY zG=gn%36uotOALIl1bs?@Ybg^DSD@Hz|1%4t^WW%gtL+YlH+!0jR)w zmN{st3}Ad*fE|udEohS9fH$HX*8%ndEM4NDMjSw=BWD1p_3c*k8E`6%(FvWv z1^W z`PdUhcfIBj)YZuM?=J@Gjo(Fgj59xJvij9W(UPK`{unykIZn|6rttieXi@#g zedjyh@>|}4Z+D-1pnK24(SqgT`>nB|AUO_HI4_wOI60EA6I`SeG^SVu8VR=C8z}_C z8XL#4vy5q}sqoC?gQo1QULPB%{xmwj)uwsgg7Vy|WAvS}%?B&8tiPH+)-Grsx(b0?z`+CM6 z8OR5(E4$Ag_*7SWz>jZ=%Wv6a?_9UuZ_c~gt#cpD)2owP2ddv~>Ce|}FGi~wTeel( zxAg6Md+S?$)y`_#zE`VpuMK%1%^Q$8}(7r3b_w;!E+Kl2ScT|N<)RLRW$EKt)6Nw>!=o9FtdJpTj6Kr?~E1w^~U>)g`M?| z?-n;Mn%>5PYLMmOC(%HD`efOePy7|ltuK70n0Zb?swMZ04MlG()F(*A1=fjq3oja5 zL$yy)L15{;qQ>(`gG-TVL7nF!SY{o=dDpj4Z?)&fj{L#B<-Gibf3{{-M{RFzA1+XR z;m?brsFxlt?!LZ;T3vNE48oGxT8q zH*eiMF$o*OZOt(hm=BVRC>tb$N+}~Lp1ge59XG`dcijQk#URN)>Os9tGsGax$F3-r zENwKf`|U7(D53rLYh3oHzgQrQ5h1S*--GAO79wOEQ|9Apxs zfO=fo(m4;ej@RoiZT)#^UU4xR8zM<^>1{GtdWtEPim@1kgTdz%@^wZ<=BX8)QstwR z+LJ_mq}WEGd?ss(p8Vdr)*RI2kcWTV>d&8E*ZNg{`KeaBIqJ+8t#9>A1`FN+nw&R; zVn{$zm!yb^1Yx7q+GyrD#fc})OmHL`hdtHA0>+J3CXdVlly5&YBQIZDEXu2QpzeHh zYq4Y^Obub)*;~EdGdh$gM6p025*BrsNJ<>c38WYkri4&~1GL%_2~Qz7vkX|BbS%?p{-N z&7R(tgLn|63*aE07t+;)1bLDDv5}zz59FW7spG!v>~eg{K&gyNA=LEd3p2o<=OR`hst$> z4fXWrBZtb{PUl*$e6j3c`GMu-g1r47Wk8gGCEMg;Ciy~7P;Z6*=1f5fTMP9J?Au7{~M74i2L*5_sbv7 z{Rkv*-gdHFGu5DZqc97voGe!sDi7^Oz3?{j7Ft=a`=Gq9%=>4cYleu4(Xa@r(riL# z4Ya{yAd)EKrHa5K99YH@j-!W52?#X+mpKi$;V>aF`JO9VXU&|1`6S6a*FvvNZDRZx zn43K%T3IyV6>R)y7n+vOng-2mwy@er3*co|5eZ}!RFaA$U{L3@$52lX5-3QFUIXKL z*(i>ViAU*y5xI`};N<|SSEivqOr~UjjH6@`lz;*=DoQ~XxPmV>qhJ`o##&Dq#>V(W zpzOe_5K2cSxQ}`7SJ1LqZqxV|Z(rYLz(BbEatE4TOfGH(PXd^LYFGpy1Y!)m1iHd}=^8`mGqJ=;f_`^^om?JI(YKh{3 
zBrB|}>@WIG&udp7`utR2--kAKsliElzk5^OxEt8%^)56$pXx(5Qwc0>p(fOv|KTBQ z0va8h<0f)U0}z%JT9~Lgk=4r!&eRSHwE!LeKNq=WBKcKR!=y2vlBF^l5ACCl1?(BI zPAe#dQi*d5f8Z&IBEnc0zrd8WF5qg{4ZZ(1EqUjGvbSEa7~LTA6}cE4Vm4?-jQ7?s zqD{2MLJb?bjUZA2hDK1M1a{Ja&|+elF@QjTn^GdkYU8xto<_Pq#!vwdT$aCivh2+yKMhOK81eXZ12PC>}+Nn+e)K@kXvL7v~!vgUNb+7ubp09z|z?eHdHi2~Zg@ zTmwKtDF@O64)h@rFfmaCF9W3@tiU)9N1JRzDOi#uj5jV9htRe8rpM5E6FGp-0JJH^ zKmilTl9bqi?MW=cU})#H@Jb{CAd4{>roHC@9*si-D#;@i>dmhXpc#2^H=6rlCe7aK z&AZWi#T5MRcmnmere^oV#jQp4<4>Zk;KqJ{2F~1c`BUhHjy!uGn$o~(N4vXTy$^j2 z<>6mKuL&>=^^OM(J{fbF7+CcPhMg;JmBo~E1)h~L_)w{!NB|ZAt>9Q{0!HoFvtS%{ z{uFiQ&)i>h!hSdGM_1){51`Sy`&snIaxw}S|3Eo27KTVlu)ILjzJV>&2yB`*jWq$w zsK5#`8D#{Eq5x@1!-LTVNKKIWXL?zG`vCgw>BS9!Ulzf`XhPv(Z}6W&g3T*1vynD) zA_G_k2k6tl(-H0i;*l({NNmV9Knm^0&``eSAmp+7dDJ(V!2X}17g`euTmsY, Vec<_>>(); - ( - sync.into_iter().flatten().collect(), - db.object().create_many(db_params), - ) + (sync, db.object().create_many(db_params)) }) .await?; diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs b/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs index 9569c1563..ebcb57533 100644 --- a/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs +++ b/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs @@ -1,7 +1,7 @@ use crate::{file_identifier, Error}; use sd_core_prisma_helpers::{file_path_id, object_for_file_identifier, CasId, ObjectPubId}; -use sd_core_sync::Manager as SyncManager; +use sd_core_sync::SyncManager; use sd_prisma::prisma::{file_path, object, PrismaClient}; use sd_task_system::{ diff --git a/core/crates/heavy-lifting/src/indexer/mod.rs b/core/crates/heavy-lifting/src/indexer/mod.rs index 0fa7ce732..dbf12b223 100644 --- a/core/crates/heavy-lifting/src/indexer/mod.rs +++ b/core/crates/heavy-lifting/src/indexer/mod.rs @@ -4,7 +4,7 @@ use sd_core_file_path_helper::{FilePathError, IsolatedFilePathData}; use sd_core_prisma_helpers::{ file_path_pub_and_cas_ids, 
file_path_to_isolate_with_pub_id, file_path_walker, }; -use sd_core_sync::Manager as SyncManager; +use sd_core_sync::SyncManager; use sd_prisma::{ prisma::{file_path, indexer_rule, location, PrismaClient, SortOrder}, @@ -213,7 +213,7 @@ async fn update_location_size( async fn remove_non_existing_file_paths( to_remove: Vec, db: &PrismaClient, - sync: &sd_core_sync::Manager, + sync: &SyncManager, ) -> Result { #[allow(clippy::cast_sign_loss)] let (sync_params, db_params): (Vec<_>, Vec<_>) = to_remove diff --git a/core/crates/heavy-lifting/src/indexer/shallow.rs b/core/crates/heavy-lifting/src/indexer/shallow.rs index c57993840..1c4c89ca7 100644 --- a/core/crates/heavy-lifting/src/indexer/shallow.rs +++ b/core/crates/heavy-lifting/src/indexer/shallow.rs @@ -4,7 +4,7 @@ use crate::{ use sd_core_indexer_rules::{IndexerRule, IndexerRuler}; use sd_core_prisma_helpers::location_with_indexer_rules; -use sd_core_sync::Manager as SyncManager; +use sd_core_sync::SyncManager; use sd_prisma::prisma::PrismaClient; use sd_task_system::{BaseTaskDispatcher, CancelTaskOnDrop, IntoTask, TaskDispatcher, TaskOutput}; diff --git a/core/crates/heavy-lifting/src/indexer/tasks/saver.rs b/core/crates/heavy-lifting/src/indexer/tasks/saver.rs index 31fdf8d9a..fcf2d84a7 100644 --- a/core/crates/heavy-lifting/src/indexer/tasks/saver.rs +++ b/core/crates/heavy-lifting/src/indexer/tasks/saver.rs @@ -1,7 +1,7 @@ use crate::{indexer, Error}; use sd_core_file_path_helper::{FilePathMetadata, IsolatedFilePathDataParts}; -use sd_core_sync::Manager as SyncManager; +use sd_core_sync::SyncManager; use sd_prisma::{ prisma::{file_path, location, PrismaClient}, @@ -88,7 +88,7 @@ impl Task for Saver { .. } = self; - let (sync_stuff, paths): (Vec<_>, Vec<_>) = walked_entries + let (create_crdt_ops, paths): (Vec<_>, Vec<_>) = walked_entries .drain(..) 
.map( |WalkedEntry { @@ -160,7 +160,7 @@ impl Task for Saver { .write_ops( db, ( - sync_stuff.into_iter().flatten().collect(), + create_crdt_ops, db.file_path().create_many(paths).skip_duplicates(), ), ) diff --git a/core/crates/heavy-lifting/src/indexer/tasks/updater.rs b/core/crates/heavy-lifting/src/indexer/tasks/updater.rs index c103397ec..f737dd1d4 100644 --- a/core/crates/heavy-lifting/src/indexer/tasks/updater.rs +++ b/core/crates/heavy-lifting/src/indexer/tasks/updater.rs @@ -1,7 +1,7 @@ use crate::{indexer, Error}; use sd_core_file_path_helper::{FilePathMetadata, IsolatedFilePathDataParts}; -use sd_core_sync::Manager as SyncManager; +use sd_core_sync::SyncManager; use sd_prisma::{ prisma::{file_path, object, PrismaClient}, diff --git a/core/crates/heavy-lifting/src/job_system/job.rs b/core/crates/heavy-lifting/src/job_system/job.rs index 785e33263..30b499840 100644 --- a/core/crates/heavy-lifting/src/job_system/job.rs +++ b/core/crates/heavy-lifting/src/job_system/job.rs @@ -1,6 +1,6 @@ use crate::{Error, NonCriticalError, UpdateEvent}; -use sd_core_sync::Manager as SyncManager; +use sd_core_sync::SyncManager; use sd_prisma::prisma::PrismaClient; use sd_task_system::{ diff --git a/core/crates/heavy-lifting/src/job_system/report.rs b/core/crates/heavy-lifting/src/job_system/report.rs index 06cba2af9..359bc4496 100644 --- a/core/crates/heavy-lifting/src/job_system/report.rs +++ b/core/crates/heavy-lifting/src/job_system/report.rs @@ -300,7 +300,7 @@ impl Report { Ok(()) } - pub async fn update(&mut self, db: &PrismaClient) -> Result<(), ReportError> { + pub async fn update(&self, db: &PrismaClient) -> Result<(), ReportError> { db.job() .update( job::id::equals(self.id.as_bytes().to_vec()), diff --git a/core/crates/heavy-lifting/src/job_system/runner.rs b/core/crates/heavy-lifting/src/job_system/runner.rs index ae067fb0b..57c237ead 100644 --- a/core/crates/heavy-lifting/src/job_system/runner.rs +++ b/core/crates/heavy-lifting/src/job_system/runner.rs @@ 
-313,7 +313,7 @@ impl> JobSystemRunner { let name = { let db = handle.ctx.db(); - let mut report = handle.ctx.report_mut().await; + let report = handle.ctx.report().await; if let Err(e) = report.update(db).await { error!(?e, "Failed to update report on job shutdown;"); } diff --git a/core/crates/heavy-lifting/src/media_processor/helpers/exif_media_data.rs b/core/crates/heavy-lifting/src/media_processor/helpers/exif_media_data.rs index 306984fc9..a17eb7c56 100644 --- a/core/crates/heavy-lifting/src/media_processor/helpers/exif_media_data.rs +++ b/core/crates/heavy-lifting/src/media_processor/helpers/exif_media_data.rs @@ -1,7 +1,7 @@ use crate::media_processor::{self, media_data_extractor}; use sd_core_prisma_helpers::ObjectPubId; -use sd_core_sync::Manager as SyncManager; +use sd_core_sync::SyncManager; use sd_file_ext::extensions::{Extension, ImageExtension, ALL_IMAGE_EXTENSIONS}; use sd_media_metadata::ExifMetadata; @@ -113,21 +113,19 @@ pub async fn save( let (sync_params, create) = to_query(exif_data, object_id); let db_params = create._params.clone(); - sync.write_ops( + sync.write_op( db, - ( - sync.shared_create( - prisma_sync::exif_data::SyncId { - object: prisma_sync::object::SyncId { - pub_id: object_pub_id.into(), - }, + sync.shared_create( + prisma_sync::exif_data::SyncId { + object: prisma_sync::object::SyncId { + pub_id: object_pub_id.into(), }, - sync_params, - ), - db.exif_data() - .upsert(exif_data::object_id::equals(object_id), create, db_params) - .select(exif_data::select!({ id })), + }, + sync_params, ), + db.exif_data() + .upsert(exif_data::object_id::equals(object_id), create, db_params) + .select(exif_data::select!({ id })), ) .await }) diff --git a/core/crates/heavy-lifting/src/media_processor/shallow.rs b/core/crates/heavy-lifting/src/media_processor/shallow.rs index b74c8c063..3f79192ba 100644 --- a/core/crates/heavy-lifting/src/media_processor/shallow.rs +++ b/core/crates/heavy-lifting/src/media_processor/shallow.rs @@ -4,7 +4,7 @@ use 
crate::{ }; use sd_core_file_path_helper::IsolatedFilePathData; -use sd_core_sync::Manager as SyncManager; +use sd_core_sync::SyncManager; use sd_prisma::prisma::{location, PrismaClient}; use sd_task_system::{ diff --git a/core/crates/heavy-lifting/src/media_processor/tasks/media_data_extractor.rs b/core/crates/heavy-lifting/src/media_processor/tasks/media_data_extractor.rs index 30072b1c1..eaf3b261a 100644 --- a/core/crates/heavy-lifting/src/media_processor/tasks/media_data_extractor.rs +++ b/core/crates/heavy-lifting/src/media_processor/tasks/media_data_extractor.rs @@ -8,7 +8,7 @@ use crate::{ use sd_core_file_path_helper::IsolatedFilePathData; use sd_core_prisma_helpers::{file_path_for_media_processor, ObjectPubId}; -use sd_core_sync::Manager as SyncManager; +use sd_core_sync::SyncManager; use sd_media_metadata::{ExifMetadata, FFmpegMetadata}; use sd_prisma::prisma::{exif_data, ffmpeg_data, file_path, location, object, PrismaClient}; diff --git a/core/crates/sync/src/backfill.rs b/core/crates/sync/src/backfill.rs index 6a363cbf2..85e0d8ba0 100644 --- a/core/crates/sync/src/backfill.rs +++ b/core/crates/sync/src/backfill.rs @@ -17,8 +17,11 @@ use super::{crdt_op_unchecked_db, Error}; /// Takes all the syncable data in the database and generates [`CRDTOperations`] for it. /// This is a requirement before the library can sync. 
-pub async fn backfill_operations(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { - let lock = sync.timestamp_lock.lock().await; +pub async fn backfill_operations( + db: &PrismaClient, + sync: &crate::SyncManager, +) -> Result<(), Error> { + let lock = sync.sync_lock.lock().await; let res = db ._transaction() @@ -110,7 +113,7 @@ where } #[instrument(skip(db, sync), err)] -async fn paginate_tags(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { +async fn paginate_tags(db: &PrismaClient, sync: &crate::SyncManager) -> Result<(), Error> { use tag::{color, date_created, date_modified, id, name}; let device_pub_id = &sync.device_pub_id; @@ -125,7 +128,7 @@ async fn paginate_tags(db: &PrismaClient, sync: &crate::Manager) -> Result<(), E |tag| tag.id, |tags| { tags.into_iter() - .flat_map(|t| { + .map(|t| { sync.shared_create( prisma_sync::tag::SyncId { pub_id: t.pub_id }, chain_optional_iter( @@ -148,7 +151,7 @@ async fn paginate_tags(db: &PrismaClient, sync: &crate::Manager) -> Result<(), E } #[instrument(skip(db, sync), err)] -async fn paginate_locations(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { +async fn paginate_locations(db: &PrismaClient, sync: &crate::SyncManager) -> Result<(), Error> { use location::{ available_capacity, date_created, generate_preview_media, hidden, id, include, instance, is_archived, name, path, size_in_bytes, sync_preview_media, total_capacity, @@ -174,7 +177,7 @@ async fn paginate_locations(db: &PrismaClient, sync: &crate::Manager) -> Result< |locations| { locations .into_iter() - .flat_map(|l| { + .map(|l| { sync.shared_create( prisma_sync::location::SyncId { pub_id: l.pub_id }, chain_optional_iter( @@ -212,7 +215,7 @@ async fn paginate_locations(db: &PrismaClient, sync: &crate::Manager) -> Result< } #[instrument(skip(db, sync), err)] -async fn paginate_objects(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { +async fn paginate_objects(db: &PrismaClient, sync: 
&crate::SyncManager) -> Result<(), Error> { use object::{date_accessed, date_created, favorite, hidden, id, important, kind, note}; let device_pub_id = &sync.device_pub_id; @@ -229,7 +232,7 @@ async fn paginate_objects(db: &PrismaClient, sync: &crate::Manager) -> Result<() |objects| { objects .into_iter() - .flat_map(|o| { + .map(|o| { sync.shared_create( prisma_sync::object::SyncId { pub_id: o.pub_id }, chain_optional_iter( @@ -255,7 +258,7 @@ async fn paginate_objects(db: &PrismaClient, sync: &crate::Manager) -> Result<() } #[instrument(skip(db, sync), err)] -async fn paginate_exif_datas(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { +async fn paginate_exif_datas(db: &PrismaClient, sync: &crate::SyncManager) -> Result<(), Error> { use exif_data::{ artist, camera_data, copyright, description, epoch_time, exif_version, id, include, media_date, media_location, resolution, @@ -278,7 +281,7 @@ async fn paginate_exif_datas(db: &PrismaClient, sync: &crate::Manager) -> Result |exif_datas| { exif_datas .into_iter() - .flat_map(|ed| { + .map(|ed| { sync.shared_create( prisma_sync::exif_data::SyncId { object: prisma_sync::object::SyncId { @@ -310,7 +313,7 @@ async fn paginate_exif_datas(db: &PrismaClient, sync: &crate::Manager) -> Result } #[instrument(skip(db, sync), err)] -async fn paginate_file_paths(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { +async fn paginate_file_paths(db: &PrismaClient, sync: &crate::SyncManager) -> Result<(), Error> { use file_path::{ cas_id, date_created, date_indexed, date_modified, extension, hidden, id, include, inode, integrity_checksum, is_dir, location, materialized_path, name, object, size_in_bytes_bytes, @@ -333,7 +336,7 @@ async fn paginate_file_paths(db: &PrismaClient, sync: &crate::Manager) -> Result |file_paths| { file_paths .into_iter() - .flat_map(|fp| { + .map(|fp| { sync.shared_create( prisma_sync::file_path::SyncId { pub_id: fp.pub_id }, chain_optional_iter( @@ -376,7 +379,10 @@ async fn 
paginate_file_paths(db: &PrismaClient, sync: &crate::Manager) -> Result } #[instrument(skip(db, sync), err)] -async fn paginate_tags_on_objects(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { +async fn paginate_tags_on_objects( + db: &PrismaClient, + sync: &crate::SyncManager, +) -> Result<(), Error> { use tag_on_object::{date_created, include, object_id, tag_id}; let device_pub_id = &sync.device_pub_id; @@ -397,7 +403,7 @@ async fn paginate_tags_on_objects(db: &PrismaClient, sync: &crate::Manager) -> R |tag_on_objects| { tag_on_objects .into_iter() - .flat_map(|t_o| { + .map(|t_o| { sync.relation_create( prisma_sync::tag_on_object::SyncId { tag: prisma_sync::tag::SyncId { @@ -422,7 +428,7 @@ async fn paginate_tags_on_objects(db: &PrismaClient, sync: &crate::Manager) -> R } #[instrument(skip(db, sync), err)] -async fn paginate_labels(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { +async fn paginate_labels(db: &PrismaClient, sync: &crate::SyncManager) -> Result<(), Error> { use label::{date_created, date_modified, id}; let device_pub_id = &sync.device_pub_id; @@ -438,7 +444,7 @@ async fn paginate_labels(db: &PrismaClient, sync: &crate::Manager) -> Result<(), |labels| { labels .into_iter() - .flat_map(|l| { + .map(|l| { sync.shared_create( prisma_sync::label::SyncId { name: l.name }, chain_optional_iter( @@ -459,7 +465,10 @@ async fn paginate_labels(db: &PrismaClient, sync: &crate::Manager) -> Result<(), } #[instrument(skip(db, sync), err)] -async fn paginate_labels_on_objects(db: &PrismaClient, sync: &crate::Manager) -> Result<(), Error> { +async fn paginate_labels_on_objects( + db: &PrismaClient, + sync: &crate::SyncManager, +) -> Result<(), Error> { use label_on_object::{date_created, include, label_id, object_id}; let device_pub_id = &sync.device_pub_id; @@ -480,7 +489,7 @@ async fn paginate_labels_on_objects(db: &PrismaClient, sync: &crate::Manager) -> |label_on_objects| { label_on_objects .into_iter() - .flat_map(|l_o| { + 
.map(|l_o| { sync.relation_create( prisma_sync::label_on_object::SyncId { label: prisma_sync::label::SyncId { diff --git a/core/crates/sync/src/lib.rs b/core/crates/sync/src/lib.rs index 1297e48cd..0ce608947 100644 --- a/core/crates/sync/src/lib.rs +++ b/core/crates/sync/src/lib.rs @@ -28,7 +28,7 @@ #![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)] use sd_prisma::prisma::{crdt_operation, device, PrismaClient}; -use sd_sync::CRDTOperation; +use sd_sync::{CRDTOperation, ModelId}; use std::{ collections::HashMap, @@ -43,12 +43,12 @@ mod db_operation; pub mod ingest; mod manager; -pub use ingest::*; -pub use manager::*; +pub use ingest::{Actor, Event, Handler, MessagesEvent, Request, State}; +pub use manager::{GetOpsArgs, Manager as SyncManager}; pub use uhlc::NTP64; #[derive(Clone, Debug)] -pub enum SyncMessage { +pub enum SyncEvent { Ingested, Created, } @@ -77,7 +77,9 @@ pub enum Error { #[error("database error: {0}")] Database(#[from] prisma_client_rust::QueryError), #[error("invalid model id: {0}")] - InvalidModelId(u16), + InvalidModelId(ModelId), + #[error("tried to write an empty operations list")] + EmptyOperations, } impl From for rspc::Error { diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index 1419d810e..837f9c116 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -23,15 +23,15 @@ use uuid::Uuid; use super::{ crdt_op_db, db_operation::{into_cloud_ops, into_ops}, - ingest, Error, SharedState, SyncMessage, NTP64, + ingest, Error, SharedState, SyncEvent, NTP64, }; /// Wrapper that spawns the ingest actor and provides utilities for reading and writing sync operations. 
pub struct Manager { - pub tx: broadcast::Sender, + pub tx: broadcast::Sender, pub ingest: ingest::Handler, pub shared: Arc, - pub timestamp_lock: Mutex<()>, + pub sync_lock: Mutex<()>, } impl fmt::Debug for Manager { @@ -54,7 +54,7 @@ impl Manager { current_device_pub_id: &DevicePubId, emit_messages_flag: Arc, actors: Arc, - ) -> Result<(Self, broadcast::Receiver), Error> { + ) -> Result<(Self, broadcast::Receiver), Error> { let existing_devices = db.device().find_many(vec![]).exec().await?; Self::with_existing_devices( @@ -80,7 +80,7 @@ impl Manager { emit_messages_flag: Arc, existing_devices: &[device::Data], actors: Arc, - ) -> Result<(Self, broadcast::Receiver), Error> { + ) -> Result<(Self, broadcast::Receiver), Error> { let latest_timestamp_per_device = db ._batch( existing_devices @@ -134,30 +134,30 @@ impl Manager { tx, ingest, shared, - timestamp_lock: Mutex::default(), + sync_lock: Mutex::default(), }, rx, )) } - pub fn subscribe(&self) -> broadcast::Receiver { + pub fn subscribe(&self) -> broadcast::Receiver { self.tx.subscribe() } pub async fn write_ops<'item, Q>( &self, tx: &PrismaClient, - (mut ops, queries): (Vec, Q), + (ops, queries): (Vec, Q), ) -> Result where Q: prisma_client_rust::BatchItem<'item, ReturnValue: Send> + Send, { - let ret = if self.emit_messages_flag.load(atomic::Ordering::Relaxed) { - let lock = self.timestamp_lock.lock().await; + if ops.is_empty() { + return Err(Error::EmptyOperations); + } - for op in &mut ops { - op.timestamp = *self.get_clock().new_timestamp().get_time(); - } + let ret = if self.emit_messages_flag.load(atomic::Ordering::Relaxed) { + let lock_guard = self.sync_lock.lock().await; let (res, _) = tx ._batch(( @@ -176,11 +176,11 @@ impl Manager { .insert(self.device_pub_id.clone(), last.timestamp); } - if self.tx.send(SyncMessage::Created).is_err() { + if self.tx.send(SyncEvent::Created).is_err() { warn!("failed to send created message on `write_ops`"); } - drop(lock); + drop(lock_guard); res } else { @@ -193,24 
+193,22 @@ impl Manager { pub async fn write_op<'item, Q>( &self, tx: &PrismaClient, - mut op: CRDTOperation, + op: CRDTOperation, query: Q, ) -> Result where Q: prisma_client_rust::BatchItem<'item, ReturnValue: Send> + Send, { let ret = if self.emit_messages_flag.load(atomic::Ordering::Relaxed) { - let lock = self.timestamp_lock.lock().await; - - op.timestamp = *self.get_clock().new_timestamp().get_time(); + let lock_guard = self.sync_lock.lock().await; let ret = tx._batch((crdt_op_db(&op)?.to_query(tx), query)).await?.1; - if self.tx.send(SyncMessage::Created).is_err() { + if self.tx.send(SyncEvent::Created).is_err() { warn!("failed to send created message on `write_op`"); } - drop(lock); + drop(lock_guard); ret } else { @@ -235,7 +233,7 @@ impl Manager { self.db .crdt_operation() .find_many(vec![ - crdt_operation::device::is(vec![device::pub_id::equals(device_pub_id.into())]), + crdt_operation::device_pub_id::equals(device_pub_id.into()), #[allow(clippy::cast_possible_wrap)] crdt_operation::timestamp::gt(timestamp.as_u64() as i64), ]) @@ -248,12 +246,15 @@ impl Manager { .collect() } - pub async fn get_ops(&self, args: GetOpsArgs) -> Result, Error> { + pub async fn get_ops( + &self, + count: u32, + timestamp_per_device: Vec<(DevicePubId, NTP64)>, + ) -> Result, Error> { let mut ops = self .db .crdt_operation() - .find_many(vec![or(args - .timestamp_per_device + .find_many(vec![or(timestamp_per_device .iter() .map(|(device_pub_id, timestamp)| { and![ @@ -271,14 +272,14 @@ impl Manager { }) .chain([crdt_operation::device::is_not(vec![ device::pub_id::in_vec( - args.timestamp_per_device + timestamp_per_device .iter() .map(|(device_pub_id, _)| device_pub_id.to_db()) .collect(), ), ])]) .collect())]) - .take(i64::from(args.count)) + .take(i64::from(count)) .order_by(crdt_operation::timestamp::order(SortOrder::Asc)) .exec() .await?; @@ -290,27 +291,22 @@ impl Manager { o => o, }); - ops.into_iter() - .take(args.count as usize) - .map(into_ops) - .collect() + 
ops.into_iter().take(count as usize).map(into_ops).collect() } pub async fn get_cloud_ops( &self, - args: GetOpsArgs, + count: u32, + timestamp_per_device: Vec<(DevicePubId, NTP64)>, ) -> Result, Error> { let mut ops = self .db .cloud_crdt_operation() - .find_many(vec![or(args - .timestamp_per_device + .find_many(vec![or(timestamp_per_device .iter() .map(|(device_pub_id, timestamp)| { and![ - cloud_crdt_operation::device::is(vec![device::pub_id::equals( - device_pub_id.to_db() - )]), + cloud_crdt_operation::device_pub_id::equals(device_pub_id.to_db()), cloud_crdt_operation::timestamp::gt({ #[allow(clippy::cast_possible_wrap)] // SAFETY: we had to store using i64 due to SQLite limitations @@ -320,16 +316,14 @@ impl Manager { }) ] }) - .chain([cloud_crdt_operation::device::is_not(vec![ - device::pub_id::in_vec( - args.timestamp_per_device - .iter() - .map(|(device_pub_id, _)| device_pub_id.to_db()) - .collect(), - ), - ])]) + .chain([cloud_crdt_operation::device_pub_id::not_in_vec( + timestamp_per_device + .iter() + .map(|(device_pub_id, _)| device_pub_id.to_db()) + .collect(), + )]) .collect())]) - .take(i64::from(args.count)) + .take(i64::from(count)) .order_by(cloud_crdt_operation::timestamp::order(SortOrder::Asc)) .exec() .await?; @@ -342,7 +336,7 @@ impl Manager { }); ops.into_iter() - .take(args.count as usize) + .take(count as usize) .map(into_cloud_ops) .collect() } diff --git a/core/crates/sync/tests/lib.rs b/core/crates/sync/tests/lib.rs index b2a27b516..708494382 100644 --- a/core/crates/sync/tests/lib.rs +++ b/core/crates/sync/tests/lib.rs @@ -17,29 +17,28 @@ const MOCK_LOCATION_PATH: &str = "/User/Anon/Documents"; async fn write_test_location(instance: &Device) -> location::Data { let location_pub_id = Uuid::new_v4(); + let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [ + sync_db_entry!(MOCK_LOCATION_NAME, location::name), + sync_db_entry!(MOCK_LOCATION_PATH, location::path), + ] + .into_iter() + .unzip(); + let location = instance .sync - 
.write_ops(&instance.db, { - let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [ - sync_db_entry!(MOCK_LOCATION_NAME, location::name), - sync_db_entry!(MOCK_LOCATION_PATH, location::path), - ] - .into_iter() - .unzip(); - - ( - instance.sync.shared_create( - prisma_sync::location::SyncId { - pub_id: uuid_to_bytes(&location_pub_id), - }, - sync_ops, - ), - instance - .db - .location() - .create(uuid_to_bytes(&location_pub_id), db_ops), - ) - }) + .write_op( + &instance.db, + instance.sync.shared_create( + prisma_sync::location::SyncId { + pub_id: uuid_to_bytes(&location_pub_id), + }, + sync_ops, + ), + instance + .db + .location() + .create(uuid_to_bytes(&location_pub_id), db_ops), + ) .await .expect("failed to create mock location"); @@ -96,13 +95,7 @@ async fn writes_operations_and_rows_together() -> Result<(), Box Result<(), Box> assert!(matches!( instance2_sync_rx.recv().await?, - SyncMessage::Ingested + SyncEvent::Ingested )); - let out = instance2 - .sync - .get_ops(GetOpsArgs { - timestamp_per_device: vec![], - count: 100, - }) - .await?; + let out = instance2.sync.get_ops(100, vec![]).await?; assert_locations_equality( &instance1.db.location().find_many(vec![]).exec().await?[0], @@ -173,7 +160,7 @@ async fn no_update_after_delete() -> Result<(), Box> { assert!(matches!( instance2_sync_rx.recv().await?, - SyncMessage::Ingested + SyncEvent::Ingested )); instance2 @@ -189,7 +176,7 @@ async fn no_update_after_delete() -> Result<(), Box> { assert!(matches!( instance1.sync_rx.resubscribe().recv().await?, - SyncMessage::Ingested + SyncEvent::Ingested )); instance1 diff --git a/core/crates/sync/tests/mock_instance.rs b/core/crates/sync/tests/mock_instance.rs index fd1c13700..14779588e 100644 --- a/core/crates/sync/tests/mock_instance.rs +++ b/core/crates/sync/tests/mock_instance.rs @@ -17,8 +17,8 @@ fn db_path(id: Uuid) -> String { pub struct Device { pub pub_id: DevicePubId, pub db: Arc, - pub sync: Arc, - pub sync_rx: Arc>, + pub sync: Arc, + pub sync_rx: Arc>, } impl 
Device { @@ -42,7 +42,7 @@ impl Device { .await .unwrap(); - let (sync, sync_rx) = sd_core_sync::Manager::new( + let (sync, sync_rx) = sd_core_sync::SyncManager::new( Arc::clone(&db), &device_pub_id, Arc::new(AtomicBool::new(true)), @@ -82,7 +82,7 @@ impl Device { async move { while let Ok(msg) = sync_rx_left.recv().await { info!(?msg, "sync_rx_left received message"); - if matches!(msg, SyncMessage::Created) { + if matches!(msg, SyncEvent::Created) { right .sync .ingest @@ -106,14 +106,7 @@ impl Device { info!(?msg, "right instance received request"); match msg { ingest::Request::Messages { timestamps, tx } => { - let messages = left - .sync - .get_ops(GetOpsArgs { - timestamp_per_device: timestamps, - count: 100, - }) - .await - .unwrap(); + let messages = left.sync.get_ops(100, timestamps).await.unwrap(); let ingest = &right.sync.ingest; @@ -135,7 +128,7 @@ impl Device { } } ingest::Request::FinishedIngesting => { - right.sync.tx.send(SyncMessage::Ingested).unwrap(); + right.sync.tx.send(SyncEvent::Ingested).unwrap(); } } } diff --git a/core/src/api/backups.rs b/core/src/api/backups.rs index de3bb1deb..01e49ced0 100644 --- a/core/src/api/backups.rs +++ b/core/src/api/backups.rs @@ -381,6 +381,7 @@ async fn restore_backup(node: &Arc, path: impl AsRef) -> Result AlphaRouter { .into_iter() .unzip(); - sync.write_ops( + sync.write_op( db, - ( - sync.shared_create( - prisma_sync::saved_search::SyncId { - pub_id: pub_id.clone(), - }, - sync_params, - ), - db.saved_search().create(pub_id, db_params), + sync.shared_create( + prisma_sync::saved_search::SyncId { + pub_id: pub_id.clone(), + }, + sync_params, ), + db.saved_search().create(pub_id, db_params), ) .await?; diff --git a/core/src/api/sync.rs b/core/src/api/sync.rs index 42725d0dd..09e3a62b4 100644 --- a/core/src/api/sync.rs +++ b/core/src/api/sync.rs @@ -1,5 +1,4 @@ use rspc::alpha::AlphaRouter; -use sd_core_sync::GetOpsArgs; use std::sync::atomic::Ordering; use crate::util::MaybeUndefined; @@ -8,32 +7,6 @@ use 
super::{utils::library, Ctx, R}; pub(crate) fn mount() -> AlphaRouter { R.router() - .procedure("newMessage", { - R.with2(library()) - .subscription(|(_, library), _: ()| async move { - async_stream::stream! { - let mut rx = library.sync.subscribe(); - while let Ok(_msg) = rx.recv().await { - // let op = match msg { - // SyncMessage::Ingested => (), - // SyncMessage::Created => op - // }; - yield (); - } - } - }) - }) - // .procedure("messages", { - // R.with2(library()).query(|(_, library), _: ()| async move { - // Ok(library - // .sync - // .get_ops(GetOpsArgs { - // timestamp_per_device: vec![], - // count: 1000, - // }) - // .await?) - // }) - // }) .procedure("backfill", { R.with2(library()) .mutation(|(node, library), _: ()| async move { diff --git a/core/src/api/tags.rs b/core/src/api/tags.rs index 98b06ef4c..963caff86 100644 --- a/core/src/api/tags.rs +++ b/core/src/api/tags.rs @@ -223,7 +223,7 @@ pub(crate) fn mount() -> AlphaRouter { .map(|fp| { let id = uuid_to_bytes(&Uuid::now_v7()); - sync_params.extend(sync.shared_create( + sync_params.push(sync.shared_create( prisma_sync::object::SyncId { pub_id: id.clone() }, [], )); @@ -270,7 +270,7 @@ pub(crate) fn mount() -> AlphaRouter { ))], }); - sync_ops.extend(sync.relation_create(sync_id!(pub_id), [])); + sync_ops.push(sync.relation_create(sync_id!(pub_id), [])); (sync_ops, db_creates) }, diff --git a/core/src/cloud/mod.rs b/core/src/cloud/mod.rs index 529c18cd1..1efcbafe5 100644 --- a/core/src/cloud/mod.rs +++ b/core/src/cloud/mod.rs @@ -1,9 +1,11 @@ +use crate::Node; + +use sd_core_sync::SyncManager; + use std::sync::Arc; use uuid::Uuid; -use crate::Node; - pub mod sync; #[derive(Default)] @@ -16,7 +18,7 @@ pub async fn start( actors: &Arc, library_id: Uuid, instance_uuid: Uuid, - sync: &Arc, + sync: &Arc, db: &Arc, ) -> State { let sync = sync::declare_actors( diff --git a/core/src/cloud/sync/ingest.rs b/core/src/cloud/sync/ingest.rs index 78e5c9ae2..7a7ae8ba3 100644 --- a/core/src/cloud/sync/ingest.rs 
+++ b/core/src/cloud/sync/ingest.rs @@ -1,5 +1,7 @@ use crate::cloud::sync::err_break; +use sd_core_sync::SyncManager; + use sd_actors::Stopper; use sd_prisma::prisma::cloud_crdt_operation; use sd_sync::CompressedCRDTOperationsPerModelPerDevice; @@ -22,7 +24,7 @@ use tracing::debug; // and applying them to the local database via the sync system's ingest actor. pub async fn run_actor( - sync: Arc, + sync: Arc, notify: Arc, state: Arc, state_notify: Arc, @@ -63,15 +65,10 @@ pub async fn run_actor( Request::Messages { timestamps, .. } => timestamps, }; - let (ops_ids, ops): (Vec<_>, Vec<_>) = err_break!( - sync.get_cloud_ops(GetOpsArgs { - timestamp_per_device: timestamps, - count: OPS_PER_REQUEST, - }) - .await - ) - .into_iter() - .unzip(); + let (ops_ids, ops): (Vec<_>, Vec<_>) = + err_break!(sync.get_cloud_ops(OPS_PER_REQUEST, timestamps,).await) + .into_iter() + .unzip(); if ops.is_empty() { break; diff --git a/core/src/cloud/sync/mod.rs b/core/src/cloud/sync/mod.rs index 10422b0ff..272bb5465 100644 --- a/core/src/cloud/sync/mod.rs +++ b/core/src/cloud/sync/mod.rs @@ -1,9 +1,11 @@ +use crate::Node; + +use sd_core_sync::SyncManager; + use std::sync::{atomic::AtomicBool, Arc}; use tokio::sync::Notify; use uuid::Uuid; -use crate::Node; - pub mod ingest; pub mod receive; pub mod send; @@ -21,7 +23,7 @@ pub async fn declare_actors( actors: &Arc, library_id: Uuid, instance_uuid: Uuid, - sync: Arc, + sync: Arc, db: Arc, ) -> State { let ingest_notify = Arc::new(Notify::new()); diff --git a/core/src/cloud/sync/receive.rs b/core/src/cloud/sync/receive.rs index 3a8925a37..908eee018 100644 --- a/core/src/cloud/sync/receive.rs +++ b/core/src/cloud/sync/receive.rs @@ -1,8 +1,8 @@ use crate::{library::Libraries, Node}; -use futures::FutureExt; +use sd_core_sync::{DevicePubId, SyncManager}; + use sd_actors::Stopper; -use sd_core_sync::DevicePubId; use sd_p2p::RemoteIdentity; use sd_prisma::prisma::{cloud_crdt_operation, device, instance, PrismaClient}; use 
sd_sync::CRDTOperation; @@ -14,6 +14,7 @@ use std::{ }; use chrono::Utc; +use futures::FutureExt; use serde_json::to_vec; use tokio::sync::Notify; use uuid::Uuid; @@ -26,7 +27,7 @@ pub async fn run_actor( db: Arc, library_id: Uuid, instance_uuid: Uuid, - sync: Arc, + sync: Arc, ingest_notify: Arc, node: Arc, active: Arc, diff --git a/core/src/cloud/sync/send.rs b/core/src/cloud/sync/send.rs index f566aa26c..46cfd9556 100644 --- a/core/src/cloud/sync/send.rs +++ b/core/src/cloud/sync/send.rs @@ -1,6 +1,7 @@ -use sd_actors::Stopper; use sd_core_cloud_services::CloudServices; -use sd_core_sync::SyncMessage; +use sd_core_sync::{SyncEvent, SyncManager}; + +use sd_actors::Stopper; use std::{ sync::{ @@ -23,7 +24,7 @@ enum RaceNotifiedOrStopped { pub async fn run_actor( library_id: Uuid, - sync: Arc, + sync: Arc, cloud_services: CloudServices, is_active: Arc, state_notify: Arc, @@ -126,10 +127,10 @@ pub async fn run_actor( } } -async fn wait_notification(mut rx: broadcast::Receiver) -> RaceNotifiedOrStopped { +async fn wait_notification(mut rx: broadcast::Receiver) -> RaceNotifiedOrStopped { // wait until Created message comes in loop { - if let Ok(SyncMessage::Created) = rx.recv().await { + if let Ok(SyncEvent::Created) = rx.recv().await { break; }; } diff --git a/core/src/context.rs b/core/src/context.rs index 217acd54b..3a3f23ee0 100644 --- a/core/src/context.rs +++ b/core/src/context.rs @@ -4,6 +4,7 @@ use sd_core_heavy_lifting::{ job_system::report::{Report, Status}, OuterContext, ProgressUpdate, UpdateEvent, }; +use sd_core_sync::SyncManager; use std::{ ops::{Deref, DerefMut}, @@ -49,7 +50,7 @@ impl OuterContext for NodeContext { &self.library.db } - fn sync(&self) -> &Arc { + fn sync(&self) -> &Arc { &self.library.sync } @@ -96,7 +97,7 @@ impl OuterContext for JobContext &Arc { + fn sync(&self) -> &Arc { self.outer_ctx.sync() } @@ -191,7 +192,7 @@ impl sd_core_heavy_lifting::JobContext< spawn({ let db = Arc::clone(&library.db); - let mut report = report.clone(); + 
let report = report.clone(); async move { if let Err(e) = report.update(&db).await { error!( diff --git a/core/src/library/config.rs b/core/src/library/config.rs index c8cb9db7d..53390fad8 100644 --- a/core/src/library/config.rs +++ b/core/src/library/config.rs @@ -104,7 +104,7 @@ impl LibraryConfig { pub(crate) async fn load( path: impl AsRef, - node_config: &NodeConfig, + _node_config: &NodeConfig, db: &PrismaClient, ) -> Result { let path = path.as_ref(); diff --git a/core/src/library/library.rs b/core/src/library/library.rs index 593c3509b..7423c870a 100644 --- a/core/src/library/library.rs +++ b/core/src/library/library.rs @@ -1,9 +1,10 @@ -use crate::{api::CoreEvent, cloud, sync, Node}; +use crate::{api::CoreEvent, cloud, Node}; use sd_core_file_path_helper::IsolatedFilePathData; use sd_core_heavy_lifting::media_processor::ThumbnailKind; use sd_core_prisma_helpers::{file_path_to_full_path, CasId}; +use sd_core_sync::SyncManager; use sd_p2p::Identity; use sd_prisma::prisma::{file_path, location, PrismaClient}; use sd_utils::{db::maybe_missing, error::FileIOError}; @@ -29,7 +30,7 @@ pub struct Library { config: RwLock, /// db holds the database client for the current library. 
pub db: Arc, - pub sync: Arc, + pub sync: Arc, pub cloud: cloud::State, /// key manager that provides encryption keys to functions that require them // pub key_manager: Arc, @@ -70,7 +71,7 @@ impl Library { identity: Arc, db: Arc, node: &Arc, - sync: Arc, + sync: Arc, cloud: cloud::State, do_cloud_sync: broadcast::Sender<()>, actors: Arc, diff --git a/core/src/library/manager/mod.rs b/core/src/library/manager/mod.rs index 476926e78..d5da65a6e 100644 --- a/core/src/library/manager/mod.rs +++ b/core/src/library/manager/mod.rs @@ -3,14 +3,14 @@ use crate::{ invalidate_query, location::metadata::{LocationMetadataError, SpacedriveLocationMetadataFile}, object::tag, - p2p, sync, + p2p, util::{mpscrr, MaybeUndefined}, Node, }; -use sd_core_sync::SyncMessage; +use sd_core_sync::{SyncEvent, SyncManager}; use sd_p2p::{Identity, RemoteIdentity}; -use sd_prisma::prisma::{instance, location}; +use sd_prisma::prisma::{device, instance, location}; use sd_utils::{ db, error::{FileIOError, NonUtf8PathError}, @@ -133,7 +133,7 @@ impl Libraries { } let _library_arc = self - .load(library_id, &db_path, config_path, None, true, node) + .load(library_id, &db_path, config_path, None, None, true, node) .await?; // FIX-ME: Linux releases crashes with *** stack smashing detected *** if spawn_volume_watcher is enabled @@ -203,6 +203,15 @@ impl Libraries { id, self.libraries_dir.join(format!("{id}.db")), config_path, + Some(device::Create { + pub_id: node_cfg.id.to_db(), + _params: vec![ + device::name::set(Some(node_cfg.name.clone())), + device::os::set(Some(node_cfg.os as i32)), + device::hardware_model::set(Some(node_cfg.hardware_model as i32)), + device::date_created::set(Some(now)), + ], + }), Some({ let identity = Identity::new(); let mut create = instance.unwrap_or_else(|| instance::Create { @@ -438,7 +447,8 @@ impl Libraries { id: Uuid, db_path: impl AsRef, config_path: impl AsRef, - create: Option, + maybe_create_device: Option, + maybe_create_instance: Option, should_seed: bool, 
node: &Arc, ) -> Result, LibraryManagerError> { @@ -453,7 +463,12 @@ impl Libraries { ); let db = Arc::new(db::load_and_migrate(&db_url).await?); - if let Some(create) = create { + if let Some(create) = maybe_create_device { + create.to_query(&db).exec().await?; + } + + // TODO: remove instances from locations + if let Some(create) = maybe_create_instance { create.to_query(&db).exec().await?; } @@ -474,10 +489,9 @@ impl Libraries { let devices = db.device().find_many(vec![]).exec().await?; let device_pub_id_to_db = device_pub_id.to_db(); - if devices + if !devices .iter() - .find(|device| device.pub_id == device_pub_id_to_db) - .is_none() + .any(|device| device.pub_id == device_pub_id_to_db) { return Err(LibraryManagerError::CurrentDeviceNotFound(device_pub_id)); } @@ -536,7 +550,7 @@ impl Libraries { let actors = Default::default(); - let (sync, sync_rx) = sync::Manager::with_existing_devices( + let (sync, sync_rx) = SyncManager::with_existing_devices( Arc::clone(&db), &device_pub_id, Arc::clone(&config.generate_sync_operations), @@ -746,7 +760,7 @@ impl Libraries { async fn sync_rx_actor( library: Arc, node: Arc, - mut sync_rx: broadcast::Receiver, + mut sync_rx: broadcast::Receiver, ) { loop { let Ok(msg) = sync_rx.recv().await else { @@ -755,10 +769,10 @@ async fn sync_rx_actor( match msg { // TODO: Any sync event invalidates the entire React Query cache this is a hacky workaround until the new invalidation system. 
- SyncMessage::Ingested => node.emit(CoreEvent::InvalidateOperation( + SyncEvent::Ingested => node.emit(CoreEvent::InvalidateOperation( InvalidateOperationEvent::all(), )), - SyncMessage::Created => { + SyncEvent::Created => { p2p::sync::originator(library.clone(), &library.sync, &node.p2p).await } } diff --git a/core/src/location/manager/runner.rs b/core/src/location/manager/runner.rs index 735d4b6f2..a15890f00 100644 --- a/core/src/location/manager/runner.rs +++ b/core/src/location/manager/runner.rs @@ -3,7 +3,7 @@ use crate::{ Node, }; -use sd_core_prisma_helpers::{location_ids_and_path, DevicePubId}; +use sd_core_prisma_helpers::location_ids_and_path; use sd_prisma::prisma::location; use sd_utils::db::maybe_missing; @@ -47,7 +47,7 @@ struct Runner { impl Runner { async fn new(node: Arc) -> Self { Self { - device_pub_id_to_db: Some(DevicePubId::from(node.config.get().await.id).to_db()), + device_pub_id_to_db: Some(node.config.get().await.id.to_db()), node, locations_to_check: HashMap::new(), locations_watched: HashMap::new(), diff --git a/core/src/location/manager/watcher/utils.rs b/core/src/location/manager/watcher/utils.rs index e6380dd5e..7043efc4d 100644 --- a/core/src/location/manager/watcher/utils.rs +++ b/core/src/location/manager/watcher/utils.rs @@ -352,28 +352,26 @@ async fn inner_create_file( DateTime::::from(fs_metadata.created_or_now()).into(); let int_kind = kind as i32; - sync.write_ops( + sync.write_op( db, - ( - sync.shared_create( - prisma_sync::object::SyncId { - pub_id: pub_id.to_db(), - }, - [ - (object::date_created::NAME, msgpack!(date_created)), - (object::kind::NAME, msgpack!(int_kind)), - ], - ), - db.object() - .create( - pub_id.into(), - vec![ - object::date_created::set(Some(date_created)), - object::kind::set(Some(int_kind)), - ], - ) - .select(object_ids::select()), + sync.shared_create( + prisma_sync::object::SyncId { + pub_id: pub_id.to_db(), + }, + [ + (object::date_created::NAME, msgpack!(date_created)), + (object::kind::NAME, 
msgpack!(int_kind)), + ], ), + db.object() + .create( + pub_id.into(), + vec![ + object::date_created::set(Some(date_created)), + object::kind::set(Some(int_kind)), + ], + ) + .select(object_ids::select()), ) .await? }; @@ -709,25 +707,23 @@ async fn inner_update_file( let date_created: DateTime = DateTime::::from(fs_metadata.created_or_now()).into(); - sync.write_ops( + sync.write_op( db, - ( - sync.shared_create( - prisma_sync::object::SyncId { - pub_id: pub_id.to_db(), - }, - [ - (object::date_created::NAME, msgpack!(date_created)), - (object::kind::NAME, msgpack!(int_kind)), - ], - ), - db.object().create( - pub_id.to_db(), - vec![ - object::date_created::set(Some(date_created)), - object::kind::set(Some(int_kind)), - ], - ), + sync.shared_create( + prisma_sync::object::SyncId { + pub_id: pub_id.to_db(), + }, + [ + (object::date_created::NAME, msgpack!(date_created)), + (object::kind::NAME, msgpack!(int_kind)), + ], + ), + db.object().create( + pub_id.to_db(), + vec![ + object::date_created::set(Some(date_created)), + object::kind::set(Some(int_kind)), + ], ), ) .await?; diff --git a/core/src/location/mod.rs b/core/src/location/mod.rs index 0e378dc7c..32732c489 100644 --- a/core/src/location/mod.rs +++ b/core/src/location/mod.rs @@ -767,41 +767,34 @@ async fn create_location( let date_created = Utc::now(); + let device_pub_id = sync.device_pub_id.to_db(); + + let (sync_values, mut db_params) = [ + sync_db_entry!(&name, location::name), + sync_db_entry!(path, location::path), + sync_db_entry!(date_created, location::date_created), + sync_db_entry!(device_pub_id, location::device_pub_id), + ] + .into_iter() + .unzip::<_, _, Vec<_>, Vec<_>>(); + + // temporary workaround until we remove instances from locations + db_params.push(location::instance_id::set(Some( + library.config().await.instance_id, + ))); + let location = sync - .write_ops( + .write_op( db, - ( - sync.shared_create( - prisma_sync::location::SyncId { - pub_id: location_pub_id.as_bytes().to_vec(), - 
}, - [ - (location::name::NAME, msgpack!(&name)), - (location::path::NAME, msgpack!(&path)), - (location::date_created::NAME, msgpack!(date_created)), - // ( - // location::instance::NAME, - // msgpack!(prisma_sync::instance::SyncId { - // pub_id: uuid_to_bytes(sync.instance) - // }), - // ), - ], - ), - db.location() - .create( - location_pub_id.as_bytes().to_vec(), - vec![ - location::name::set(Some(name.clone())), - location::path::set(Some(path)), - location::date_created::set(Some(date_created.into())), - location::instance_id::set(Some(library.config().await.instance_id)), - // location::instance::connect(instance::id::equals( - // library.config.instance_id.as_bytes().to_vec(), - // )), - ], - ) - .include(location_with_indexer_rules::include()), + sync.shared_create( + prisma_sync::location::SyncId { + pub_id: location_pub_id.as_bytes().to_vec(), + }, + sync_values, ), + db.location() + .create(location_pub_id.as_bytes().to_vec(), db_params) + .include(location_with_indexer_rules::include()), ) .await?; @@ -1163,17 +1156,15 @@ pub async fn create_file_path( let pub_id = sd_utils::uuid_to_bytes(&Uuid::now_v7()); let created_path = sync - .write_ops( + .write_op( db, - ( - sync.shared_create( - prisma_sync::file_path::SyncId { - pub_id: pub_id.clone(), - }, - sync_params, - ), - db.file_path().create(pub_id, db_params), + sync.shared_create( + prisma_sync::file_path::SyncId { + pub_id: pub_id.clone(), + }, + sync_params, ), + db.file_path().create(pub_id, db_params), ) .await?; diff --git a/core/src/object/tag/mod.rs b/core/src/object/tag/mod.rs index a8d232cf6..98238462b 100644 --- a/core/src/object/tag/mod.rs +++ b/core/src/object/tag/mod.rs @@ -32,17 +32,15 @@ impl TagCreateArgs { .into_iter() .unzip(); - sync.write_ops( + sync.write_op( db, - ( - sync.shared_create( - prisma_sync::tag::SyncId { - pub_id: pub_id.clone(), - }, - sync_params, - ), - db.tag().create(pub_id, db_params), + sync.shared_create( + prisma_sync::tag::SyncId { + pub_id: 
pub_id.clone(), + }, + sync_params, ), + db.tag().create(pub_id, db_params), ) .await } diff --git a/core/src/p2p/sync/mod.rs b/core/src/p2p/sync/mod.rs index ca2ea73a5..f1255d144 100644 --- a/core/src/p2p/sync/mod.rs +++ b/core/src/p2p/sync/mod.rs @@ -24,6 +24,7 @@ mod originator { use super::*; use responder::tx as rx; + use sd_core_sync::SyncManager; use sd_p2p_tunnel::Tunnel; pub mod tx { @@ -87,11 +88,7 @@ mod originator { #[instrument(skip(sync, p2p))] /// REMEMBER: This only syncs one direction! - pub async fn run( - library: Arc, - sync: &Arc, - p2p: &Arc, - ) { + pub async fn run(library: Arc, sync: &Arc, p2p: &Arc) { for (remote_identity, peer) in p2p.get_library_instances(&library.id) { if !peer.is_connected() { continue; @@ -119,15 +116,17 @@ mod originator { .unwrap(); tunnel.flush().await.unwrap(); - while let Ok(rx::MainRequest::GetOperations(args)) = - rx::MainRequest::from_stream(&mut tunnel).await + while let Ok(rx::MainRequest::GetOperations(GetOpsArgs { + timestamp_per_device, + count, + })) = rx::MainRequest::from_stream(&mut tunnel).await { - let ops = sync.get_ops(args).await.unwrap(); - tunnel .write_all( - &tx::Operations(CompressedCRDTOperationsPerModelPerDevice::new(ops)) - .to_bytes(), + &tx::Operations(CompressedCRDTOperationsPerModelPerDevice::new( + sync.get_ops(count, timestamp_per_device).await.unwrap(), + )) + .to_bytes(), ) .await .unwrap(); diff --git a/core/src/volume/mod.rs b/core/src/volume/mod.rs index d746d4d1e..48a8af0a6 100644 --- a/core/src/volume/mod.rs +++ b/core/src/volume/mod.rs @@ -2,7 +2,7 @@ use crate::{library::Library, Node}; -use sd_core_sync::Manager as SyncManager; +use sd_core_sync::SyncManager; use sd_prisma::{ prisma::{storage_statistics, PrismaClient}, prisma_sync, @@ -572,41 +572,39 @@ async fn update_storage_statistics( } else { let new_storage_statistics_id = uuid_to_bytes(&Uuid::now_v7()); - sync.write_ops( + sync.write_op( db, - ( - sync.shared_create( - prisma_sync::storage_statistics::SyncId { - 
pub_id: new_storage_statistics_id.clone(), - }, - [ - ( - storage_statistics::total_capacity::NAME, - msgpack!(total_capacity), - ), - ( - storage_statistics::available_capacity::NAME, - msgpack!(available_capacity), - ), - ( - storage_statistics::device_pub_id::NAME, - msgpack!(device_pub_id), - ), - ], - ), - db.storage_statistics() - .create( - new_storage_statistics_id, - vec![ - storage_statistics::total_capacity::set(total_capacity as i64), - storage_statistics::available_capacity::set(available_capacity as i64), - storage_statistics::device_pub_id::set(Some(device_pub_id.clone())), - ], - ) - // We don't need any data here, just the id avoids receiving the entire object - // as we can't pass an empty select macro call - .select(storage_statistics::select!({ id })), + sync.shared_create( + prisma_sync::storage_statistics::SyncId { + pub_id: new_storage_statistics_id.clone(), + }, + [ + ( + storage_statistics::total_capacity::NAME, + msgpack!(total_capacity), + ), + ( + storage_statistics::available_capacity::NAME, + msgpack!(available_capacity), + ), + ( + storage_statistics::device_pub_id::NAME, + msgpack!(device_pub_id), + ), + ], ), + db.storage_statistics() + .create( + new_storage_statistics_id, + vec![ + storage_statistics::total_capacity::set(total_capacity as i64), + storage_statistics::available_capacity::set(available_capacity as i64), + storage_statistics::device_pub_id::set(Some(device_pub_id.clone())), + ], + ) + // We don't need any data here, just the id avoids receiving the entire object + // as we can't pass an empty select macro call + .select(storage_statistics::select!({ id })), ) .await?; } @@ -625,12 +623,7 @@ pub fn save_storage_statistics(node: &Node) { .await .into_iter() .map(move |library: Arc| async move { - let Library { - db, - sync, - instance_uuid, - .. - } = &*library; + let Library { db, sync, .. 
} = &*library; update_storage_statistics(db, sync, total_capacity, available_capacity).await }) diff --git a/crates/ai/src/old_image_labeler/old_actor.rs b/crates/ai/src/old_image_labeler/old_actor.rs index 257f69986..108f0dfa6 100644 --- a/crates/ai/src/old_image_labeler/old_actor.rs +++ b/crates/ai/src/old_image_labeler/old_actor.rs @@ -1,5 +1,6 @@ use sd_core_prisma_helpers::file_path_for_media_processor; +use sd_core_sync::SyncManager; use sd_prisma::prisma::{location, PrismaClient}; use sd_utils::error::FileIOError; @@ -38,7 +39,7 @@ const PENDING_BATCHES_FILE: &str = "pending_image_labeler_batches.bin"; type ResumeBatchRequest = ( BatchToken, Arc, - Arc, + Arc, oneshot::Sender, ImageLabelerError>>, ); @@ -55,7 +56,7 @@ pub(super) struct Batch { pub(super) output_tx: chan::Sender, pub(super) is_resumable: bool, pub(super) db: Arc, - pub(super) sync: Arc, + pub(super) sync: Arc, } #[derive(Serialize, Deserialize, Debug)] @@ -168,7 +169,7 @@ impl OldImageLabeler { location_path: PathBuf, file_paths: Vec, db: Arc, - sync: Arc, + sync: Arc, is_resumable: bool, ) -> (BatchToken, chan::Receiver) { let (tx, rx) = chan::bounded(usize::max(file_paths.len(), 1)); @@ -206,7 +207,7 @@ impl OldImageLabeler { location_path: PathBuf, file_paths: Vec, db: Arc, - sync: Arc, + sync: Arc, ) -> chan::Receiver { self.new_batch_inner(location_id, location_path, file_paths, db, sync, false) .await @@ -220,7 +221,7 @@ impl OldImageLabeler { location_path: PathBuf, file_paths: Vec, db: Arc, - sync: Arc, + sync: Arc, ) -> (BatchToken, chan::Receiver) { self.new_batch_inner(location_id, location_path, file_paths, db, sync, true) .await @@ -291,7 +292,7 @@ impl OldImageLabeler { &self, token: BatchToken, db: Arc, - sync: Arc, + sync: Arc, ) -> Result, ImageLabelerError> { let (tx, rx) = oneshot::channel(); @@ -344,7 +345,7 @@ async fn actor_loop( ResumeBatch( BatchToken, Arc, - Arc, + Arc, oneshot::Sender, ImageLabelerError>>, ), UpdateModel( diff --git 
a/crates/ai/src/old_image_labeler/process.rs b/crates/ai/src/old_image_labeler/process.rs index 125dbe21c..a6554b35b 100644 --- a/crates/ai/src/old_image_labeler/process.rs +++ b/crates/ai/src/old_image_labeler/process.rs @@ -1,5 +1,6 @@ use sd_core_file_path_helper::IsolatedFilePathData; use sd_core_prisma_helpers::file_path_for_media_processor; +use sd_core_sync::SyncManager; use sd_prisma::{ prisma::{file_path, label, label_on_object, object, PrismaClient}, @@ -300,7 +301,7 @@ async fn spawned_process_single_file( chan::Sender, ), db: Arc, - sync: Arc, + sync: Arc, _permit: OwnedSemaphorePermit, ) { let image = @@ -398,7 +399,7 @@ pub async fn assign_labels( object_id: object::id::Type, mut labels: HashSet, db: &PrismaClient, - sync: &sd_core_sync::Manager, + sync: &SyncManager, ) -> Result { let object = db .object() @@ -432,7 +433,7 @@ pub async fn assign_labels( let db_params = labels .into_iter() .map(|name| { - sync_params.extend(sync.shared_create( + sync_params.push(sync.shared_create( prisma_sync::label::SyncId { name: name.clone() }, [(label::date_created::NAME, msgpack!(&date_created))], )); @@ -458,7 +459,7 @@ pub async fn assign_labels( let db_params: Vec<_> = labels_ids .into_iter() .map(|(label_id, name)| { - sync_params.extend(sync.relation_create( + sync_params.push(sync.relation_create( prisma_sync::label_on_object::SyncId { label: prisma_sync::label::SyncId { name }, object: prisma_sync::object::SyncId { diff --git a/crates/ffmpeg/src/frame_decoder.rs b/crates/ffmpeg/src/frame_decoder.rs index 95516b6f2..4a98202e3 100644 --- a/crates/ffmpeg/src/frame_decoder.rs +++ b/crates/ffmpeg/src/frame_decoder.rs @@ -92,7 +92,7 @@ impl FrameDecoder { }) } - pub(crate) fn use_embedded(&mut self) -> bool { + pub(crate) const fn use_embedded(&self) -> bool { self.embedded } diff --git a/crates/sync/src/factory.rs b/crates/sync/src/factory.rs index b029dd8aa..065c665e3 100644 --- a/crates/sync/src/factory.rs +++ b/crates/sync/src/factory.rs @@ -19,6 +19,7 @@ 
macro_rules! msgpack { pub trait OperationFactory { fn get_clock(&self) -> &HLC; + fn get_device_pub_id(&self) -> DevicePubId; fn new_op>( @@ -39,8 +40,8 @@ pub trait OperationFactory { &self, id: impl SyncId, values: impl IntoIterator + 'static, - ) -> Vec { - vec![self.new_op( + ) -> CRDTOperation { + self.new_op( &id, CRDTOperationData::Create( values @@ -48,7 +49,7 @@ pub trait OperationFactory { .map(|(name, value)| (name.to_string(), value)) .collect(), ), - )] + ) } fn shared_update( @@ -74,8 +75,8 @@ pub trait OperationFactory { &self, id: impl RelationSyncId, values: impl IntoIterator + 'static, - ) -> Vec { - vec![self.new_op( + ) -> CRDTOperation { + self.new_op( &id, CRDTOperationData::Create( values @@ -83,7 +84,7 @@ pub trait OperationFactory { .map(|(name, value)| (name.to_string(), value)) .collect(), ), - )] + ) } fn relation_update( &self, From f18b35e72545e2474cd5ad67c27fc5c38c2e24f8 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Wed, 28 Aug 2024 02:03:19 -0300 Subject: [PATCH 095/218] First draft on cloud sync api with rspc --- .../crates/cloud-services/src/cloud_client.rs | 10 + .../cloud-services/src/cloud_p2p/mod.rs | 4 +- .../cloud-services/src/cloud_p2p/runner.rs | 31 +- core/crates/cloud-services/src/error.rs | 2 + .../src/key_manager/key_store.rs | 29 +- .../cloud-services/src/key_manager/mod.rs | 28 ++ core/src/api/cloud/mod.rs | 9 +- core/src/api/cloud/sync_groups.rs | 290 ++++++++++++++++++ 8 files changed, 375 insertions(+), 28 deletions(-) create mode 100644 core/src/api/cloud/sync_groups.rs diff --git a/core/crates/cloud-services/src/cloud_client.rs b/core/crates/cloud-services/src/cloud_client.rs index 986765b5b..50922248c 100644 --- a/core/crates/cloud-services/src/cloud_client.rs +++ b/core/crates/cloud-services/src/cloud_client.rs @@ -307,6 +307,16 @@ impl CloudServices { pub async fn set_cloud_p2p(&self, cloud_p2p: CloudP2P) { self.cloud_p2p.write().await.replace(Arc::new(cloud_p2p)); } + + pub async fn cloud_p2p(&self) -> 
Result, Error> { + self.cloud_p2p + .read() + .await + .as_ref() + .map_or(Err(Error::CloudP2PNotInitialized), |cloud_p2p| { + Ok(Arc::clone(cloud_p2p)) + }) + } } #[cfg(test)] diff --git a/core/crates/cloud-services/src/cloud_p2p/mod.rs b/core/crates/cloud-services/src/cloud_p2p/mod.rs index 49a11807d..f050110ed 100644 --- a/core/crates/cloud-services/src/cloud_p2p/mod.rs +++ b/core/crates/cloud-services/src/cloud_p2p/mod.rs @@ -126,13 +126,13 @@ impl CloudP2P { /// Will panic if the actor channel is closed, which should never happen pub async fn request_join_sync_group( &self, - devices_connection_ids: Vec, + devices_in_group: Vec<(devices::PubId, NodeId)>, req: authorize_new_device_in_sync_group::Request, ) { self.msgs_tx .send_async(runner::Message::Request(runner::Request::JoinSyncGroup { req, - devices_connection_ids, + devices_in_group, })) .await .expect("Channel closed"); diff --git a/core/crates/cloud-services/src/cloud_p2p/runner.rs b/core/crates/cloud-services/src/cloud_p2p/runner.rs index df2be63d5..b8d7fbc02 100644 --- a/core/crates/cloud-services/src/cloud_p2p/runner.rs +++ b/core/crates/cloud-services/src/cloud_p2p/runner.rs @@ -38,7 +38,7 @@ use tokio::{ time::{interval, Instant, MissedTickBehavior}, }; use tokio_stream::wrappers::IntervalStream; -use tracing::{error, warn}; +use tracing::{debug, error, warn}; use super::{JoinSyncGroupResponse, NotifyUser, Ticket, UserResponse}; @@ -54,7 +54,7 @@ pub enum Message { pub enum Request { JoinSyncGroup { req: authorize_new_device_in_sync_group::Request, - devices_connection_ids: Vec, + devices_in_group: Vec<(devices::PubId, NodeId)>, }, } @@ -177,8 +177,8 @@ impl Runner { StreamMessage::Message(Message::Request(Request::JoinSyncGroup { req, - devices_connection_ids, - })) => self.dispatch_join_requests(req, devices_connection_ids, &mut rng), + devices_in_group, + })) => self.dispatch_join_requests(req, devices_in_group, &mut rng), StreamMessage::UserResponse(UserResponse::AcceptDeviceInSyncGroup { 
ticket, @@ -200,7 +200,7 @@ impl Runner { fn dispatch_join_requests( &self, req: authorize_new_device_in_sync_group::Request, - devices_connection_ids: Vec, + devices_in_group: Vec<(devices::PubId, NodeId)>, rng: &mut CryptoRng, ) { async fn inner( @@ -208,14 +208,12 @@ impl Runner { endpoint: Endpoint, mut rng: CryptoRng, req: authorize_new_device_in_sync_group::Request, - devices_connection_ids: Vec, + devices_in_group: Vec<(devices::PubId, NodeId)>, ) -> Result { let group_pub_id = req.sync_group.pub_id; loop { let client = - match connect_to_first_available_client(&endpoint, &devices_connection_ids) - .await - { + match connect_to_first_available_client(&endpoint, &devices_in_group).await { Ok(client) => client, Err(e) => { return Ok(JoinSyncGroupResponse::Failed(e)); @@ -241,6 +239,9 @@ impl Runner { &mut rng, ) .await?; + + // TODO(@fogodev): Figure out a way to dispatch sync related actors now that we have the keys + return Ok(JoinSyncGroupResponse::Accepted { authorizor_device }); } // In case of timeout, we will try again @@ -260,7 +261,7 @@ impl Runner { if let Err(SendError(response)) = notify_user_tx .send_async(NotifyUser::ReceivedJoinSyncGroupResponse { - response: inner(key_manager, endpoint, rng, req, devices_connection_ids) + response: inner(key_manager, endpoint, rng, req, devices_in_group) .await .unwrap_or_else(|e| { error!( @@ -472,14 +473,16 @@ impl Runner { async fn connect_to_first_available_client( endpoint: &Endpoint, - devices_connection_ids: &[NodeId], + devices_in_group: &[(devices::PubId, NodeId)], ) -> Result, Service>, CloudP2PError> { - for device_connection_id in devices_connection_ids { + for (device_pub_id, device_connection_id) in devices_in_group { if let Ok(connection) = endpoint .connect_by_node_id(*device_connection_id, CloudP2PALPN::LATEST) .await - .map_err(|e| error!(?e, "Failed to connect to authorizor device candidate")) - { + .map_err( + |e| error!(?e, %device_pub_id, "Failed to connect to authorizor device 
candidate"), + ) { + debug!(%device_pub_id, "Connected to authorizor device candidate"); return Ok(Client::new(RpcClient::new( QuinnConnection::::from_connection(connection), ))); diff --git a/core/crates/cloud-services/src/error.rs b/core/crates/cloud-services/src/error.rs index 07eefc0db..ebf506185 100644 --- a/core/crates/cloud-services/src/error.rs +++ b/core/crates/cloud-services/src/error.rs @@ -59,6 +59,8 @@ pub enum Error { ConnectToCloudP2PNode(anyhow::Error), #[error("Communication error with Cloud P2P node: {0}")] CloudP2PRpcCommunication(#[from] quic_rpc::pattern::rpc::Error>), + #[error("Cloud P2P not initialized")] + CloudP2PNotInitialized, } #[derive(thiserror::Error, Debug)] diff --git a/core/crates/cloud-services/src/key_manager/key_store.rs b/core/crates/cloud-services/src/key_manager/key_store.rs index 6a3ac7497..be0fe0266 100644 --- a/core/crates/cloud-services/src/key_manager/key_store.rs +++ b/core/crates/cloud-services/src/key_manager/key_store.rs @@ -41,14 +41,22 @@ impl KeyStore { } pub fn add_key(&mut self, group_pub_id: groups::PubId, key: SecretKey) { - let mut hasher = blake3::Hasher::new(); - hasher.update(key.as_ref()); - let hash = hasher.finalize(); + self.keys.entry(group_pub_id).or_default().push_front(( + KeyHash(blake3::hash(key.as_ref()).to_hex().to_string()), + key, + )); + } + pub fn add_key_with_hash( + &mut self, + group_pub_id: groups::PubId, + key: SecretKey, + key_hash: KeyHash, + ) { self.keys .entry(group_pub_id) .or_default() - .push_front((KeyHash(hash.to_hex().to_string()), key)); + .push_front((key_hash, key)); } pub fn add_many_keys( @@ -61,14 +69,17 @@ impl KeyStore { // We reverse the secret keys as a implementation detail to // keep the keys in the same order as they were added as a stack for key in keys.into_iter().rev() { - let mut hasher = blake3::Hasher::new(); - hasher.update(key.as_ref()); - let hash = hasher.finalize(); - - group_entry.push_front((KeyHash(hash.to_hex().to_string()), key)); + 
group_entry.push_front(( + KeyHash(blake3::hash(key.as_ref()).to_hex().to_string()), + key, + )); } } + pub fn remove_group(&mut self, group_pub_id: groups::PubId) { + self.keys.remove(&group_pub_id); + } + pub fn iroh_secret_key(&self) -> IrohSecretKey { self.iroh_secret_key.clone() } diff --git a/core/crates/cloud-services/src/key_manager/mod.rs b/core/crates/cloud-services/src/key_manager/mod.rs index 6264cfa52..79ac7ecaa 100644 --- a/core/crates/cloud-services/src/key_manager/mod.rs +++ b/core/crates/cloud-services/src/key_manager/mod.rs @@ -109,6 +109,34 @@ impl KeyManager { .await } + pub async fn add_key_with_hash( + &self, + group_pub_id: groups::PubId, + key: SecretKey, + key_hash: KeyHash, + rng: &mut CryptoRng, + ) -> Result<(), Error> { + let mut store = self.store.write().await; + store.add_key_with_hash(group_pub_id, key, key_hash); + // Keeping the write lock here, this way we ensure that we can't corrupt the file + store + .encrypt(&self.master_key, rng, &self.keys_file_path) + .await + } + + pub async fn remove_group( + &self, + group_pub_id: groups::PubId, + rng: &mut CryptoRng, + ) -> Result<(), Error> { + let mut store = self.store.write().await; + store.remove_group(group_pub_id); + // Keeping the write lock here, this way we ensure that we can't corrupt the file + store + .encrypt(&self.master_key, rng, &self.keys_file_path) + .await + } + pub async fn add_many_keys( &self, group_pub_id: groups::PubId, diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index da13ca1ab..d4da321f5 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -21,6 +21,7 @@ use super::{Ctx, R}; mod devices; mod libraries; mod locations; +mod sync_groups; async fn try_get_cloud_services_client( node: &Node, @@ -38,6 +39,7 @@ pub(crate) fn mount() -> AlphaRouter { .merge("libraries.", libraries::mount()) .merge("locations.", locations::mount()) .merge("devices.", devices::mount()) + .merge("syncGroups.", sync_groups::mount()) 
.procedure("bootstrap", { R.mutation( |node, (access_token, refresh_token): (auth::AccessToken, auth::RefreshToken)| async move { @@ -69,9 +71,8 @@ pub(crate) fn mount() -> AlphaRouter { let NodeConfig { id, name, os, .. } = node.config.get().await; (devices::PubId(id.into()), name, os) }; - let mut hasher = blake3::Hasher::new(); - hasher.update(device_pub_id.0.as_bytes().as_slice()); - let hashed_pub_id = hasher.finalize(); + + let hashed_pub_id = blake3::hash(device_pub_id.0.as_bytes().as_slice()); let key_manager = match handle_comm_error( client @@ -144,6 +145,8 @@ pub(crate) fn mount() -> AlphaRouter { ) .await; + // TODO(@fogodev): Verify existing sync groups and dispatch sync related actors + Ok(()) }, ) diff --git a/core/src/api/cloud/sync_groups.rs b/core/src/api/cloud/sync_groups.rs new file mode 100644 index 000000000..f471333dd --- /dev/null +++ b/core/src/api/cloud/sync_groups.rs @@ -0,0 +1,290 @@ +use crate::api::{utils::library, Ctx, R}; + +use sd_cloud_schema::{ + auth::AccessToken, + cloud_p2p, devices, libraries, + sync::{groups, KeyHash}, +}; + +use futures_concurrency::future::TryJoin; +use rspc::alpha::AlphaRouter; +use sd_crypto::{cloud::secret_key::SecretKey, CryptoRng, SeedableRng}; +use serde::Deserialize; +use tracing::debug; + +pub fn mount() -> AlphaRouter { + R.router() + .procedure("create", { + R.with2(library()) + .mutation(|(node, library), access_token: AccessToken| async move { + let (client, device_pub_id, mut rng, key_manager) = ( + super::try_get_cloud_services_client(&node), + async { Ok(devices::PubId(node.config.get().await.id.into())) }, + async { + Ok(CryptoRng::from_seed( + node.master_rng.lock().await.generate_fixed(), + )) + }, + node.cloud_services.key_manager(), + ) + .try_join() + .await?; + + let new_key = SecretKey::generate(&mut rng); + let key_hash = KeyHash(blake3::hash(new_key.as_ref()).to_hex().to_string()); + + let groups::create::Response(group_pub_id) = super::handle_comm_error( + client + .sync() + 
.groups() + .create(groups::create::Request { + access_token: access_token.clone(), + key_hash: key_hash.clone(), + library_pub_id: libraries::PubId(library.id), + device_pub_id, + }) + .await, + "Failed to create sync group;", + )??; + + if let Err(e) = key_manager + .add_key_with_hash(group_pub_id, new_key, key_hash, &mut rng) + .await + { + super::handle_comm_error( + client + .sync() + .groups() + .delete(groups::delete::Request { + access_token, + pub_id: group_pub_id, + }) + .await, + "Failed to delete sync group after we failed to store secret key in key manager;", + )??; + + return Err(e.into()); + } + + // TODO(@fogodev): use the group_pub_id to dispatch actors for syncing to this group + + debug!(%group_pub_id, "Created sync group"); + + Ok(()) + }) + }) + .procedure("delete", { + R.mutation(|node, req: groups::delete::Request| async move { + let group_pub_id = req.pub_id; + super::handle_comm_error( + super::try_get_cloud_services_client(&node) + .await? + .sync() + .groups() + .delete(req) + .await, + "Failed to delete sync group;", + )??; + + debug!(%group_pub_id, "Deleted sync group"); + + Ok(()) + }) + }) + .procedure("get", { + R.query(|node, req: groups::get::Request| async move { + let groups::get::Response(group) = super::handle_comm_error( + super::try_get_cloud_services_client(&node) + .await? 
+ .sync() + .groups() + .get(req) + .await, + "Failed to get sync group;", + )??; + + debug!(?group, "Got sync group"); + + Ok(group) + }) + }) + .procedure("leave", { + #[derive(Deserialize, specta::Type)] + struct SyncGroupsLeaveArgs { + access_token: AccessToken, + group_pub_id: groups::PubId, + } + + R.query( + |node, + SyncGroupsLeaveArgs { + access_token, + group_pub_id, + }: SyncGroupsLeaveArgs| async move { + let (device_pub_id, client, key_manager) = ( + async { Ok(node.config.get().await.id) }, + super::try_get_cloud_services_client(&node), + node.cloud_services.key_manager(), + ) + .try_join() + .await?; + + super::handle_comm_error( + client + .sync() + .groups() + .leave(groups::leave::Request { + access_token, + pub_id: group_pub_id, + current_device_pub_id: devices::PubId(device_pub_id.into()), + }) + .await, + "Failed to leave sync group;", + )??; + + let mut rng = + CryptoRng::from_seed(node.master_rng.lock().await.generate_fixed()); + + key_manager.remove_group(group_pub_id, &mut rng).await?; + + debug!(%group_pub_id, "Left sync group"); + + Ok(()) + }, + ) + }) + .procedure("list", { + R.query(|node, req: groups::list::Request| async move { + let groups::list::Response(groups) = super::handle_comm_error( + super::try_get_cloud_services_client(&node) + .await? 
+ .sync() + .groups() + .list(req) + .await, + "Failed to list groups;", + )??; + + debug!(?groups, "Listed sync groups"); + + Ok(groups) + }) + }) + .procedure("remove_device", { + #[derive(Deserialize, specta::Type)] + struct SyncGroupsRemoveDeviceArgs { + access_token: AccessToken, + group_pub_id: groups::PubId, + to_remove_device_pub_id: devices::PubId, + } + R.query( + |node, + SyncGroupsRemoveDeviceArgs { + access_token, + group_pub_id, + to_remove_device_pub_id, + }: SyncGroupsRemoveDeviceArgs| async move { + let (client, current_device_pub_id, mut rng, key_manager) = ( + super::try_get_cloud_services_client(&node), + async { Ok(devices::PubId(node.config.get().await.id.into())) }, + async { + Ok(CryptoRng::from_seed( + node.master_rng.lock().await.generate_fixed(), + )) + }, + node.cloud_services.key_manager(), + ) + .try_join() + .await?; + + let new_key = SecretKey::generate(&mut rng); + let new_key_hash = KeyHash(blake3::hash(new_key.as_ref()).to_hex().to_string()); + + key_manager + .add_key_with_hash(group_pub_id, new_key, new_key_hash.clone(), &mut rng) + .await?; + + super::handle_comm_error( + client + .sync() + .groups() + .remove_device(groups::remove_device::Request { + access_token, + group_pub_id, + new_key_hash, + current_device_pub_id, + to_remove_device_pub_id, + }) + .await, + "Failed to list libraries;", + )??; + + debug!(%to_remove_device_pub_id, %group_pub_id, "Removed device"); + + Ok(()) + }, + ) + }) + .procedure("request_join", { + #[derive(Deserialize, specta::Type)] + struct SyncGroupsRequestJoinArgs { + access_token: AccessToken, + sync_group: groups::GroupWithLibraryAndDevices, + asking_device: devices::Device, + } + + R.mutation( + |node, + SyncGroupsRequestJoinArgs { + access_token, + sync_group, + asking_device, + }: SyncGroupsRequestJoinArgs| async move { + let (client, current_device_pub_id, cloud_p2p) = ( + super::try_get_cloud_services_client(&node), + async { Ok(devices::PubId(node.config.get().await.id.into())) }, + 
node.cloud_services.cloud_p2p(), + ) + .try_join() + .await?; + + let group_pub_id = sync_group.pub_id; + + if asking_device.pub_id != current_device_pub_id { + return Err(rspc::Error::new( + rspc::ErrorCode::BadRequest, + String::from("Asking device must be the current device"), + )); + } + + let groups::request_join::Response(existing_devices) = + super::handle_comm_error( + client + .sync() + .groups() + .request_join(groups::request_join::Request { + access_token, + group_pub_id, + current_device_pub_id, + }) + .await, + "Failed to update library;", + )??; + + cloud_p2p + .request_join_sync_group( + existing_devices, + cloud_p2p::authorize_new_device_in_sync_group::Request { + sync_group, + asking_device, + }, + ) + .await; + + debug!(%group_pub_id, "Requested to join sync group"); + + Ok(()) + }, + ) + }) +} From 54f76c46b30e157c28b447f99214977ffff79a64 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 28 Aug 2024 14:45:22 -0400 Subject: [PATCH 096/218] Updated `core.ts` file --- packages/client/src/core.ts | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index a3aa89a56..587118ebc 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -9,9 +9,11 @@ export type Procedures = { { key: "cloud.devices.list", input: never, result: MockDevice[] } | { key: "cloud.libraries.get", input: LibraryGetRequest, result: Library } | { key: "cloud.libraries.list", input: LibraryListRequest, result: Library[] } | - { key: "cloud.library.get", input: LibraryArgs, result: null } | - { key: "cloud.library.list", input: never, result: null } | { key: "cloud.locations.list", input: LocationListRequest, result: CloudLocation[] } | + { key: "cloud.syncGroups.get", input: SyncGroupGetRequest, result: SyncGroup } | + { key: "cloud.syncGroups.leave", input: SyncGroupsLeaveArgs, result: null } | + { 
key: "cloud.syncGroups.list", input: SyncGroupListRequest, result: SyncGroup[] } | + { key: "cloud.syncGroups.remove_device", input: SyncGroupsRemoveDeviceArgs, result: null } | { key: "ephemeralFiles.getMediaData", input: string, result: MediaData | null } | { key: "files.get", input: LibraryArgs, result: ObjectWithFilePaths2 | null } | { key: "files.getConvertibleImageExtensions", input: never, result: string[] } | @@ -67,12 +69,13 @@ export type Procedures = { { key: "cloud.devices.update", input: DeviceUpdateRequest, result: null } | { key: "cloud.libraries.create", input: LibraryArgs, result: null } | { key: "cloud.libraries.delete", input: LibraryArgs, result: null } | + { key: "cloud.libraries.sync", input: LibraryArgs, result: null } | { key: "cloud.libraries.update", input: LibraryArgs, result: null } | - { key: "cloud.library.create", input: LibraryArgs, result: null } | - { key: "cloud.library.join", input: string, result: null } | - { key: "cloud.library.sync", input: LibraryArgs, result: null } | { key: "cloud.locations.create", input: LocationCreateRequest, result: null } | { key: "cloud.locations.delete", input: LocationDeleteRequest, result: null } | + { key: "cloud.syncGroups.create", input: LibraryArgs, result: null } | + { key: "cloud.syncGroups.delete", input: SyncGroupDeleteRequest, result: null } | + { key: "cloud.syncGroups.request_join", input: SyncGroupsRequestJoinArgs, result: null } | { key: "cloud.userResponse", input: UserResponse, result: null } | { key: "ephemeralFiles.copyFiles", input: LibraryArgs, result: null } | { key: "ephemeralFiles.createFile", input: LibraryArgs, result: string } | @@ -148,8 +151,7 @@ export type Procedures = { { key: "notifications.listen", input: never, result: Notification } | { key: "p2p.events", input: never, result: P2PEvent } | { key: "search.ephemeralPaths", input: LibraryArgs, result: { entries: ExplorerItem[]; errors: Error[] } } | - { key: "sync.active", input: LibraryArgs, result: SyncStatus } | - 
{ key: "sync.newMessage", input: LibraryArgs, result: null } + { key: "sync.active", input: LibraryArgs, result: SyncStatus } }; /** @@ -571,7 +573,7 @@ export type NotificationId = { type: "library"; id: [string, number] } | { type: export type NotificationKind = "info" | "success" | "error" | "warning" -export type NotifyUser = { kind: "ReceivedJoinSyncGroupRequest"; data: { ticket: CloudP2PTicket; asking_device: Device; sync_group: SyncGroup } } | { kind: "ReceivedJoinSyncGroupResponse"; data: { response: JoinSyncGroupResponse; sync_group: SyncGroup } } | { kind: "SendingJoinSyncGroupResponseError"; data: { error: JoinSyncGroupError; sync_group: SyncGroup } } | { kind: "TimedOutJoinRequest"; data: { device: Device; succeeded: boolean } } +export type NotifyUser = { kind: "ReceivedJoinSyncGroupRequest"; data: { ticket: CloudP2PTicket; asking_device: Device; sync_group: SyncGroupWithLibraryAndDevices } } | { kind: "ReceivedJoinSyncGroupResponse"; data: { response: JoinSyncGroupResponse; sync_group: SyncGroupWithLibraryAndDevices } } | { kind: "SendingJoinSyncGroupResponseError"; data: { error: JoinSyncGroupError; sync_group: SyncGroupWithLibraryAndDevices } } | { kind: "TimedOutJoinRequest"; data: { device: Device; succeeded: boolean } } export type Object = { id: number; pub_id: number[]; kind: number | null; key_id: number | null; hidden: boolean | null; favorite: boolean | null; important: boolean | null; note: string | null; date_created: string | null; date_accessed: string | null } @@ -686,10 +688,24 @@ export type Stream = { id: number; name: string | null; codec: Codec | null; asp export type SubtitleProps = { width: number; height: number } -export type SyncGroup = { pub_id: SyncGroupPubId; name: string; latest_key_hash: KeyHash; library: Library; devices: Device[]; created_at: string; updated_at: string } +export type SyncGroup = { pub_id: SyncGroupPubId; name: string; latest_key_hash: KeyHash; library: Library | null; devices: Device[] | null; 
total_sync_messages_bytes: bigint | null; total_space_files_bytes: bigint | null; created_at: string; updated_at: string } + +export type SyncGroupDeleteRequest = { access_token: AccessToken; pub_id: SyncGroupPubId } + +export type SyncGroupGetRequest = { access_token: AccessToken; pub_id: SyncGroupPubId; with_library: boolean; with_devices: boolean; with_used_storage: boolean } + +export type SyncGroupListRequest = { access_token: AccessToken; with_library: boolean; with_devices: boolean } export type SyncGroupPubId = string +export type SyncGroupWithLibraryAndDevices = { pub_id: SyncGroupPubId; name: string; latest_key_hash: KeyHash; library: Library; devices: Device[]; created_at: string; updated_at: string } + +export type SyncGroupsLeaveArgs = { access_token: AccessToken; group_pub_id: SyncGroupPubId } + +export type SyncGroupsRemoveDeviceArgs = { access_token: AccessToken; group_pub_id: SyncGroupPubId; to_remove_device_pub_id: DevicePubId } + +export type SyncGroupsRequestJoinArgs = { access_token: AccessToken; sync_group: SyncGroupWithLibraryAndDevices; asking_device: Device } + export type SyncStatus = { ingest: boolean; cloud_send: boolean; cloud_receive: boolean; cloud_ingest: boolean } export type SystemLocations = { desktop: string | null; documents: string | null; downloads: string | null; pictures: string | null; music: string | null; videos: string | null } From 55f02aa8ed3b48d71e772dde6935fda4868615b3 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 28 Aug 2024 16:42:25 -0400 Subject: [PATCH 097/218] Fixed `ipc` errors --- Cargo.lock | Bin 329403 -> 329403 bytes .../src-tauri/capabilities/default.json | 4 ++-- apps/desktop/src-tauri/tauri.conf.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a651846b729e3fbc21cb80b615e581cbb0bb2a3..501dccadc9c86095158e99c8d7e2d6a9dc63ca01 100644 GIT binary patch delta 70 
zcmdnpE3&&+q@jheg=q_OTfTaViFsS#pv^vQeUWqLHbgk*T?XnTe%^k%gh5xn-(F aqIpu1nfdgEcNoRm59c#)Kb+6P- diff --git a/apps/desktop/src-tauri/capabilities/default.json b/apps/desktop/src-tauri/capabilities/default.json index a06855f57..e3b199d7a 100644 --- a/apps/desktop/src-tauri/capabilities/default.json +++ b/apps/desktop/src-tauri/capabilities/default.json @@ -36,10 +36,10 @@ "url": "http://**" }, { - "url": "http://localhost:9420/" + "url": "http://localhost:9420/*" }, { - "url": "https://plausible.io/" + "url": "https://plausible.io/*" } ] } diff --git a/apps/desktop/src-tauri/tauri.conf.json b/apps/desktop/src-tauri/tauri.conf.json index 0dc0fe09c..2c32c8786 100644 --- a/apps/desktop/src-tauri/tauri.conf.json +++ b/apps/desktop/src-tauri/tauri.conf.json @@ -36,7 +36,7 @@ } ], "security": { - "csp": "default-src webkit-pdfjs-viewer: asset: https://asset.localhost blob: data: filesystem: ws: wss: http: https: tauri: 'unsafe-eval' 'unsafe-inline' 'self' img-src: 'self'" + "csp": "default-src 'self' webkit-pdfjs-viewer: asset: https://asset.localhost blob: data: filesystem: ws: wss: http: https: tauri: 'unsafe-eval' 'unsafe-inline'; img-src 'self' data:; connect-src 'self' tauri://localhost http://localhost:9420 https://plausible.io ipc: http://ipc.localhost;" } }, "bundle": { From 0b569a832b17af83e9195e6417208a0c54484682 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Fri, 30 Aug 2024 03:34:03 -0300 Subject: [PATCH 098/218] Sender actor for cloud sync messages --- Cargo.lock | Bin 329403 -> 329465 bytes core/crates/cloud-services/Cargo.toml | 5 + .../src/{cloud_client.rs => client.rs} | 11 +- core/crates/cloud-services/src/error.rs | 46 +- core/crates/cloud-services/src/lib.rs | 13 +- .../src/{cloud_p2p => p2p}/mod.rs | 0 .../src/{cloud_p2p => p2p}/runner.rs | 2 +- core/crates/cloud-services/src/sync/ingest.rs | 121 ++++ core/crates/cloud-services/src/sync/mod.rs | 116 ++++ .../cloud-services/src}/sync/receive.rs | 43 +- core/crates/cloud-services/src/sync/send.rs | 546 
++++++++++++++++++ .../heavy-lifting/src/file_identifier/job.rs | 8 +- .../heavy-lifting/src/file_identifier/mod.rs | 6 +- .../src/file_identifier/shallow.rs | 2 +- .../src/file_identifier/tasks/identifier.rs | 6 +- .../file_identifier/tasks/object_processor.rs | 6 +- core/crates/heavy-lifting/src/indexer/job.rs | 18 +- .../heavy-lifting/src/indexer/shallow.rs | 8 +- .../heavy-lifting/src/indexer/tasks/saver.rs | 8 +- .../src/indexer/tasks/updater.rs | 8 +- .../heavy-lifting/src/job_system/job.rs | 2 +- .../heavy-lifting/src/media_processor/job.rs | 6 +- .../src/media_processor/shallow.rs | 6 +- .../tasks/media_data_extractor.rs | 10 +- core/crates/sync/Cargo.toml | 1 + core/crates/sync/src/ingest.rs | 57 +- core/crates/sync/src/lib.rs | 30 +- core/crates/sync/src/manager.rs | 68 ++- core/crates/sync/tests/lib.rs | 378 ++++++------ core/crates/sync/tests/mock_instance.rs | 244 ++++---- core/src/api/libraries.rs | 24 +- core/src/api/sync.rs | 10 +- core/src/cloud/mod.rs | 35 -- core/src/cloud/sync/ingest.rs | 124 ---- core/src/cloud/sync/mod.rs | 104 ---- core/src/cloud/sync/send.rs | 139 ----- core/src/context.rs | 4 +- core/src/lib.rs | 1 - core/src/library/library.rs | 44 +- core/src/library/manager/mod.rs | 21 +- core/src/p2p/sync/mod.rs | 92 +-- crates/actors/src/lib.rs | 51 +- crates/ai/src/old_image_labeler/old_actor.rs | 14 +- crates/ai/src/old_image_labeler/process.rs | 2 +- crates/crypto/src/cloud/encrypt.rs | 9 +- crates/crypto/src/cloud/mod.rs | 4 + crates/crypto/src/primitives.rs | 3 +- crates/crypto/src/rng/csprng.rs | 2 +- crates/sync/src/factory.rs | 17 +- 49 files changed, 1483 insertions(+), 992 deletions(-) rename core/crates/cloud-services/src/{cloud_client.rs => client.rs} (97%) rename core/crates/cloud-services/src/{cloud_p2p => p2p}/mod.rs (100%) rename core/crates/cloud-services/src/{cloud_p2p => p2p}/runner.rs (99%) create mode 100644 core/crates/cloud-services/src/sync/ingest.rs create mode 100644 core/crates/cloud-services/src/sync/mod.rs 
rename core/{src/cloud => crates/cloud-services/src}/sync/receive.rs (85%) create mode 100644 core/crates/cloud-services/src/sync/send.rs delete mode 100644 core/src/cloud/mod.rs delete mode 100644 core/src/cloud/sync/ingest.rs delete mode 100644 core/src/cloud/sync/mod.rs delete mode 100644 core/src/cloud/sync/send.rs diff --git a/Cargo.lock b/Cargo.lock index 501dccadc9c86095158e99c8d7e2d6a9dc63ca01..2aa8fbff46b566151ff90bdaa7cd5c57241d0b03 100644 GIT binary patch delta 133 zcmV;00DAwsjTHHf6o9k=RF0RGQvn@^agG7EagG8N;U_jQH8W;0Gi5O{H8Wx}Wi~f8 zH)J>, get_cloud_api_address: Url, @@ -128,6 +128,11 @@ impl CloudServices { self.notify_user_rx.stream() } + #[must_use] + pub const fn http_client(&self) -> &ClientWithMiddleware { + &self.http_client + } + /// Send back a user response to the Cloud P2P actor /// /// # Panics diff --git a/core/crates/cloud-services/src/error.rs b/core/crates/cloud-services/src/error.rs index ebf506185..3d9b819c8 100644 --- a/core/crates/cloud-services/src/error.rs +++ b/core/crates/cloud-services/src/error.rs @@ -1,12 +1,16 @@ -use sd_cloud_schema::cloud_p2p::Service; +use sd_cloud_schema::{cloud_p2p, sync::groups, Service}; use sd_utils::error::FileIOError; use std::{io, net::AddrParseError}; -use quic_rpc::transport::quinn::QuinnConnection; +use quic_rpc::{ + pattern::{bidi_streaming, rpc}, + transport::quinn::QuinnConnection, +}; #[derive(thiserror::Error, Debug)] pub enum Error { + // Setup errors #[error("Couldn't parse Cloud Services API address URL: {0}")] InvalidUrl(reqwest_middleware::reqwest::Error), #[error("Failed to initialize http client: {0}")] @@ -58,9 +62,45 @@ pub enum Error { #[error("Failed to connect to Cloud P2P node: {0}")] ConnectToCloudP2PNode(anyhow::Error), #[error("Communication error with Cloud P2P node: {0}")] - CloudP2PRpcCommunication(#[from] quic_rpc::pattern::rpc::Error>), + CloudP2PRpcCommunication(#[from] rpc::Error>), #[error("Cloud P2P not initialized")] CloudP2PNotInitialized, + + // Communication errors 
+ #[error("Failed to communicate with RPC backend: {0}")] + RpcCommunication(#[from] rpc::Error>), + #[error("Failed to communicate with Bidi Streaming RPC backend: {0}")] + BidiStreamCommunication(#[from] bidi_streaming::Error>), + #[error("Failed to receive next response from Bidi Streaming RPC backend: {0}")] + BidiStreamRecv(#[from] bidi_streaming::ItemError>), + #[error("Error from backend: {0}")] + Backend(#[from] sd_cloud_schema::Error), + #[error("Failed to get access token from refresher: {0}")] + GetToken(#[from] GetTokenError), + #[error("Unexpected empty response from backend, context: {0}")] + EmptyResponse(&'static str), + #[error("Unexpected response from backend, context: {0}")] + UnexpectedResponse(&'static str), + + // Sync error + #[error("Sync error: {0}")] + Sync(#[from] sd_core_sync::Error), + #[error("Tried to sync messages with a group without having needed key")] + MissingSyncGroupKey(groups::PubId), + #[error("Failed to encrypt sync messages: {0}")] + Encrypt(sd_crypto::Error), + #[error("Failed to decrypt sync messages: {0}")] + Decrypt(sd_crypto::Error), + #[error("Failed to upload sync messages: {0}")] + UploadSyncMessages(reqwest_middleware::Error), + #[error("Received an error response from uploading sync messages: {0}")] + ErrorResponseUploadSyncMessages(reqwest_middleware::reqwest::Error), + #[error("Critical error while uploading sync messages")] + CriticalErrorWhileUploadingSyncMessages, + #[error("Failed to send End update to push sync messages")] + EndUpdatePushSyncMessages(io::Error), + #[error("Unexpected end of stream while encrypting sync messages")] + UnexpectedEndOfStream, } #[derive(thiserror::Error, Debug)] diff --git a/core/crates/cloud-services/src/lib.rs b/core/crates/cloud-services/src/lib.rs index 1e84e9ef3..d4fa13741 100644 --- a/core/crates/cloud-services/src/lib.rs +++ b/core/crates/cloud-services/src/lib.rs @@ -30,15 +30,20 @@ mod error; -mod cloud_client; -mod cloud_p2p; +mod client; mod key_manager; +mod p2p; 
+mod sync; mod token_refresher; -pub use cloud_client::CloudServices; -pub use cloud_p2p::{CloudP2P, JoinSyncGroupResponse, NotifyUser, Ticket, UserResponse}; +pub use client::CloudServices; pub use error::{Error, GetTokenError}; pub use key_manager::KeyManager; +pub use p2p::{CloudP2P, JoinSyncGroupResponse, NotifyUser, Ticket, UserResponse}; +pub use sync::{ + declare_actors as declare_cloud_sync, SyncActors as CloudSyncActors, + SyncActorsState as CloudSyncActorsState, +}; // Re-exports pub use iroh_base::key::{NodeId, SecretKey as IrohSecretKey}; diff --git a/core/crates/cloud-services/src/cloud_p2p/mod.rs b/core/crates/cloud-services/src/p2p/mod.rs similarity index 100% rename from core/crates/cloud-services/src/cloud_p2p/mod.rs rename to core/crates/cloud-services/src/p2p/mod.rs diff --git a/core/crates/cloud-services/src/cloud_p2p/runner.rs b/core/crates/cloud-services/src/p2p/runner.rs similarity index 99% rename from core/crates/cloud-services/src/cloud_p2p/runner.rs rename to core/crates/cloud-services/src/p2p/runner.rs index b8d7fbc02..30194eb18 100644 --- a/core/crates/cloud-services/src/cloud_p2p/runner.rs +++ b/core/crates/cloud-services/src/p2p/runner.rs @@ -1,5 +1,5 @@ use crate::{ - cloud_p2p::JoinSyncGroupError, token_refresher::TokenRefresher, CloudServices, Error, + p2p::JoinSyncGroupError, token_refresher::TokenRefresher, CloudServices, Error, KeyManager, }; diff --git a/core/crates/cloud-services/src/sync/ingest.rs b/core/crates/cloud-services/src/sync/ingest.rs new file mode 100644 index 000000000..1522a04ea --- /dev/null +++ b/core/crates/cloud-services/src/sync/ingest.rs @@ -0,0 +1,121 @@ +use sd_core_sync::SyncManager; + +use sd_actors::Stopper; +use sd_prisma::prisma::cloud_crdt_operation; + +use std::{ + future::IntoFuture, + pin::pin, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + +use futures::{FutureExt, StreamExt}; +use futures_concurrency::future::Race; +use tokio::sync::Notify; +use tracing::debug; + +// Responsible 
for taking sync operations received from the cloud, +// and applying them to the local database via the sync system's ingest actor. + +pub async fn run_actor( + sync: SyncManager, + notify: Arc, + state: Arc, + state_notify: Arc, + stop: Stopper, +) { + enum Race { + Notified, + Stopped, + } + + loop { + state.store(true, Ordering::Relaxed); + state_notify.notify_waiters(); + + // { + // let mut rx = pin!(sync.ingest.req_rx.clone()); + + // if sync + // .ingest + // .event_tx + // .send(sd_core_sync::Event::Notification) + // .await + // .is_ok() + // { + // while let Some(req) = rx.next().await { + // const OPS_PER_REQUEST: u32 = 1000; + + // // FIXME: If there are exactly a multiple of OPS_PER_REQUEST operations, + // // then this will bug, as we sent `has_more` as true, but we don't have + // // more operations to send. + + // use sd_core_sync::*; + + // let timestamps = match req { + // Request::FinishedIngesting => { + // break; + // } + // Request::Messages { timestamps, .. } => timestamps, + // }; + + // let (ops_ids, ops): (Vec<_>, Vec<_>) = + // err_break!(sync.get_cloud_ops(OPS_PER_REQUEST, timestamps,).await) + // .into_iter() + // .unzip(); + + // if ops.is_empty() { + // break; + // } + + // debug!( + // messages_count = ops.len(), + // first_message = ?ops.first().map(|operation| operation.timestamp.as_u64()), + // last_message = ?ops.last().map(|operation| operation.timestamp.as_u64()), + // "Sending messages to ingester", + // ); + + // let (wait_tx, wait_rx) = tokio::sync::oneshot::channel::<()>(); + + // err_break!( + // sync.ingest + // .event_tx + // .send(sd_core_sync::Event::Messages(MessagesEvent { + // device_pub_id: sync.device_pub_id.clone(), + // has_more: ops.len() == OPS_PER_REQUEST as usize, + // messages: CompressedCRDTOperationsPerModelPerDevice::new(ops), + // wait_tx: Some(wait_tx) + // })) + // .await + // ); + + // err_break!(wait_rx.await); + + // err_break!( + // sync.db + // .cloud_crdt_operation() + // 
.delete_many(vec![cloud_crdt_operation::id::in_vec(ops_ids)]) + // .exec() + // .await + // ); + // } + // } + // } + + state.store(false, Ordering::Relaxed); + state_notify.notify_waiters(); + + if let Race::Stopped = ( + notify.notified().map(|()| Race::Notified), + stop.into_future().map(|()| Race::Stopped), + ) + .race() + .await + { + break; + } + } +} diff --git a/core/crates/cloud-services/src/sync/mod.rs b/core/crates/cloud-services/src/sync/mod.rs new file mode 100644 index 000000000..5c20efda4 --- /dev/null +++ b/core/crates/cloud-services/src/sync/mod.rs @@ -0,0 +1,116 @@ +use crate::CloudServices; + +use sd_actors::ActorsCollection; +use sd_cloud_schema::sync::groups; +use sd_core_sync::SyncManager; + +use sd_crypto::CryptoRng; +use sd_prisma::prisma::PrismaClient; + +use std::{ + fmt, + sync::{atomic::AtomicBool, Arc}, +}; + +use tokio::sync::Notify; + +pub mod ingest; +pub mod receive; +pub mod send; + +#[derive(Default)] +pub struct SyncActorsState { + pub send_active: Arc, + pub receive_active: Arc, + pub ingest_active: Arc, + pub notifier: Arc, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, specta::Type)] +#[specta(rename = "CloudSyncActors")] +pub enum SyncActors { + Ingester, + Sender, + Receiver, +} + +impl fmt::Display for SyncActors { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Ingester => write!(f, "Cloud Sync Ingester"), + Self::Sender => write!(f, "Cloud Sync Sender"), + Self::Receiver => write!(f, "Cloud Sync Receiver"), + } + } +} + +pub async fn declare_actors( + cloud_services: Arc, + actors: &ActorsCollection, + actors_state: &SyncActorsState, + sync_group_pub_id: groups::PubId, + sync: SyncManager, + db: Arc, + rng: CryptoRng, +) { + let ingest_notify = Arc::new(Notify::new()); + + actors + .declare(SyncActors::Sender, { + let sync = sync.clone(); + let cloud_services = Arc::clone(&cloud_services); + let active = Arc::clone(&actors_state.send_active); + let active_notifier = 
Arc::clone(&actors_state.notifier); + + move |stop| { + send::run_actor( + sync_group_pub_id, + sync, + cloud_services, + active, + active_notifier, + rng, + stop, + ) + } + }) + .await; + + actors + .declare(SyncActors::Receiver, { + let sync = sync.clone(); + let cloud_services = cloud_services.clone(); + let db = Arc::clone(&db); + let active = Arc::clone(&actors_state.receive_active); + let ingest_notify = Arc::clone(&ingest_notify); + let active_notifier = Arc::clone(&actors_state.notifier); + + move |stop| { + receive::run_actor( + db, + sync_group_pub_id, + cloud_services, + sync, + ingest_notify, + (active, active_notifier), + stop, + ) + } + }) + .await; + + // actors + // .declare( + // "Cloud Sync Ingest", + // { + // let active = state.ingest_active.clone(); + // let active_notifier = state.notifier.clone(); + + // move |stop| { + // ingest::run_actor(sync.clone(), ingest_notify, active, active_notifier, stop) + // } + // }, + // autorun, + // ) + // .await; +} diff --git a/core/src/cloud/sync/receive.rs b/core/crates/cloud-services/src/sync/receive.rs similarity index 85% rename from core/src/cloud/sync/receive.rs rename to core/crates/cloud-services/src/sync/receive.rs index 908eee018..d3c597f62 100644 --- a/core/src/cloud/sync/receive.rs +++ b/core/crates/cloud-services/src/sync/receive.rs @@ -1,11 +1,10 @@ -use crate::{library::Libraries, Node}; +use crate::CloudServices; -use sd_core_sync::{DevicePubId, SyncManager}; +use sd_cloud_schema::sync::groups; +use sd_core_sync::{cloud_crdt_op_db, CRDTOperation, DevicePubId, SyncManager}; use sd_actors::Stopper; -use sd_p2p::RemoteIdentity; use sd_prisma::prisma::{cloud_crdt_operation, device, instance, PrismaClient}; -use sd_sync::CRDTOperation; use sd_utils::uuid_to_bytes; use std::{ @@ -21,17 +20,13 @@ use uuid::Uuid; // Responsible for downloading sync operations from the cloud to be processed by the ingester -#[allow(clippy::too_many_arguments)] pub async fn run_actor( - libraries: Arc, db: Arc, - 
library_id: Uuid, - instance_uuid: Uuid, - sync: Arc, + sync_group_pub_id: groups::PubId, + cloud_services: Arc, + sync: SyncManager, ingest_notify: Arc, - node: Arc, - active: Arc, - active_notify: Arc, + (active, active_notify): (Arc, Arc), stop: Stopper, ) { // enum Race { @@ -228,24 +223,16 @@ pub async fn run_actor( // } } -async fn write_cloud_ops_to_db( +pub async fn write_cloud_ops_to_db( ops: Vec, db: &PrismaClient, -) -> Result<(), prisma_client_rust::QueryError> { - db._batch(ops.into_iter().map(|op| crdt_op_db(&op).to_query(db))) - .await?; +) -> Result<(), sd_core_sync::Error> { + db._batch( + ops.into_iter() + .map(|op| cloud_crdt_op_db(&op).map(|op| op.to_query(db))) + .collect::, _>>()?, + ) + .await?; Ok(()) } - -fn crdt_op_db(op: &CRDTOperation) -> cloud_crdt_operation::Create { - cloud_crdt_operation::Create { - timestamp: op.timestamp.0 as i64, - device: device::pub_id::equals(uuid_to_bytes(&op.device_pub_id)), - kind: op.data.as_kind().to_string(), - data: to_vec(&op.data).expect("unable to serialize data"), - model: op.model_id as i32, - record_id: rmp_serde::to_vec(&op.record_id).expect("unable to serialize record id"), - _params: vec![], - } -} diff --git a/core/crates/cloud-services/src/sync/send.rs b/core/crates/cloud-services/src/sync/send.rs new file mode 100644 index 000000000..5ad300c7d --- /dev/null +++ b/core/crates/cloud-services/src/sync/send.rs @@ -0,0 +1,546 @@ +use crate::{CloudServices, Error}; + +use sd_core_sync::{SyncEvent, SyncManager, NTP64}; + +use sd_actors::Stopper; +use sd_cloud_schema::{ + devices, + sync::{self, groups, messages}, + Service, +}; +use sd_crypto::{ + cloud::{OneShotEncryption, SecretKey, StreamEncryption}, + primitives::EncryptedBlock, + CryptoRng, SeedableRng, +}; + +use std::{ + future::IntoFuture, + num::NonZero, + pin::{pin, Pin}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + +use async_stream::try_stream; +use chrono::{DateTime, Utc}; 
+use futures::{FutureExt, SinkExt, Stream, StreamExt, TryStream}; +use futures_concurrency::future::{Race, TryJoin}; +use quic_rpc::{client::UpdateSink, pattern::bidi_streaming, transport::quinn::QuinnConnection}; +use reqwest_middleware::reqwest::{header, Body}; +use tokio::{ + io, spawn, + sync::{broadcast, oneshot, Notify, Semaphore}, + time::sleep, +}; +use tracing::{debug, error}; +use uuid::Uuid; + +const TEN_SECONDS: Duration = Duration::from_secs(10); +const THIRTY_SECONDS: Duration = Duration::from_secs(30); +const ONE_MINUTE: Duration = Duration::from_secs(60); + +enum RaceNotifiedOrStopped { + Notified, + Stopped, +} + +type LatestTimestamp = NTP64; + +pub async fn run_actor( + sync_group_pub_id: groups::PubId, + sync: SyncManager, + cloud_services: Arc, + is_active: Arc, + state_notify: Arc, + mut rng: CryptoRng, + stop: Stopper, +) { + let mut maybe_latest_timestamp = None; + + loop { + is_active.store(true, Ordering::Relaxed); + state_notify.notify_waiters(); + + let res = run_loop_iteration( + sync_group_pub_id, + &sync, + &cloud_services, + &mut rng, + &maybe_latest_timestamp, + ) + .await; + + is_active.store(false, Ordering::Relaxed); + state_notify.notify_waiters(); + + match res { + Ok(timestamp) => { + maybe_latest_timestamp = Some(timestamp); + } + Err(e) => { + error!(?e, "Error during cloud sync sender actor iteration"); + sleep(ONE_MINUTE).await; + continue; + } + } + + if let RaceNotifiedOrStopped::Stopped = ( + // recreate subscription each time so that existing messages are dropped + wait_notification(sync.subscribe()), + stop.into_future().map(|()| RaceNotifiedOrStopped::Stopped), + ) + .race() + .await + { + break; + } + + sleep(TEN_SECONDS).await; + } +} + +async fn run_loop_iteration( + sync_group_pub_id: groups::PubId, + sync: &SyncManager, + cloud_services: &CloudServices, + rng: &mut CryptoRng, + maybe_latest_timestamp: &Option, +) -> Result { + let current_device_pub_id = devices::PubId(Uuid::from(&sync.device_pub_id)); + + let 
(cloud_client, key_manager) = (cloud_services.client(), cloud_services.key_manager()) + .try_join() + .await?; + + let (key_hash, secret_key) = key_manager + .get_latest_key(sync_group_pub_id) + .await + .ok_or(Error::MissingSyncGroupKey(sync_group_pub_id))?; + + let current_latest_timestamp = if let Some(latest_timestamp) = maybe_latest_timestamp { + *latest_timestamp + } else { + let messages::get_latest_time::Response { + latest_time, + latest_device_pub_id, + } = cloud_client + .sync() + .messages() + .get_latest_time(messages::get_latest_time::Request { + access_token: cloud_services.token_refresher.get_access_token().await?, + group_pub_id: sync_group_pub_id, + current_device_pub_id, + kind: messages::get_latest_time::Kind::ForCurrentDevice, + }) + .await??; + + assert_eq!(latest_device_pub_id, current_device_pub_id); + + LatestTimestamp::from( + SystemTime::from(latest_time) + .duration_since(UNIX_EPOCH) + .expect("hardcoded earlier time, nothing is earlier than UNIX_EPOCH"), + ) + }; + + let mut crdt_ops_stream = + pin!(sync.stream_device_ops(&sync.device_pub_id, 1000, current_latest_timestamp)); + + let mut new_latest_timestamp = current_latest_timestamp; + while let Some(ops_res) = crdt_ops_stream.next().await { + let ops = ops_res?; + + let (Some(first), Some(last)) = (ops.first(), ops.last()) else { + break; + }; + + let operations_count = ops.len() as u32; + + new_latest_timestamp = last.timestamp; + + let start_time = DateTime::::from(first.timestamp.to_system_time()); + let end_time = DateTime::::from(last.timestamp.to_system_time()); + + let messages_bytes = postcard::to_stdvec(&ops)?; + + let (mut push_updates, mut push_responses) = cloud_client + .sync() + .messages() + .push(messages::push::Request { + access_token: cloud_services.token_refresher.get_access_token().await?, + group_pub_id: sync_group_pub_id, + device_pub_id: current_device_pub_id, + key_hash: key_hash.clone(), + operations_count, + start_time, + end_time, + expected_blob_size: 
messages_bytes.len() as u64, + }) + .await?; + + let Some(response) = push_responses.next().await else { + return Err(Error::EmptyResponse("push initial response")); + }; + + let messages::push::Response(response_kind) = response??; + + match response_kind { + messages::push::ResponseKind::SinglePresignedUrl(url) => { + upload_to_single_url( + url, + secret_key.clone(), + cloud_services.http_client(), + messages_bytes, + rng, + ) + .await? + } + messages::push::ResponseKind::ManyPresignedUrls(urls) => { + upload_to_many_urls( + urls, + secret_key.clone(), + cloud_services.http_client().clone(), + messages_bytes, + rng, + &mut push_updates, + &mut push_responses, + ) + .await? + } + messages::push::ResponseKind::Pong => { + return Err(Error::UnexpectedResponse( + "Pong on first messages push request", + )) + } + messages::push::ResponseKind::End => { + return Err(Error::UnexpectedResponse( + "End on first messages push request", + )) + } + } + + push_updates + .send(messages::push::RequestUpdate( + messages::push::UpdateKind::End, + )) + .await + .map_err(Error::EndUpdatePushSyncMessages)?; + + let Some(response) = push_responses.next().await else { + return Err(Error::EmptyResponse("push initial response")); + }; + + let messages::push::Response(response_kind) = response??; + + match response_kind { + messages::push::ResponseKind::SinglePresignedUrl(_) + | messages::push::ResponseKind::ManyPresignedUrls(_) => { + return Err(Error::UnexpectedResponse( + "Urls responses on final messages push response", + )) + } + messages::push::ResponseKind::Pong => { + return Err(Error::UnexpectedResponse( + "Pong on final message push response", + )) + } + messages::push::ResponseKind::End => { + /* + Everything is awesome! 
+ */ + } + } + } + + Ok(new_latest_timestamp) +} + +async fn upload_to_many_urls( + urls: Vec, + secret_key: SecretKey, + http_client: reqwest_middleware::ClientWithMiddleware, + messages_bytes: Vec, + rng: &mut CryptoRng, + push_updates: &mut UpdateSink< + Service, + QuinnConnection, + messages::push::RequestUpdate, + sync::Service, + >, + push_responses: &mut Pin< + Box< + dyn Stream< + Item = Result< + Result, + bidi_streaming::ItemError>, + >, + > + Send + + Sync, + >, + >, +) -> Result<(), Error> { + let stop_ping_pong = Arc::new(AtomicBool::new(false)); + let (out_tx, mut out_rx) = oneshot::channel(); + let rng = CryptoRng::from_seed(rng.generate_fixed()); + + let handle = spawn(handle_multipart_upload( + urls, + secret_key, + http_client, + messages_bytes, + rng, + Arc::clone(&stop_ping_pong), + out_tx, + )); + + loop { + if stop_ping_pong.load(Ordering::Acquire) { + break; + } + + if let Err(e) = push_updates + .send(messages::push::RequestUpdate( + messages::push::UpdateKind::Ping, + )) + .await + { + error!(?e, "Failed to send push ping update"); + sleep(TEN_SECONDS).await; + continue; + } + + let Some(response) = push_responses.next().await else { + error!("Empty response from push ping response"); + continue; + }; + + match response { + Ok(Ok(messages::push::Response( + messages::push::ResponseKind::SinglePresignedUrl(_) + | messages::push::ResponseKind::ManyPresignedUrls(_), + ))) => { + unreachable!("can't receive url if we didn't send an initial request") + } + + Ok(Ok(messages::push::Response(messages::push::ResponseKind::Pong))) => { + /* + Everything is awesome! 
+ */ + } + Ok(Ok(messages::push::Response(messages::push::ResponseKind::End))) => { + unreachable!("Can't receive an End if we didn't send an End first"); + } + + Ok(Err(e)) => { + error!(?e, "Error from push ping response"); + sleep(TEN_SECONDS).await; + continue; + } + + Err(e) => { + error!(?e, "Error from push ping response"); + sleep(TEN_SECONDS).await; + continue; + } + } + + if stop_ping_pong.load(Ordering::Acquire) { + break; + } + + sleep(THIRTY_SECONDS).await; + } + + let Ok(out) = out_rx.try_recv() else { + // SAFETY: This try_recv error can only happen if the upload task panicked + // so we're good to unwrap the error. + let e = handle.await.unwrap_err(); + error!(?e, "Critical error while uploading sync messages"); + return Err(Error::CriticalErrorWhileUploadingSyncMessages); + }; + + out +} + +async fn handle_multipart_upload( + urls: Vec, + secret_key: SecretKey, + http_client: reqwest_middleware::ClientWithMiddleware, + messages_bytes: Vec, + rng: CryptoRng, + stop_ping_pong: Arc, + out_tx: oneshot::Sender>, +) { + async fn inner( + urls: Vec, + secret_key: SecretKey, + http_client: reqwest_middleware::ClientWithMiddleware, + messages_bytes: Vec, + mut rng: CryptoRng, + ) -> Result<(), Error> { + let urls_count = urls.len(); + let message_size = messages_bytes.len(); + let blocks_per_url = message_size / urls_count / EncryptedBlock::PLAIN_TEXT_SIZE; + let cipher_text_size = StreamEncryption::cipher_text_size(&secret_key, message_size); + + let parallel_upload_semaphore = Arc::new(Semaphore::new( + std::thread::available_parallelism() + .map(NonZero::get) + .unwrap_or(1), + )); + + // If we're uploading to many URLs, it implies that the message size is bigger than a single + // encryption block, so we always use stream encryption. 
+ + let mut buffers = vec![Vec::with_capacity(cipher_text_size / urls_count); urls_count]; + let (nonce, cipher_stream) = + StreamEncryption::encrypt(&secret_key, messages_bytes.as_slice(), &mut rng); + + buffers[0].extend_from_slice(&nonce); + + let mut cipher_stream = pin!(cipher_stream); + + let mut handles = Vec::with_capacity(urls_count); + + for (idx, (mut buffer, url)) in buffers.into_iter().zip(urls).enumerate() { + for _ in 0..blocks_per_url { + if let Some(cipher_res) = cipher_stream.next().await { + buffer.extend(cipher_res.map_err(Error::Encrypt)?); + } else { + return Err(Error::UnexpectedEndOfStream); + } + } + + handles.push(spawn(upload_part( + idx, + url, + http_client.clone(), + buffer, + Arc::clone(¶llel_upload_semaphore), + ))); + } + + assert!( + cipher_stream.next().await.is_none(), + "Unexpected ciphered bytes still on stream" + ); + + handles.try_join().await.map_err(|e| { + error!(?e, "Error while uploading sync messages"); + Error::CriticalErrorWhileUploadingSyncMessages + })?; + + Ok(()) + } + + let res = inner(urls, secret_key, http_client, messages_bytes, rng).await; + stop_ping_pong.store(true, Ordering::Release); + out_tx + .send(res) + .expect("upload output channel never closes"); +} + +async fn upload_part( + idx: usize, + url: reqwest::Url, + http_client: reqwest_middleware::ClientWithMiddleware, + buffer: Vec, + parallel_upload_semaphore: Arc, +) -> Result<(), Error> { + let _permit = parallel_upload_semaphore + .acquire() + .await + .expect("Semaphore never closes"); + + let response = http_client + .put(url) + .header(header::CONTENT_LENGTH, buffer.len()) + .body(buffer) + .send() + .await + .map_err(Error::UploadSyncMessages)? 
+ .error_for_status() + .map_err(Error::ErrorResponseUploadSyncMessages)?; + + debug!(?response, idx, "Uploaded sync messages part"); + + Ok(()) +} + +async fn upload_to_single_url( + url: reqwest::Url, + secret_key: SecretKey, + http_client: &reqwest_middleware::ClientWithMiddleware, + messages_bytes: Vec, + rng: &mut CryptoRng, +) -> Result<(), Error> { + let (cipher_text_size, body) = if messages_bytes.len() > EncryptedBlock::PLAIN_TEXT_SIZE { + let EncryptedBlock { nonce, cipher_text } = + OneShotEncryption::encrypt(&secret_key, messages_bytes.as_slice(), rng) + .map_err(Error::Encrypt)?; + + ( + nonce.len() + cipher_text.len(), + Body::wrap_stream(futures::stream::iter([ + Ok::<_, io::Error>(nonce.to_vec()), + Ok(cipher_text), + ])), + ) + } else { + let mut rng = CryptoRng::from_seed(rng.generate_fixed()); + ( + StreamEncryption::cipher_text_size(&secret_key, messages_bytes.len()), + Body::wrap_stream(stream_encryption(secret_key, messages_bytes, &mut rng)), + ) + }; + + let response = http_client + .put(url) + .header(header::CONTENT_LENGTH, cipher_text_size) + .body(body) + .send() + .await + .map_err(Error::UploadSyncMessages)? + .error_for_status() + .map_err(Error::ErrorResponseUploadSyncMessages)?; + + debug!(?response, "Uploaded sync messages"); + + Ok(()) +} + +fn stream_encryption( + secret_key: SecretKey, + messages_bytes: Vec, + rng: &mut CryptoRng, +) -> impl TryStream, Error = Error> + Send + 'static { + let mut rng = CryptoRng::from_seed(rng.generate_fixed()); + + try_stream! 
{ + let (nonce, cipher_stream) = + StreamEncryption::encrypt(&secret_key, messages_bytes.as_slice(), &mut rng); + + let mut cipher_stream = pin!(cipher_stream); + + yield nonce.to_vec(); + + while let Some(res) = cipher_stream.next().await { + yield res.map_err(Error::Encrypt)?; + } + } +} + +async fn wait_notification(mut rx: broadcast::Receiver) -> RaceNotifiedOrStopped { + // wait until Created message comes in + loop { + if let Ok(SyncEvent::Created) = rx.recv().await { + break; + }; + } + + RaceNotifiedOrStopped::Notified +} diff --git a/core/crates/heavy-lifting/src/file_identifier/job.rs b/core/crates/heavy-lifting/src/file_identifier/job.rs index a90c2ea6a..3563605ce 100644 --- a/core/crates/heavy-lifting/src/file_identifier/job.rs +++ b/core/crates/heavy-lifting/src/file_identifier/job.rs @@ -128,14 +128,14 @@ impl Job for FileIdentifier { match task_kind { TaskKind::Identifier => tasks::Identifier::deserialize( &task_bytes, - (Arc::clone(ctx.db()), Arc::clone(ctx.sync())), + (Arc::clone(ctx.db()), ctx.sync().clone()), ) .await .map(IntoTask::into_task), TaskKind::ObjectProcessor => tasks::ObjectProcessor::deserialize( &task_bytes, - (Arc::clone(ctx.db()), Arc::clone(ctx.sync())), + (Arc::clone(ctx.db()), ctx.sync().clone()), ) .await .map(IntoTask::into_task), @@ -702,7 +702,7 @@ impl FileIdentifier { orphan_paths, true, Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), )) .await?, ); @@ -785,7 +785,7 @@ impl FileIdentifier { orphan_paths, false, Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), )) .await?, ); diff --git a/core/crates/heavy-lifting/src/file_identifier/mod.rs b/core/crates/heavy-lifting/src/file_identifier/mod.rs index 9d7d2833a..b24077835 100644 --- a/core/crates/heavy-lifting/src/file_identifier/mod.rs +++ b/core/crates/heavy-lifting/src/file_identifier/mod.rs @@ -217,7 +217,7 @@ where .dispatch(tasks::ObjectProcessor::new( HashMap::from([(cas_id, objects_to_create_or_link)]), Arc::clone(ctx.db()), 
- Arc::clone(ctx.sync()), + ctx.sync().clone(), with_priority, )) .await?, @@ -239,7 +239,7 @@ where .dispatch(tasks::ObjectProcessor::new( mem::take(&mut current_batch), Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), with_priority, )) .await?, @@ -256,7 +256,7 @@ where .dispatch(tasks::ObjectProcessor::new( current_batch, Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), with_priority, )) .await?, diff --git a/core/crates/heavy-lifting/src/file_identifier/shallow.rs b/core/crates/heavy-lifting/src/file_identifier/shallow.rs index cd165867d..a7522cf9d 100644 --- a/core/crates/heavy-lifting/src/file_identifier/shallow.rs +++ b/core/crates/heavy-lifting/src/file_identifier/shallow.rs @@ -103,7 +103,7 @@ pub async fn shallow( orphan_paths, true, Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), )) .await else { diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs b/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs index 2f3d0d7d7..c1015b6e7 100644 --- a/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs +++ b/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs @@ -72,7 +72,7 @@ pub struct Identifier { // Dependencies db: Arc, - sync: Arc, + sync: SyncManager, } /// Output from the `[Identifier]` task @@ -324,7 +324,7 @@ impl Identifier { file_paths: Vec, with_priority: bool, db: Arc, - sync: Arc, + sync: SyncManager, ) -> Self { let mut output = Output::default(); @@ -512,7 +512,7 @@ impl SerializableTask for Identifier { type DeserializeError = rmp_serde::decode::Error; - type DeserializeCtx = (Arc, Arc); + type DeserializeCtx = (Arc, SyncManager); async fn serialize(self) -> Result, Self::SerializeError> { let Self { diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs b/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs index ebcb57533..90aebff56 100644 --- 
a/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs +++ b/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs @@ -35,7 +35,7 @@ pub struct ObjectProcessor { // Dependencies db: Arc, - sync: Arc, + sync: SyncManager, } #[derive(Debug, Serialize, Deserialize)] @@ -194,7 +194,7 @@ impl ObjectProcessor { pub fn new( file_paths_by_cas_id: HashMap, Vec>, db: Arc, - sync: Arc, + sync: SyncManager, with_priority: bool, ) -> Self { Self { @@ -375,7 +375,7 @@ impl SerializableTask for ObjectProcessor { type DeserializeError = rmp_serde::decode::Error; - type DeserializeCtx = (Arc, Arc); + type DeserializeCtx = (Arc, SyncManager); async fn serialize(self) -> Result, Self::SerializeError> { let Self { diff --git a/core/crates/heavy-lifting/src/indexer/job.rs b/core/crates/heavy-lifting/src/indexer/job.rs index 22546950e..1ede3f737 100644 --- a/core/crates/heavy-lifting/src/indexer/job.rs +++ b/core/crates/heavy-lifting/src/indexer/job.rs @@ -116,13 +116,13 @@ impl Job for Indexer { TaskKind::Save => tasks::Saver::deserialize( &task_bytes, - (Arc::clone(ctx.db()), Arc::clone(ctx.sync())), + (Arc::clone(ctx.db()), ctx.sync().clone()), ) .await .map(IntoTask::into_task), TaskKind::Update => tasks::Updater::deserialize( &task_bytes, - (Arc::clone(ctx.db()), Arc::clone(ctx.sync())), + (Arc::clone(ctx.db()), ctx.sync().clone()), ) .await .map(IntoTask::into_task), @@ -687,7 +687,7 @@ impl Indexer { self.location.pub_id.clone(), self.to_create_buffer.drain(..).collect(), Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), ) .into_task(), ); @@ -707,7 +707,7 @@ impl Indexer { tasks::Updater::new_deep( self.to_update_buffer.drain(..).collect(), Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), ) .into_task(), ); @@ -759,7 +759,7 @@ impl Indexer { self.location.pub_id.clone(), chunked_saves, Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), ) }) .collect::>(); @@ -806,7 +806,7 @@ impl 
Indexer { self.location.pub_id.clone(), chunked_saves, Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), ) }) .collect::>(); @@ -824,7 +824,7 @@ impl Indexer { tasks::Updater::new_shallow( chunked_updates, Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), ) }) .collect::>(); @@ -851,7 +851,7 @@ impl Indexer { self.location.pub_id.clone(), chunked_saves, Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), )); } save_tasks @@ -878,7 +878,7 @@ impl Indexer { update_tasks.push(tasks::Updater::new_deep( chunked_updates, Arc::clone(ctx.db()), - Arc::clone(ctx.sync()), + ctx.sync().clone(), )); } update_tasks diff --git a/core/crates/heavy-lifting/src/indexer/shallow.rs b/core/crates/heavy-lifting/src/indexer/shallow.rs index 1c4c89ca7..3a9d1c074 100644 --- a/core/crates/heavy-lifting/src/indexer/shallow.rs +++ b/core/crates/heavy-lifting/src/indexer/shallow.rs @@ -96,7 +96,7 @@ pub async fn shallow( to_create, to_update, Arc::clone(db), - Arc::clone(sync), + sync.clone(), dispatcher, ) .await? 
@@ -203,7 +203,7 @@ async fn save_and_update( to_create: Vec, to_update: Vec, db: Arc, - sync: Arc, + sync: SyncManager, dispatcher: &BaseTaskDispatcher, ) -> Result, Error> { let save_and_update_tasks = to_create @@ -216,7 +216,7 @@ async fn save_and_update( location.pub_id.clone(), chunk.collect::>(), Arc::clone(&db), - Arc::clone(&sync), + sync.clone(), ) }) .map(IntoTask::into_task) @@ -229,7 +229,7 @@ async fn save_and_update( tasks::Updater::new_shallow( chunk.collect::>(), Arc::clone(&db), - Arc::clone(&sync), + sync.clone(), ) }) .map(IntoTask::into_task), diff --git a/core/crates/heavy-lifting/src/indexer/tasks/saver.rs b/core/crates/heavy-lifting/src/indexer/tasks/saver.rs index fcf2d84a7..6afbf8cc9 100644 --- a/core/crates/heavy-lifting/src/indexer/tasks/saver.rs +++ b/core/crates/heavy-lifting/src/indexer/tasks/saver.rs @@ -36,7 +36,7 @@ pub struct Saver { // Dependencies db: Arc, - sync: Arc, + sync: SyncManager, } /// [`Save`] Task output @@ -188,7 +188,7 @@ impl Saver { location_pub_id: location::pub_id::Type, walked_entries: Vec, db: Arc, - sync: Arc, + sync: SyncManager, ) -> Self { Self { id: TaskId::new_v4(), @@ -207,7 +207,7 @@ impl Saver { location_pub_id: location::pub_id::Type, walked_entries: Vec, db: Arc, - sync: Arc, + sync: SyncManager, ) -> Self { Self { id: TaskId::new_v4(), @@ -236,7 +236,7 @@ impl SerializableTask for Saver { type DeserializeError = rmp_serde::decode::Error; - type DeserializeCtx = (Arc, Arc); + type DeserializeCtx = (Arc, SyncManager); async fn serialize(self) -> Result, Self::SerializeError> { let Self { diff --git a/core/crates/heavy-lifting/src/indexer/tasks/updater.rs b/core/crates/heavy-lifting/src/indexer/tasks/updater.rs index f737dd1d4..26047d43f 100644 --- a/core/crates/heavy-lifting/src/indexer/tasks/updater.rs +++ b/core/crates/heavy-lifting/src/indexer/tasks/updater.rs @@ -39,7 +39,7 @@ pub struct Updater { // Dependencies db: Arc, - sync: Arc, + sync: SyncManager, } /// [`Update`] Task output @@ -186,7 
+186,7 @@ impl Updater { pub fn new_deep( walked_entries: Vec, db: Arc, - sync: Arc, + sync: SyncManager, ) -> Self { Self { id: TaskId::new_v4(), @@ -202,7 +202,7 @@ impl Updater { pub fn new_shallow( walked_entries: Vec, db: Arc, - sync: Arc, + sync: SyncManager, ) -> Self { Self { id: TaskId::new_v4(), @@ -264,7 +264,7 @@ impl SerializableTask for Updater { type DeserializeError = rmp_serde::decode::Error; - type DeserializeCtx = (Arc, Arc); + type DeserializeCtx = (Arc, SyncManager); async fn serialize(self) -> Result, Self::SerializeError> { let Self { diff --git a/core/crates/heavy-lifting/src/job_system/job.rs b/core/crates/heavy-lifting/src/job_system/job.rs index 30b499840..4afa393d1 100644 --- a/core/crates/heavy-lifting/src/job_system/job.rs +++ b/core/crates/heavy-lifting/src/job_system/job.rs @@ -98,7 +98,7 @@ impl ProgressUpdate { pub trait OuterContext: Send + Sync + Clone + 'static { fn id(&self) -> Uuid; fn db(&self) -> &Arc; - fn sync(&self) -> &Arc; + fn sync(&self) -> &SyncManager; fn invalidate_query(&self, query: &'static str); fn query_invalidator(&self) -> impl Fn(&'static str) + Send + Sync; fn report_update(&self, update: UpdateEvent); diff --git a/core/crates/heavy-lifting/src/media_processor/job.rs b/core/crates/heavy-lifting/src/media_processor/job.rs index bab8e506c..cadeb5f03 100644 --- a/core/crates/heavy-lifting/src/media_processor/job.rs +++ b/core/crates/heavy-lifting/src/media_processor/job.rs @@ -125,7 +125,7 @@ impl Job for MediaProcessor { TaskKind::MediaDataExtractor => { tasks::MediaDataExtractor::deserialize( &task_bytes, - (Arc::clone(ctx.db()), Arc::clone(ctx.sync())), + (Arc::clone(ctx.db()), ctx.sync().clone()), ) .await .map(IntoTask::into_task) @@ -632,7 +632,7 @@ impl MediaProcessor { parent_iso_file_path.location_id(), Arc::clone(&self.location_path), Arc::clone(db), - Arc::clone(sync), + sync.clone(), ) }) .map(IntoTask::into_task) @@ -648,7 +648,7 @@ impl MediaProcessor { parent_iso_file_path.location_id(), 
Arc::clone(&self.location_path), Arc::clone(db), - Arc::clone(sync), + sync.clone(), ) }) .map(IntoTask::into_task), diff --git a/core/crates/heavy-lifting/src/media_processor/shallow.rs b/core/crates/heavy-lifting/src/media_processor/shallow.rs index 3f79192ba..fd7caac14 100644 --- a/core/crates/heavy-lifting/src/media_processor/shallow.rs +++ b/core/crates/heavy-lifting/src/media_processor/shallow.rs @@ -154,7 +154,7 @@ pub async fn shallow( async fn dispatch_media_data_extractor_tasks( db: &Arc, - sync: &Arc, + sync: &SyncManager, parent_iso_file_path: &IsolatedFilePathData<'_>, location_path: &Arc, dispatcher: &BaseTaskDispatcher, @@ -185,7 +185,7 @@ async fn dispatch_media_data_extractor_tasks( parent_iso_file_path.location_id(), Arc::clone(location_path), Arc::clone(db), - Arc::clone(sync), + sync.clone(), ) }) .map(IntoTask::into_task) @@ -201,7 +201,7 @@ async fn dispatch_media_data_extractor_tasks( parent_iso_file_path.location_id(), Arc::clone(location_path), Arc::clone(db), - Arc::clone(sync), + sync.clone(), ) }) .map(IntoTask::into_task), diff --git a/core/crates/heavy-lifting/src/media_processor/tasks/media_data_extractor.rs b/core/crates/heavy-lifting/src/media_processor/tasks/media_data_extractor.rs index eaf3b261a..cd1c962da 100644 --- a/core/crates/heavy-lifting/src/media_processor/tasks/media_data_extractor.rs +++ b/core/crates/heavy-lifting/src/media_processor/tasks/media_data_extractor.rs @@ -69,7 +69,7 @@ pub struct MediaDataExtractor { // Dependencies db: Arc, - sync: Arc, + sync: SyncManager, } #[derive(Debug, Serialize, Deserialize)] @@ -275,7 +275,7 @@ impl MediaDataExtractor { location_id: location::id::Type, location_path: Arc, db: Arc, - sync: Arc, + sync: SyncManager, ) -> Self { let mut output = Output::default(); @@ -316,7 +316,7 @@ impl MediaDataExtractor { location_id: location::id::Type, location_path: Arc, db: Arc, - sync: Arc, + sync: SyncManager, ) -> Self { Self::new(Kind::Exif, file_paths, location_id, location_path, db, 
sync) } @@ -327,7 +327,7 @@ impl MediaDataExtractor { location_id: location::id::Type, location_path: Arc, db: Arc, - sync: Arc, + sync: SyncManager, ) -> Self { Self::new( Kind::FFmpeg, @@ -550,7 +550,7 @@ impl SerializableTask for MediaDataExtractor { type DeserializeError = rmp_serde::decode::Error; - type DeserializeCtx = (Arc, Arc); + type DeserializeCtx = (Arc, SyncManager); async fn serialize(self) -> Result, Self::SerializeError> { let Self { diff --git a/core/crates/sync/Cargo.toml b/core/crates/sync/Cargo.toml index e4f6a80fe..5e87856af 100644 --- a/core/crates/sync/Cargo.toml +++ b/core/crates/sync/Cargo.toml @@ -18,6 +18,7 @@ sd-utils = { path = "../../../crates/utils" } # Workspace dependencies async-channel = { workspace = true } +async-stream = { workspace = true } futures = { workspace = true } futures-concurrency = { workspace = true } prisma-client-rust = { workspace = true, features = ["rspc"] } diff --git a/core/crates/sync/src/ingest.rs b/core/crates/sync/src/ingest.rs index e5fa60050..5fc0ac49c 100644 --- a/core/crates/sync/src/ingest.rs +++ b/core/crates/sync/src/ingest.rs @@ -230,38 +230,34 @@ impl Actor { pub async fn declare(shared: Arc) -> Handler { let (io, HandlerIO { event_tx, req_rx }) = create_actor_io::(); - shared - .actors - .declare( - "Sync Ingest", - { - let shared = Arc::clone(&shared); - move |stop| async move { - enum Race { - Ticked, - Stopped, - } + // shared + // .actors + // .declare("Sync Ingest", { + // let shared = Arc::clone(&shared); + // move |stop| async move { + // enum Race { + // Ticked, + // Stopped, + // } - let mut this = Self { - state: Some(State::default()), - io, - shared, - }; + // let mut this = Self { + // state: Some(State::default()), + // io, + // shared, + // }; - while matches!( - ( - this.tick().map(|()| Race::Ticked), - stop.into_future().map(|()| Race::Stopped), - ) - .race() - .await, - Race::Ticked - ) { /* Everything is Awesome! 
*/ } - } - }, - true, - ) - .await; + // while matches!( + // ( + // this.tick().map(|()| Race::Ticked), + // stop.into_future().map(|()| Race::Stopped), + // ) + // .race() + // .await, + // Race::Ticked + // ) { /* Everything is Awesome! */ } + // } + // }) + // .await; Handler { event_tx, req_rx } } @@ -626,7 +622,6 @@ mod test { emit_messages_flag: Arc::new(AtomicBool::new(true)), active: AtomicBool::default(), active_notify: Notify::default(), - actors: Arc::default(), }); (Actor::declare(Arc::clone(&shared)).await, shared) diff --git a/core/crates/sync/src/lib.rs b/core/crates/sync/src/lib.rs index 0ce608947..1b437e7e1 100644 --- a/core/crates/sync/src/lib.rs +++ b/core/crates/sync/src/lib.rs @@ -27,8 +27,9 @@ #![forbid(deprecated_in_future)] #![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)] -use sd_prisma::prisma::{crdt_operation, device, PrismaClient}; -use sd_sync::{CRDTOperation, ModelId}; +use sd_prisma::prisma::{cloud_crdt_operation, crdt_operation, device, PrismaClient}; +use sd_sync::ModelId; +use sd_utils::uuid_to_bytes; use std::{ collections::HashMap, @@ -54,6 +55,10 @@ pub enum SyncEvent { } pub use sd_core_prisma_helpers::DevicePubId; +pub use sd_sync::{ + CRDTOperation, OperationFactory, RelationSyncId, RelationSyncModel, SharedSyncModel, SyncId, + SyncModel, +}; pub type TimestampPerDevice = Arc>>; @@ -65,7 +70,6 @@ pub struct SharedState { pub clock: uhlc::HLC, pub active: AtomicBool, pub active_notify: Notify, - pub actors: Arc, } #[derive(thiserror::Error, Debug)] @@ -108,7 +112,7 @@ pub fn crdt_op_db(op: &CRDTOperation) -> Result { op.timestamp.as_u64() as i64 } }, - device: device::pub_id::equals(op.device_pub_id.as_bytes().to_vec()), + device: device::pub_id::equals(uuid_to_bytes(&op.device_pub_id)), kind: op.kind().to_string(), data: rmp_serde::to_vec(&op.data)?, model: i32::from(op.model_id), @@ -137,3 +141,21 @@ pub fn crdt_op_unchecked_db( _params: vec![], }) } + +pub fn cloud_crdt_op_db(op: &CRDTOperation) -> 
Result { + Ok(cloud_crdt_operation::Create { + timestamp: { + #[allow(clippy::cast_possible_wrap)] + // SAFETY: we had to store using i64 due to SQLite limitations + { + op.timestamp.as_u64() as i64 + } + }, + device: device::pub_id::equals(uuid_to_bytes(&op.device_pub_id)), + kind: op.data.as_kind().to_string(), + data: rmp_serde::to_vec(&op.data)?, + model: i32::from(op.model_id), + record_id: rmp_serde::to_vec(&op.record_id)?, + _params: vec![], + }) +} diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index 837f9c116..abb35f392 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -14,6 +14,8 @@ use std::{ }, }; +use async_stream::stream; +use futures::Stream; use prisma_client_rust::{and, operator::or}; use tokio::sync::{broadcast, Mutex, Notify, RwLock}; use tracing::warn; @@ -23,15 +25,15 @@ use uuid::Uuid; use super::{ crdt_op_db, db_operation::{into_cloud_ops, into_ops}, - ingest, Error, SharedState, SyncEvent, NTP64, + Error, SharedState, SyncEvent, NTP64, }; /// Wrapper that spawns the ingest actor and provides utilities for reading and writing sync operations. 
+#[derive(Clone)] pub struct Manager { pub tx: broadcast::Sender, - pub ingest: ingest::Handler, pub shared: Arc, - pub sync_lock: Mutex<()>, + pub sync_lock: Arc>, } impl fmt::Debug for Manager { @@ -53,7 +55,6 @@ impl Manager { db: Arc, current_device_pub_id: &DevicePubId, emit_messages_flag: Arc, - actors: Arc, ) -> Result<(Self, broadcast::Receiver), Error> { let existing_devices = db.device().find_many(vec![]).exec().await?; @@ -62,7 +63,6 @@ impl Manager { current_device_pub_id, emit_messages_flag, &existing_devices, - actors, ) .await } @@ -79,7 +79,6 @@ impl Manager { current_device_pub_id: &DevicePubId, emit_messages_flag: Arc, existing_devices: &[device::Data], - actors: Arc, ) -> Result<(Self, broadcast::Receiver), Error> { let latest_timestamp_per_device = db ._batch( @@ -124,22 +123,19 @@ impl Manager { emit_messages_flag, active: AtomicBool::default(), active_notify: Notify::default(), - actors, }); - let ingest = ingest::Actor::declare(shared.clone()).await; - Ok(( Self { tx, - ingest, shared, - sync_lock: Mutex::default(), + sync_lock: Arc::new(Mutex::default()), }, rx, )) } + #[must_use] pub fn subscribe(&self) -> broadcast::Receiver { self.tx.subscribe() } @@ -246,6 +242,56 @@ impl Manager { .collect() } + pub fn stream_device_ops<'a>( + &'a self, + device_pub_id: &'a DevicePubId, + chunk_size: u32, + initial_timestamp: NTP64, + ) -> impl Stream, Error>> + Send + '_ { + stream! 
{ + let mut current_initial_timestamp = initial_timestamp; + + loop { + match self.db.crdt_operation() + .find_many(vec![ + crdt_operation::device_pub_id::equals(device_pub_id.to_db()), + #[allow(clippy::cast_possible_wrap)] + crdt_operation::timestamp::gt(current_initial_timestamp.as_u64() as i64), + ]) + .take(i64::from(chunk_size)) + .order_by(crdt_operation::timestamp::order(SortOrder::Asc)) + .exec() + .await + { + Ok(ops) => { + if ops.is_empty() { + break; + } + + match ops.into_iter().map(into_ops).collect::, _>>() { + Ok(ops) => { + if let Some(last_op) = ops.last() { + current_initial_timestamp = last_op.timestamp; + } + + yield Ok(ops); + }, + Err(e) => { + yield Err(e); + break; + }, + } + } + + Err(e) => { + yield Err(e.into()); + break; + } + } + } + } + } + pub async fn get_ops( &self, count: u32, diff --git a/core/crates/sync/tests/lib.rs b/core/crates/sync/tests/lib.rs index 708494382..5c9dbf584 100644 --- a/core/crates/sync/tests/lib.rs +++ b/core/crates/sync/tests/lib.rs @@ -1,234 +1,234 @@ -mod mock_instance; +// mod mock_instance; -use sd_core_sync::*; +// use sd_core_sync::*; -use sd_prisma::{prisma::location, prisma_sync}; -use sd_sync::*; -use sd_utils::{msgpack, uuid_to_bytes}; +// use sd_prisma::{prisma::location, prisma_sync}; +// use sd_sync::*; +// use sd_utils::{msgpack, uuid_to_bytes}; -use mock_instance::Device; -use tracing::info; -use tracing_test::traced_test; -use uuid::Uuid; +// use mock_instance::Device; +// use tracing::info; +// use tracing_test::traced_test; +// use uuid::Uuid; -const MOCK_LOCATION_NAME: &str = "Location 0"; -const MOCK_LOCATION_PATH: &str = "/User/Anon/Documents"; +// const MOCK_LOCATION_NAME: &str = "Location 0"; +// const MOCK_LOCATION_PATH: &str = "/User/Anon/Documents"; -async fn write_test_location(instance: &Device) -> location::Data { - let location_pub_id = Uuid::new_v4(); +// async fn write_test_location(instance: &Device) -> location::Data { +// let location_pub_id = Uuid::new_v4(); - let 
(sync_ops, db_ops): (Vec<_>, Vec<_>) = [ - sync_db_entry!(MOCK_LOCATION_NAME, location::name), - sync_db_entry!(MOCK_LOCATION_PATH, location::path), - ] - .into_iter() - .unzip(); +// let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [ +// sync_db_entry!(MOCK_LOCATION_NAME, location::name), +// sync_db_entry!(MOCK_LOCATION_PATH, location::path), +// ] +// .into_iter() +// .unzip(); - let location = instance - .sync - .write_op( - &instance.db, - instance.sync.shared_create( - prisma_sync::location::SyncId { - pub_id: uuid_to_bytes(&location_pub_id), - }, - sync_ops, - ), - instance - .db - .location() - .create(uuid_to_bytes(&location_pub_id), db_ops), - ) - .await - .expect("failed to create mock location"); +// let location = instance +// .sync +// .write_op( +// &instance.db, +// instance.sync.shared_create( +// prisma_sync::location::SyncId { +// pub_id: uuid_to_bytes(&location_pub_id), +// }, +// sync_ops, +// ), +// instance +// .db +// .location() +// .create(uuid_to_bytes(&location_pub_id), db_ops), +// ) +// .await +// .expect("failed to create mock location"); - instance - .sync - .write_ops(&instance.db, { - let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [ - sync_db_entry!(1024, location::total_capacity), - sync_db_entry!(512, location::available_capacity), - ] - .into_iter() - .unzip(); +// instance +// .sync +// .write_ops(&instance.db, { +// let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [ +// sync_db_entry!(1024, location::total_capacity), +// sync_db_entry!(512, location::available_capacity), +// ] +// .into_iter() +// .unzip(); - ( - sync_ops - .into_iter() - .map(|(k, v)| { - instance.sync.shared_update( - prisma_sync::location::SyncId { - pub_id: uuid_to_bytes(&location_pub_id), - }, - k, - v, - ) - }) - .collect::>(), - instance - .db - .location() - .update(location::id::equals(location.id), db_ops), - ) - }) - .await - .expect("failed to create mock location"); +// ( +// sync_ops +// .into_iter() +// .map(|(k, v)| { +// instance.sync.shared_update( +// 
prisma_sync::location::SyncId { +// pub_id: uuid_to_bytes(&location_pub_id), +// }, +// k, +// v, +// ) +// }) +// .collect::>(), +// instance +// .db +// .location() +// .update(location::id::equals(location.id), db_ops), +// ) +// }) +// .await +// .expect("failed to create mock location"); - location -} +// location +// } -#[tokio::test] -#[traced_test] -async fn writes_operations_and_rows_together() -> Result<(), Box> { - let instance = Device::new(Uuid::new_v4()).await; +// #[tokio::test] +// #[traced_test] +// async fn writes_operations_and_rows_together() -> Result<(), Box> { +// let instance = Device::new(Uuid::new_v4()).await; - write_test_location(&instance).await; +// write_test_location(&instance).await; - let operations = instance - .db - .crdt_operation() - .find_many(vec![]) - .exec() - .await?; +// let operations = instance +// .db +// .crdt_operation() +// .find_many(vec![]) +// .exec() +// .await?; - // 1 create, 2 update - assert_eq!(operations.len(), 3); - assert_eq!(operations[0].model, prisma_sync::location::MODEL_ID as i32); +// // 1 create, 2 update +// assert_eq!(operations.len(), 3); +// assert_eq!(operations[0].model, prisma_sync::location::MODEL_ID as i32); - let out = instance.sync.get_ops(100, vec![]).await?; +// let out = instance.sync.get_ops(100, vec![]).await?; - assert_eq!(out.len(), 3); +// assert_eq!(out.len(), 3); - let locations = instance.db.location().find_many(vec![]).exec().await?; +// let locations = instance.db.location().find_many(vec![]).exec().await?; - assert_eq!(locations.len(), 1); - let location = locations.first().unwrap(); - assert_eq!(location.name.as_deref(), Some(MOCK_LOCATION_NAME)); - assert_eq!(location.path.as_deref(), Some(MOCK_LOCATION_PATH)); +// assert_eq!(locations.len(), 1); +// let location = locations.first().unwrap(); +// assert_eq!(location.name.as_deref(), Some(MOCK_LOCATION_NAME)); +// assert_eq!(location.path.as_deref(), Some(MOCK_LOCATION_PATH)); - Ok(()) -} +// Ok(()) +// } -#[tokio::test] 
-#[traced_test] -async fn operations_send_and_ingest() -> Result<(), Box> { - let instance1 = Device::new(Uuid::new_v4()).await; - let instance2 = Device::new(Uuid::new_v4()).await; +// #[tokio::test] +// #[traced_test] +// async fn operations_send_and_ingest() -> Result<(), Box> { +// let instance1 = Device::new(Uuid::new_v4()).await; +// let instance2 = Device::new(Uuid::new_v4()).await; - let mut instance2_sync_rx = instance2.sync_rx.resubscribe(); +// let mut instance2_sync_rx = instance2.sync_rx.resubscribe(); - info!("Created instances!"); +// info!("Created instances!"); - Device::pair(&instance1, &instance2).await; +// Device::pair(&instance1, &instance2).await; - info!("Paired instances!"); +// info!("Paired instances!"); - write_test_location(&instance1).await; +// write_test_location(&instance1).await; - info!("Created mock location!"); +// info!("Created mock location!"); - assert!(matches!( - instance2_sync_rx.recv().await?, - SyncEvent::Ingested - )); +// assert!(matches!( +// instance2_sync_rx.recv().await?, +// SyncEvent::Ingested +// )); - let out = instance2.sync.get_ops(100, vec![]).await?; +// let out = instance2.sync.get_ops(100, vec![]).await?; - assert_locations_equality( - &instance1.db.location().find_many(vec![]).exec().await?[0], - &instance2.db.location().find_many(vec![]).exec().await?[0], - ); +// assert_locations_equality( +// &instance1.db.location().find_many(vec![]).exec().await?[0], +// &instance2.db.location().find_many(vec![]).exec().await?[0], +// ); - assert_eq!(out.len(), 3); +// assert_eq!(out.len(), 3); - instance1.teardown().await; - instance2.teardown().await; +// instance1.teardown().await; +// instance2.teardown().await; - Ok(()) -} +// Ok(()) +// } -#[tokio::test] -async fn no_update_after_delete() -> Result<(), Box> { - let instance1 = Device::new(Uuid::new_v4()).await; - let instance2 = Device::new(Uuid::new_v4()).await; +// #[tokio::test] +// async fn no_update_after_delete() -> Result<(), Box> { +// let instance1 = 
Device::new(Uuid::new_v4()).await; +// let instance2 = Device::new(Uuid::new_v4()).await; - let mut instance2_sync_rx = instance2.sync_rx.resubscribe(); +// let mut instance2_sync_rx = instance2.sync_rx.resubscribe(); - Device::pair(&instance1, &instance2).await; +// Device::pair(&instance1, &instance2).await; - let location = write_test_location(&instance1).await; +// let location = write_test_location(&instance1).await; - assert!(matches!( - instance2_sync_rx.recv().await?, - SyncEvent::Ingested - )); +// assert!(matches!( +// instance2_sync_rx.recv().await?, +// SyncEvent::Ingested +// )); - instance2 - .sync - .write_op( - &instance2.db, - instance2.sync.shared_delete(prisma_sync::location::SyncId { - pub_id: location.pub_id.clone(), - }), - instance2.db.location().delete_many(vec![]), - ) - .await?; +// instance2 +// .sync +// .write_op( +// &instance2.db, +// instance2.sync.shared_delete(prisma_sync::location::SyncId { +// pub_id: location.pub_id.clone(), +// }), +// instance2.db.location().delete_many(vec![]), +// ) +// .await?; - assert!(matches!( - instance1.sync_rx.resubscribe().recv().await?, - SyncEvent::Ingested - )); +// assert!(matches!( +// instance1.sync_rx.resubscribe().recv().await?, +// SyncEvent::Ingested +// )); - instance1 - .sync - .write_op( - &instance1.db, - instance1.sync.shared_update( - prisma_sync::location::SyncId { - pub_id: location.pub_id.clone(), - }, - "name", - msgpack!("New Location"), - ), - instance1.db.location().find_many(vec![]), - ) - .await?; +// instance1 +// .sync +// .write_op( +// &instance1.db, +// instance1.sync.shared_update( +// prisma_sync::location::SyncId { +// pub_id: location.pub_id.clone(), +// }, +// "name", +// msgpack!("New Location"), +// ), +// instance1.db.location().find_many(vec![]), +// ) +// .await?; - // one spare update operation that actually gets ignored by instance 2 - assert_eq!(instance1.db.crdt_operation().count(vec![]).exec().await?, 5); - 
assert_eq!(instance2.db.crdt_operation().count(vec![]).exec().await?, 4); +// // one spare update operation that actually gets ignored by instance 2 +// assert_eq!(instance1.db.crdt_operation().count(vec![]).exec().await?, 5); +// assert_eq!(instance2.db.crdt_operation().count(vec![]).exec().await?, 4); - assert_eq!(instance1.db.location().count(vec![]).exec().await?, 0); - // the whole point of the test - the update (which is ingested as an upsert) should be ignored - assert_eq!(instance2.db.location().count(vec![]).exec().await?, 0); +// assert_eq!(instance1.db.location().count(vec![]).exec().await?, 0); +// // the whole point of the test - the update (which is ingested as an upsert) should be ignored +// assert_eq!(instance2.db.location().count(vec![]).exec().await?, 0); - instance1.teardown().await; - instance2.teardown().await; +// instance1.teardown().await; +// instance2.teardown().await; - Ok(()) -} +// Ok(()) +// } -fn assert_locations_equality(l1: &location::Data, l2: &location::Data) { - assert_eq!(l1.pub_id, l2.pub_id, "pub id"); - assert_eq!(l1.name, l2.name, "name"); - assert_eq!(l1.path, l2.path, "path"); - assert_eq!(l1.total_capacity, l2.total_capacity, "total capacity"); - assert_eq!( - l1.available_capacity, l2.available_capacity, - "available capacity" - ); - assert_eq!(l1.size_in_bytes, l2.size_in_bytes, "size in bytes"); - assert_eq!(l1.is_archived, l2.is_archived, "is archived"); - assert_eq!( - l1.generate_preview_media, l2.generate_preview_media, - "generate preview media" - ); - assert_eq!( - l1.sync_preview_media, l2.sync_preview_media, - "sync preview media" - ); - assert_eq!(l1.hidden, l2.hidden, "hidden"); - assert_eq!(l1.date_created, l2.date_created, "date created"); - assert_eq!(l1.scan_state, l2.scan_state, "scan state"); - assert_eq!(l1.instance_id, l2.instance_id, "instance id"); -} +// fn assert_locations_equality(l1: &location::Data, l2: &location::Data) { +// assert_eq!(l1.pub_id, l2.pub_id, "pub id"); +// assert_eq!(l1.name, 
l2.name, "name"); +// assert_eq!(l1.path, l2.path, "path"); +// assert_eq!(l1.total_capacity, l2.total_capacity, "total capacity"); +// assert_eq!( +// l1.available_capacity, l2.available_capacity, +// "available capacity" +// ); +// assert_eq!(l1.size_in_bytes, l2.size_in_bytes, "size in bytes"); +// assert_eq!(l1.is_archived, l2.is_archived, "is archived"); +// assert_eq!( +// l1.generate_preview_media, l2.generate_preview_media, +// "generate preview media" +// ); +// assert_eq!( +// l1.sync_preview_media, l2.sync_preview_media, +// "sync preview media" +// ); +// assert_eq!(l1.hidden, l2.hidden, "hidden"); +// assert_eq!(l1.date_created, l2.date_created, "date created"); +// assert_eq!(l1.scan_state, l2.scan_state, "scan state"); +// assert_eq!(l1.instance_id, l2.instance_id, "instance id"); +// } diff --git a/core/crates/sync/tests/mock_instance.rs b/core/crates/sync/tests/mock_instance.rs index 14779588e..9dd5f1aff 100644 --- a/core/crates/sync/tests/mock_instance.rs +++ b/core/crates/sync/tests/mock_instance.rs @@ -1,143 +1,143 @@ -use sd_core_sync::*; +// use sd_core_sync::*; -use sd_prisma::prisma; -use sd_sync::CompressedCRDTOperationsPerModelPerDevice; +// use sd_prisma::prisma; +// use sd_sync::CompressedCRDTOperationsPerModelPerDevice; -use std::sync::{atomic::AtomicBool, Arc}; +// use std::sync::{atomic::AtomicBool, Arc}; -use tokio::{fs, spawn, sync::broadcast}; -use tracing::{info, instrument, warn, Instrument}; -use uuid::Uuid; +// use tokio::{fs, spawn, sync::broadcast}; +// use tracing::{info, instrument, warn, Instrument}; +// use uuid::Uuid; -fn db_path(id: Uuid) -> String { - format!("/tmp/test-{id}.db") -} +// fn db_path(id: Uuid) -> String { +// format!("/tmp/test-{id}.db") +// } -#[derive(Clone)] -pub struct Device { - pub pub_id: DevicePubId, - pub db: Arc, - pub sync: Arc, - pub sync_rx: Arc>, -} +// #[derive(Clone)] +// pub struct Device { +// pub pub_id: DevicePubId, +// pub db: Arc, +// pub sync: Arc, +// pub sync_rx: Arc>, +// } -impl 
Device { - pub async fn new(id: Uuid) -> Arc { - let url = format!("file:{}", db_path(id)); - let device_pub_id = DevicePubId::from(id); +// impl Device { +// pub async fn new(id: Uuid) -> Arc { +// let url = format!("file:{}", db_path(id)); +// let device_pub_id = DevicePubId::from(id); - let db = Arc::new( - prisma::PrismaClient::_builder() - .with_url(url.to_string()) - .build() - .await - .unwrap(), - ); +// let db = Arc::new( +// prisma::PrismaClient::_builder() +// .with_url(url.to_string()) +// .build() +// .await +// .unwrap(), +// ); - db._db_push().await.unwrap(); +// db._db_push().await.unwrap(); - db.device() - .create(device_pub_id.to_db(), vec![]) - .exec() - .await - .unwrap(); +// db.device() +// .create(device_pub_id.to_db(), vec![]) +// .exec() +// .await +// .unwrap(); - let (sync, sync_rx) = sd_core_sync::SyncManager::new( - Arc::clone(&db), - &device_pub_id, - Arc::new(AtomicBool::new(true)), - Default::default(), - ) - .await - .expect("failed to create sync manager"); +// // let (sync, sync_rx) = sd_core_sync::SyncManager::new( +// // Arc::clone(&db), +// // &device_pub_id, +// // Arc::new(AtomicBool::new(true)), +// // Default::default(), +// // ) +// // .await +// // .expect("failed to create sync manager"); - Arc::new(Self { - pub_id: device_pub_id, - db, - sync: Arc::new(sync), - sync_rx: Arc::new(sync_rx), - }) - } +// // Arc::new(Self { +// // pub_id: device_pub_id, +// // db, +// // sync: Arc::new(sync), +// // sync_rx: Arc::new(sync_rx), +// // }) +// } - pub async fn teardown(&self) { - fs::remove_file(db_path(Uuid::from(&self.pub_id))) - .await - .unwrap(); - } +// pub async fn teardown(&self) { +// fs::remove_file(db_path(Uuid::from(&self.pub_id))) +// .await +// .unwrap(); +// } - pub async fn pair(instance1: &Arc, instance2: &Arc) { - #[instrument(skip(left, right))] - async fn half(left: &Arc, right: &Arc, context: &'static str) { - left.db - .device() - .create(right.pub_id.to_db(), vec![]) - .exec() - .await - .unwrap(); +// 
pub async fn pair(instance1: &Arc, instance2: &Arc) { +// #[instrument(skip(left, right))] +// async fn half(left: &Arc, right: &Arc, context: &'static str) { +// left.db +// .device() +// .create(right.pub_id.to_db(), vec![]) +// .exec() +// .await +// .unwrap(); - spawn({ - let mut sync_rx_left = left.sync_rx.resubscribe(); - let right = Arc::clone(right); +// spawn({ +// let mut sync_rx_left = left.sync_rx.resubscribe(); +// let right = Arc::clone(right); - async move { - while let Ok(msg) = sync_rx_left.recv().await { - info!(?msg, "sync_rx_left received message"); - if matches!(msg, SyncEvent::Created) { - right - .sync - .ingest - .event_tx - .send(ingest::Event::Notification) - .await - .unwrap(); - info!("sent notification to instance 2"); - } - } - } - .in_current_span() - }); +// async move { +// while let Ok(msg) = sync_rx_left.recv().await { +// info!(?msg, "sync_rx_left received message"); +// if matches!(msg, SyncEvent::Created) { +// right +// .sync +// .ingest +// .event_tx +// .send(ingest::Event::Notification) +// .await +// .unwrap(); +// info!("sent notification to instance 2"); +// } +// } +// } +// .in_current_span() +// }); - spawn({ - let left = Arc::clone(left); - let right = Arc::clone(right); +// spawn({ +// let left = Arc::clone(left); +// let right = Arc::clone(right); - async move { - while let Ok(msg) = right.sync.ingest.req_rx.recv().await { - info!(?msg, "right instance received request"); - match msg { - ingest::Request::Messages { timestamps, tx } => { - let messages = left.sync.get_ops(100, timestamps).await.unwrap(); +// async move { +// while let Ok(msg) = right.sync.ingest.req_rx.recv().await { +// info!(?msg, "right instance received request"); +// match msg { +// ingest::Request::Messages { timestamps, tx } => { +// let messages = left.sync.get_ops(100, timestamps).await.unwrap(); - let ingest = &right.sync.ingest; +// let ingest = &right.sync.ingest; - ingest - .event_tx - .send(ingest::Event::Messages(ingest::MessagesEvent 
{ - messages: CompressedCRDTOperationsPerModelPerDevice::new( - messages, - ), - has_more: false, - device_pub_id: left.pub_id.clone(), - wait_tx: None, - })) - .await - .unwrap(); +// ingest +// .event_tx +// .send(ingest::Event::Messages(ingest::MessagesEvent { +// messages: CompressedCRDTOperationsPerModelPerDevice::new( +// messages, +// ), +// has_more: false, +// device_pub_id: left.pub_id.clone(), +// wait_tx: None, +// })) +// .await +// .unwrap(); - if tx.send(()).is_err() { - warn!("failed to send ack to instance 1"); - } - } - ingest::Request::FinishedIngesting => { - right.sync.tx.send(SyncEvent::Ingested).unwrap(); - } - } - } - } - .in_current_span() - }); - } +// if tx.send(()).is_err() { +// warn!("failed to send ack to instance 1"); +// } +// } +// ingest::Request::FinishedIngesting => { +// right.sync.tx.send(SyncEvent::Ingested).unwrap(); +// } +// } +// } +// } +// .in_current_span() +// }); +// } - half(instance1, instance2, "instance1 -> instance2").await; - half(instance2, instance1, "instance2 -> instance1").await; - } -} +// half(instance1, instance2, "instance1 -> instance2").await; +// half(instance2, instance1, "instance2 -> instance1").await; +// } +// } diff --git a/core/src/api/libraries.rs b/core/src/api/libraries.rs index e0c64d6dd..31250fb63 100644 --- a/core/src/api/libraries.rs +++ b/core/src/api/libraries.rs @@ -471,37 +471,19 @@ pub(crate) fn mount() -> AlphaRouter { .procedure( "actors", R.with2(library()).subscription(|(_, library), _: ()| { - let mut rx = library.actors.invalidate_rx.resubscribe(); + let mut rx = library.cloud_sync_actors.invalidate_rx.resubscribe(); async_stream::stream! 
{ - let actors = library.actors.get_state().await; + let actors = library.cloud_sync_actors.get_state().await; yield actors; while let Ok(()) = rx.recv().await { - let actors = library.actors.get_state().await; + let actors = library.cloud_sync_actors.get_state().await; yield actors; } } }), ) - .procedure( - "startActor", - R.with2(library()) - .mutation(|(_, library), name: String| async move { - library.actors.start(&name).await; - - Ok(()) - }), - ) - .procedure( - "stopActor", - R.with2(library()) - .mutation(|(_, library), name: String| async move { - library.actors.stop(&name).await; - - Ok(()) - }), - ) .procedure( "vacuumDb", R.with2(library()) diff --git a/core/src/api/sync.rs b/core/src/api/sync.rs index 09e3a62b4..fd4d0cae9 100644 --- a/core/src/api/sync.rs +++ b/core/src/api/sync.rs @@ -56,19 +56,19 @@ pub(crate) fn mount() -> AlphaRouter { } async_stream::stream! { - let cloud_sync = &library.cloud.sync; + let cloud_sync_state = &library.cloud_sync_state; let sync = &library.sync.shared; loop { yield Data { ingest: sync.active.load(Ordering::Relaxed), - cloud_send: cloud_sync.send_active.load(Ordering::Relaxed), - cloud_receive: cloud_sync.receive_active.load(Ordering::Relaxed), - cloud_ingest: cloud_sync.ingest_active.load(Ordering::Relaxed), + cloud_send: cloud_sync_state.send_active.load(Ordering::Relaxed), + cloud_receive: cloud_sync_state.receive_active.load(Ordering::Relaxed), + cloud_ingest: cloud_sync_state.ingest_active.load(Ordering::Relaxed), }; tokio::select! 
{ - _ = cloud_sync.notifier.notified() => {}, + _ = cloud_sync_state.notifier.notified() => {}, _ = sync.active_notify.notified() => {} } } diff --git a/core/src/cloud/mod.rs b/core/src/cloud/mod.rs deleted file mode 100644 index 1efcbafe5..000000000 --- a/core/src/cloud/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -use crate::Node; - -use sd_core_sync::SyncManager; - -use std::sync::Arc; - -use uuid::Uuid; - -pub mod sync; - -#[derive(Default)] -pub struct State { - pub sync: sync::State, -} - -pub async fn start( - node: &Arc, - actors: &Arc, - library_id: Uuid, - instance_uuid: Uuid, - sync: &Arc, - db: &Arc, -) -> State { - let sync = sync::declare_actors( - node, - actors, - library_id, - instance_uuid, - sync.clone(), - db.clone(), - ) - .await; - - State { sync } -} diff --git a/core/src/cloud/sync/ingest.rs b/core/src/cloud/sync/ingest.rs deleted file mode 100644 index 7a7ae8ba3..000000000 --- a/core/src/cloud/sync/ingest.rs +++ /dev/null @@ -1,124 +0,0 @@ -use crate::cloud::sync::err_break; - -use sd_core_sync::SyncManager; - -use sd_actors::Stopper; -use sd_prisma::prisma::cloud_crdt_operation; -use sd_sync::CompressedCRDTOperationsPerModelPerDevice; - -use std::{ - future::IntoFuture, - pin::pin, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, -}; - -use futures::{FutureExt, StreamExt}; -use futures_concurrency::future::Race; -use tokio::sync::Notify; -use tracing::debug; - -// Responsible for taking sync operations received from the cloud, -// and applying them to the local database via the sync system's ingest actor. 
- -pub async fn run_actor( - sync: Arc, - notify: Arc, - state: Arc, - state_notify: Arc, - stop: Stopper, -) { - enum Race { - Notified, - Stopped, - } - - loop { - state.store(true, Ordering::Relaxed); - state_notify.notify_waiters(); - - { - let mut rx = pin!(sync.ingest.req_rx.clone()); - - if sync - .ingest - .event_tx - .send(sd_core_sync::Event::Notification) - .await - .is_ok() - { - while let Some(req) = rx.next().await { - const OPS_PER_REQUEST: u32 = 1000; - - // FIXME: If there are exactly a multiple of OPS_PER_REQUEST operations, - // then this will bug, as we sent `has_more` as true, but we don't have - // more operations to send. - - use sd_core_sync::*; - - let timestamps = match req { - Request::FinishedIngesting => { - break; - } - Request::Messages { timestamps, .. } => timestamps, - }; - - let (ops_ids, ops): (Vec<_>, Vec<_>) = - err_break!(sync.get_cloud_ops(OPS_PER_REQUEST, timestamps,).await) - .into_iter() - .unzip(); - - if ops.is_empty() { - break; - } - - debug!( - messages_count = ops.len(), - first_message = ?ops.first().map(|operation| operation.timestamp.as_u64()), - last_message = ?ops.last().map(|operation| operation.timestamp.as_u64()), - "Sending messages to ingester", - ); - - let (wait_tx, wait_rx) = tokio::sync::oneshot::channel::<()>(); - - err_break!( - sync.ingest - .event_tx - .send(sd_core_sync::Event::Messages(MessagesEvent { - device_pub_id: sync.device_pub_id.clone(), - has_more: ops.len() == OPS_PER_REQUEST as usize, - messages: CompressedCRDTOperationsPerModelPerDevice::new(ops), - wait_tx: Some(wait_tx) - })) - .await - ); - - err_break!(wait_rx.await); - - err_break!( - sync.db - .cloud_crdt_operation() - .delete_many(vec![cloud_crdt_operation::id::in_vec(ops_ids)]) - .exec() - .await - ); - } - } - } - - state.store(false, Ordering::Relaxed); - state_notify.notify_waiters(); - - if let Race::Stopped = ( - notify.notified().map(|()| Race::Notified), - stop.into_future().map(|()| Race::Stopped), - ) - .race() - 
.await - { - break; - } - } -} diff --git a/core/src/cloud/sync/mod.rs b/core/src/cloud/sync/mod.rs deleted file mode 100644 index 272bb5465..000000000 --- a/core/src/cloud/sync/mod.rs +++ /dev/null @@ -1,104 +0,0 @@ -use crate::Node; - -use sd_core_sync::SyncManager; - -use std::sync::{atomic::AtomicBool, Arc}; -use tokio::sync::Notify; -use uuid::Uuid; - -pub mod ingest; -pub mod receive; -pub mod send; - -#[derive(Default)] -pub struct State { - pub send_active: Arc, - pub receive_active: Arc, - pub ingest_active: Arc, - pub notifier: Arc, -} - -pub async fn declare_actors( - node: &Arc, - actors: &Arc, - library_id: Uuid, - instance_uuid: Uuid, - sync: Arc, - db: Arc, -) -> State { - let ingest_notify = Arc::new(Notify::new()); - - // actors - // .declare( - // "Cloud Sync Sender", - // { - // let sync = sync.clone(); - // let node = node.clone(); - // let active = state.send_active.clone(); - // let active_notifier = state.notifier.clone(); - - // move |stop| send::run_actor(library_id, sync, node, active, active_notifier, stop) - // }, - // autorun, - // ) - // .await; - - // actors - // .declare( - // "Cloud Sync Receiver", - // { - // let sync = sync.clone(); - // let node = node.clone(); - // let ingest_notify = ingest_notify.clone(); - // let active_notifier = state.notifier.clone(); - // let active = state.receive_active.clone(); - - // move |stop| { - // receive::run_actor( - // node.libraries.clone(), - // db.clone(), - // library_id, - // instance_uuid, - // sync, - // ingest_notify, - // node, - // active, - // active_notifier, - // stop, - // ) - // } - // }, - // autorun, - // ) - // .await; - - // actors - // .declare( - // "Cloud Sync Ingest", - // { - // let active = state.ingest_active.clone(); - // let active_notifier = state.notifier.clone(); - - // move |stop| { - // ingest::run_actor(sync.clone(), ingest_notify, active, active_notifier, stop) - // } - // }, - // autorun, - // ) - // .await; - - State::default() -} - -macro_rules! 
err_break { - ($e:expr) => { - match $e { - Ok(d) => d, - Err(e) => { - tracing::error!(?e); - break; - } - } - }; -} -pub(crate) use err_break; diff --git a/core/src/cloud/sync/send.rs b/core/src/cloud/sync/send.rs deleted file mode 100644 index 46cfd9556..000000000 --- a/core/src/cloud/sync/send.rs +++ /dev/null @@ -1,139 +0,0 @@ -use sd_core_cloud_services::CloudServices; -use sd_core_sync::{SyncEvent, SyncManager}; - -use sd_actors::Stopper; - -use std::{ - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::Duration, -}; - -use tokio::{ - sync::{broadcast, Notify}, - time::sleep, -}; -use uuid::Uuid; - -enum RaceNotifiedOrStopped { - Notified, - Stopped, -} - -pub async fn run_actor( - library_id: Uuid, - sync: Arc, - cloud_services: CloudServices, - is_active: Arc, - state_notify: Arc, - stop: Stopper, -) { - loop { - is_active.store(true, Ordering::Relaxed); - state_notify.notify_waiters(); - - // loop { - // // all available instances will have a default timestamp from create_instance - // let instances = sync - // .timestamp_per_device - // .read() - // .await - // .keys() - // .cloned() - // .collect::>(); - - // // obtains a lock on the timestamp collections for the instances we have - - // debug!( - // total_operations = req_adds.len(), - // "Preparing to send instance's operations to cloud;" - // ); - - // // gets new operations for each instance to send to cloud - // for req_add in req_adds { - // let ops = err_break!( - // sync.get_instance_ops( - // 1000, - // req_add.instance_uuid, - // NTP64( - // req_add - // .from_time - // .unwrap_or_else(|| "0".to_string()) - // .parse() - // .expect("couldn't parse ntp64 value"), - // ) - // ) - // .await - // ); - - // if ops.is_empty() { - // continue; - // } - - // let start_time = ops[0].timestamp.0.to_string(); - // let end_time = ops[ops.len() - 1].timestamp.0.to_string(); - - // let ops_len = ops.len(); - - // use base64::prelude::*; - - // debug!(instance_id = %req_add.instance_uuid, 
%start_time, %end_time); - - // instances.push(do_add::Input { - // uuid: req_add.instance_uuid, - // key: req_add.key, - // start_time, - // end_time, - // contents: BASE64_STANDARD.encode( - // rmp_serde::to_vec_named(&CompressedCRDTOperations::new(ops)) - // .expect("CompressedCRDTOperation should serialize!"), - // ), - // ops_count: ops_len, - // }) - // } - - // if instances.is_empty() { - // break; - // } - - // // uses lock we acquired earlier to send the operations to the cloud - // err_break!( - // do_add( - // cloud_api_config_provider.get_request_config().await, - // library_id, - // instances, - // ) - // .await - // ); - // } - - // is_active.store(false, Ordering::Relaxed); - // state_notify.notify_waiters(); - - // if let RaceNotifiedOrStopped::Stopped = ( - // // recreate subscription each time so that existing messages are dropped - // wait_notification(sync.subscribe()), - // stop.into_future().map(|()| RaceNotifiedOrStopped::Stopped), - // ) - // .race() - // .await - // { - // break; - // } - - sleep(Duration::from_millis(1000)).await; - } -} - -async fn wait_notification(mut rx: broadcast::Receiver) -> RaceNotifiedOrStopped { - // wait until Created message comes in - loop { - if let Ok(SyncEvent::Created) = rx.recv().await { - break; - }; - } - - RaceNotifiedOrStopped::Notified -} diff --git a/core/src/context.rs b/core/src/context.rs index 3a3f23ee0..519034d30 100644 --- a/core/src/context.rs +++ b/core/src/context.rs @@ -50,7 +50,7 @@ impl OuterContext for NodeContext { &self.library.db } - fn sync(&self) -> &Arc { + fn sync(&self) -> &SyncManager { &self.library.sync } @@ -97,7 +97,7 @@ impl OuterContext for JobContext &Arc { + fn sync(&self) -> &SyncManager { self.outer_ctx.sync() } diff --git a/core/src/lib.rs b/core/src/lib.rs index 44686e23e..f204e36e7 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -37,7 +37,6 @@ use tracing_subscriber::{ }; pub mod api; -mod cloud; mod context; pub mod custom_uri; pub mod library; diff --git 
a/core/src/library/library.rs b/core/src/library/library.rs index 7423c870a..cf98603ae 100644 --- a/core/src/library/library.rs +++ b/core/src/library/library.rs @@ -1,10 +1,14 @@ -use crate::{api::CoreEvent, cloud, Node}; +use crate::{api::CoreEvent, Node}; +use sd_core_cloud_services::{declare_cloud_sync, CloudSyncActors, CloudSyncActorsState}; use sd_core_file_path_helper::IsolatedFilePathData; use sd_core_heavy_lifting::media_processor::ThumbnailKind; use sd_core_prisma_helpers::{file_path_to_full_path, CasId}; - use sd_core_sync::SyncManager; + +use sd_actors::ActorsCollection; +use sd_cloud_schema::sync::groups; +use sd_crypto::{CryptoRng, SeedableRng}; use sd_p2p::Identity; use sd_prisma::prisma::{file_path, location, PrismaClient}; use sd_utils::{db::maybe_missing, error::FileIOError}; @@ -30,8 +34,8 @@ pub struct Library { config: RwLock, /// db holds the database client for the current library. pub db: Arc, - pub sync: Arc, - pub cloud: cloud::State, + pub sync: SyncManager, + /// key manager that provides encryption keys to functions that require them // pub key_manager: Arc, /// p2p identity @@ -46,7 +50,8 @@ pub struct Library { // TODO(@Oscar): Get rid of this with the new invalidation system. 
event_bus_tx: broadcast::Sender, - pub actors: Arc, + pub cloud_sync_state: CloudSyncActorsState, + pub cloud_sync_actors: ActorsCollection, } impl Debug for Library { @@ -71,27 +76,44 @@ impl Library { identity: Arc, db: Arc, node: &Arc, - sync: Arc, - cloud: cloud::State, + sync: SyncManager, do_cloud_sync: broadcast::Sender<()>, - actors: Arc, ) -> Arc { Arc::new(Self { id, config: RwLock::new(config), sync, - cloud, db: db.clone(), - // key_manager, identity, // orphan_remover: OrphanRemoverActor::spawn(db), instance_uuid, do_cloud_sync, event_bus_tx: node.event_bus.0.clone(), - actors, + cloud_sync_state: CloudSyncActorsState::default(), + cloud_sync_actors: ActorsCollection::default(), }) } + pub async fn init_cloud_sync(&self, node: &Node, sync_group_pub_id: groups::PubId) { + let rng = CryptoRng::from_seed(node.master_rng.lock().await.generate_fixed()); + + declare_cloud_sync( + node.cloud_services.clone(), + &self.cloud_sync_actors, + &self.cloud_sync_state, + sync_group_pub_id, + self.sync.clone(), + Arc::clone(&self.db), + rng, + ) + .await; + + // TODO(@fogodev): Uncomment when they're ready + // self.cloud_sync_actors.start(CloudSyncActors::Sender).await; + // self.cloud_sync_actors.start(CloudSyncActors::Receiver).await; + // self.cloud_sync_actors.start(CloudSyncActors::Ingester).await; + } + pub async fn config(&self) -> LibraryConfig { self.config.read().await.clone() } diff --git a/core/src/library/manager/mod.rs b/core/src/library/manager/mod.rs index d5da65a6e..275061989 100644 --- a/core/src/library/manager/mod.rs +++ b/core/src/library/manager/mod.rs @@ -548,35 +548,16 @@ impl Libraries { // TODO: Move this reconciliation into P2P and do reconciliation of both local and remote nodes. 
- let actors = Default::default(); - let (sync, sync_rx) = SyncManager::with_existing_devices( Arc::clone(&db), &device_pub_id, Arc::clone(&config.generate_sync_operations), &devices, - Arc::clone(&actors), ) .await?; - let sync_manager = Arc::new(sync); - - let cloud = crate::cloud::start(node, &actors, id, instance_id, &sync_manager, &db).await; let (tx, mut rx) = broadcast::channel(10); - let library = Library::new( - id, - config, - instance_id, - identity, - // key_manager, - db, - node, - sync_manager, - cloud, - tx, - actors, - ) - .await; + let library = Library::new(id, config, instance_id, identity, db, node, sync, tx).await; // This is an exception. Generally subscribe to this by `self.tx.subscribe`. spawn(sync_rx_actor(library.clone(), node.clone(), sync_rx)); diff --git a/core/src/p2p/sync/mod.rs b/core/src/p2p/sync/mod.rs index f1255d144..f14a19689 100644 --- a/core/src/p2p/sync/mod.rs +++ b/core/src/p2p/sync/mod.rs @@ -88,7 +88,7 @@ mod originator { #[instrument(skip(sync, p2p))] /// REMEMBER: This only syncs one direction! 
- pub async fn run(library: Arc, sync: &Arc, p2p: &Arc) { + pub async fn run(library: Arc, sync: &SyncManager, p2p: &Arc) { for (remote_identity, peer) in p2p.get_library_instances(&library.id) { if !peer.is_connected() { continue; @@ -203,65 +203,65 @@ mod responder { stream: &mut (impl AsyncRead + AsyncWrite + Unpin), library: Arc, ) -> Result<(), ()> { - use sync::ingest::*; + // use sync::ingest::*; - let ingest = &library.sync.ingest; + // let ingest = &library.sync.ingest; - ingest.event_tx.send(Event::Notification).await.unwrap(); + // ingest.event_tx.send(Event::Notification).await.unwrap(); - let mut rx = pin!(ingest.req_rx.clone()); + // let mut rx = pin!(ingest.req_rx.clone()); - while let Some(req) = rx.next().await { - const OPS_PER_REQUEST: u32 = 1000; + // while let Some(req) = rx.next().await { + // const OPS_PER_REQUEST: u32 = 1000; - let timestamps = match req { - Request::FinishedIngesting => break, - Request::Messages { timestamps, .. } => timestamps, - }; + // let timestamps = match req { + // Request::FinishedIngesting => break, + // Request::Messages { timestamps, .. 
} => timestamps, + // }; - debug!(?timestamps, "Getting ops for timestamps;"); + // debug!(?timestamps, "Getting ops for timestamps;"); - stream - .write_all( - &tx::MainRequest::GetOperations(sync::GetOpsArgs { - timestamp_per_device: timestamps, - count: OPS_PER_REQUEST, - }) - .to_bytes(), - ) - .await - .unwrap(); - stream.flush().await.unwrap(); + // stream + // .write_all( + // &tx::MainRequest::GetOperations(sync::GetOpsArgs { + // timestamp_per_device: timestamps, + // count: OPS_PER_REQUEST, + // }) + // .to_bytes(), + // ) + // .await + // .unwrap(); + // stream.flush().await.unwrap(); - let rx::Operations(ops) = rx::Operations::from_stream(stream).await.unwrap(); + // let rx::Operations(ops) = rx::Operations::from_stream(stream).await.unwrap(); - let (wait_tx, wait_rx) = tokio::sync::oneshot::channel::<()>(); + // let (wait_tx, wait_rx) = tokio::sync::oneshot::channel::<()>(); - // FIXME: If there are exactly a multiple of OPS_PER_REQUEST operations, - // then this will bug, as we sent `has_more` as true, but we don't have - // more operations to send. + // // FIXME: If there are exactly a multiple of OPS_PER_REQUEST operations, + // // then this will bug, as we sent `has_more` as true, but we don't have + // // more operations to send. 
- ingest - .event_tx - .send(Event::Messages(MessagesEvent { - device_pub_id: library.sync.device_pub_id.clone(), - has_more: ops.len() == OPS_PER_REQUEST as usize, - messages: ops, - wait_tx: Some(wait_tx), - })) - .await - .expect("TODO: Handle ingest channel closed, so we don't loose ops"); + // ingest + // .event_tx + // .send(Event::Messages(MessagesEvent { + // device_pub_id: library.sync.device_pub_id.clone(), + // has_more: ops.len() == OPS_PER_REQUEST as usize, + // messages: ops, + // wait_tx: Some(wait_tx), + // })) + // .await + // .expect("TODO: Handle ingest channel closed, so we don't loose ops"); - wait_rx.await.unwrap() - } + // wait_rx.await.unwrap() + // } - debug!("Sync responder done"); + // debug!("Sync responder done"); - stream - .write_all(&tx::MainRequest::Done.to_bytes()) - .await - .unwrap(); - stream.flush().await.unwrap(); + // stream + // .write_all(&tx::MainRequest::Done.to_bytes()) + // .await + // .unwrap(); + // stream.flush().await.unwrap(); Ok(()) } diff --git a/crates/actors/src/lib.rs b/crates/actors/src/lib.rs index 9c9c263fb..0890e8684 100644 --- a/crates/actors/src/lib.rs +++ b/crates/actors/src/lib.rs @@ -29,7 +29,9 @@ use std::{ collections::HashMap, + fmt, future::{Future, IntoFuture}, + hash::Hash, panic::{panic_any, AssertUnwindSafe}, pin::Pin, sync::{ @@ -62,25 +64,29 @@ pub struct Actor { stop_rx: chan::Receiver<()>, } -pub struct Actors { +/// Actors holder, holds all actors for some generic purpose, like for cloud sync. +/// You should use an enum to identify the actors. 
+pub struct ActorsCollection { pub invalidate_rx: broadcast::Receiver<()>, invalidate_tx: broadcast::Sender<()>, - actors: Arc>>, + actors: Arc>>, } -impl Actors { +impl ActorsCollection +where + Id: Hash + Eq + fmt::Debug + fmt::Display + Copy + Send + Sync + 'static, +{ pub async fn declare( - self: &Arc, - name: &'static str, + &self, + identifier: Id, actor_fn: impl FnOnce(Stopper) -> Fut + Send + Sync + Clone + 'static, - autostart: bool, ) where Fut: Future + Send + 'static, { let (stop_tx, stop_rx) = chan::bounded(1); self.actors.write().await.insert( - name, + identifier, Actor { spawn_fn: Arc::new(move |stop| Box::pin((actor_fn.clone())(stop))), maybe_handle: None, @@ -89,15 +95,11 @@ impl Actors { stop_rx, }, ); - - if autostart { - self.start(name).await; - } } #[instrument(skip(self))] - pub async fn start(self: &Arc, name: &str) { - if let Some(actor) = self.actors.write().await.get_mut(name) { + pub async fn start(&self, identifier: Id) { + if let Some(actor) = self.actors.write().await.get_mut(&identifier) { if actor.is_running.load(Ordering::Acquire) { warn!("Actor already running!"); return; @@ -146,8 +148,8 @@ impl Actors { } #[instrument(skip(self))] - pub async fn stop(self: &Arc, name: &str) { - if let Some(actor) = self.actors.write().await.get_mut(name) { + pub async fn stop(&self, identifier: Id) { + if let Some(actor) = self.actors.write().await.get_mut(&identifier) { if !actor.is_running.load(Ordering::Acquire) { warn!("Actor already stopped!"); return; @@ -172,12 +174,17 @@ impl Actors { .read() .await .iter() - .map(|(&name, actor)| (name.to_string(), actor.is_running.load(Ordering::Relaxed))) + .map(|(&identifier, actor)| { + ( + identifier.to_string(), + actor.is_running.load(Ordering::Relaxed), + ) + }) .collect() } } -impl Default for Actors { +impl Default for ActorsCollection { fn default() -> Self { let (invalidate_tx, invalidate_rx) = broadcast::channel(1); @@ -189,6 +196,16 @@ impl Default for Actors { } } +impl Clone for 
ActorsCollection { + fn clone(&self) -> Self { + Self { + actors: Arc::clone(&self.actors), + invalidate_rx: self.invalidate_rx.resubscribe(), + invalidate_tx: self.invalidate_tx.clone(), + } + } +} + pub struct Stopper(chan::Receiver<()>); impl Stopper { diff --git a/crates/ai/src/old_image_labeler/old_actor.rs b/crates/ai/src/old_image_labeler/old_actor.rs index 108f0dfa6..aa6ff276d 100644 --- a/crates/ai/src/old_image_labeler/old_actor.rs +++ b/crates/ai/src/old_image_labeler/old_actor.rs @@ -39,7 +39,7 @@ const PENDING_BATCHES_FILE: &str = "pending_image_labeler_batches.bin"; type ResumeBatchRequest = ( BatchToken, Arc, - Arc, + SyncManager, oneshot::Sender, ImageLabelerError>>, ); @@ -56,7 +56,7 @@ pub(super) struct Batch { pub(super) output_tx: chan::Sender, pub(super) is_resumable: bool, pub(super) db: Arc, - pub(super) sync: Arc, + pub(super) sync: SyncManager, } #[derive(Serialize, Deserialize, Debug)] @@ -169,7 +169,7 @@ impl OldImageLabeler { location_path: PathBuf, file_paths: Vec, db: Arc, - sync: Arc, + sync: SyncManager, is_resumable: bool, ) -> (BatchToken, chan::Receiver) { let (tx, rx) = chan::bounded(usize::max(file_paths.len(), 1)); @@ -207,7 +207,7 @@ impl OldImageLabeler { location_path: PathBuf, file_paths: Vec, db: Arc, - sync: Arc, + sync: SyncManager, ) -> chan::Receiver { self.new_batch_inner(location_id, location_path, file_paths, db, sync, false) .await @@ -221,7 +221,7 @@ impl OldImageLabeler { location_path: PathBuf, file_paths: Vec, db: Arc, - sync: Arc, + sync: SyncManager, ) -> (BatchToken, chan::Receiver) { self.new_batch_inner(location_id, location_path, file_paths, db, sync, true) .await @@ -292,7 +292,7 @@ impl OldImageLabeler { &self, token: BatchToken, db: Arc, - sync: Arc, + sync: SyncManager, ) -> Result, ImageLabelerError> { let (tx, rx) = oneshot::channel(); @@ -345,7 +345,7 @@ async fn actor_loop( ResumeBatch( BatchToken, Arc, - Arc, + SyncManager, oneshot::Sender, ImageLabelerError>>, ), UpdateModel( diff --git 
a/crates/ai/src/old_image_labeler/process.rs b/crates/ai/src/old_image_labeler/process.rs index a6554b35b..c528bfca7 100644 --- a/crates/ai/src/old_image_labeler/process.rs +++ b/crates/ai/src/old_image_labeler/process.rs @@ -301,7 +301,7 @@ async fn spawned_process_single_file( chan::Sender, ), db: Arc, - sync: Arc, + sync: SyncManager, _permit: OwnedSemaphorePermit, ) { let image = diff --git a/crates/crypto/src/cloud/encrypt.rs b/crates/crypto/src/cloud/encrypt.rs index efdcd670a..a11dcb46e 100644 --- a/crates/crypto/src/cloud/encrypt.rs +++ b/crates/crypto/src/cloud/encrypt.rs @@ -5,7 +5,7 @@ use crate::{ use aead::{stream::EncryptorLE31, Aead, KeyInit}; use async_stream::stream; -use chacha20poly1305::{XChaCha20Poly1305, XNonce}; +use chacha20poly1305::{Tag, XChaCha20Poly1305, XNonce}; use futures::Stream; use rand::CryptoRng; use tokio::io::{AsyncBufReadExt, AsyncRead, BufReader}; @@ -25,6 +25,13 @@ pub trait StreamEncryption { StreamNonce, impl Stream, Error>> + Send, ); + + fn cipher_text_size(&self, plain_text_size: usize) -> usize { + size_of::() + + (plain_text_size / EncryptedBlock::PLAIN_TEXT_SIZE * EncryptedBlock::CIPHER_TEXT_SIZE) + + plain_text_size % EncryptedBlock::PLAIN_TEXT_SIZE + + size_of::() + } } impl OneShotEncryption for SecretKey { diff --git a/crates/crypto/src/cloud/mod.rs b/crates/crypto/src/cloud/mod.rs index 4a09a47d8..6fc76f574 100644 --- a/crates/crypto/src/cloud/mod.rs +++ b/crates/crypto/src/cloud/mod.rs @@ -1,3 +1,7 @@ pub mod decrypt; pub mod encrypt; pub mod secret_key; + +pub use decrypt::{OneShotDecryption, StreamDecryption}; +pub use encrypt::{OneShotEncryption, StreamEncryption}; +pub use secret_key::SecretKey; diff --git a/crates/crypto/src/primitives.rs b/crates/crypto/src/primitives.rs index efa7bd33b..ffe8577c8 100644 --- a/crates/crypto/src/primitives.rs +++ b/crates/crypto/src/primitives.rs @@ -3,10 +3,11 @@ // DO NOT EDIT THIS FILE. 
IF THESE CONSTANTS CHANGE, THINGS CAN (AND PROBABLY WILL) BREAK use aead::stream::{Nonce, StreamLE31}; -use chacha20poly1305::{Tag, XChaCha20Poly1305, XNonce}; +use chacha20poly1305::{XChaCha20Poly1305, XNonce}; pub type OneShotNonce = XNonce; pub type StreamNonce = Nonce>; +pub use chacha20poly1305::Tag; #[derive(Debug, Clone)] pub struct EncryptedBlock { diff --git a/crates/crypto/src/rng/csprng.rs b/crates/crypto/src/rng/csprng.rs index d65121724..2d38ef6e0 100644 --- a/crates/crypto/src/rng/csprng.rs +++ b/crates/crypto/src/rng/csprng.rs @@ -9,7 +9,7 @@ use zeroize::{Zeroize, Zeroizing}; /// /// On `Drop`, it re-seeds the inner RNG, erasing the previous state and making all future /// values unpredictable. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct CryptoRng(ChaCha20Rng); impl CryptoRng { diff --git a/crates/sync/src/factory.rs b/crates/sync/src/factory.rs index 065c665e3..9fed9a52f 100644 --- a/crates/sync/src/factory.rs +++ b/crates/sync/src/factory.rs @@ -5,18 +5,6 @@ use crate::{ use uhlc::HLC; -macro_rules! 
msgpack { - (nil) => { - ::rmpv::Value::Nil - }; - ($e:expr) => {{ - let bytes = rmp_serde::to_vec_named(&$e).expect("failed to serialize msgpack"); - let value: rmpv::Value = rmp_serde::from_slice(&bytes).expect("failed to deserialize msgpack"); - - value - }} -} - pub trait OperationFactory { fn get_clock(&self) -> &HLC; @@ -31,7 +19,10 @@ pub trait OperationFactory { device_pub_id: self.get_device_pub_id(), timestamp: *self.get_clock().new_timestamp().get_time(), model_id: ::MODEL_ID, - record_id: msgpack!(id), + record_id: rmp_serde::from_slice::( + &rmp_serde::to_vec_named(id).expect("failed to serialize record id to msgpack"), + ) + .expect("failed to deserialize record id to msgpack value"), data, } } From 33a75c3b3471975ee697030e7275d22a6c6a03fa Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 30 Aug 2024 07:36:11 -0400 Subject: [PATCH 099/218] Update Tauri Version & Try fixing CORS issues --- Cargo.lock | Bin 329465 -> 329465 bytes Cargo.toml | 2 +- apps/desktop/crates/linux/Cargo.toml | 2 +- apps/desktop/src-tauri/Cargo.toml | 20 +- .../src-tauri/capabilities/default.json | 15 +- apps/desktop/src-tauri/src/main.rs | 2 +- apps/desktop/src-tauri/tauri.conf.json | 33 +++- apps/desktop/src/App.tsx | 5 +- core/src/custom_uri/utils.rs | 12 +- .../settings/client/account/Profile.tsx | 2 +- .../settings/client/account/index.tsx | 186 +++++++++--------- 11 files changed, 154 insertions(+), 125 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2aa8fbff46b566151ff90bdaa7cd5c57241d0b03..dd1a80e07e99880fa558f51a130289bf1d4e61b9 100644 GIT binary patch delta 1753 zcmbtVONbps6y?3wW@ZvU`Hb)7BgsgF2!g45Z+(T#_{mmyQNe{osJd0LS~^N zk|6Y=5Q0B+DI_H3Z3S^9h`Mp3k_3Z-CE#=zmrq?B zv`vi4QAt^Qb@=ve_EBe3ys=zrCXAuDny65OQ2Vd)du79j&x?>R@s9<*%c>+;8ljt`$$(Ftc^GRImkYvQSx7Ls}(WoKx2M1O?dMws*tM-F}w~!H3M&k&oESYLbIs-t?%4#L} zj@Se%$629A8%0)U->=^@=~sSPXb&Igj<&a#y60{UIhU_wjfw@>Q#ApzswJQ)HE8Xs 
zo{v+b3st3Di^xJ1oE*woE-mJNsB9l@?>=gOEOpbhXp^y41!FisOlZiO`L@A0@40v_ z!WQL{2UV=mBm}7h;%Lo66K~1$+m7zq793I)v*iFvh6=*H4mN1SqS+}|9cPvmY%bQu zsEi3Bi@Z$~oFTpqx2L^%SNCz-cz$r=SkO@vNUA7?OyD8-8dKHt5tWRGHReLL3;C%635e!jcA*zn5m#A=YU=I)aZNQCAPNS4OJ0*g73g_4S@6tWmh zA$o;SVil?#q|nZH!*P3TXZL4+bwvy=E0iMnDmfd%$_0^78!Calptu}ZTv!XBVV#1K zN=QN^7GW2$t#5Yw+T~r{7yWl@(^ZL!ER}PWOn6k{;Zl>4oy2^kb@4^l;G|v@XOH9} zRq+F$h+Y!(n`b9&x~IFmrABp1dvCPlNU}?bjh91>s>*m2cZ8G_1O?z@m6!`IiTly2 z93@h|4Za;7YU=1{sr|IvU2oHiqiyYnd%MTyAv|V8|Kzon#EHe2NCT5}0Tr;xvj_y@ zQY~8PLfTuWO05Y|8i`UncdeVW&&S<2w*pO(xk?727(_t^@|cU0N#}9Q5^QOnm`J8& zPA-aF6({MaLL^b`&tICfM=x}X?Ohr!&Ys-cO$VT(h^!5wK79KYU6vXnEfUR%Fj@<~ zWR6vUwvvbcrf#uadU5nXJ9&Jx&<^eIKK-9?FU<2RwM9A9;#~>eBy?nhjWUWjCP`#N zh`vJ7IkT?eM delta 1777 zcmbtVONd-W6s5agoJpb*O*U3l)_F zrI8{FKMP$dhD2LjNI+Z(LEH<87!*V?E6qwzrYB^ti+)`^`klV#bI-ece0ur#^yryU zKc=m(+EX#!?7w!5xk`05G!v5#HWz~%L6S06X2-r;o$azqvpolP&MzGsmFxSvhex-+ zzHwqS-#9V8_w)Y-*^mA+n)dHs!)5OqlO6Nvp~>_29biSaX)D^0%~sfxufCd`N)D|_ za;~zAs-Lj!ntHB%t(Gh3xw0LdhVST}2wGKq2$MO>Y8q8dkG zt%a0J#-ok87!#^uREw$v8Ee`Z*qL4X9$I&uT zRkb#VlbXiJI&&?`;YqU)6bkGc^}D|st*<4Dk&I#+U8z<%ty=-5Bs!9~7%ch5@JUOJ ztR#UFS<A4T@T4lo60>m% z=@SQob@GXgbwR^$TPRX?mU*_ZZHxIwJBE+@r*Fb~TXXcyWpd33H{&?yVq0suWG}Jc z1Yl)zG&zR~LN3{;yvZSH?Kd6zW9M*jfyRYwaO9IG!Y7BEp})g+sH(va*0K$Z*{f{@ zxrkS8Irbc+1X_3lxZVA&8-`C7m{c2QLs>VIkxxxb!dx6|mMGaqA+}Ua@{F*u*q}uD z0ul@%xTiYWV*Trl!kkgIBiOKKU*s|(v2j5O@AjnI~2MPPl6rV{WA zbee6vvf3ZsHT<)tj+`j=d-dzrjfUaqeGfZNT>?{Qel&n?fMV~ z{PCvY+yX8d6&n|8N|M4F0mh!N=H_cq87zY>344T_PM0i%fCWz3dYw|r{leRTTi-pL z-;ys=(|AsYYCsd&;anSv0%A%jnT@Wcg`f%Kfd%JEiBUU>9??|Zm)XW$EB)Z3wsd-{`yCQJQ} zJ;T}m3HQ{3XH#fVlY&7h#Y$-9h;DSjD<5M@4KJ8b&^7Q3Hl?Q68P+vfh( z2X7l*;t}T7qfS1xAce_J~-qj5W^a3+WfW RL}bqXJ=r!ta(Fm-`d{0X3_kz> diff --git a/Cargo.toml b/Cargo.toml index 9b9010c13..df49d73c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -89,7 +89,7 @@ rev = "a92c17d3f8" # We hack it to the high heavens [patch.crates-io.rspc] git = "https://github.com/spacedriveapp/rspc.git" -rev = "37d175c272" +rev = 
"a073ebd480" # Add `Control::open_stream_with_addrs` [patch.crates-io.libp2p] diff --git a/apps/desktop/crates/linux/Cargo.toml b/apps/desktop/crates/linux/Cargo.toml index 9c4755b73..156560178 100644 --- a/apps/desktop/crates/linux/Cargo.toml +++ b/apps/desktop/crates/linux/Cargo.toml @@ -13,5 +13,5 @@ tokio = { workspace = true, features = ["fs"] } [target.'cfg(target_os = "linux")'.dependencies] wgpu = { version = "22.1", default-features = false } # WARNING: gtk should follow the same version used by tauri -# https://github.com/tauri-apps/tauri/blob/tauri-v2.0.0-rc.2/core/tauri/Cargo.toml +# https://github.com/tauri-apps/tauri/blob/tauri-v2.0.0-rc.8/crates/tauri/Cargo.toml#L102 gtk = { version = "0.18", features = ["v3_24"] } diff --git a/apps/desktop/src-tauri/Cargo.toml b/apps/desktop/src-tauri/Cargo.toml index c09575569..eb1e95ade 100644 --- a/apps/desktop/src-tauri/Cargo.toml +++ b/apps/desktop/src-tauri/Cargo.toml @@ -36,21 +36,21 @@ uuid = { workspace = true, features = ["serde"] } # WARNING: Do NOT enable default features, as that vendors dbus (see below) opener = { version = "0.7.1", features = ["reveal"], default-features = false } specta-typescript = "=0.0.7" -tauri-plugin-deep-link = "=2.0.0-rc.0" -tauri-plugin-dialog = "=2.0.0-rc.0" -tauri-plugin-http = "=2.0.0-rc.0" -tauri-plugin-os = "=2.0.0-rc.0" -tauri-plugin-shell = "=2.0.0-rc.0" -tauri-plugin-updater = "=2.0.0-rc.0" +tauri-plugin-deep-link = "=2.0.0-rc.2" +tauri-plugin-dialog = "=2.0.0-rc.2" +tauri-plugin-http = "=2.0.0-rc.1" +tauri-plugin-os = "=2.0.0-rc.1" +tauri-plugin-shell = "=2.0.0-rc.3" +tauri-plugin-updater = "=2.0.0-rc.2" [dependencies.tauri] features = ["linux-libxdo", "macos-private-api", "native-tls-vendored", "unstable"] -version = "=2.0.0-rc.6" +version = "=2.0.0-rc.8" [dependencies.tauri-specta] features = ["derive", "typescript"] git = "https://github.com/spacedriveapp/tauri-specta" -rev = "e61a15ed78" +rev = "4a53c45224" [target.'cfg(target_os = "linux")'.dependencies] # 
Spacedrive Sub-crates @@ -59,7 +59,7 @@ sd-desktop-linux = { path = "../crates/linux" } # Specific Desktop dependencies # WARNING: dbus must NOT be vendored, as that breaks the app on Linux,X11,Nvidia dbus = { version = "0.9.7", features = ["stdfd"] } -# https://github.com/tauri-apps/tauri/blob/tauri-v2.0.0-rc.2/core/tauri/Cargo.toml#L86 +# https://github.com/tauri-apps/tauri/blob/tauri-v2.0.0-rc.8/crates/tauri/Cargo.toml#L102 webkit2gtk = { version = "=2.0.1", features = ["v2_38"] } [target.'cfg(target_os = "macos")'.dependencies] @@ -72,7 +72,7 @@ sd-desktop-windows = { path = "../crates/windows" } [build-dependencies] # Specific Desktop dependencies -tauri-build = "=2.0.0-rc.6" +tauri-build = "=2.0.0-rc.7" [features] ai-models = ["sd-core/ai"] diff --git a/apps/desktop/src-tauri/capabilities/default.json b/apps/desktop/src-tauri/capabilities/default.json index e3b199d7a..1a0bfa998 100644 --- a/apps/desktop/src-tauri/capabilities/default.json +++ b/apps/desktop/src-tauri/capabilities/default.json @@ -2,7 +2,9 @@ "$schema": "../gen/schemas/desktop-schema.json", "identifier": "default", "description": "Capability for the main window", - "windows": ["main"], + "windows": [ + "main" + ], "permissions": [ "core:app:default", "core:event:default", @@ -30,16 +32,19 @@ "identifier": "http:default", "allow": [ { - "url": "https://**" + "url": "http://ipc.localhost" }, { - "url": "http://**" + "url": "http://asset.localhost" }, { - "url": "http://localhost:9420/*" + "url": "http://tauri.localhost" }, { - "url": "https://plausible.io/*" + "url": "http://localhost:9420" + }, + { + "url": "https://plausible.io" } ] } diff --git a/apps/desktop/src-tauri/src/main.rs b/apps/desktop/src-tauri/src/main.rs index 58e781fd6..26d33ca65 100644 --- a/apps/desktop/src-tauri/src/main.rs +++ b/apps/desktop/src-tauri/src/main.rs @@ -222,6 +222,7 @@ async fn main() -> tauri::Result<()> { tauri::Builder::default() .invoke_handler(builder.invoke_handler()) + .plugin(tauri_plugin_http::init()) 
.plugin(tauri_plugin_deep_link::init()) .setup(move |app| { // We need a the app handle to determine the data directory now. @@ -363,7 +364,6 @@ async fn main() -> tauri::Result<()> { .plugin(tauri_plugin_dialog::init()) .plugin(tauri_plugin_os::init()) .plugin(tauri_plugin_shell::init()) - .plugin(tauri_plugin_http::init()) // TODO: Bring back Tauri Plugin Window State - it was buggy so we removed it. .plugin(tauri_plugin_updater::Builder::new().build()) .plugin(updater::plugin()) diff --git a/apps/desktop/src-tauri/tauri.conf.json b/apps/desktop/src-tauri/tauri.conf.json index 2c32c8786..24bc071a5 100644 --- a/apps/desktop/src-tauri/tauri.conf.json +++ b/apps/desktop/src-tauri/tauri.conf.json @@ -1,5 +1,5 @@ { - "$schema": "https://raw.githubusercontent.com/tauri-apps/tauri/tauri-v2.0.0-rc.2/core/tauri-config-schema/schema.json", + "$schema": "https://raw.githubusercontent.com/tauri-apps/tauri/tauri-v2.0.0-rc.8/crates/tauri-cli/tauri.config.schema.json", "productName": "Spacedrive", "identifier": "com.spacedrive.desktop", "build": { @@ -29,19 +29,30 @@ "transparent": true, "center": true, "windowEffects": { - "effects": ["sidebar"], + "effects": [ + "sidebar" + ], "state": "followsWindowActiveState", "radius": 9 } } ], "security": { - "csp": "default-src 'self' webkit-pdfjs-viewer: asset: https://asset.localhost blob: data: filesystem: ws: wss: http: https: tauri: 'unsafe-eval' 'unsafe-inline'; img-src 'self' data:; connect-src 'self' tauri://localhost http://localhost:9420 https://plausible.io ipc: http://ipc.localhost;" + "csp": { + "default-src": "'self' webkit-pdfjs-viewer: asset: https://asset.localhost blob: data: filesystem: http: https: tauri:", + "connect-src": "'self' ipc: http://ipc.localhost ws: wss: http: https: tauri:", + "img-src": "'self' asset: https://asset.localhost blob: data: filesystem: http: https: tauri:", + "style-src": "'self' 'unsafe-inline' http: https: tauri:" + } } }, "bundle": { "active": true, - "targets": ["deb", "msi", "dmg"], + 
"targets": [ + "deb", + "msi", + "dmg" + ], "publisher": "Spacedrive Technology Inc.", "copyright": "Spacedrive Technology Inc.", "category": "Productivity", @@ -60,14 +71,20 @@ "files": { "/usr/share/spacedrive/models/yolov8s.onnx": "../../.deps/models/yolov8s.onnx" }, - "depends": ["libc6", "libxdo3", "dbus"] + "depends": [ + "libc6", + "libxdo3", + "dbus" + ] } }, "macOS": { "minimumSystemVersion": "10.15", "exceptionDomain": null, "entitlements": null, - "frameworks": ["../../.deps/Spacedrive.framework"], + "frameworks": [ + "../../.deps/Spacedrive.framework" + ], "dmg": { "background": "dmg-background.png", "appPosition": { @@ -104,7 +121,9 @@ "deep-link": { "mobile": [], "desktop": { - "schemes": ["spacedrive"] + "schemes": [ + "spacedrive" + ] } } } diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx index b47bb3b81..e28dc850e 100644 --- a/apps/desktop/src/App.tsx +++ b/apps/desktop/src/App.tsx @@ -46,7 +46,7 @@ SuperTokens.init({ appInfo: { apiDomain: 'http://localhost:9420', apiBasePath: '/api/auth', - appName: 'Spacedrive Auth Service' + appName: 'Spacedrive Auth Service' }, cookieHandler: getCookieHandler, windowHandler: getWindowHandler, @@ -61,7 +61,8 @@ const startupError = (window as any).__SD_ERROR__ as string | undefined; //Set global fetch to use tauri fetch // If the build in in production mode, we need to set the global fetch to use the tauri fetch -if (import.meta.env.DEV === false) globalThis.fetch = fetch; +// console.log('import.meta.env.DEV', import.meta.env.DEV); +globalThis.fetch = fetch; export default function App() { useEffect(() => { diff --git a/core/src/custom_uri/utils.rs b/core/src/custom_uri/utils.rs index 645da5106..2dc5014b9 100644 --- a/core/src/custom_uri/utils.rs +++ b/core/src/custom_uri/utils.rs @@ -50,7 +50,10 @@ pub(crate) async fn cors_middleware(req: Request, next: Next) -> Respon if req.method() == Method::OPTIONS { return Response::builder() .header("Access-Control-Allow-Methods", "GET, HEAD, POST, 
OPTIONS") - .header("Access-Control-Allow-Origin", "*") + .header( + "Access-Control-Allow-Origin", + "http://localhost:9420, http://ipc.localhost, http://tauri.localhost", + ) .header("Access-Control-Allow-Headers", "*") .header("Access-Control-Max-Age", "86400") .status(StatusCode::OK) @@ -65,7 +68,12 @@ pub(crate) async fn cors_middleware(req: Request, next: Next) -> Respon { let headers = response.headers_mut(); - headers.insert("Access-Control-Allow-Origin", HeaderValue::from_static("*")); + headers.insert( + "Access-Control-Allow-Origin", + HeaderValue::from_static( + "http://localhost:9420, http://ipc.localhost, http://tauri.localhost", + ), + ); headers.insert( "Access-Control-Allow-Headers", diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index ac32bf688..dd3ce1d6f 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -3,7 +3,7 @@ import { Card } from '@sd/ui'; import { TruncatedText } from '~/components'; import { AuthRequiredOverlay } from '~/components/AuthRequiredOverlay'; -const Profile = ({ email, authStore }: { email?: string; authStore: { status: string } }) => { +const Profile = ({ email }: { email?: string; }) => { const emailName = email?.split('@')[0]; const capitalizedEmailName = (emailName?.charAt(0).toUpperCase() ?? 
'') + emailName?.slice(1); diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index 236883694..795ea0d1b 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -18,14 +18,13 @@ type User = { export const Component = () => { const { t } = useLocale(); const [userInfo, setUserInfo] = useState(null); - const me = useBridgeQuery(['auth.me'], { retry: false }); - const authStore = auth.useStateSnapshot(); useEffect(() => { async function _() { const user_data = await fetch('http://localhost:9420/api/user', { method: 'GET' }); const data = await user_data.json(); + console.log("Data from user (auth API)", data); return data; } _().then((data) => { @@ -63,110 +62,107 @@ export const Component = () => { description={t('spacedrive_cloud_description')} />
- {userInfo === null ? ( - - ) : ( - - )} + {userInfo === null ? : }
- {useFeatureFlag('hostedLocations') && } + {/* {useFeatureFlag('hostedLocations') && } */} ); }; -function HostedLocationsPlayground() { - const locations = useBridgeQuery(['cloud.locations.list'], { retry: false }); +// Not supporting this feature for now +// function HostedLocationsPlayground() { +// const locations = useBridgeQuery(['cloud.locations.list'], { retry: false }); - const [locationName, setLocationName] = useState(''); - const [path, setPath] = useState(''); - const createLocation = useBridgeMutation('cloud.locations.create', { - onSuccess(data) { - // console.log('DATA', data); // TODO: Optimistic UI +// const [locationName, setLocationName] = useState(''); +// const [path, setPath] = useState(''); +// const createLocation = useBridgeMutation('cloud.locations.create', { +// onSuccess(data) { +// // console.log('DATA', data); // TODO: Optimistic UI - locations.refetch(); - setLocationName(''); - } - }); - const removeLocation = useBridgeMutation('cloud.locations.remove', { - onSuccess() { - // TODO: Optimistic UI +// locations.refetch(); +// setLocationName(''); +// } +// }); +// const removeLocation = useBridgeMutation('cloud.locations.remove', { +// onSuccess() { +// // TODO: Optimistic UI - locations.refetch(); - } - }); +// locations.refetch(); +// } +// }); - useEffect(() => { - if (path === '' && locations.data?.[0]) { - setPath(`location/${locations.data[0].id}/hello.txt`); - } - }, [path, locations.data]); +// useEffect(() => { +// if (path === '' && locations.data?.[0]) { +// setPath(`location/${locations.data[0].id}/hello.txt`); +// } +// }, [path, locations.data]); - const isLoading = createLocation.isLoading || removeLocation.isLoading; +// const isLoading = createLocation.isLoading || removeLocation.isLoading; - return ( - <> - - {/* TODO: We need UI for this. I wish I could use `prompt` for now but Tauri doesn't have it :( */} -
- setLocationName(e.currentTarget.value)} - placeholder="My sick location" - disabled={isLoading} - /> +// return ( +// <> +// +// {/* TODO: We need UI for this. I wish I could use `prompt` for now but Tauri doesn't have it :( */} +//
+// setLocationName(e.currentTarget.value)} +// placeholder="My sick location" +// disabled={isLoading} +// /> - -
-
- } - title="Hosted Locations" - description="Augment your local storage with our cloud!" - /> +// +//
+//
+// } +// title="Hosted Locations" +// description="Augment your local storage with our cloud!" +// /> - {/* TODO: Cleanup this mess + styles */} - {locations.status === 'loading' ?
Loading!
: null} - {locations.status !== 'loading' && locations.data?.length === 0 ? ( -
Looks like you don't have any!
- ) : ( -
- {locations.data?.map((location) => ( -
-

{location.name}

- -
- ))} -
- )} +// {/* TODO: Cleanup this mess + styles */} +// {locations.status === 'loading' ?
Loading!
: null} +// {locations.status !== 'loading' && locations.data?.length === 0 ? ( +//
Looks like you don't have any!
+// ) : ( +//
+// {locations.data?.map((location) => ( +//
+//

{location.name}

+// +//
+// ))} +//
+// )} -
-

Path to save when clicking 'Do the thing':

- setPath(e.currentTarget.value)} - disabled={isLoading} - /> -
- - ); -} +//
+//

Path to save when clicking 'Do the thing':

+// setPath(e.currentTarget.value)} +// disabled={isLoading} +// /> +//
+// +// ); +// } From 832c1fef1532caa0df4bfc3f177d8faa5639075c Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Wed, 4 Sep 2024 01:50:22 -0300 Subject: [PATCH 100/218] Receiver actor for cloud sync --- Cargo.lock | Bin 329465 -> 331764 bytes Cargo.toml | 1 + core/Cargo.toml | 2 +- core/crates/cloud-services/Cargo.toml | 24 +- core/crates/cloud-services/src/client.rs | 113 +-- core/crates/cloud-services/src/error.rs | 38 +- .../src/key_manager/key_store.rs | 6 +- core/crates/cloud-services/src/sync/mod.rs | 102 +-- .../crates/cloud-services/src/sync/receive.rs | 651 +++++++++----- core/crates/cloud-services/src/sync/send.rs | 445 +++++----- core/src/library/library.rs | 12 +- crates/actors/Cargo.toml | 1 + crates/actors/src/lib.rs | 160 +++- crates/crypto/src/cloud/decrypt.rs | 14 +- crates/crypto/src/cloud/secret_key.rs | 2 +- crates/crypto/src/crypto/mod.rs | 821 ------------------ crates/crypto/src/crypto/stream.rs | 314 ------- crates/crypto/src/lib.rs | 1 - crates/crypto/src/primitives.rs | 16 + packages/client/src/core.ts | 4 +- 20 files changed, 992 insertions(+), 1735 deletions(-) delete mode 100644 crates/crypto/src/crypto/mod.rs delete mode 100644 crates/crypto/src/crypto/stream.rs diff --git a/Cargo.lock b/Cargo.lock index dd1a80e07e99880fa558f51a130289bf1d4e61b9..c7064a8a1b33cc990135a86078d75bb1f7069a6b 100644 GIT binary patch delta 2239 zcmZuyYiw6%8PEGarxaR7t3v-2R_H(=#`Vv8zv!SNL9;0;bctcp5#Q^EbhNZEsBdvW=x@3x1Aaff&Cy1RRZ*V*a)WMpc3Wbb2>#t5K)A^lpDb49;g^aHD~8ui&+MN*ICaP$JTw&>DApm> zw$*peZW=_W+B7&=^)DOSsvOZ+U{8IJDQfANRZ?gklZe>aM70ck?H#B<_UBS6)Q}cif~C5M+nW9y`Bp%WX@1?fP&t z((IAzznjkA+>cfuk)P==9vszFVN0}7$pNx6hHz_B;Ig5TXk~enEa2!kG1xPzqVmdV zDkKn%MLzvTIZ*YpMY-odu{_^Dfeu3D^Ao6(Ux;W~zBE^M*X1N4s2dui97q)h(24xz z{ir*CViK*q)d!cJ5X^*dLScp#4kl9L8RtP6O|(@$L>8j)R3*=Z5;(*Zt;C8uY^%$I z0~Oxao7)G_n!W}~bnu8BpM2DIXbq4*Z0BbW747`Dr_j>;+%B}LzVm7H@w=*P&o{y% z&I6?;MGrh;ml!ot;p72?25U7)9zzqHI?0uDpmU9ljGj0Q>p$=>)UNwZpsChEIe|4M 
zf)Nsh(bPJw31iT+k4A%DfoB(O@Z3jhjS-krW1MBdGAS)Hja~9QEqXIMiC$l*NQ_UM zlHf^VSc1iX1uj`q)P)cP$c;r0h{;gHt>jQHf=&WYjtUd=>9TdR`HiHR{RR&!GOic1LS$l&2sJhIuXFK7pcjOcoSx zc!&Zs6Qq!wL`j1VmRpS#V;FSctc#FF3!2Y6KMOUx&Z7JB6J3RCAbRu3vuH)$+1=z; z0);Yg4xu0w79@$5ga~#8O%VnwRah%#1u=oMD1i(wBWRuop#{r-zuIc&fGd;z|yz@*kRO?sKgZP$t>SzCi{Xk){|(tPggu!7GW1@-KC8*Ql9&ZBDC0uPY9dB@t~{TcrSb=SYY zf!2Sa3-}S8eEAWyI;$hC)%oEc7h_vM?HV(P9j*;TTT+Hn9i2%aP6ti^`BP^xhUn)a zLO|&l6UR#V=tSPU9j#n?dlW5{uUE~qE_^jVb_Ml4_ty5U$~!G2u+7U=eQn+QK(VGPZ{A&eCBJ#1+>rn9Y1rD1-Nlmn#_nS8`>M{)?!1i`EAoXs zMQ?TdpNsR+ZRH*L!uDdgo*gfK+uDQ$?0~}ckl?#d%1<3C`tp`NXh}VDq zb;`lmuowlQL6Q>=QYaD(Y{q#SI%UkMC(emYw zypwc8N6I6c>W7Y(#jNiI|i9Opduy2qyJbaQ)rnNDR`n)AHBXxlU}DoXT$0H!-`))UBj1eyKk6wNwZy3|fUIh;^K<`|Td?{r%4W_wxVy?UrkYTRuP1 zHfzZ}jU7w$z;?89=fKLXPxfysjWjzKs`SC z4A@t`v>UwHR_%aYn0LW=;laEu_6&pRw@*kM-UjBEei%HoxHz%A-s_~}kSc6AR7x|i zsNsS-7^r2O8WJ=o8Y3hL*HA%Y3AKniim|}RqGJ4`&e9(Tt}=lJjpq`10~uzDYD#p_ z(qV_Kg^Cc46cSbtDyt>aW#Pc)&}Nh%>E zQXB6j63|mg(tV7#G9ZLZV8Uv}g|Z9^W{XYxTI1RcmFcnRB)FUO{wT8-q*gFEAnox&!q4@m>XpQGbz}|@y)Gfnw7kNI0{gD zpDc$s2ok8{NHfQbN0e&>lg~i}0XfDRA&y0G1kreGcDAj2e++Dfaoru2IT2n4E#<0l zFw#(tUj^MW8z;2fof@IzyFY^VIQA>Z%f4!5>7L@!3-iiD2P$I+<7YEKy*G^$U@#^K zOEp(iL7c{QQbEK7rcA<{FB(ZFy)x1%hjnlY5hgX)qS$fcb^!iu$Ucs*y^+l-zo}+R z$^Y&6Wp(!KWHHLEYqCp=;^_Lk-lLV))(dJ4PY+>Mp~PSjPFlE>Ud(y&PiyH7Q_3)F zG~-M_mBJoE)Q3P@{BujTIF9jbS|pEWy(Qd|sVP&E+@dRXjDm&n`M%6ga>pG{fX=c> zXD>F#PYzeA@$Pk%rs9u>jN>jR`6{$}`3+F6x8hOgO zV>qrm0cz#c!R%l)_Uz3X4u4Dkw!T!6@gI6UI?5sWvQ?8q1Vtl`IjAI4_grqi@vW+BJD& zj=B(zi`(OWx zTzlpchvwKhqL?5k?H;d$#Ww92Au&Ti`r@nRT6y||ykkRL)LW@d^*dMg>zxBWj*Vxn Yap9G`rPzM17B^nWSCzkA%^Pd~0d{@Ay#N3J diff --git a/Cargo.toml b/Cargo.toml index df49d73c8..c6e543c94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,6 +29,7 @@ async-trait = "0.1.80" axum = "0.6.20" # Update blocked by hyper base64 = "0.22.1" blake3 = "1.5" +bytes = "1.7.1" # Update blocked by hyper chrono = "0.4.38" ed25519-dalek = "2.1" flume = "0.11.0" diff --git a/core/Cargo.toml b/core/Cargo.toml index 
0a281ed45..50ab65f9a 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -52,6 +52,7 @@ async-trait = { workspace = true } axum = { workspace = true, features = ["ws"] } base64 = { workspace = true } blake3 = { workspace = true } +bytes = { workspace = true } chrono = { workspace = true, features = ["serde"] } futures = { workspace = true } futures-concurrency = { workspace = true } @@ -85,7 +86,6 @@ uuid = { workspace = true, features = ["serde", "v4", "v7"] } # Specific Core dependencies async-recursion = "1.1" base91 = "0.1.0" -bytes = "1.6" ctor = "0.2.8" directories = "5.0" flate2 = "1.0" diff --git a/core/crates/cloud-services/Cargo.toml b/core/crates/cloud-services/Cargo.toml index 20957a211..f7e5e8133 100644 --- a/core/crates/cloud-services/Cargo.toml +++ b/core/crates/cloud-services/Cargo.toml @@ -23,7 +23,6 @@ chrono = { workspace = true, features = ["serde"] } flume = { workspace = true } futures = { workspace = true } futures-concurrency = { workspace = true } -reqwest = { workspace = true, features = ["json", "native-tls-vendored"] } rspc = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } @@ -31,29 +30,26 @@ specta = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["sync", "time"] } tokio-stream = { workspace = true } +tokio-util = { workspace = true } tracing = { workspace = true } uuid = { workspace = true, features = ["serde"] } zeroize = { workspace = true } # External dependencies -anyhow = "1.0.86" -iroh-base = { version = "0.23.0", features = ["key"] } -iroh-net = "0.23.0" -paste = "=1.0.15" -postcard = { version = "1.0.8", features = ["use-std"] } -quic-rpc = { version = "0.11.0", features = ["quinn-transport"] } -quinn = { package = "iroh-quinn", version = "=0.10.5" } +anyhow = "1.0.86" +iroh-base = { version = "0.24", features = ["key"] } +iroh-net = "0.24" +paste = "=1.0.15" +postcard = { version = "1.0.8", features = ["use-std"] } 
+quic-rpc = { version = "0.12.0", features = ["quinn-transport"] } +quinn = { package = "iroh-quinn", version = "0.11" } +# Using whatever version of reqwest that reqwest-middleware uses, just putting here to enable some features +reqwest = { version = "0.12", features = ["json", "native-tls-vendored", "stream"] } reqwest-middleware = { version = "0.3", features = ["json"] } reqwest-retry = "0.6" rustls = { version = "0.23", default-features = false, features = ["brotli", "ring", "std"] } rustls-platform-verifier = "0.3.3" -[dependencies.rustls-old] -default-features = false -features = ["dangerous_configuration", "logging", "quic"] -package = "rustls" -version = "0.21.12" # Update blocked by quic-rpc - [dev-dependencies] tokio = { workspace = true, features = ["rt", "sync", "time"] } diff --git a/core/crates/cloud-services/src/client.rs b/core/crates/cloud-services/src/client.rs index edaddfc94..076d572f8 100644 --- a/core/crates/cloud-services/src/client.rs +++ b/core/crates/cloud-services/src/client.rs @@ -7,7 +7,7 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; use futures::Stream; use iroh_net::relay::RelayUrl; use quic_rpc::{transport::quinn::QuinnConnection, RpcClient}; -use quinn::{ClientConfig, Endpoint}; +use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Endpoint}; use reqwest::{IntoUrl, Url}; use reqwest_middleware::{reqwest, ClientBuilder, ClientWithMiddleware}; use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; @@ -164,82 +164,56 @@ impl CloudServices { let mut crypto_config = { #[cfg(debug_assertions)] { - // FIXME(@fogodev): use this commented code when we can update to quic-rpc 0.12.0 or newer - // #[derive(Debug)] - // struct SkipServerVerification; - // impl rustls::client::danger::ServerCertVerifier for SkipServerVerification { - // fn verify_server_cert( - // &self, - // _end_entity: &rustls::pki_types::CertificateDer<'_>, - // _intermediates: &[rustls::pki_types::CertificateDer<'_>], - // 
_server_name: &rustls::pki_types::ServerName<'_>, - // _ocsp_response: &[u8], - // _now: rustls::pki_types::UnixTime, - // ) -> Result { - // Ok(rustls::client::danger::ServerCertVerified::assertion()) - // } - - // fn verify_tls12_signature( - // &self, - // _message: &[u8], - // _cert: &rustls::pki_types::CertificateDer<'_>, - // _dss: &rustls::DigitallySignedStruct, - // ) -> Result { - // Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) - // } - - // fn verify_tls13_signature( - // &self, - // _message: &[u8], - // _cert: &rustls::pki_types::CertificateDer<'_>, - // _dss: &rustls::DigitallySignedStruct, - // ) -> Result { - // Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) - // } - - // fn supported_verify_schemes(&self) -> Vec { - // vec![] - // } - // } - - // rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13]) - // .dangerous() - // .with_custom_certificate_verifier(Arc::new(SkipServerVerification)) - // .with_no_client_auth() - + #[derive(Debug)] struct SkipServerVerification; - impl rustls_old::client::ServerCertVerifier for SkipServerVerification { + impl rustls::client::danger::ServerCertVerifier for SkipServerVerification { fn verify_server_cert( &self, - _end_entity: &rustls_old::Certificate, - _intermediates: &[rustls_old::Certificate], - _server_name: &rustls_old::ServerName, - _scts: &mut dyn Iterator, + _end_entity: &rustls::pki_types::CertificateDer<'_>, + _intermediates: &[rustls::pki_types::CertificateDer<'_>], + _server_name: &rustls::pki_types::ServerName<'_>, _ocsp_response: &[u8], - _now: std::time::SystemTime, - ) -> Result { - Ok(rustls_old::client::ServerCertVerified::assertion()) + _now: rustls::pki_types::UnixTime, + ) -> Result { + Ok(rustls::client::danger::ServerCertVerified::assertion()) + } + + fn verify_tls12_signature( + &self, + _message: &[u8], + _cert: &rustls::pki_types::CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result { + 
Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + + fn verify_tls13_signature( + &self, + _message: &[u8], + _cert: &rustls::pki_types::CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + + fn supported_verify_schemes(&self) -> Vec { + vec![] } } - rustls_old::ClientConfig::builder() - .with_safe_defaults() + rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13]) + .dangerous() .with_custom_certificate_verifier(Arc::new(SkipServerVerification)) .with_no_client_auth() } #[cfg(not(debug_assertions))] { - // FIXME(@fogodev): use this commented code when we can update to quic-rpc 0.12.0 or newer - // rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13]) - // .dangerous() - // .with_custom_certificate_verifier(Arc::new( - // rustls_platform_verifier::Verifier::new(), - // )) - // .with_no_client_auth() - - rustls_old::ClientConfig::builder() - .with_safe_defaults() + rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13]) + .dangerous() + .with_custom_certificate_verifier(Arc::new( + rustls_platform_verifier::Verifier::new(), + )) .with_no_client_auth() } }; @@ -248,13 +222,10 @@ impl CloudServices { .alpn_protocols .extend([ServicesALPN::LATEST.to_vec()]); - // FIXME(@fogodev): use this commented code when we can update to quic-rpc 0.12.0 or newer - // let client_config = ClientConfig::new(Arc::new( - // QuicClientConfig::try_from(crypto_config) - // .expect("misconfigured TLS client config, this is a bug and should crash"), - // )); - - let client_config = ClientConfig::new(Arc::new(crypto_config)); + let client_config = ClientConfig::new(Arc::new( + QuicClientConfig::try_from(crypto_config) + .expect("misconfigured TLS client config, this is a bug and should crash"), + )); let mut endpoint = Endpoint::client("[::]:0".parse().expect("hardcoded address")) 
.map_err(Error::FailedToCreateEndpoint)?; diff --git a/core/crates/cloud-services/src/error.rs b/core/crates/cloud-services/src/error.rs index 3d9b819c8..cefbd9d29 100644 --- a/core/crates/cloud-services/src/error.rs +++ b/core/crates/cloud-services/src/error.rs @@ -4,7 +4,7 @@ use sd_utils::error::FileIOError; use std::{io, net::AddrParseError}; use quic_rpc::{ - pattern::{bidi_streaming, rpc}, + pattern::{bidi_streaming, rpc, server_streaming}, transport::quinn::QuinnConnection, }; @@ -40,13 +40,13 @@ pub enum Error { #[error("Missing tokens on refresh response")] MissingTokensOnRefreshResponse, #[error("Failed to parse token header value to string: {0}")] - FailedToParseTokenHeaderValueToString(#[from] reqwest_middleware::reqwest::header::ToStrError), + FailedToParseTokenHeaderValueToString(#[from] reqwest::header::ToStrError), // Key Manager errors #[error("Failed to handle File on KeyManager: {0}")] FileIO(#[from] FileIOError), #[error("Failed to handle key store serialization: {0}")] - KeyStoreSerialization(#[from] postcard::Error), + KeyStoreSerialization(postcard::Error), #[error("Key store encryption related error: {{context: \"{context}\", source: {source}}}")] KeyStoreCrypto { #[source] @@ -69,6 +69,10 @@ pub enum Error { // Communication errors #[error("Failed to communicate with RPC backend: {0}")] RpcCommunication(#[from] rpc::Error>), + #[error("Failed to communicate with Server Streaming RPC backend: {0}")] + ServerStreamCommunication(#[from] server_streaming::Error>), + #[error("Failed to receive next response from Server Streaming RPC backend: {0}")] + ServerStreamRecv(#[from] server_streaming::ItemError>), #[error("Failed to communicate with Bidi Streaming RPC backend: {0}")] BidiStreamCommunication(#[from] bidi_streaming::Error>), #[error("Failed to receive next response from Bidi Streaming RPC backend: {0}")] @@ -93,14 +97,42 @@ pub enum Error { Decrypt(sd_crypto::Error), #[error("Failed to upload sync messages: {0}")] 
UploadSyncMessages(reqwest_middleware::Error), + #[error("Failed to download sync messages: {0}")] + DownloadSyncMessages(reqwest_middleware::Error), #[error("Received an error response from uploading sync messages: {0}")] ErrorResponseUploadSyncMessages(reqwest_middleware::reqwest::Error), + #[error("Received an error response from downloading sync messages: {0}")] + ErrorResponseDownloadSyncMessages(reqwest_middleware::reqwest::Error), + #[error( + "Received an error response from downloading sync messages while reading its bytes: {0}" + )] + ErrorResponseDownloadReadBytesSyncMessages(reqwest_middleware::reqwest::Error), #[error("Critical error while uploading sync messages")] CriticalErrorWhileUploadingSyncMessages, #[error("Failed to send End update to push sync messages")] EndUpdatePushSyncMessages(io::Error), #[error("Unexpected end of stream while encrypting sync messages")] UnexpectedEndOfStream, + #[error("Failed to read last timestamp keeper for pulling sync messages: {0}")] + FailedToReadLastTimestampKeeper(io::Error), + #[error("Failed to handle last timestamp keeper serialization: {0}")] + LastTimestampKeeperSerialization(postcard::Error), + #[error("Failed to write last timestamp keeper for pulling sync messages: {0}")] + FailedToWriteLastTimestampKeeper(io::Error), + #[error("Sync messages download and decrypt task panicked")] + SyncMessagesDownloadAndDecryptTaskPanicked, + #[error("Serialization failure to push sync messages: {0}")] + SerializationFailureToPushSyncMessages(postcard::Error), + #[error("Deserialization failure to pull sync messages: {0}")] + DeserializationFailureToPullSyncMessages(postcard::Error), + #[error("Read nonce stream decryption: {0}")] + ReadNonceStreamDecryption(io::Error), + #[error("Incomplete download bytes sync messages")] + IncompleteDownloadBytesSyncMessages, + + // Temporary errors + #[error("Device missing secret key for decrypting sync messages")] + MissingKeyHash, } #[derive(thiserror::Error, Debug)] diff --git 
a/core/crates/cloud-services/src/key_manager/key_store.rs b/core/crates/cloud-services/src/key_manager/key_store.rs index be0fe0266..087eff70b 100644 --- a/core/crates/cloud-services/src/key_manager/key_store.rs +++ b/core/crates/cloud-services/src/key_manager/key_store.rs @@ -115,7 +115,7 @@ impl KeyStore { rng: &mut CryptoRng, keys_file_path: &PathBuf, ) -> Result<(), Error> { - let plain_text_bytes = postcard::to_stdvec(self)?; + let plain_text_bytes = postcard::to_stdvec(self).map_err(Error::KeyStoreSerialization)?; let mut file = BufWriter::with_capacity( EncryptedBlock::CIPHER_TEXT_SIZE, fs::OpenOptions::new() @@ -234,7 +234,7 @@ impl KeyStore { )) })?; - key.decrypt(&EncryptedBlock { nonce, cipher_text }) + key.decrypt_owned(&EncryptedBlock { nonce, cipher_text }) .map_err(|e| Error::KeyStoreCrypto { source: e, context: "Failed to oneshot decrypt space keys file", @@ -266,7 +266,7 @@ impl KeyStore { key_store_bytes }) - .map_err(Into::into) + .map_err(Error::KeyStoreSerialization) } } diff --git a/core/crates/cloud-services/src/sync/mod.rs b/core/crates/cloud-services/src/sync/mod.rs index 5c20efda4..0221fd4f8 100644 --- a/core/crates/cloud-services/src/sync/mod.rs +++ b/core/crates/cloud-services/src/sync/mod.rs @@ -1,22 +1,28 @@ -use crate::CloudServices; +use crate::{CloudServices, Error}; -use sd_actors::ActorsCollection; +use futures_concurrency::future::TryJoin; +use sd_core_sync::{SyncManager, NTP64}; + +use sd_actors::{ActorsCollection, IntoActor}; use sd_cloud_schema::sync::groups; -use sd_core_sync::SyncManager; - use sd_crypto::CryptoRng; -use sd_prisma::prisma::PrismaClient; use std::{ fmt, + path::Path, sync::{atomic::AtomicBool, Arc}, + time::{SystemTime, UNIX_EPOCH}, }; +use chrono::{DateTime, Utc}; use tokio::sync::Notify; -pub mod ingest; -pub mod receive; -pub mod send; +mod ingest; +mod receive; +mod send; + +use receive::Receiver; +use send::Sender; #[derive(Default)] pub struct SyncActorsState { @@ -45,58 +51,40 @@ impl fmt::Display for 
SyncActors { } pub async fn declare_actors( + data_dir: Box, cloud_services: Arc, actors: &ActorsCollection, actors_state: &SyncActorsState, sync_group_pub_id: groups::PubId, sync: SyncManager, - db: Arc, rng: CryptoRng, -) { +) -> Result<(), Error> { let ingest_notify = Arc::new(Notify::new()); - actors - .declare(SyncActors::Sender, { - let sync = sync.clone(); - let cloud_services = Arc::clone(&cloud_services); - let active = Arc::clone(&actors_state.send_active); - let active_notifier = Arc::clone(&actors_state.notifier); - - move |stop| { - send::run_actor( - sync_group_pub_id, - sync, - cloud_services, - active, - active_notifier, - rng, - stop, - ) - } - }) - .await; + let (sender, receiver) = ( + Sender::new( + sync_group_pub_id, + sync.clone(), + Arc::clone(&cloud_services), + Arc::clone(&actors_state.send_active), + Arc::clone(&actors_state.notifier), + rng, + ), + Receiver::new( + data_dir, + sync_group_pub_id, + cloud_services, + sync, + ingest_notify, + Arc::clone(&actors_state.receive_active), + Arc::clone(&actors_state.notifier), + ), + ) + .try_join() + .await?; actors - .declare(SyncActors::Receiver, { - let sync = sync.clone(); - let cloud_services = cloud_services.clone(); - let db = Arc::clone(&db); - let active = Arc::clone(&actors_state.receive_active); - let ingest_notify = Arc::clone(&ingest_notify); - let active_notifier = Arc::clone(&actors_state.notifier); - - move |stop| { - receive::run_actor( - db, - sync_group_pub_id, - cloud_services, - sync, - ingest_notify, - (active, active_notifier), - stop, - ) - } - }) + .declare_many_boxed([sender.into_actor(), receiver.into_actor()]) .await; // actors @@ -113,4 +101,18 @@ pub async fn declare_actors( // autorun, // ) // .await; + + Ok(()) +} + +fn datetime_to_timestamp(latest_time: DateTime) -> NTP64 { + NTP64::from( + SystemTime::from(latest_time) + .duration_since(UNIX_EPOCH) + .expect("hardcoded earlier time, nothing is earlier than UNIX_EPOCH"), + ) +} + +fn 
timestamp_to_datetime(timestamp: NTP64) -> DateTime { + DateTime::from(timestamp.to_system_time()) } diff --git a/core/crates/cloud-services/src/sync/receive.rs b/core/crates/cloud-services/src/sync/receive.rs index d3c597f62..abaf10f31 100644 --- a/core/crates/cloud-services/src/sync/receive.rs +++ b/core/crates/cloud-services/src/sync/receive.rs @@ -1,228 +1,354 @@ -use crate::CloudServices; +use crate::{CloudServices, Error, KeyManager}; -use sd_cloud_schema::sync::groups; -use sd_core_sync::{cloud_crdt_op_db, CRDTOperation, DevicePubId, SyncManager}; +use sd_cloud_schema::{ + devices, + sync::{ + groups, + messages::{pull, MessagesCollection}, + }, + Client, Service, +}; +use sd_core_sync::{cloud_crdt_op_db, CRDTOperation, SyncManager}; -use sd_actors::Stopper; -use sd_prisma::prisma::{cloud_crdt_operation, device, instance, PrismaClient}; -use sd_utils::uuid_to_bytes; +use sd_actors::{Actor, Stopper}; +use sd_crypto::{ + cloud::{OneShotDecryption, SecretKey, StreamDecryption}, + primitives::{EncryptedBlock, OneShotNonce, StreamNonce}, +}; +use sd_prisma::prisma::PrismaClient; use std::{ - collections::HashMap, - sync::{atomic::AtomicBool, Arc}, + collections::{hash_map::Entry, HashMap}, + future::IntoFuture, + num::NonZero, + path::Path, + pin::Pin, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + task::{Context, Poll}, + time::Duration, }; -use chrono::Utc; -use futures::FutureExt; -use serde_json::to_vec; -use tokio::sync::Notify; +use chrono::{DateTime, Utc}; +use futures::{FutureExt, StreamExt, TryStreamExt}; +use futures_concurrency::future::{Join, Race, TryJoin}; +use quic_rpc::transport::quinn::QuinnConnection; +use reqwest::Response; +use reqwest_middleware::ClientWithMiddleware; +use serde::{Deserialize, Serialize}; +use tokio::{ + fs, + io::{self, AsyncRead, AsyncReadExt, ReadBuf}, + spawn, + sync::{Notify, Semaphore}, + time::sleep, +}; +use tokio_util::io::StreamReader; +use tracing::{error, instrument}; use uuid::Uuid; +use 
super::SyncActors; + +const CLOUD_SYNC_DATA_KEEPER_FILE: &str = "cloud_sync_data_keeper.bin"; +const ONE_MINUTE: Duration = Duration::from_secs(60); + // Responsible for downloading sync operations from the cloud to be processed by the ingester -pub async fn run_actor( - db: Arc, +pub struct Receiver { + keeper: LastTimestampKeeper, sync_group_pub_id: groups::PubId, + device_pub_id: devices::PubId, cloud_services: Arc, + cloud_client: Client>, + semaphore: Arc, + key_manager: Arc, sync: SyncManager, ingest_notify: Arc, - (active, active_notify): (Arc, Arc), - stop: Stopper, -) { - // enum Race { - // Continue, - // Stop, - // } - - // loop { - // active.store(true, Ordering::Relaxed); - // active_notify.notify_waiters(); - - // loop { - // // We need to know the latest operations we should be retrieving - // let mut cloud_timestamps = { - // let timestamps = sync.timestamps.read().await; - - // // looks up the most recent operation we've received (not ingested!) for each instance - // let db_timestamps = err_break!( - // db._batch( - // timestamps - // .keys() - // .map(|id| { - // db.cloud_crdt_operation() - // .find_first(vec![cloud_crdt_operation::instance::is(vec![ - // instance::pub_id::equals(uuid_to_bytes(id)), - // ])]) - // .order_by(cloud_crdt_operation::timestamp::order( - // SortOrder::Desc, - // )) - // }) - // .collect::>() - // ) - // .await - // ); - - // // compares the latest ingested timestamp with the latest received timestamp - // // and picks the highest one for each instance - // let mut cloud_timestamps = db_timestamps - // .into_iter() - // .zip(timestamps.iter()) - // .map(|(d, (id, sync_timestamp))| { - // let cloud_timestamp = d.map(|d| d.timestamp).unwrap_or_default() as u64; - - // debug!( - // instance_id = %id, - // sync_timestamp = sync_timestamp.as_u64(), - // %cloud_timestamp, - // "Comparing sync timestamps", - // ); - - // let max_timestamp = Ord::max(cloud_timestamp, sync_timestamp.as_u64()); - - // (*id, max_timestamp) - // }) 
- // .collect::>(); - - // cloud_timestamps.remove(&instance_uuid); - - // cloud_timestamps - // }; - - // let instance_timestamps: Vec = sync - // .timestamps - // .read() - // .await - // .keys() - // .map( - // |uuid| sd_cloud_api::library::message_collections::get::InstanceTimestamp { - // instance_uuid: *uuid, - // from_time: cloud_timestamps - // .get(uuid) - // .copied() - // .unwrap_or_default() - // .to_string(), - // }, - // ) - // .collect(); - - // let collections = err_break!( - // sd_cloud_api::library::message_collections::get( - // node.get_request_config().await, - // library_id, - // instance_uuid, - // instance_timestamps, - // ) - // .await - // ); - - // info!( - // collections_count = collections.len(), - // "Received collections;", - // ); - - // if collections.is_empty() { - // break; - // } - - // let mut cloud_library_data: Option> = None; - - // for collection in collections { - // if let Entry::Vacant(e) = cloud_timestamps.entry(collection.instance_uuid) { - // let fetched_library = match &cloud_library_data { - // None => { - // let Some(fetched_library) = err_break!( - // sd_cloud_api::library::get( - // node.get_request_config().await, - // library_id - // ) - // .await - // ) else { - // break; - // }; - - // cloud_library_data - // .insert(Some(fetched_library)) - // .as_ref() - // .expect("error inserting fetched library") - // } - // Some(None) => { - // break; - // } - // Some(Some(fetched_library)) => fetched_library, - // }; - - // let Some(instance) = fetched_library - // .instances - // .iter() - // .find(|i| i.uuid == collection.instance_uuid) - // else { - // break; - // }; - - // err_break!( - // upsert_instance( - // library_id, - // &db, - // &sync, - // &libraries, - // &collection.instance_uuid, - // instance.identity, - // &instance.node_id, - // RemoteIdentity::from_str(&instance.node_remote_identity) - // .expect("malformed remote identity in the DB"), - // node.p2p.peer_metadata(), - // ) - // .await - // ); - - // 
e.insert(0); - // } - - // let compressed_operations: CompressedCRDTOperations = err_break!( - // rmp_serde::from_slice(err_break!(&BASE64_STANDARD.decode(collection.contents))) - // ); - - // let operations = compressed_operations.into_ops(); - - // debug!( - // instance_id = %collection.instance_uuid, - // start = ?operations.first().map(|operation| operation.timestamp.as_u64()), - // end = ?operations.last().map(|operation| operation.timestamp.as_u64()), - // "Processing collection", - // ); - - // err_break!(write_cloud_ops_to_db(operations, &db).await); - - // let collection_timestamp: u64 = - // collection.end_time.parse().expect("unable to parse time"); - - // let timestamp = cloud_timestamps - // .entry(collection.instance_uuid) - // .or_insert(collection_timestamp); - - // if *timestamp < collection_timestamp { - // *timestamp = collection_timestamp; - // } - // } - - // ingest_notify.notify_waiters(); - // } - - // active.store(false, Ordering::Relaxed); - // active_notify.notify_waiters(); - - // if let Race::Stop = ( - // sleep(Duration::from_secs(60)).map(|()| Race::Continue), - // stop.into_future().map(|()| Race::Stop), - // ) - // .race() - // .await - // { - // break; - // } - // } + active: Arc, + active_notify: Arc, } +impl Actor for Receiver { + const IDENTIFIER: SyncActors = SyncActors::Receiver; + + async fn run(&mut self, stop: Stopper) { + enum Race { + Continue, + Stop, + } + + loop { + self.active.store(true, Ordering::Relaxed); + self.active_notify.notify_waiters(); + + let res = self.run_loop_iteration().await; + + self.active.store(false, Ordering::Relaxed); + + if let Err(e) = res { + error!(?e, "Error during cloud sync sender actor iteration"); + sleep(ONE_MINUTE).await; + continue; + } + + self.active_notify.notify_waiters(); + + if matches!( + ( + sleep(ONE_MINUTE).map(|()| Race::Continue), + stop.into_future().map(|()| Race::Stop), + ) + .race() + .await, + Race::Stop + ) { + break; + } + } + } +} + +impl Receiver { + pub async fn 
new( + data_dir: impl AsRef + Send, + sync_group_pub_id: groups::PubId, + cloud_services: Arc, + sync: SyncManager, + ingest_notify: Arc, + active: Arc, + active_notify: Arc, + ) -> Result { + let (keeper, cloud_client, key_manager) = ( + LastTimestampKeeper::load(data_dir.as_ref()), + cloud_services.client(), + cloud_services.key_manager(), + ) + .try_join() + .await?; + + Ok(Self { + keeper, + sync_group_pub_id, + device_pub_id: devices::PubId(Uuid::from(&sync.device_pub_id)), + cloud_services, + cloud_client, + semaphore: Arc::new(Semaphore::new( + std::thread::available_parallelism() + .map(NonZero::get) + .unwrap_or(1), + )), + key_manager, + sync, + ingest_notify, + active, + active_notify, + }) + } + + async fn run_loop_iteration(&mut self) -> Result<(), Error> { + let mut responses_stream = self + .cloud_client + .sync() + .messages() + .pull(pull::Request { + access_token: self + .cloud_services + .token_refresher + .get_access_token() + .await?, + group_pub_id: self.sync_group_pub_id, + current_device_pub_id: self.device_pub_id, + start_time_per_device: self + .keeper + .timestamps + .iter() + .map(|(device_pub_id, timestamp)| (*device_pub_id, *timestamp)) + .collect(), + }) + .await?; + + while let Some(new_messages_res) = responses_stream.next().await { + let pull::Response(new_messages) = new_messages_res??; + if new_messages.is_empty() { + break; + } + + self.handle_new_messages(new_messages).await?; + self.ingest_notify.notify_waiters(); + } + + self.keeper.save().await + } + + async fn handle_new_messages( + &mut self, + new_messages: Vec, + ) -> Result<(), Error> { + let handles = new_messages + .into_iter() + .map(|message| { + let sync_group_pub_id = self.sync_group_pub_id; + let semaphore = Arc::clone(&self.semaphore); + let key_manager = Arc::clone(&self.key_manager); + let sync = self.sync.clone(); + let http_client = self.cloud_services.http_client().clone(); + + async move { + spawn(handle_single_message( + sync_group_pub_id, + message, + 
semaphore, + key_manager, + sync, + http_client, + )) + .await + } + }) + .collect::>(); + + for res in handles.join().await { + let Ok(res) = res else { + return Err(Error::SyncMessagesDownloadAndDecryptTaskPanicked); + }; + + let (device_pub_id, timestamp) = res?; + + match self.keeper.timestamps.entry(device_pub_id) { + Entry::Occupied(mut entry) => { + if entry.get() < ×tamp { + *entry.get_mut() = timestamp; + } + } + Entry::Vacant(entry) => { + entry.insert(timestamp); + } + } + } + + Ok(()) + } +} + +async fn handle_single_message( + sync_group_pub_id: groups::PubId, + MessagesCollection { + original_device_pub_id, + end_time, + operations_count, + key_hash, + signed_download_link, + .. + }: MessagesCollection, + semaphore: Arc, + key_manager: Arc, + sync: SyncManager, + http_client: ClientWithMiddleware, +) -> Result<(devices::PubId, DateTime), Error> { + // FIXME(@fogodev): If we don't have the key hash, we need to fetch it from another device in the group if possible + let Some(secret_key) = key_manager.get_key(sync_group_pub_id, &key_hash).await else { + return Err(Error::MissingKeyHash); + }; + + let _permit = semaphore + .acquire() + .await + .expect("sync messages receiver semaphore never closes"); + + let response = http_client + .get(signed_download_link) + .send() + .await + .map_err(Error::DownloadSyncMessages)? 
+ .error_for_status() + .map_err(Error::ErrorResponseDownloadSyncMessages)?; + + let crdt_ops = if let Some(size) = response.content_length() { + extract_messages_known_size(response, size, secret_key).await + } else { + extract_messages_unknown_size(response, secret_key).await + }?; + assert_eq!( + crdt_ops.len(), + operations_count as usize, + "Sync messages count mismatch" + ); + write_cloud_ops_to_db(crdt_ops, &sync.db).await?; + Ok((original_device_pub_id, end_time)) +} + +#[instrument(skip(response, secret_key), err)] +async fn extract_messages_known_size( + response: Response, + size: u64, + secret_key: SecretKey, +) -> Result, Error> { + let plain_text = if size <= EncryptedBlock::CIPHER_TEXT_SIZE as u64 { + OneShotDecryption::decrypt( + &secret_key, + response + .bytes() + .await + .map_err(Error::ErrorResponseDownloadReadBytesSyncMessages)? + .as_ref() + .into(), + ) + .map_err(Error::Decrypt)? + } else { + let mut reader = StreamReader::new(response.bytes_stream().map_err(|e| { + error!(?e, "Failed to read sync messages bytes stream"); + io::Error::new(io::ErrorKind::Other, e) + })); + + let mut nonce = StreamNonce::default(); + + reader + .read_exact(&mut nonce) + .await + .map_err(Error::ReadNonceStreamDecryption)?; + + // TODO: Reimplement using async streaming with serde if it ever gets implemented + + let mut plain_text = vec![]; + + StreamDecryption::decrypt(&secret_key, &nonce, reader, &mut plain_text) + .await + .map_err(Error::Decrypt)?; + + plain_text + }; + + postcard::from_bytes(&plain_text).map_err(Error::DeserializationFailureToPullSyncMessages) +} + +#[instrument(skip_all, err)] +async fn extract_messages_unknown_size( + response: Response, + secret_key: SecretKey, +) -> Result, Error> { + let plain_text = match UnknownDownloadKind::new(response).await? { + UnknownDownloadKind::OneShot(buffer) => { + OneShotDecryption::decrypt(&secret_key, buffer.as_slice().into()) + .map_err(Error::Decrypt)? 
+ } + + UnknownDownloadKind::Stream((nonce, reader)) => { + let mut plain_text = vec![]; + + StreamDecryption::decrypt(&secret_key, &nonce, reader, &mut plain_text) + .await + .map_err(Error::Decrypt)?; + + plain_text + } + }; + + postcard::from_bytes(&plain_text).map_err(Error::DeserializationFailureToPullSyncMessages) +} + +#[instrument(skip_all, err)] pub async fn write_cloud_ops_to_db( ops: Vec, db: &PrismaClient, @@ -236,3 +362,110 @@ pub async fn write_cloud_ops_to_db( Ok(()) } + +#[derive(Serialize, Deserialize, Debug)] +struct LastTimestampKeeper { + timestamps: HashMap>, + file_path: Box, +} + +impl LastTimestampKeeper { + async fn load(data_dir: &Path) -> Result { + let file_path = data_dir.join(CLOUD_SYNC_DATA_KEEPER_FILE).into_boxed_path(); + + match fs::read(&file_path).await { + Ok(bytes) => Ok(Self { + timestamps: postcard::from_bytes(&bytes) + .map_err(Error::LastTimestampKeeperSerialization)?, + file_path, + }), + + Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(Self { + timestamps: HashMap::new(), + file_path, + }), + + Err(e) => Err(Error::FailedToReadLastTimestampKeeper(e)), + } + } + + async fn save(&self) -> Result<(), Error> { + fs::write( + &self.file_path, + &postcard::to_stdvec(&self.timestamps) + .map_err(Error::LastTimestampKeeperSerialization)?, + ) + .await + .map_err(Error::FailedToWriteLastTimestampKeeper) + } +} + +struct UnknownDownloadSizeStreamer { + stream_reader: Box, + buffer: Vec, + was_read: usize, +} + +enum UnknownDownloadKind { + OneShot(Vec), + Stream((StreamNonce, UnknownDownloadSizeStreamer)), +} + +impl UnknownDownloadKind { + async fn new(response: Response) -> Result { + let mut buffer = Vec::with_capacity(EncryptedBlock::CIPHER_TEXT_SIZE * 2); + + let mut stream = response.bytes_stream(); + + while let Some(res) = stream.next().await { + buffer.extend(res.map_err(Error::ErrorResponseDownloadReadBytesSyncMessages)?); + if buffer.len() > EncryptedBlock::CIPHER_TEXT_SIZE { + break; + } + } + + if buffer.len() < 
size_of::() { + return Err(Error::IncompleteDownloadBytesSyncMessages); + } + + if buffer.len() <= EncryptedBlock::CIPHER_TEXT_SIZE { + Ok(Self::OneShot(buffer)) + } else { + let nonce_size = size_of::(); + + Ok(Self::Stream(( + StreamNonce::try_from(&buffer[..nonce_size]).expect("passing the right nonce size"), + UnknownDownloadSizeStreamer { + stream_reader: Box::new(StreamReader::new(stream.map_err(|e| { + error!(?e, "Failed to read sync messages bytes stream"); + io::Error::new(io::ErrorKind::Other, e) + }))), + buffer, + was_read: nonce_size, + }, + ))) + } + } +} + +impl AsyncRead for UnknownDownloadSizeStreamer { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + if buf.remaining() == 0 { + return Poll::Ready(Ok(())); + } + + if self.was_read == self.buffer.len() { + Pin::new(&mut self.stream_reader).poll_read(cx, buf) + } else { + let len = std::cmp::min(self.buffer.len() - self.was_read, buf.remaining()); + buf.put_slice(&self.buffer[self.was_read..(self.was_read + len)]); + self.was_read += len; + + Poll::Ready(Ok(())) + } + } +} diff --git a/core/crates/cloud-services/src/sync/send.rs b/core/crates/cloud-services/src/sync/send.rs index 5ad300c7d..dcba941b7 100644 --- a/core/crates/cloud-services/src/sync/send.rs +++ b/core/crates/cloud-services/src/sync/send.rs @@ -1,12 +1,12 @@ -use crate::{CloudServices, Error}; +use crate::{CloudServices, Error, KeyManager}; use sd_core_sync::{SyncEvent, SyncManager, NTP64}; -use sd_actors::Stopper; +use sd_actors::{Actor, Stopper}; use sd_cloud_schema::{ devices, sync::{self, groups, messages}, - Service, + Client, Service, }; use sd_crypto::{ cloud::{OneShotEncryption, SecretKey, StreamEncryption}, @@ -22,11 +22,10 @@ use std::{ atomic::{AtomicBool, Ordering}, Arc, }, - time::{Duration, SystemTime, UNIX_EPOCH}, + time::Duration, }; use async_stream::try_stream; -use chrono::{DateTime, Utc}; use futures::{FutureExt, SinkExt, Stream, StreamExt, TryStream}; use 
futures_concurrency::future::{Race, TryJoin}; use quic_rpc::{client::UpdateSink, pattern::bidi_streaming, transport::quinn::QuinnConnection}; @@ -39,6 +38,8 @@ use tokio::{ use tracing::{debug, error}; use uuid::Uuid; +use super::{datetime_to_timestamp, timestamp_to_datetime, SyncActors}; + const TEN_SECONDS: Duration = Duration::from_secs(10); const THIRTY_SECONDS: Duration = Duration::from_secs(30); const ONE_MINUTE: Duration = Duration::from_secs(60); @@ -50,213 +51,275 @@ enum RaceNotifiedOrStopped { type LatestTimestamp = NTP64; -pub async fn run_actor( +type PushResponsesStream = Pin< + Box< + dyn Stream< + Item = Result< + Result, + bidi_streaming::ItemError>, + >, + > + Send + + Sync, + >, +>; + +#[derive(Debug)] +pub struct Sender { sync_group_pub_id: groups::PubId, sync: SyncManager, cloud_services: Arc, + cloud_client: Client>, + key_manager: Arc, is_active: Arc, state_notify: Arc, - mut rng: CryptoRng, - stop: Stopper, -) { - let mut maybe_latest_timestamp = None; + rng: CryptoRng, + maybe_latest_timestamp: Option, +} - loop { - is_active.store(true, Ordering::Relaxed); - state_notify.notify_waiters(); +impl Actor for Sender { + const IDENTIFIER: SyncActors = SyncActors::Sender; - let res = run_loop_iteration( - sync_group_pub_id, - &sync, - &cloud_services, - &mut rng, - &maybe_latest_timestamp, - ) - .await; + async fn run(&mut self, stop: Stopper) { + loop { + self.is_active.store(true, Ordering::Relaxed); + self.state_notify.notify_waiters(); - is_active.store(false, Ordering::Relaxed); - state_notify.notify_waiters(); + let res = self.run_loop_iteration().await; - match res { - Ok(timestamp) => { - maybe_latest_timestamp = Some(timestamp); - } - Err(e) => { + self.is_active.store(false, Ordering::Relaxed); + + if let Err(e) = res { error!(?e, "Error during cloud sync sender actor iteration"); sleep(ONE_MINUTE).await; continue; } - } - if let RaceNotifiedOrStopped::Stopped = ( - // recreate subscription each time so that existing messages are 
dropped - wait_notification(sync.subscribe()), - stop.into_future().map(|()| RaceNotifiedOrStopped::Stopped), - ) - .race() - .await - { - break; - } + self.state_notify.notify_waiters(); - sleep(TEN_SECONDS).await; + if matches!( + ( + // recreate subscription each time so that existing messages are dropped + wait_notification(self.sync.subscribe()), + stop.into_future().map(|()| RaceNotifiedOrStopped::Stopped), + ) + .race() + .await, + RaceNotifiedOrStopped::Stopped + ) { + break; + } + + sleep(TEN_SECONDS).await; + } } } -async fn run_loop_iteration( - sync_group_pub_id: groups::PubId, - sync: &SyncManager, - cloud_services: &CloudServices, - rng: &mut CryptoRng, - maybe_latest_timestamp: &Option, -) -> Result { - let current_device_pub_id = devices::PubId(Uuid::from(&sync.device_pub_id)); - - let (cloud_client, key_manager) = (cloud_services.client(), cloud_services.key_manager()) - .try_join() - .await?; - - let (key_hash, secret_key) = key_manager - .get_latest_key(sync_group_pub_id) - .await - .ok_or(Error::MissingSyncGroupKey(sync_group_pub_id))?; - - let current_latest_timestamp = if let Some(latest_timestamp) = maybe_latest_timestamp { - *latest_timestamp - } else { - let messages::get_latest_time::Response { - latest_time, - latest_device_pub_id, - } = cloud_client - .sync() - .messages() - .get_latest_time(messages::get_latest_time::Request { - access_token: cloud_services.token_refresher.get_access_token().await?, - group_pub_id: sync_group_pub_id, - current_device_pub_id, - kind: messages::get_latest_time::Kind::ForCurrentDevice, - }) - .await??; - - assert_eq!(latest_device_pub_id, current_device_pub_id); - - LatestTimestamp::from( - SystemTime::from(latest_time) - .duration_since(UNIX_EPOCH) - .expect("hardcoded earlier time, nothing is earlier than UNIX_EPOCH"), - ) - }; - - let mut crdt_ops_stream = - pin!(sync.stream_device_ops(&sync.device_pub_id, 1000, current_latest_timestamp)); - - let mut new_latest_timestamp = current_latest_timestamp; - 
while let Some(ops_res) = crdt_ops_stream.next().await { - let ops = ops_res?; - - let (Some(first), Some(last)) = (ops.first(), ops.last()) else { - break; - }; - - let operations_count = ops.len() as u32; - - new_latest_timestamp = last.timestamp; - - let start_time = DateTime::::from(first.timestamp.to_system_time()); - let end_time = DateTime::::from(last.timestamp.to_system_time()); - - let messages_bytes = postcard::to_stdvec(&ops)?; - - let (mut push_updates, mut push_responses) = cloud_client - .sync() - .messages() - .push(messages::push::Request { - access_token: cloud_services.token_refresher.get_access_token().await?, - group_pub_id: sync_group_pub_id, - device_pub_id: current_device_pub_id, - key_hash: key_hash.clone(), - operations_count, - start_time, - end_time, - expected_blob_size: messages_bytes.len() as u64, - }) +impl Sender { + pub async fn new( + sync_group_pub_id: groups::PubId, + sync: SyncManager, + cloud_services: Arc, + is_active: Arc, + state_notify: Arc, + rng: CryptoRng, + ) -> Result { + let (cloud_client, key_manager) = (cloud_services.client(), cloud_services.key_manager()) + .try_join() .await?; - let Some(response) = push_responses.next().await else { - return Err(Error::EmptyResponse("push initial response")); - }; + Ok(Self { + sync_group_pub_id, + sync, + cloud_services, + cloud_client, + key_manager, + is_active, + state_notify, + rng, + maybe_latest_timestamp: None, + }) + } - let messages::push::Response(response_kind) = response??; + async fn run_loop_iteration(&mut self) -> Result<(), Error> { + let current_device_pub_id = devices::PubId(Uuid::from(&self.sync.device_pub_id)); - match response_kind { - messages::push::ResponseKind::SinglePresignedUrl(url) => { - upload_to_single_url( - url, - secret_key.clone(), - cloud_services.http_client(), - messages_bytes, - rng, - ) - .await? 
- } - messages::push::ResponseKind::ManyPresignedUrls(urls) => { - upload_to_many_urls( - urls, - secret_key.clone(), - cloud_services.http_client().clone(), - messages_bytes, - rng, - &mut push_updates, - &mut push_responses, - ) - .await? - } - messages::push::ResponseKind::Pong => { - return Err(Error::UnexpectedResponse( - "Pong on first messages push request", - )) - } - messages::push::ResponseKind::End => { - return Err(Error::UnexpectedResponse( - "End on first messages push request", - )) + let (key_hash, secret_key) = self + .key_manager + .get_latest_key(self.sync_group_pub_id) + .await + .ok_or(Error::MissingSyncGroupKey(self.sync_group_pub_id))?; + + let current_latest_timestamp = self.get_latest_timestamp(current_device_pub_id).await?; + + let mut crdt_ops_stream = pin!(self.sync.stream_device_ops( + &self.sync.device_pub_id, + 1000, + current_latest_timestamp + )); + + let mut new_latest_timestamp = current_latest_timestamp; + while let Some(ops_res) = crdt_ops_stream.next().await { + let ops = ops_res?; + + let (Some(first), Some(last)) = (ops.first(), ops.last()) else { + break; + }; + + #[allow(clippy::cast_possible_truncation)] + let operations_count = ops.len() as u32; + + new_latest_timestamp = last.timestamp; + + let start_time = timestamp_to_datetime(first.timestamp); + let end_time = timestamp_to_datetime(last.timestamp); + + let messages_bytes = + postcard::to_stdvec(&ops).map_err(Error::SerializationFailureToPushSyncMessages)?; + + let (mut push_updates, mut push_responses) = self + .cloud_client + .sync() + .messages() + .push(messages::push::Request { + access_token: self + .cloud_services + .token_refresher + .get_access_token() + .await?, + group_pub_id: self.sync_group_pub_id, + device_pub_id: current_device_pub_id, + key_hash: key_hash.clone(), + operations_count, + start_time, + end_time, + expected_blob_size: messages_bytes.len() as u64, + }) + .await?; + + let Some(response) = push_responses.next().await else { + return 
Err(Error::EmptyResponse("push initial response")); + }; + + let messages::push::Response(response_kind) = response??; + + match response_kind { + messages::push::ResponseKind::SinglePresignedUrl(url) => { + upload_to_single_url( + url, + secret_key.clone(), + self.cloud_services.http_client(), + messages_bytes, + &mut self.rng, + ) + .await?; + } + messages::push::ResponseKind::ManyPresignedUrls(urls) => { + upload_to_many_urls( + urls, + secret_key.clone(), + self.cloud_services.http_client().clone(), + messages_bytes, + &mut self.rng, + &mut push_updates, + &mut push_responses, + ) + .await?; + } + messages::push::ResponseKind::Pong => { + return Err(Error::UnexpectedResponse( + "Pong on first messages push request", + )) + } + messages::push::ResponseKind::End => { + return Err(Error::UnexpectedResponse( + "End on first messages push request", + )) + } } + + finalize_protocol(&mut push_updates, &mut push_responses).await?; } - push_updates - .send(messages::push::RequestUpdate( - messages::push::UpdateKind::End, + self.maybe_latest_timestamp = Some(new_latest_timestamp); + + Ok(()) + } + + async fn get_latest_timestamp( + &self, + current_device_pub_id: devices::PubId, + ) -> Result { + if let Some(latest_timestamp) = &self.maybe_latest_timestamp { + Ok(*latest_timestamp) + } else { + let messages::get_latest_time::Response { + latest_time, + latest_device_pub_id, + } = self + .cloud_client + .sync() + .messages() + .get_latest_time(messages::get_latest_time::Request { + access_token: self + .cloud_services + .token_refresher + .get_access_token() + .await?, + group_pub_id: self.sync_group_pub_id, + current_device_pub_id, + kind: messages::get_latest_time::Kind::ForCurrentDevice, + }) + .await??; + + assert_eq!(latest_device_pub_id, current_device_pub_id); + + Ok(datetime_to_timestamp(latest_time)) + } + } +} + +async fn finalize_protocol( + push_updates: &mut UpdateSink< + Service, + QuinnConnection, + messages::push::RequestUpdate, + sync::Service, + >, + 
push_responses: &mut PushResponsesStream, +) -> Result<(), Error> { + push_updates + .send(messages::push::RequestUpdate( + messages::push::UpdateKind::End, + )) + .await + .map_err(Error::EndUpdatePushSyncMessages)?; + + let Some(response) = push_responses.next().await else { + return Err(Error::EmptyResponse("push initial response")); + }; + + let messages::push::Response(response_kind) = response??; + + match response_kind { + messages::push::ResponseKind::SinglePresignedUrl(_) + | messages::push::ResponseKind::ManyPresignedUrls(_) => { + return Err(Error::UnexpectedResponse( + "Urls responses on final messages push response", )) - .await - .map_err(Error::EndUpdatePushSyncMessages)?; - - let Some(response) = push_responses.next().await else { - return Err(Error::EmptyResponse("push initial response")); - }; - - let messages::push::Response(response_kind) = response??; - - match response_kind { - messages::push::ResponseKind::SinglePresignedUrl(_) - | messages::push::ResponseKind::ManyPresignedUrls(_) => { - return Err(Error::UnexpectedResponse( - "Urls responses on final messages push response", - )) - } - messages::push::ResponseKind::Pong => { - return Err(Error::UnexpectedResponse( - "Pong on final message push response", - )) - } - messages::push::ResponseKind::End => { - /* - Everything is awesome! - */ - } + } + messages::push::ResponseKind::Pong => { + return Err(Error::UnexpectedResponse( + "Pong on final message push response", + )) + } + messages::push::ResponseKind::End => { + /* + Everything is awesome! 
+ */ } } - Ok(new_latest_timestamp) + Ok(()) } async fn upload_to_many_urls( @@ -271,17 +334,7 @@ async fn upload_to_many_urls( messages::push::RequestUpdate, sync::Service, >, - push_responses: &mut Pin< - Box< - dyn Stream< - Item = Result< - Result, - bidi_streaming::ItemError>, - >, - > + Send - + Sync, - >, - >, + push_responses: &mut PushResponsesStream, ) -> Result<(), Error> { let stop_ping_pong = Arc::new(AtomicBool::new(false)); let (out_tx, mut out_rx) = oneshot::channel(); @@ -358,7 +411,7 @@ async fn upload_to_many_urls( let Ok(out) = out_rx.try_recv() else { // SAFETY: This try_recv error can only happen if the upload task panicked // so we're good to unwrap the error. - let e = handle.await.unwrap_err(); + let e = handle.await.expect_err("upload task panicked"); error!(?e, "Critical error while uploading sync messages"); return Err(Error::CriticalErrorWhileUploadingSyncMessages); }; @@ -537,7 +590,7 @@ fn stream_encryption( async fn wait_notification(mut rx: broadcast::Receiver) -> RaceNotifiedOrStopped { // wait until Created message comes in loop { - if let Ok(SyncEvent::Created) = rx.recv().await { + if matches!(rx.recv().await, Ok(SyncEvent::Created)) { break; }; } diff --git a/core/src/library/library.rs b/core/src/library/library.rs index cf98603ae..cb67afa4d 100644 --- a/core/src/library/library.rs +++ b/core/src/library/library.rs @@ -94,24 +94,30 @@ impl Library { }) } - pub async fn init_cloud_sync(&self, node: &Node, sync_group_pub_id: groups::PubId) { + pub async fn init_cloud_sync( + &self, + node: &Node, + sync_group_pub_id: groups::PubId, + ) -> Result<(), sd_core_cloud_services::Error> { let rng = CryptoRng::from_seed(node.master_rng.lock().await.generate_fixed()); declare_cloud_sync( + node.data_dir.clone().into_boxed_path(), node.cloud_services.clone(), &self.cloud_sync_actors, &self.cloud_sync_state, sync_group_pub_id, self.sync.clone(), - Arc::clone(&self.db), rng, ) - .await; + .await?; // TODO(@fogodev): Uncomment when they're 
ready // self.cloud_sync_actors.start(CloudSyncActors::Sender).await; // self.cloud_sync_actors.start(CloudSyncActors::Receiver).await; // self.cloud_sync_actors.start(CloudSyncActors::Ingester).await; + + Ok(()) } pub async fn config(&self) -> LibraryConfig { diff --git a/crates/actors/Cargo.toml b/crates/actors/Cargo.toml index 65d33cff7..172ac53f3 100644 --- a/crates/actors/Cargo.toml +++ b/crates/actors/Cargo.toml @@ -8,6 +8,7 @@ repository.workspace = true [dependencies] async-channel = { workspace = true } +async-trait = { workspace = true } futures = { workspace = true } pin-project-lite = { workspace = true } tokio = { workspace = true } diff --git a/crates/actors/src/lib.rs b/crates/actors/src/lib.rs index 0890e8684..1666c974d 100644 --- a/crates/actors/src/lib.rs +++ b/crates/actors/src/lib.rs @@ -32,6 +32,7 @@ use std::{ fmt, future::{Future, IntoFuture}, hash::Hash, + marker::PhantomData, panic::{panic_any, AssertUnwindSafe}, pin::Pin, sync::{ @@ -46,7 +47,7 @@ use async_channel as chan; use futures::FutureExt; use tokio::{ spawn, - sync::{broadcast, RwLock}, + sync::{broadcast, Mutex, RwLock}, task::JoinHandle, time::timeout, }; @@ -54,10 +55,57 @@ use tracing::{error, instrument, warn}; const ONE_MINUTE: Duration = Duration::from_secs(60); -type ActorFn = dyn Fn(Stopper) -> Pin + Send>> + Send + Sync; +pub trait ActorId: Hash + Eq + Send + Sync + Copy + fmt::Debug + fmt::Display + 'static {} -pub struct Actor { - spawn_fn: Arc, +impl ActorId for T {} + +pub trait Actor: Send + Sync + 'static { + const IDENTIFIER: Id; + + fn run(&mut self, stop: Stopper) -> impl Future + Send; +} + +mod sealed { + pub trait Sealed {} +} + +#[async_trait::async_trait] +pub trait DynActor: Send + Sync + sealed::Sealed + 'static { + async fn run(&mut self, stop: Stopper); +} + +pub trait IntoActor: Send + Sync { + fn into_actor(self) -> (Id, Box>); +} + +struct AnyActor> { + actor: A, + _marker: PhantomData, +} + +impl> sealed::Sealed for AnyActor {} + 
+#[async_trait::async_trait] +impl> DynActor for AnyActor { + async fn run(&mut self, stop: Stopper) { + self.actor.run(stop).await; + } +} + +impl> IntoActor for A { + fn into_actor(self) -> (Id, Box>) { + ( + A::IDENTIFIER, + Box::new(AnyActor { + actor: self, + _marker: PhantomData, + }), + ) + } +} + +struct ActorHandler { + actor: Arc>>>, maybe_handle: Option>, is_running: Arc, stop_tx: chan::Sender<()>, @@ -66,40 +114,62 @@ pub struct Actor { /// Actors holder, holds all actors for some generic purpose, like for cloud sync. /// You should use an enum to identify the actors. -pub struct ActorsCollection { +pub struct ActorsCollection { pub invalidate_rx: broadcast::Receiver<()>, invalidate_tx: broadcast::Sender<()>, - actors: Arc>>, + actors_map: Arc>>>, } -impl ActorsCollection -where - Id: Hash + Eq + fmt::Debug + fmt::Display + Copy + Send + Sync + 'static, -{ - pub async fn declare( - &self, - identifier: Id, - actor_fn: impl FnOnce(Stopper) -> Fut + Send + Sync + Clone + 'static, - ) where - Fut: Future + Send + 'static, - { - let (stop_tx, stop_rx) = chan::bounded(1); +impl ActorsCollection { + pub async fn declare(&self, actor: impl IntoActor) { + async fn inner( + this: &ActorsCollection, + identifier: Id, + actor: Box>, + ) { + let (stop_tx, stop_rx) = chan::bounded(1); - self.actors.write().await.insert( - identifier, - Actor { - spawn_fn: Arc::new(move |stop| Box::pin((actor_fn.clone())(stop))), - maybe_handle: None, - is_running: Arc::new(AtomicBool::new(false)), - stop_tx, - stop_rx, - }, - ); + this.actors_map.write().await.insert( + identifier, + ActorHandler { + actor: Arc::new(Mutex::new(actor)), + maybe_handle: None, + is_running: Arc::new(AtomicBool::new(false)), + stop_tx, + stop_rx, + }, + ); + } + + let (identifier, actor) = actor.into_actor(); + inner(self, identifier, actor).await; + } + + pub async fn declare_many_boxed( + &self, + actors: impl IntoIterator>)> + Send, + ) { + let mut actor_map = self.actors_map.write().await; + + for 
(id, actor) in actors { + let (stop_tx, stop_rx) = chan::bounded(1); + + actor_map.insert( + id, + ActorHandler { + actor: Arc::new(Mutex::new(actor)), + maybe_handle: None, + is_running: Arc::new(AtomicBool::new(false)), + stop_tx, + stop_rx, + }, + ); + } } #[instrument(skip(self))] pub async fn start(&self, identifier: Id) { - if let Some(actor) = self.actors.write().await.get_mut(&identifier) { + if let Some(actor) = self.actors_map.write().await.get_mut(&identifier) { if actor.is_running.load(Ordering::Acquire) { warn!("Actor already running!"); return; @@ -124,15 +194,19 @@ where } actor.maybe_handle = Some(spawn({ - let spawn_fn = Arc::clone(&actor.spawn_fn); - let stop_actor = Stopper(actor.stop_rx.clone()); + let actor = Arc::clone(&actor.actor); async move { - if (AssertUnwindSafe((spawn_fn)(stop_actor))) - .catch_unwind() - .await - .is_err() + if (AssertUnwindSafe( + actor + .try_lock() + .expect("actors can only have a single run at a time") + .run(stop_actor), + )) + .catch_unwind() + .await + .is_err() { error!("Actor unexpectedly panicked"); } @@ -149,7 +223,7 @@ where #[instrument(skip(self))] pub async fn stop(&self, identifier: Id) { - if let Some(actor) = self.actors.write().await.get_mut(&identifier) { + if let Some(actor) = self.actors_map.write().await.get_mut(&identifier) { if !actor.is_running.load(Ordering::Acquire) { warn!("Actor already stopped!"); return; @@ -169,12 +243,12 @@ where } } - pub async fn get_state(&self) -> HashMap { - self.actors + pub async fn get_state(&self) -> Vec<(String, bool)> { + self.actors_map .read() .await .iter() - .map(|(&identifier, actor)| { + .map(|(identifier, actor)| { ( identifier.to_string(), actor.is_running.load(Ordering::Relaxed), @@ -184,22 +258,22 @@ where } } -impl Default for ActorsCollection { +impl Default for ActorsCollection { fn default() -> Self { let (invalidate_tx, invalidate_rx) = broadcast::channel(1); Self { - actors: Arc::default(), + actors_map: Arc::default(), invalidate_rx, 
invalidate_tx, } } } -impl Clone for ActorsCollection { +impl Clone for ActorsCollection { fn clone(&self) -> Self { Self { - actors: Arc::clone(&self.actors), + actors_map: Arc::clone(&self.actors_map), invalidate_rx: self.invalidate_rx.resubscribe(), invalidate_tx: self.invalidate_tx.clone(), } diff --git a/crates/crypto/src/cloud/decrypt.rs b/crates/crypto/src/cloud/decrypt.rs index 1ba41f35b..c45a99110 100644 --- a/crates/crypto/src/cloud/decrypt.rs +++ b/crates/crypto/src/cloud/decrypt.rs @@ -1,5 +1,5 @@ use crate::{ - primitives::{EncryptedBlock, StreamNonce}, + primitives::{EncryptedBlock, EncryptedBlockRef, StreamNonce}, Error, }; @@ -12,7 +12,8 @@ use tokio::io::{AsyncBufReadExt, AsyncRead, AsyncWrite, AsyncWriteExt, BufReader use super::secret_key::SecretKey; pub trait OneShotDecryption { - fn decrypt(&self, cipher_text: &EncryptedBlock) -> Result, Error>; + fn decrypt(&self, cipher_text: EncryptedBlockRef<'_>) -> Result, Error>; + fn decrypt_owned(&self, cipher_text: &EncryptedBlock) -> Result, Error>; } pub trait StreamDecryption { @@ -26,6 +27,15 @@ pub trait StreamDecryption { impl OneShotDecryption for SecretKey { fn decrypt( + &self, + EncryptedBlockRef { nonce, cipher_text }: EncryptedBlockRef<'_>, + ) -> Result, Error> { + XChaCha20Poly1305::new(&self.0) + .decrypt(&nonce, cipher_text) + .map_err(|aead::Error| Error::Decrypt) + } + + fn decrypt_owned( &self, EncryptedBlock { nonce, cipher_text }: &EncryptedBlock, ) -> Result, Error> { diff --git a/crates/crypto/src/cloud/secret_key.rs b/crates/crypto/src/cloud/secret_key.rs index d097389f6..8d203d112 100644 --- a/crates/crypto/src/cloud/secret_key.rs +++ b/crates/crypto/src/cloud/secret_key.rs @@ -161,7 +161,7 @@ mod tests { let key = SecretKey::generate(&mut rng); let encrypted_block = key.encrypt(message, &mut rng).unwrap(); - let decrypted_message = key.decrypt(&encrypted_block).unwrap(); + let decrypted_message = key.decrypt_owned(&encrypted_block).unwrap(); assert_eq!(message, 
decrypted_message.as_slice()); } diff --git a/crates/crypto/src/crypto/mod.rs b/crates/crypto/src/crypto/mod.rs deleted file mode 100644 index 6ab19cca1..000000000 --- a/crates/crypto/src/crypto/mod.rs +++ /dev/null @@ -1,821 +0,0 @@ -//! This module contains all encryption and decryption items. These are used throughout the crate for all encryption/decryption needs. - -mod stream; - -pub use self::stream::{Decryptor, Encryptor}; - -#[cfg(test)] -mod tests { - use std::io::Cursor; - - use crate::{ - crypto::{Decryptor, Encryptor}, - primitives::{ - AAD_LEN, AEAD_TAG_LEN, AES_256_GCM_SIV_NONCE_LEN, BLOCK_LEN, KEY_LEN, - XCHACHA20_POLY1305_NONCE_LEN, - }, - rng::CryptoRng, - types::{Aad, Algorithm, EncryptedKey, Key, Nonce}, - }; - - // const KEY: Key = Key::new([0x23; KEY_LEN]); - - const XCHACHA20_POLY1305_NONCE: Nonce = - Nonce::XChaCha20Poly1305([0xE9; XCHACHA20_POLY1305_NONCE_LEN]); - - const AES_256_GCM_SIV_NONCE: Nonce = Nonce::Aes256GcmSiv([0xE9; AES_256_GCM_SIV_NONCE_LEN]); - - const PLAINTEXT: [u8; 32] = [0x5A; 32]; - // const PLAINTEXT_KEY: Key = Key::new([1u8; KEY_LEN]); - - const AAD: Aad = Aad::Standard([0x92; AAD_LEN]); - - // for the `const` arrays below, [0] is without AAD, [1] is with AAD - - const AES_256_GCM_SIV_BYTES_EXPECTED: [[u8; 48]; 2] = [ - [ - 41, 231, 183, 92, 73, 104, 69, 207, 245, 250, 21, 50, 145, 41, 104, 165, 130, 59, 70, - 185, 65, 77, 215, 15, 131, 214, 183, 47, 166, 223, 185, 181, 117, 138, 62, 204, 246, - 227, 198, 32, 132, 5, 97, 120, 15, 70, 229, 218, - ], - [ - 3, 180, 75, 64, 231, 67, 228, 189, 149, 69, 47, 83, 8, 214, 103, 12, 21, 11, 39, 108, - 7, 142, 10, 169, 85, 163, 76, 53, 53, 69, 160, 134, 2, 87, 72, 121, 75, 186, 102, 176, - 163, 170, 81, 101, 242, 237, 173, 133, - ], - ]; - - const XCHACHA20_POLY1305_BYTES_EXPECTED: [[u8; 48]; 2] = [ - [ - 35, 174, 252, 59, 215, 65, 5, 237, 198, 2, 51, 72, 239, 88, 36, 177, 136, 252, 64, 157, - 141, 53, 138, 98, 185, 2, 75, 173, 253, 99, 133, 207, 145, 54, 100, 51, 44, 230, 60, 5, - 
157, 70, 110, 145, 166, 41, 215, 95, - ], - [ - 35, 174, 252, 59, 215, 65, 5, 237, 198, 2, 51, 72, 239, 88, 36, 177, 136, 252, 64, 157, - 141, 53, 138, 98, 185, 2, 75, 173, 253, 99, 133, 207, 125, 139, 247, 158, 207, 216, 60, - 114, 72, 44, 6, 212, 233, 141, 251, 239, - ], - ]; - - const XCHACHA20_POLY1305_ENCRYPTED_KEY: EncryptedKey = EncryptedKey::new( - [ - 120, 245, 167, 96, 140, 26, 94, 182, 157, 89, 104, 19, 180, 3, 127, 234, 211, 167, 27, - 198, 214, 110, 209, 57, 226, 89, 16, 246, 166, 56, 222, 148, 40, 198, 237, 205, 45, 49, - 205, 18, 69, 102, 16, 78, 199, 141, 246, 165, - ], - XCHACHA20_POLY1305_NONCE, - ); - - const AES_256_GCM_ENCRYPTED_KEY: EncryptedKey = EncryptedKey::new( - [ - 227, 231, 27, 182, 122, 118, 64, 35, 125, 176, 152, 244, 156, 26, 234, 96, 178, 121, - 73, 213, 228, 189, 45, 152, 189, 68, 214, 187, 123, 182, 91, 83, 216, 50, 174, 13, 157, - 121, 165, 129, 227, 220, 139, 166, 9, 71, 215, 145, - ], - AES_256_GCM_SIV_NONCE, - ); - - #[test] - fn aes_256_gcm_siv_encrypt_bytes() { - let output = Encryptor::encrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - &PLAINTEXT, - Aad::Null, - ) - .unwrap(); - - assert_eq!(output, AES_256_GCM_SIV_BYTES_EXPECTED[0]); - } - - #[test] - fn aes_256_gcm_siv_encrypt_bytes_with_aad() { - let output = Encryptor::encrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - &PLAINTEXT, - AAD, - ) - .unwrap(); - - assert_eq!(output, AES_256_GCM_SIV_BYTES_EXPECTED[1]); - } - - #[test] - fn aes_256_gcm_siv_decrypt_bytes() { - let output = Decryptor::decrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - &AES_256_GCM_SIV_BYTES_EXPECTED[0], - Aad::Null, - ) - .unwrap(); - - assert_eq!(output.expose(), &PLAINTEXT); - } - - #[test] - fn aes_256_gcm_siv_decrypt_bytes_with_aad() { - let output = Decryptor::decrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - 
Algorithm::Aes256GcmSiv, - &AES_256_GCM_SIV_BYTES_EXPECTED[1], - AAD, - ) - .unwrap(); - - assert_eq!(output.expose(), &PLAINTEXT); - } - - #[test] - fn aes_256_gcm_siv_encrypt_key() { - let output = Encryptor::encrypt_key( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - &Key::new([1u8; KEY_LEN]), - Aad::Null, - ) - .unwrap(); - - assert_eq!(output, AES_256_GCM_ENCRYPTED_KEY); - } - - #[test] - fn aes_256_gcm_siv_decrypt_key() { - let output = Decryptor::decrypt_key( - &Key::new([0x23; KEY_LEN]), - Algorithm::Aes256GcmSiv, - &AES_256_GCM_ENCRYPTED_KEY, - Aad::Null, - ) - .unwrap(); - - assert_eq!(output, Key::new([1u8; KEY_LEN])); - } - - #[test] - fn aes_256_gcm_siv_encrypt_tiny() { - let output = Encryptor::encrypt_tiny( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - &PLAINTEXT, - Aad::Null, - ) - .unwrap(); - - assert_eq!(output, AES_256_GCM_SIV_BYTES_EXPECTED[0]); - } - - #[test] - fn aes_256_gcm_siv_decrypt_tiny() { - let output = Decryptor::decrypt_tiny( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - &AES_256_GCM_SIV_BYTES_EXPECTED[0], - Aad::Null, - ) - .unwrap(); - - assert_eq!(output.expose(), &PLAINTEXT); - } - - #[test] - #[should_panic(expected = "LengthMismatch")] - fn aes_256_gcm_siv_encrypt_tiny_too_large() { - Encryptor::encrypt_tiny( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - &vec![0u8; BLOCK_LEN], - Aad::Null, - ) - .unwrap(); - } - - #[test] - #[should_panic(expected = "LengthMismatch")] - fn aes_256_gcm_siv_decrypt_tiny_too_large() { - Decryptor::decrypt_tiny( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - &vec![0u8; BLOCK_LEN + AEAD_TAG_LEN], - Aad::Null, - ) - .unwrap(); - } - - #[test] - #[should_panic(expected = "Decrypt")] - fn aes_256_gcm_siv_decrypt_bytes_missing_aad() { - Decryptor::decrypt_bytes( - &Key::new([0x23; KEY_LEN]), - 
&AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - &AES_256_GCM_SIV_BYTES_EXPECTED[1], - Aad::Null, - ) - .unwrap(); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn aes_256_gcm_siv_encrypt_and_decrypt_5_blocks() { - let buf = CryptoRng::generate_vec(BLOCK_LEN * 5); - - let mut reader = Cursor::new(&buf); - let mut writer = Cursor::new(Vec::new()); - - let encryptor = Encryptor::new( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - ) - .unwrap(); - - encryptor - .encrypt_streams(&mut reader, &mut writer, Aad::Null) - .unwrap(); - - let mut reader = Cursor::new(writer.into_inner()); - let mut writer = Cursor::new(Vec::new()); - - let decryptor = Decryptor::new( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - ) - .unwrap(); - - decryptor - .decrypt_streams(&mut reader, &mut writer, Aad::Null) - .unwrap(); - - let output = writer.into_inner(); - - assert_eq!(buf, output); - } - - #[test] - #[ignore] - fn aes_256_gcm_siv_encrypt_and_decrypt_128mib() { - let buf = vec![1u8; BLOCK_LEN * 128].into_boxed_slice(); - - let mut reader = Cursor::new(&buf); - let mut writer = Cursor::new(Vec::new()); - - let encryptor = Encryptor::new( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - ) - .unwrap(); - - encryptor - .encrypt_streams(&mut reader, &mut writer, Aad::Null) - .unwrap(); - - let mut reader = Cursor::new(writer.into_inner()); - let mut writer = Cursor::new(Vec::new()); - - let decryptor = Decryptor::new( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - ) - .unwrap(); - - decryptor - .decrypt_streams(&mut reader, &mut writer, Aad::Null) - .unwrap(); - - let output = writer.into_inner().into_boxed_slice(); - - assert_eq!(buf, output); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn aes_256_gcm_siv_encrypt_and_decrypt_5_blocks_with_aad() { - let buf = CryptoRng::generate_vec(BLOCK_LEN * 5); - - let mut reader = 
Cursor::new(&buf); - let mut writer = Cursor::new(Vec::new()); - - let encryptor = Encryptor::new( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - ) - .unwrap(); - - encryptor - .encrypt_streams(&mut reader, &mut writer, AAD) - .unwrap(); - - let mut reader = Cursor::new(writer.into_inner()); - let mut writer = Cursor::new(Vec::new()); - - let decryptor = Decryptor::new( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - ) - .unwrap(); - - decryptor - .decrypt_streams(&mut reader, &mut writer, AAD) - .unwrap(); - - let output = writer.into_inner(); - - assert_eq!(buf, output); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn aes_256_gcm_siv_encrypt_and_decrypt_5_blocks_async() { - let buf = CryptoRng::generate_vec(BLOCK_LEN * 5); - - let mut reader = Cursor::new(&buf); - let mut writer = Cursor::new(Vec::new()); - - let encryptor = Encryptor::new( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - ) - .unwrap(); - - encryptor - .encrypt_streams_async(&mut reader, &mut writer, Aad::Null) - .await - .unwrap(); - - let mut reader = Cursor::new(writer.into_inner()); - let mut writer = Cursor::new(Vec::new()); - - let decryptor = Decryptor::new( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - ) - .unwrap(); - - decryptor - .decrypt_streams_async(&mut reader, &mut writer, Aad::Null) - .await - .unwrap(); - - let output = writer.into_inner(); - - assert_eq!(buf, output); - } - - #[tokio::test] - #[cfg(feature = "tokio")] - #[cfg_attr(miri, ignore)] - async fn aes_256_gcm_siv_encrypt_and_decrypt_5_blocks_with_aad_async() { - let buf = CryptoRng::generate_vec(BLOCK_LEN * 5); - - let mut reader = Cursor::new(&buf); - let mut writer = Cursor::new(Vec::new()); - - let encryptor = Encryptor::new( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - ) - .unwrap(); - - encryptor - 
.encrypt_streams_async(&mut reader, &mut writer, AAD) - .await - .unwrap(); - - let mut reader = Cursor::new(writer.into_inner()); - let mut writer = Cursor::new(Vec::new()); - - let decryptor = Decryptor::new( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::Aes256GcmSiv, - ) - .unwrap(); - - decryptor - .decrypt_streams_async(&mut reader, &mut writer, AAD) - .await - .unwrap(); - - let output = writer.into_inner(); - - assert_eq!(buf, output); - } - - #[test] - fn xchacha20_poly1305_encrypt_bytes() { - let output = Encryptor::encrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &PLAINTEXT, - Aad::Null, - ) - .unwrap(); - - assert_eq!(output, XCHACHA20_POLY1305_BYTES_EXPECTED[0]); - } - - #[test] - fn xchacha20_poly1305_encrypt_key() { - let output = Encryptor::encrypt_key( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &Key::new([1u8; KEY_LEN]), - Aad::Null, - ) - .unwrap(); - - assert_eq!(output, XCHACHA20_POLY1305_ENCRYPTED_KEY); - } - - #[test] - fn xchacha20_poly1305_decrypt_key() { - let output = Decryptor::decrypt_key( - &Key::new([0x23; KEY_LEN]), - Algorithm::XChaCha20Poly1305, - &XCHACHA20_POLY1305_ENCRYPTED_KEY, - Aad::Null, - ) - .unwrap(); - - assert_eq!(output, Key::new([1u8; KEY_LEN])); - } - - #[test] - fn xchacha20_poly1305_encrypt_tiny() { - let output = Encryptor::encrypt_tiny( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &PLAINTEXT, - Aad::Null, - ) - .unwrap(); - - assert_eq!(output, XCHACHA20_POLY1305_BYTES_EXPECTED[0]); - } - - #[test] - fn xchacha20_poly1305_decrypt_tiny() { - let output = Decryptor::decrypt_tiny( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &XCHACHA20_POLY1305_BYTES_EXPECTED[0], - Aad::Null, - ) - .unwrap(); - - assert_eq!(output.expose(), &PLAINTEXT); - } - - #[test] - #[should_panic(expected = 
"LengthMismatch")] - fn xchacha20_poly1305_encrypt_tiny_too_large() { - Encryptor::encrypt_tiny( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &vec![0u8; BLOCK_LEN], - Aad::Null, - ) - .unwrap(); - } - - #[test] - #[should_panic(expected = "LengthMismatch")] - fn xchacha20_poly1305_decrypt_tiny_too_large() { - Decryptor::decrypt_tiny( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &vec![0u8; BLOCK_LEN + AEAD_TAG_LEN], - Aad::Null, - ) - .unwrap(); - } - - #[test] - fn xchacha20_poly1305_encrypt_bytes_with_aad() { - let output = Encryptor::encrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &PLAINTEXT, - AAD, - ) - .unwrap(); - - assert_eq!(output, XCHACHA20_POLY1305_BYTES_EXPECTED[1]); - } - - #[test] - fn xchacha20_poly1305_decrypt_bytes() { - let output = Decryptor::decrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &XCHACHA20_POLY1305_BYTES_EXPECTED[0], - Aad::Null, - ) - .unwrap(); - - assert_eq!(output.expose(), &PLAINTEXT); - } - - #[test] - fn xchacha20_poly1305_decrypt_bytes_with_aad() { - let output = Decryptor::decrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &XCHACHA20_POLY1305_BYTES_EXPECTED[1], - AAD, - ) - .unwrap(); - - assert_eq!(output.expose(), &PLAINTEXT); - } - - #[test] - #[should_panic(expected = "Decrypt")] - fn xchacha20_poly1305_decrypt_bytes_missing_aad() { - Decryptor::decrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &XCHACHA20_POLY1305_BYTES_EXPECTED[1], - Aad::Null, - ) - .unwrap(); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn xchacha20_poly1305_encrypt_and_decrypt_5_blocks() { - let buf = CryptoRng::generate_vec(BLOCK_LEN * 5); - - let mut reader = Cursor::new(&buf); - let mut writer = Cursor::new(Vec::new()); - - 
let encryptor = Encryptor::new( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - ) - .unwrap(); - - encryptor - .encrypt_streams(&mut reader, &mut writer, Aad::Null) - .unwrap(); - - let mut reader = Cursor::new(writer.into_inner()); - let mut writer = Cursor::new(Vec::new()); - - let decryptor = Decryptor::new( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - ) - .unwrap(); - - decryptor - .decrypt_streams(&mut reader, &mut writer, Aad::Null) - .unwrap(); - - let output = writer.into_inner(); - - assert_eq!(buf, output); - } - - #[test] - #[ignore] - fn xchacha20_poly1305_encrypt_and_decrypt_128mib() { - let buf = vec![1u8; BLOCK_LEN * 128].into_boxed_slice(); - - let mut reader = Cursor::new(&buf); - let mut writer = Cursor::new(Vec::new()); - - let encryptor = Encryptor::new( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - ) - .unwrap(); - - encryptor - .encrypt_streams(&mut reader, &mut writer, Aad::Null) - .unwrap(); - - let mut reader = Cursor::new(writer.into_inner()); - let mut writer = Cursor::new(Vec::new()); - - let decryptor = Decryptor::new( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - ) - .unwrap(); - - decryptor - .decrypt_streams(&mut reader, &mut writer, Aad::Null) - .unwrap(); - - let output = writer.into_inner().into_boxed_slice(); - - assert_eq!(buf, output); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn xchacha20_poly1305_encrypt_and_decrypt_5_blocks_with_aad() { - let buf = CryptoRng::generate_vec(BLOCK_LEN * 5); - - let mut reader = Cursor::new(&buf); - let mut writer = Cursor::new(Vec::new()); - - let encryptor = Encryptor::new( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - ) - .unwrap(); - - encryptor - .encrypt_streams(&mut reader, &mut writer, AAD) - .unwrap(); - - let mut reader = 
Cursor::new(writer.into_inner()); - let mut writer = Cursor::new(Vec::new()); - - let decryptor = Decryptor::new( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - ) - .unwrap(); - - decryptor - .decrypt_streams(&mut reader, &mut writer, AAD) - .unwrap(); - - let output = writer.into_inner(); - - assert_eq!(buf, output); - } - - #[tokio::test] - #[cfg(feature = "tokio")] - #[cfg_attr(miri, ignore)] - async fn xchacha20_poly1305_encrypt_and_decrypt_5_blocks_async() { - let buf = CryptoRng::generate_vec(BLOCK_LEN * 5); - - let mut reader = Cursor::new(&buf); - let mut writer = Cursor::new(Vec::new()); - - let encryptor = Encryptor::new( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - ) - .unwrap(); - - encryptor - .encrypt_streams_async(&mut reader, &mut writer, Aad::Null) - .await - .unwrap(); - - let mut reader = Cursor::new(writer.into_inner()); - let mut writer = Cursor::new(Vec::new()); - - let decryptor = Decryptor::new( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - ) - .unwrap(); - - decryptor - .decrypt_streams_async(&mut reader, &mut writer, Aad::Null) - .await - .unwrap(); - - let output = writer.into_inner(); - - assert_eq!(buf, output); - } - - #[tokio::test] - #[cfg(feature = "tokio")] - #[cfg_attr(miri, ignore)] - async fn xchacha20_poly1305_encrypt_and_decrypt_5_blocks_with_aad_async() { - let buf = CryptoRng::generate_vec(BLOCK_LEN * 5); - - let mut reader = Cursor::new(&buf); - let mut writer = Cursor::new(Vec::new()); - - let encryptor = Encryptor::new( - &Key::new([0x23; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - ) - .unwrap(); - - encryptor - .encrypt_streams_async(&mut reader, &mut writer, AAD) - .await - .unwrap(); - - let mut reader = Cursor::new(writer.into_inner()); - let mut writer = Cursor::new(Vec::new()); - - let decryptor = Decryptor::new( - &Key::new([0x23; KEY_LEN]), - 
&XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - ) - .unwrap(); - - decryptor - .decrypt_streams_async(&mut reader, &mut writer, AAD) - .await - .unwrap(); - - let output = writer.into_inner(); - - assert_eq!(buf, output); - } - - #[test] - #[should_panic(expected = "Validity")] - fn encrypt_with_invalid_nonce() { - Encryptor::encrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::XChaCha20Poly1305, - &PLAINTEXT, - Aad::Null, - ) - .unwrap(); - } - - #[test] - #[should_panic(expected = "Validity")] - fn encrypt_with_null_nonce() { - Encryptor::encrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &Nonce::XChaCha20Poly1305([0u8; 20]), - Algorithm::XChaCha20Poly1305, - &PLAINTEXT, - Aad::Null, - ) - .unwrap(); - } - - #[test] - #[should_panic(expected = "Validity")] - fn encrypt_with_null_key() { - Encryptor::encrypt_bytes( - &Key::new([0u8; KEY_LEN]), - &XCHACHA20_POLY1305_NONCE, - Algorithm::XChaCha20Poly1305, - &PLAINTEXT, - Aad::Null, - ) - .unwrap(); - } - - #[test] - #[should_panic(expected = "Validity")] - fn decrypt_with_invalid_nonce() { - Decryptor::decrypt_bytes( - &Key::new([0x23; KEY_LEN]), - &AES_256_GCM_SIV_NONCE, - Algorithm::XChaCha20Poly1305, - &XCHACHA20_POLY1305_BYTES_EXPECTED[0], - Aad::Null, - ) - .unwrap(); - } -} diff --git a/crates/crypto/src/crypto/stream.rs b/crates/crypto/src/crypto/stream.rs deleted file mode 100644 index dce48d57b..000000000 --- a/crates/crypto/src/crypto/stream.rs +++ /dev/null @@ -1,314 +0,0 @@ -use std::io::{Cursor, Read, Write}; - -use crate::{ - primitives::{AEAD_TAG_LEN, BLOCK_LEN}, - types::{Aad, Algorithm, EncryptedKey, Key, Nonce}, - utils::ToArray, - Error, Protected, Result, -}; -use aead::{ - stream::{DecryptorLE31, EncryptorLE31}, - Payload, -}; -use aes_gcm_siv::Aes256GcmSiv; -use chacha20poly1305::XChaCha20Poly1305; - -#[cfg(feature = "tokio")] -use tokio::io::{AsyncReadExt, AsyncWriteExt}; - -macro_rules! 
impl_stream { - ( - $name:ident, // "Decryptor", "Encryptor" - $error:expr, - $next_fn:ident, // "encrypt_next" - $last_fn:ident, // "encrypt_last" - $last_in_place_fn:ident, - $stream_primitive:ident, // "DecryptorLE31" - $streams_fn:ident, // "encrypt_streams" - $streams_fn_async:ident, // "encrypt_streams_async" - $bytes_fn:ident, // "encrypt_bytes" - $bytes_return:ty, - $size:expr, - $($algorithm:tt),* -) => { - pub enum $name { - $( - $algorithm(Box<$stream_primitive<$algorithm>>), - )* - } - - impl $name { - /// This should be used to initialize a stream object. - /// - /// The desired master key, nonce and algorithm should be provided. - /// - /// This function ensures that both the nonce and key are *valid*. - /// For more information, view `Key::validate()` and `Nonce::validate()` - pub fn new(key: &Key, nonce: &Nonce, algorithm: Algorithm) -> Result { - nonce.validate(algorithm)?; - - let s = match algorithm { - $( - Algorithm::$algorithm => Self::$algorithm(Box::new($stream_primitive::new(&key.into(), &nonce.into()))), - )* - }; - - Ok(s) - } - - fn $next_fn<'msg, 'aad>( - &mut self, - payload: impl Into>, - ) -> Result> { - match self { - $( - Self::$algorithm(s) => s.$next_fn(payload), - )* - } - .map_err(|_| $error) - } - - fn $last_fn<'msg, 'aad>(self, payload: impl Into>) -> Result> { - match self { - $( - Self::$algorithm(s) => s.$last_fn(payload), - )* - } - .map_err(|_| $error) - } - - fn $last_in_place_fn(self, aad: Aad, buf: &mut dyn aead::Buffer) -> Result<()> { - match self { - $( - Self::$algorithm(s) => s.$last_in_place_fn(aad.inner(), buf), - )* - } - .map_err(|_| $error) - } - - /// This function should be used for large amounts of data. - /// - /// The streaming implementation reads blocks of data in `BLOCK_LEN`, encrypts/decrypts, and writes to the writer. - /// - /// It requires a reader, a writer, and any relevant AAD. - /// - /// The AAD will be authenticated with every block of data. 
- pub fn $streams_fn( - mut self, - mut reader: R, - mut writer: W, - aad: Aad, - ) -> Result<()> - where - R: Read, - W: Write, - { - let mut buffer = vec![0u8; $size].into_boxed_slice(); - - loop { - let count = reader.read(&mut buffer)?; - - let payload = Payload { - aad: aad.inner(), - msg: &buffer[..count], - }; - - if count == $size { - let data = self.$next_fn(payload)?; - writer.write_all(&data)?; - } else { - let data = self.$last_fn(payload)?; - writer.write_all(&data)?; - break; - } - } - - writer.flush()?; - - Ok(()) - } - - /// This function should be used for large amounts of data. - /// - /// The streaming implementation reads blocks of data in `BLOCK_LEN`, encrypts/decrypts, and writes to the writer. - /// - /// It requires a reader, a writer, and any relevant AAD. - /// - /// The AAD will be authenticated with every block of data. - #[cfg(feature = "tokio")] - pub async fn $streams_fn_async( - mut self, - mut reader: R, - mut writer: W, - aad: Aad, - ) -> Result<()> - where - R: AsyncReadExt + Unpin + Send, - W: AsyncWriteExt + Unpin + Send, - { - let mut buffer = vec![0u8; $size].into_boxed_slice(); - - loop { - let count = reader.read(&mut buffer).await?; - - // TODO(brxken128): block on `next_fn` and `last_fn` exclusively - - let payload = Payload { - aad: aad.inner(), - msg: &buffer[..count], - }; - - if count == $size { - let data = self.$next_fn(payload)?; - writer.write_all(&data).await?; - } else { - let data = self.$last_fn(payload)?; - writer.write_all(&data).await?; - break; - } - } - - writer.flush().await?; - - Ok(()) - } - - /// This should ideally only be used for small amounts of data. - /// - /// It is just a thin wrapper around the associated `encrypt/decrypt_streams` function. 
- pub fn $bytes_fn( - key: &Key, - nonce: &Nonce, - algorithm: Algorithm, - bytes: &[u8], - aad: Aad, - ) -> Result<$bytes_return> { - let mut writer = Cursor::new(Vec::new()); - let s = Self::new(key, nonce, algorithm)?; - - s - .$streams_fn(bytes, &mut writer, aad) - .map(|()| writer.into_inner().into()) - } - } - }; -} - -impl Encryptor { - pub fn encrypt_key( - key: &Key, - nonce: &Nonce, - algorithm: Algorithm, - key_to_encrypt: &Key, - aad: Aad, - ) -> Result { - Self::encrypt_tiny(key, nonce, algorithm, key_to_encrypt.expose(), aad) - .map(|b| Ok(EncryptedKey::new(b.to_array()?, *nonce))) - .map_err(|_| Error::Encrypt)? - } - - /// This is only for encrypting inputs < `BLOCK_LEN`. For anything larger, - /// see [`Encryptor::encrypt_bytes`] or [`Encryptor::encrypt_streams`]. - /// - /// It uses `encrypt_last_in_place` under the hood due to the input always being less than `BLOCK_LEN`. - /// - /// It's faster than the alternatives (for small sizes) as we don't need to allocate the - /// full buffer - we only allocate what is required. - pub fn encrypt_tiny( - key: &Key, - nonce: &Nonce, - algorithm: Algorithm, - bytes: &[u8], - aad: Aad, - ) -> Result> { - if bytes.len() >= BLOCK_LEN { - return Err(Error::LengthMismatch); - } - - let s = Self::new(key, nonce, algorithm)?; - let mut buffer = Vec::with_capacity(bytes.len() + AEAD_TAG_LEN); - buffer.extend_from_slice(bytes); - s.encrypt_last_in_place(aad, &mut buffer)?; - - Ok(buffer) - } -} - -impl Decryptor { - pub fn decrypt_key( - key: &Key, - algorithm: Algorithm, - encrypted_key: &EncryptedKey, - aad: Aad, - ) -> Result { - Self::decrypt_tiny( - key, - encrypted_key.nonce(), - algorithm, - encrypted_key.inner(), - aad, - ) - .map(Key::try_from) - .map_err(|_| Error::Decrypt)? - } - - /// This is only for decrypting inputs < `BLOCK_LEN + AEAD_TAG_LEN`. For anything larger, - /// see [`Decryptor::decrypt_bytes`] or [`Decryptor::decrypt_streams`]. 
- /// - /// It uses `decrypt_last_in_place` under the hood due to the input always being less than `BLOCK_LEN + AEAD_TAG_LEN`. - /// - /// It's faster than the alternatives (for small sizes) as we don't need to allocate the - /// full buffer - we only allocate what is required. - pub fn decrypt_tiny( - key: &Key, - nonce: &Nonce, - algorithm: Algorithm, - bytes: &[u8], - aad: Aad, - ) -> Result>> { - if bytes.len() >= (BLOCK_LEN + AEAD_TAG_LEN) { - return Err(Error::LengthMismatch); - } - - let s = Self::new(key, nonce, algorithm)?; - let mut buffer = Vec::with_capacity(bytes.len() + AEAD_TAG_LEN); - buffer.extend_from_slice(bytes); - s.decrypt_last_in_place(aad, &mut buffer)?; - - buffer.truncate(bytes.len() - AEAD_TAG_LEN); - - Ok(buffer.into()) - } -} - -impl_stream!( - Encryptor, - Error::Encrypt, - encrypt_next, - encrypt_last, - encrypt_last_in_place, - EncryptorLE31, - encrypt_streams, - encrypt_streams_async, - encrypt_bytes, - Vec, - BLOCK_LEN, - Aes256GcmSiv, - XChaCha20Poly1305 -); - -impl_stream!( - Decryptor, - Error::Decrypt, - decrypt_next, - decrypt_last, - decrypt_last_in_place, - DecryptorLE31, - decrypt_streams, - decrypt_streams_async, - decrypt_bytes, - Protected>, - (BLOCK_LEN + AEAD_TAG_LEN), - Aes256GcmSiv, - XChaCha20Poly1305 -); diff --git a/crates/crypto/src/lib.rs b/crates/crypto/src/lib.rs index d5b5fee06..8238e9e21 100644 --- a/crates/crypto/src/lib.rs +++ b/crates/crypto/src/lib.rs @@ -11,7 +11,6 @@ clippy::unwrap_used, unused_qualifications, rust_2018_idioms, - clippy::expect_used, trivial_casts, trivial_numeric_casts, unused_allocation, diff --git a/crates/crypto/src/primitives.rs b/crates/crypto/src/primitives.rs index ffe8577c8..a37981a01 100644 --- a/crates/crypto/src/primitives.rs +++ b/crates/crypto/src/primitives.rs @@ -15,6 +15,22 @@ pub struct EncryptedBlock { pub cipher_text: Vec, } +pub struct EncryptedBlockRef<'e> { + pub nonce: OneShotNonce, + pub cipher_text: &'e [u8], +} + +impl<'e> From<&'e [u8]> for 
EncryptedBlockRef<'e> { + fn from(cipher_text: &'e [u8]) -> Self { + let (nonce, cipher_text) = cipher_text.split_at(size_of::()); + + Self { + nonce: OneShotNonce::try_from(nonce).expect("we split the correct amount"), + cipher_text, + } + } +} + impl EncryptedBlock { /// The block size used for STREAM encryption/decryption. This size seems to offer /// the best performance compared to alternatives. diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index 587118ebc..57833680c 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -110,8 +110,6 @@ export type Procedures = { { key: "library.create", input: CreateLibraryArgs, result: LibraryConfigWrapped } | { key: "library.delete", input: string, result: null } | { key: "library.edit", input: EditLibraryArgs, result: null } | - { key: "library.startActor", input: LibraryArgs, result: null } | - { key: "library.stopActor", input: LibraryArgs, result: null } | { key: "library.vacuumDb", input: LibraryArgs, result: null } | { key: "locations.addLibrary", input: LibraryArgs, result: number | null } | { key: "locations.create", input: LibraryArgs, result: number | null } | @@ -144,7 +142,7 @@ export type Procedures = { { key: "jobs.newFilePathIdentified", input: LibraryArgs, result: number[] } | { key: "jobs.newThumbnail", input: LibraryArgs, result: ThumbKey } | { key: "jobs.progress", input: LibraryArgs, result: JobProgressEvent } | - { key: "library.actors", input: LibraryArgs, result: { [key in string]: boolean } } | + { key: "library.actors", input: LibraryArgs, result: ([string, boolean])[] } | { key: "library.updatedKindStatistic", input: LibraryArgs, result: KindStatistic } | { key: "locations.online", input: never, result: number[][] } | { key: "locations.quickRescan", input: LibraryArgs, result: null } | From 5ae87e37ce8075f0a1dad682478863ad595e1070 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 6 Sep 2024 
09:53:37 -0400 Subject: [PATCH 101/218] Working Tauri fetch in prod It finally works! --- apps/desktop/src-tauri/src/main.rs | 2 +- apps/desktop/src/App.tsx | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/apps/desktop/src-tauri/src/main.rs b/apps/desktop/src-tauri/src/main.rs index 26d33ca65..58e781fd6 100644 --- a/apps/desktop/src-tauri/src/main.rs +++ b/apps/desktop/src-tauri/src/main.rs @@ -222,7 +222,6 @@ async fn main() -> tauri::Result<()> { tauri::Builder::default() .invoke_handler(builder.invoke_handler()) - .plugin(tauri_plugin_http::init()) .plugin(tauri_plugin_deep_link::init()) .setup(move |app| { // We need a the app handle to determine the data directory now. @@ -364,6 +363,7 @@ async fn main() -> tauri::Result<()> { .plugin(tauri_plugin_dialog::init()) .plugin(tauri_plugin_os::init()) .plugin(tauri_plugin_shell::init()) + .plugin(tauri_plugin_http::init()) // TODO: Bring back Tauri Plugin Window State - it was buggy so we removed it. .plugin(tauri_plugin_updater::Builder::new().build()) .plugin(updater::plugin()) diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx index 4bd285dca..36888052c 100644 --- a/apps/desktop/src/App.tsx +++ b/apps/desktop/src/App.tsx @@ -42,11 +42,15 @@ import { queryClient } from './query'; import { createMemoryRouterWithHistory } from './router'; import { createUpdater } from './updater'; +//Set global fetch to use tauri fetch +// If the build in in production mode, we need to set the global fetch to use the tauri fetch +// console.log('import.meta.env.DEV', import.meta.env.DEV); + SuperTokens.init({ appInfo: { apiDomain: 'http://localhost:9420', apiBasePath: '/api/auth', - appName: 'Spacedrive Auth Service' + appName: 'Spacedrive Auth Service' }, cookieHandler: getCookieHandler, windowHandler: getWindowHandler, @@ -59,15 +63,12 @@ SuperTokens.init({ const startupError = (window as any).__SD_ERROR__ as string | undefined; -//Set global fetch to use tauri fetch -// If the build 
in in production mode, we need to set the global fetch to use the tauri fetch -// console.log('import.meta.env.DEV', import.meta.env.DEV); -globalThis.fetch = fetch; - export default function App() { useEffect(() => { // This tells Tauri to show the current window because it's finished loading - commands.appReady(); + commands.appReady().then(() => { + if (import.meta.env.PROD) window.fetch = fetch; + }); }, []); useEffect(() => { From 07dea56316ce5272f90759abb6eeef3fe9fd6834 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:15:26 -0400 Subject: [PATCH 102/218] Disabling social logins for now --- interface/app/$libraryId/settings/client/account/Tabs.tsx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/Tabs.tsx b/interface/app/$libraryId/settings/client/account/Tabs.tsx index 890565c64..2502ce3fc 100644 --- a/interface/app/$libraryId/settings/client/account/Tabs.tsx +++ b/interface/app/$libraryId/settings/client/account/Tabs.tsx @@ -140,7 +140,8 @@ const Tabs = () => {

OR

-
+ {/* Disabling for now for demo purposes. We need to figure out on the backend how the tokens are recieved so we can a) store them in the frontend and b) use them as auth tokens for our cloud services. - @Rocky43007 */} + {/*
{SocialLogins.map((social) => ( ))} -
+
*/}
); From 2a1a9cfa0a178e3a3040a21e39d12e921fb7bbbb Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:16:25 -0400 Subject: [PATCH 103/218] Forgot to comment a bit more code --- interface/app/$libraryId/settings/client/account/Tabs.tsx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/Tabs.tsx b/interface/app/$libraryId/settings/client/account/Tabs.tsx index 2502ce3fc..015d67d77 100644 --- a/interface/app/$libraryId/settings/client/account/Tabs.tsx +++ b/interface/app/$libraryId/settings/client/account/Tabs.tsx @@ -135,13 +135,13 @@ const Tabs = () => {
{activeTab === 'Login' ? : } -
+ {/* Disabling for now for demo purposes. We need to figure out on the backend how the tokens are recieved so we can a) store them in the frontend and b) use them as auth tokens for our cloud services. - @Rocky43007 */} + {/*

OR

- {/* Disabling for now for demo purposes. We need to figure out on the backend how the tokens are recieved so we can a) store them in the frontend and b) use them as auth tokens for our cloud services. - @Rocky43007 */} - {/*
+
{SocialLogins.map((social) => ( +
); }; diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index f4b0be7ae..ca22b2b03 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -69,7 +69,6 @@ export type Procedures = { { key: "cloud.devices.update", input: DeviceUpdateRequest, result: null } | { key: "cloud.libraries.create", input: LibraryArgs, result: null } | { key: "cloud.libraries.delete", input: LibraryArgs, result: null } | - { key: "cloud.libraries.sync", input: LibraryArgs, result: null } | { key: "cloud.libraries.update", input: LibraryArgs, result: null } | { key: "cloud.locations.create", input: LocationCreateRequest, result: null } | { key: "cloud.locations.delete", input: LocationDeleteRequest, result: null } | From a29b86ab32e9731c628ee8a02978cca67ee9db23 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Tue, 10 Sep 2024 12:17:03 -0400 Subject: [PATCH 113/218] Update cargo lock --- Cargo.lock | Bin 331764 -> 331729 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b52ae94f4dd7eb42719d4d118ffb8ab9f9160b5b..54bbaf6c2b9f0e9ad5f7eb14d8474dbd806df597 100644 GIT binary patch delta 249 zcmYL@t4;$!6hJd{sYGp9gCIemNM@!pckWFf`w9F2W_IR|sVGZ$)D=O3rD^CGlKuqA zB5_E9;wQi~#j~#NoTJWWRer6?`&aR@6e=J74G!|fv)JoCmSX%XY@YuJRZlK`d&4JV zuCMs^%t129#$!C%BFhL0iI_N{(@}BL8z?f4v-%WO_x-T5ULOY~15s!iPZ80iS&<&@>U6p5Zab9aomcmE|2doo&YeM9};H5RJ0s5AbcfL^s c7QI7sRvQhLdDL$#+TAL@Z;G-zeJb8&|1d~UtpET3 delta 251 zcmWN|p-uxq5CG8JagGF4T7^aL%GtFBVOSjW!=k#Hw7)m=DI?f~&Vd>mO%n~Pk}x(=o%Mi*5Exhl6+8hW dg^1v#juyn&cJnoEXFtc?$Fe-{uOF*$`wv!-Q&0c^ From ed8c59eb02c3bd2eb6a82dfea6b4b7fcd6ecb4a6 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 10 Sep 2024 23:04:50 -0300 Subject: [PATCH 114/218] Adding accepted TLS signature verifiers --- core/crates/cloud-services/src/client.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/core/crates/cloud-services/src/client.rs 
b/core/crates/cloud-services/src/client.rs index 076d572f8..d5c0b14f6 100644 --- a/core/crates/cloud-services/src/client.rs +++ b/core/crates/cloud-services/src/client.rs @@ -197,7 +197,21 @@ impl CloudServices { } fn supported_verify_schemes(&self) -> Vec { - vec![] + vec![ + rustls::SignatureScheme::RSA_PKCS1_SHA1, + rustls::SignatureScheme::ECDSA_SHA1_Legacy, + rustls::SignatureScheme::RSA_PKCS1_SHA256, + rustls::SignatureScheme::ECDSA_NISTP256_SHA256, + rustls::SignatureScheme::RSA_PKCS1_SHA384, + rustls::SignatureScheme::ECDSA_NISTP384_SHA384, + rustls::SignatureScheme::RSA_PKCS1_SHA512, + rustls::SignatureScheme::ECDSA_NISTP521_SHA512, + rustls::SignatureScheme::RSA_PSS_SHA256, + rustls::SignatureScheme::RSA_PSS_SHA384, + rustls::SignatureScheme::RSA_PSS_SHA512, + rustls::SignatureScheme::ED25519, + rustls::SignatureScheme::ED448, + ] } } From dc19f79408519a43c5b5274837e6f491350e26d7 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 11 Sep 2024 00:52:40 -0400 Subject: [PATCH 115/218] Device routes & registering of device (mostly) Still wip though --- Cargo.lock | Bin 331729 -> 331729 bytes core/crates/cloud-services/Cargo.toml | 2 +- core/src/api/cloud/devices.rs | 224 +++++++++--------- core/src/api/cloud/mod.rs | 15 +- .../settings/client/account/Profile.tsx | 45 +++- packages/client/src/core.ts | 10 +- 6 files changed, 171 insertions(+), 125 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 54bbaf6c2b9f0e9ad5f7eb14d8474dbd806df597..edc615580bca727103b3407ec04ecb2de46f4650 100644 GIT binary patch delta 452 zcmXw#y=zoK6ver3-XdXfS;HC;5wmP0X_Pl_=FQyM6lo-d7N&_x+#>VwUb+-XVd)lv z*hLmIok?0DSw9)bf+047jRY(I06P&D#M+6$=AL^vm&=cHveP=*Y4zX3pDo|+k}JOI zlcqly!TE6WIjq=FcbVu+kiFVu|w?pG2Oaic{?IQ^7 zqvY#dU?B#>co6n3khQs}6J2t6&41g6rG+YY+GuSh7eY~47D8xeG9i&^q*ilOoLhkk zH8mL;#U!Vha85{CaqHjA!IbY^PwRejg{%P%vJcenI+BLwL(-{5D!xkY0s7%$a_MwT zhfk|yHW_S8E-eUd<5!E4E91DWm^6hg847DD7X@ZgO2bPmWNxKV(&#GBEwYMpZpv~n 
zel_K{3~7d!mNZ(k=#-IpSrtg762>xJXp~AjMKh;L$27wTea3wFB&mnrkI3=4$QaA& zgzt~Z{n5Yb!=3qbcJ9?zDxcGGvUYE^k?rc=u?fz delta 458 zcmX|7J!@1!6y?5|V8Y@Ob+e)fF<-}&#J?Zm?7o_p>+a1Q5mt8u#3=go6`Jf6(LqeXNLWSZ^6t0L30I=Y|gpW!k9+Z;QD zG~L_#;!%f>Ages#p!`hj~TD0kq-Vk6T36TV8pw4GS#;68T{iZ^S$E#YbyQxgnzaE E0xEuuW&i*H diff --git a/core/crates/cloud-services/Cargo.toml b/core/crates/cloud-services/Cargo.toml index f7e5e8133..3a52a29ec 100644 --- a/core/crates/cloud-services/Cargo.toml +++ b/core/crates/cloud-services/Cargo.toml @@ -47,7 +47,7 @@ quinn = { package = "iroh-quinn", version = "0.11" } reqwest = { version = "0.12", features = ["json", "native-tls-vendored", "stream"] } reqwest-middleware = { version = "0.3", features = ["json"] } reqwest-retry = "0.6" -rustls = { version = "0.23", default-features = false, features = ["brotli", "ring", "std"] } +rustls = { version = "=0.23.13", default-features = false, features = ["ring", "std", "brotli"]} rustls-platform-verifier = "0.3.3" diff --git a/core/src/api/cloud/devices.rs b/core/src/api/cloud/devices.rs index efcc02c57..b01ce0471 100644 --- a/core/src/api/cloud/devices.rs +++ b/core/src/api/cloud/devices.rs @@ -1,12 +1,13 @@ -use crate::{ - api::{Ctx, R}, - node::HardwareModel, -}; +use crate::api::{Ctx, R}; use futures::{SinkExt, StreamExt}; use sd_cloud_schema::{ auth::AccessToken, - devices::{self, DeviceOS, PubId}, + devices::{ + self, + register::{Request, RequestUpdate, Response, State}, + DeviceOS, HardwareModel, PubId, + }, opaque_ke::{ ClientLogin, ClientLoginFinishParameters, ClientLoginFinishResult, ClientLoginStartResult, ClientRegistration, ClientRegistrationFinishParameters, ClientRegistrationFinishResult, @@ -50,114 +51,115 @@ pub fn mount() -> AlphaRouter { // "Failed to get device;", // )??; - let device = MockDevice { - name: "Mac Device".to_string(), - pub_id: PubId(Uuid::now_v7()), - // Date: 8th Aug 2024 12:00:00 UTC - created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - .expect("Failed to parse 
created_at datetime") - .with_timezone(&chrono::Utc), - // Always set to the current time - updated_at: chrono::Utc::now(), - os: DeviceOS::MacOS, - used_storage: 100 * 1024 * 1024 * 1024, - // Always set to 256 GB in bytes (u64) - storage_size: 256 * 1024 * 1024 * 1024, - device_model: HardwareModel::MacBookPro, - }; + // let device = MockDevice { + // name: "Mac Device".to_string(), + // pub_id: PubId(Uuid::now_v7()), + // // Date: 8th Aug 2024 12:00:00 UTC + // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + // .expect("Failed to parse created_at datetime") + // .with_timezone(&chrono::Utc), + // // Always set to the current time + // updated_at: chrono::Utc::now(), + // os: DeviceOS::MacOS, + // used_storage: 100 * 1024 * 1024 * 1024, + // // Always set to 256 GB in bytes (u64) + // storage_size: 256 * 1024 * 1024 * 1024, + // device_model: HardwareModel::MacBookPro, + // }; - debug!(?device, "Got device"); + // debug!(?device, "Got device"); - Ok(device) + Ok(()) }) }) .procedure("list", { - R.query(|_node, _: ()| async move { - // let devices::list::Response(devices) = super::handle_comm_error( - // try_get_cloud_services_client!(node)? - // .devices() - // .list(req) - // .await, - // "Failed to list devices;", - // )??; + R.query(|node, req: devices::list::Request| async move { + let devices::list::Response(devices) = super::handle_comm_error( + try_get_cloud_services_client(&node) + .await? 
+ .devices() + .list(req) + .await, + "Failed to list devices;", + )??; - let devices: Vec = vec![ - MockDevice { - name: "Mac Device".to_string(), - pub_id: PubId(Uuid::now_v7()), - // Date: 8th Aug 2024 12:00:00 UTC - created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - .expect("Failed to parse created_at datetime") - .with_timezone(&chrono::Utc), - // Always set to the current time - updated_at: chrono::Utc::now(), - os: DeviceOS::MacOS, - // Randomize between 256 GB and 1 TB in bytes (u64) - storage_size: 256 * 1024 * 1024 * 1024, - used_storage: 100 * 1024 * 1024 * 1024, - device_model: HardwareModel::MacMini, - }, - MockDevice { - name: "Windows Device".to_string(), - pub_id: PubId(Uuid::now_v7()), - // Date: 8th Aug 2024 12:00:00 UTC - created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - .expect("Failed to parse created_at datetime") - .with_timezone(&chrono::Utc), - // Always set to the current time - updated_at: chrono::Utc::now(), - os: DeviceOS::Windows, - // Randomize between 256 GB and 1 TB in bytes (u64) - storage_size: 256 * 1024 * 1024 * 1024, - used_storage: 10 * 1024 * 1024 * 1024, - device_model: HardwareModel::Other, - }, - MockDevice { - name: "Linux Device".to_string(), - pub_id: PubId(Uuid::now_v7()), - // Date: 8th Aug 2024 12:00:00 UTC - created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - .expect("Failed to parse created_at datetime") - .with_timezone(&chrono::Utc), - // Always set to the current time - updated_at: chrono::Utc::now(), - os: DeviceOS::Linux, - // Always set to 256 GB in bytes (u64) - storage_size: 256 * 1024 * 1024 * 1024, - used_storage: 50 * 1024 * 1024 * 1024, - device_model: HardwareModel::Other, - }, - MockDevice { - name: "Android Device".to_string(), - pub_id: PubId(Uuid::now_v7()), - // Date: 8th Aug 2024 12:00:00 UTC - created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - .expect("Failed to parse created_at datetime") - .with_timezone(&chrono::Utc), - // Always 
set to the current time - updated_at: chrono::Utc::now(), - os: DeviceOS::Android, - // Always set to 256 GB in bytes (u64) - storage_size: 256 * 1024 * 1024 * 1024, - used_storage: 150 * 1024 * 1024 * 1024, - device_model: HardwareModel::Android, - }, - MockDevice { - name: "iOS Device".to_string(), - pub_id: PubId(Uuid::now_v7()), - // Date: 8th Aug 2024 12:00:00 UTC - created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - .expect("Failed to parse created_at datetime") - .with_timezone(&chrono::Utc), - // Always set to the current time - updated_at: chrono::Utc::now(), - os: DeviceOS::IOS, - // Always set to 256 GB in bytes (u64) - storage_size: 256 * 1024 * 1024 * 1024, - used_storage: 200 * 1024 * 1024 * 1024, - device_model: HardwareModel::IPhone, - }, - ]; + // let devices: Vec = vec![ + // MockDevice { + // name: "Mac Device".to_string(), + // pub_id: PubId(Uuid::now_v7()), + // // Date: 8th Aug 2024 12:00:00 UTC + // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + // .expect("Failed to parse created_at datetime") + // .with_timezone(&chrono::Utc), + // // Always set to the current time + // updated_at: chrono::Utc::now(), + // os: DeviceOS::MacOS, + // // Randomize between 256 GB and 1 TB in bytes (u64) + // storage_size: 256 * 1024 * 1024 * 1024, + // used_storage: 100 * 1024 * 1024 * 1024, + // device_model: HardwareModel::MacMini, + // }, + // MockDevice { + // name: "Windows Device".to_string(), + // pub_id: PubId(Uuid::now_v7()), + // // Date: 8th Aug 2024 12:00:00 UTC + // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + // .expect("Failed to parse created_at datetime") + // .with_timezone(&chrono::Utc), + // // Always set to the current time + // updated_at: chrono::Utc::now(), + // os: DeviceOS::Windows, + // // Randomize between 256 GB and 1 TB in bytes (u64) + // storage_size: 256 * 1024 * 1024 * 1024, + // used_storage: 10 * 1024 * 1024 * 1024, + // device_model: HardwareModel::Other, + // }, + // 
MockDevice { + // name: "Linux Device".to_string(), + // pub_id: PubId(Uuid::now_v7()), + // // Date: 8th Aug 2024 12:00:00 UTC + // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + // .expect("Failed to parse created_at datetime") + // .with_timezone(&chrono::Utc), + // // Always set to the current time + // updated_at: chrono::Utc::now(), + // os: DeviceOS::Linux, + // // Always set to 256 GB in bytes (u64) + // storage_size: 256 * 1024 * 1024 * 1024, + // used_storage: 50 * 1024 * 1024 * 1024, + // device_model: HardwareModel::Other, + // }, + // MockDevice { + // name: "Android Device".to_string(), + // pub_id: PubId(Uuid::now_v7()), + // // Date: 8th Aug 2024 12:00:00 UTC + // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + // .expect("Failed to parse created_at datetime") + // .with_timezone(&chrono::Utc), + // // Always set to the current time + // updated_at: chrono::Utc::now(), + // os: DeviceOS::Android, + // // Always set to 256 GB in bytes (u64) + // storage_size: 256 * 1024 * 1024 * 1024, + // used_storage: 150 * 1024 * 1024 * 1024, + // device_model: HardwareModel::Android, + // }, + // MockDevice { + // name: "iOS Device".to_string(), + // pub_id: PubId(Uuid::now_v7()), + // // Date: 8th Aug 2024 12:00:00 UTC + // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") + // .expect("Failed to parse created_at datetime") + // .with_timezone(&chrono::Utc), + // // Always set to the current time + // updated_at: chrono::Utc::now(), + // os: DeviceOS::IOS, + // // Always set to 256 GB in bytes (u64) + // storage_size: 256 * 1024 * 1024 * 1024, + // used_storage: 200 * 1024 * 1024 * 1024, + // device_model: HardwareModel::IPhone, + // }, + // ]; debug!(?devices, "Listed devices"); @@ -313,6 +315,8 @@ pub struct DeviceRegisterData { pub os: DeviceOS, pub storage_size: u64, pub connection_id: NodeId, + pub hardware_model: HardwareModel, + pub used_storage: u64, } pub async fn register( @@ -324,12 +328,12 @@ pub 
async fn register( os, storage_size, connection_id, + hardware_model, + used_storage, }: DeviceRegisterData, hashed_pub_id: Hash, rng: &mut CryptoRng, ) -> Result { - use devices::register::{Request, RequestUpdate, Response, State}; - let ClientRegistrationStartResult { message, state } = ClientRegistration::::start( rng, @@ -354,6 +358,8 @@ pub async fn register( storage_size, connection_id, opaque_register_message: Box::new(message), + hardware_model, + used_storage, }) .await, "Failed to send device register request;", diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index d4da321f5..4456d3c2b 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -1,4 +1,8 @@ -use crate::{node::config::NodeConfig, volume::get_volumes, Node}; +use crate::{ + node::{config::NodeConfig, HardwareModel}, + volume::get_volumes, + Node, +}; use sd_core_cloud_services::{CloudP2P, IrohSecretKey, KeyManager, QuinnConnection, UserResponse}; @@ -14,7 +18,7 @@ use std::pin::pin; use async_stream::stream; use futures::StreamExt; use rspc::alpha::AlphaRouter; -use tracing::error; +use tracing::{debug, error}; use super::{Ctx, R}; @@ -100,6 +104,9 @@ pub(crate) fn mount() -> AlphaRouter { Err(Error::Client(ClientSideError::NotFound(_))) => { // Device not registered, we execute a device register flow let iroh_secret_key = IrohSecretKey::generate_with_rng(&mut rng); + let hardware_model = Into::into( + HardwareModel::try_get().unwrap_or(HardwareModel::Other), + ); let master_key = self::devices::register( &client, @@ -115,12 +122,16 @@ pub(crate) fn mount() -> AlphaRouter { .map(|volume| volume.total_capacity) .sum(), connection_id: iroh_secret_key.public(), + hardware_model, + used_storage: 0, }, hashed_pub_id, &mut rng, ) .await?; + debug!("Device registered successfully"); + KeyManager::new(master_key, iroh_secret_key, data_directory, &mut rng) .await? 
} diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index 6853ab479..f573bb33b 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -1,13 +1,26 @@ import { Envelope } from '@phosphor-icons/react'; import { getAccessToken } from 'supertokens-web-js/recipe/session'; -import { useBridgeMutation } from '@sd/client'; +import { useBridgeMutation, useBridgeQuery } from '@sd/client'; import { Button, Card } from '@sd/ui'; import { TruncatedText } from '~/components'; const Profile = ({ email }: { email?: string }) => { const emailName = email?.split('@')[0]; const capitalizedEmailName = (emailName?.charAt(0).toUpperCase() ?? '') + emailName?.slice(1); + const refreshToken: string = + JSON.parse(window.localStorage.getItem('frontendCookies') ?? '[]') + .find((cookie: string) => cookie.startsWith('st-refresh-token')) + ?.split('=')[1] + .split(';')[0] || ''; + const accessToken: string = + JSON.parse(window.localStorage.getItem('frontendCookies') ?? '[]') + .find((cookie: string) => cookie.startsWith('st-access-token')) + ?.split('=')[1] + .split(';')[0] || ''; const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); + const cloudDeleteDevice = useBridgeMutation('cloud.devices.delete'); + const devices = useBridgeQuery(['cloud.devices.list', { access_token: accessToken.trim() }]); + console.log(devices.data); return (
@@ -31,18 +44,34 @@ const Profile = ({ email }: { email?: string }) => { + + {/* List all devices from const devices */} + {/* {devices.data?.map((device: any) => ( + +
+ +
+ {device.pub_id} +
+ ))} */}
); }; diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index ca22b2b03..4a3260e19 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -5,8 +5,8 @@ export type Procedures = { queries: { key: "backups.getAll", input: never, result: GetAll } | { key: "buildInfo", input: never, result: BuildInfo } | - { key: "cloud.devices.get", input: never, result: MockDevice } | - { key: "cloud.devices.list", input: never, result: MockDevice[] } | + { key: "cloud.devices.get", input: never, result: null } | + { key: "cloud.devices.list", input: DeviceListRequest, result: Device[] } | { key: "cloud.libraries.get", input: LibraryGetRequest, result: Library } | { key: "cloud.libraries.list", input: LibraryListRequest, result: Library[] } | { key: "cloud.locations.list", input: LocationListRequest, result: CloudLocation[] } | @@ -241,11 +241,13 @@ export type Device = { pub_id: DevicePubId; name: string; os: DeviceOS; storage_ export type DeviceDeleteRequest = { access_token: AccessToken; pub_id: DevicePubId } +export type DeviceListRequest = { access_token: AccessToken } + export type DeviceOS = "Linux" | "Windows" | "MacOS" | "iOS" | "Android" export type DevicePubId = string -export type DeviceUpdateRequest = { access_token: AccessToken; pub_id: DevicePubId; name: string; storage_size: bigint } +export type DeviceUpdateRequest = { access_token: AccessToken; pub_id: DevicePubId; name: string; storage_size: bigint; used_storage: bigint } /** * The method used for the discovery of this peer. 
@@ -513,8 +515,6 @@ export type MediaLocation = { latitude: number; longitude: number; pluscode: Plu export type Metadata = { album: string | null; album_artist: string | null; artist: string | null; comment: string | null; composer: string | null; copyright: string | null; creation_time: string | null; date: string | null; disc: number | null; encoder: string | null; encoded_by: string | null; filename: string | null; genre: string | null; language: string | null; performer: string | null; publisher: string | null; service_name: string | null; service_provider: string | null; title: string | null; track: number | null; variant_bit_rate: number | null; custom: { [key in string]: string } } -export type MockDevice = { pub_id: DevicePubId; name: string; os: DeviceOS; used_storage: bigint; storage_size: bigint; created_at: string; updated_at: string; device_model: CoreHardwareModel } - export type NodeConfigP2P = { discovery?: P2PDiscoveryState; port: Port; disabled: boolean; disable_ipv6: boolean; disable_relay: boolean; enable_remote_access: boolean; /** * A list of peer addresses to try and manually connect to, instead of relying on discovery. 
From a810327846bf0ab3e33cbf610c191cd958fb3683 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 13 Sep 2024 09:01:10 -0400 Subject: [PATCH 116/218] Working device registration and listing --- Cargo.lock | Bin 331729 -> 331729 bytes core/src/node/hardware.rs | 1 + .../settings/client/account/Profile.tsx | 29 ++++++++++++------ packages/client/src/core.ts | 8 ++--- 4 files changed, 24 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index edc615580bca727103b3407ec04ecb2de46f4650..660af10e749d86f55cd82600999447c4b5838ad1 100644 GIT binary patch delta 134 zcmcaOU*zI^k%lddbEYvGPA^nqmYE(im65Og_%z1t$EPtpeV}HNWMOWRYGjdYlxk$0 zVw!4fo|a}|WS(ego@Qv6Y-(m?n3`sgGC5F2eEO~yChqnx<;>f^l(W1xojyUAiDNp; e1{Rg=-0N6AGJ^Trf&48@a4FU8PdBl=i2(r4-!Llx delta 131 zcmcaOU*zI^k%lddbEZuXea9j@J!UE+U;FWCjN6Y-V|w~P-6F*@$=oP4&D6rs+|n#5 z**MWO(Zbv^F~!o*AjL8@#W>N}!Z6Kb`bG^#vFW>7n7G@&lrwMtQqJ<)bXq7Q=XBN$ dEXv!t*RgzL1aY?mxtp0_5-QuDZen>80|3>@Fiije diff --git a/core/src/node/hardware.rs b/core/src/node/hardware.rs index 04873e51c..bf6a93ede 100644 --- a/core/src/node/hardware.rs +++ b/core/src/node/hardware.rs @@ -1,4 +1,5 @@ use std::io; +use std::io::Error; use serde::{Deserialize, Serialize}; use specta::Type; diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index f573bb33b..3903ca7c3 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -2,7 +2,9 @@ import { Envelope } from '@phosphor-icons/react'; import { getAccessToken } from 'supertokens-web-js/recipe/session'; import { useBridgeMutation, useBridgeQuery } from '@sd/client'; import { Button, Card } from '@sd/ui'; +import StatCard from '~/app/$libraryId/overview/StatCard'; import { TruncatedText } from '~/components'; +import { hardwareModelToIcon } from '~/util/hardware'; const Profile = ({ email }: { 
email?: string }) => { const emailName = email?.split('@')[0]; @@ -61,17 +63,24 @@ const Profile = ({ email }: { email?: string }) => { Delete Device {/* List all devices from const devices */} - {/* {devices.data?.map((device: any) => ( - ( + // + + // + -
- -
- {device.pub_id} -
- ))} */} + name={device.name} + // TODO (Optional): Use Brand Type for Different Android Models/iOS Models using DeviceInfo.getBrand() + icon={hardwareModelToIcon(device.hardware_model)} + totalSpace={device.storage_size.toString()} + freeSpace={(device.storage_size - device.used_storage).toString()} + color="#0362FF" + connectionType={'cloud'} + /> + ))}
); }; diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index 4a3260e19..cd40e30e3 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -483,7 +483,7 @@ export type LocationCreateRequest = { access_token: AccessToken; pub_id: Locatio export type LocationDeleteRequest = { access_token: AccessToken; pub_id: LocationPubId } -export type LocationListRequest = { access_token: AccessToken; with_library: boolean; with_device: boolean } +export type LocationListRequest = { access_token: AccessToken; library_pub_id: LibraryPubId; with_library: boolean; with_device: boolean } export type LocationPubId = string @@ -685,17 +685,17 @@ export type Stream = { id: number; name: string | null; codec: Codec | null; asp export type SubtitleProps = { width: number; height: number } -export type SyncGroup = { pub_id: SyncGroupPubId; name: string; latest_key_hash: KeyHash; library: Library | null; devices: Device[] | null; total_sync_messages_bytes: bigint | null; total_space_files_bytes: bigint | null; created_at: string; updated_at: string } +export type SyncGroup = { pub_id: SyncGroupPubId; latest_key_hash: KeyHash; library: Library | null; devices: Device[] | null; total_sync_messages_bytes: bigint | null; total_space_files_bytes: bigint | null; created_at: string; updated_at: string } export type SyncGroupDeleteRequest = { access_token: AccessToken; pub_id: SyncGroupPubId } export type SyncGroupGetRequest = { access_token: AccessToken; pub_id: SyncGroupPubId; with_library: boolean; with_devices: boolean; with_used_storage: boolean } -export type SyncGroupListRequest = { access_token: AccessToken; with_library: boolean; with_devices: boolean } +export type SyncGroupListRequest = { access_token: AccessToken; with_library: boolean } export type SyncGroupPubId = string -export type SyncGroupWithLibraryAndDevices = { pub_id: SyncGroupPubId; name: string; latest_key_hash: KeyHash; library: Library; devices: Device[]; created_at: string; 
updated_at: string } +export type SyncGroupWithLibraryAndDevices = { pub_id: SyncGroupPubId; latest_key_hash: KeyHash; library: Library; devices: Device[]; created_at: string; updated_at: string } export type SyncGroupsLeaveArgs = { access_token: AccessToken; group_pub_id: SyncGroupPubId } From c15ee018b96a9a02b0531be0929dc18c375151e9 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Fri, 13 Sep 2024 12:54:36 -0300 Subject: [PATCH 117/218] Fix key size mismatch between opaque and sd_crypto --- core/src/api/cloud/devices.rs | 123 +++----------------------- crates/crypto/src/cloud/secret_key.rs | 14 ++- 2 files changed, 26 insertions(+), 111 deletions(-) diff --git a/core/src/api/cloud/devices.rs b/core/src/api/cloud/devices.rs index b01ce0471..d8631acdb 100644 --- a/core/src/api/cloud/devices.rs +++ b/core/src/api/cloud/devices.rs @@ -1,6 +1,5 @@ use crate::api::{Ctx, R}; -use futures::{SinkExt, StreamExt}; use sd_cloud_schema::{ auth::AccessToken, devices::{ @@ -20,9 +19,9 @@ use sd_crypto::{cloud::secret_key::SecretKey, CryptoRng}; use blake3::Hash; use chrono::DateTime; +use futures::{SinkExt, StreamExt}; use rspc::alpha::AlphaRouter; use tracing::{debug, error}; -use uuid::Uuid; use super::{handle_comm_error, try_get_cloud_services_client}; @@ -41,35 +40,19 @@ struct MockDevice { pub fn mount() -> AlphaRouter { R.router() .procedure("get", { - // R.query(|node, req: devices::get::Request| async move { - R.query(|_node, _: ()| async move { - // let devices::get::Response(device) = super::handle_comm_error( - // try_get_cloud_services_client!(node)? - // .devices() - // .get(req) - // .await, - // "Failed to get device;", - // )??; + R.query(|node, req: devices::get::Request| async move { + let devices::get::Response(device) = super::handle_comm_error( + try_get_cloud_services_client(&node) + .await? 
+ .devices() + .get(req) + .await, + "Failed to get device;", + )??; - // let device = MockDevice { - // name: "Mac Device".to_string(), - // pub_id: PubId(Uuid::now_v7()), - // // Date: 8th Aug 2024 12:00:00 UTC - // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - // .expect("Failed to parse created_at datetime") - // .with_timezone(&chrono::Utc), - // // Always set to the current time - // updated_at: chrono::Utc::now(), - // os: DeviceOS::MacOS, - // used_storage: 100 * 1024 * 1024 * 1024, - // // Always set to 256 GB in bytes (u64) - // storage_size: 256 * 1024 * 1024 * 1024, - // device_model: HardwareModel::MacBookPro, - // }; + debug!(?device, "Got device"); - // debug!(?device, "Got device"); - - Ok(()) + Ok(device) }) }) .procedure("list", { @@ -83,84 +66,6 @@ pub fn mount() -> AlphaRouter { "Failed to list devices;", )??; - // let devices: Vec = vec![ - // MockDevice { - // name: "Mac Device".to_string(), - // pub_id: PubId(Uuid::now_v7()), - // // Date: 8th Aug 2024 12:00:00 UTC - // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - // .expect("Failed to parse created_at datetime") - // .with_timezone(&chrono::Utc), - // // Always set to the current time - // updated_at: chrono::Utc::now(), - // os: DeviceOS::MacOS, - // // Randomize between 256 GB and 1 TB in bytes (u64) - // storage_size: 256 * 1024 * 1024 * 1024, - // used_storage: 100 * 1024 * 1024 * 1024, - // device_model: HardwareModel::MacMini, - // }, - // MockDevice { - // name: "Windows Device".to_string(), - // pub_id: PubId(Uuid::now_v7()), - // // Date: 8th Aug 2024 12:00:00 UTC - // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - // .expect("Failed to parse created_at datetime") - // .with_timezone(&chrono::Utc), - // // Always set to the current time - // updated_at: chrono::Utc::now(), - // os: DeviceOS::Windows, - // // Randomize between 256 GB and 1 TB in bytes (u64) - // storage_size: 256 * 1024 * 1024 * 1024, - // used_storage: 
10 * 1024 * 1024 * 1024, - // device_model: HardwareModel::Other, - // }, - // MockDevice { - // name: "Linux Device".to_string(), - // pub_id: PubId(Uuid::now_v7()), - // // Date: 8th Aug 2024 12:00:00 UTC - // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - // .expect("Failed to parse created_at datetime") - // .with_timezone(&chrono::Utc), - // // Always set to the current time - // updated_at: chrono::Utc::now(), - // os: DeviceOS::Linux, - // // Always set to 256 GB in bytes (u64) - // storage_size: 256 * 1024 * 1024 * 1024, - // used_storage: 50 * 1024 * 1024 * 1024, - // device_model: HardwareModel::Other, - // }, - // MockDevice { - // name: "Android Device".to_string(), - // pub_id: PubId(Uuid::now_v7()), - // // Date: 8th Aug 2024 12:00:00 UTC - // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - // .expect("Failed to parse created_at datetime") - // .with_timezone(&chrono::Utc), - // // Always set to the current time - // updated_at: chrono::Utc::now(), - // os: DeviceOS::Android, - // // Always set to 256 GB in bytes (u64) - // storage_size: 256 * 1024 * 1024 * 1024, - // used_storage: 150 * 1024 * 1024 * 1024, - // device_model: HardwareModel::Android, - // }, - // MockDevice { - // name: "iOS Device".to_string(), - // pub_id: PubId(Uuid::now_v7()), - // // Date: 8th Aug 2024 12:00:00 UTC - // created_at: DateTime::parse_from_rfc3339("2024-08-08T12:00:00Z") - // .expect("Failed to parse created_at datetime") - // .with_timezone(&chrono::Utc), - // // Always set to the current time - // updated_at: chrono::Utc::now(), - // os: DeviceOS::IOS, - // // Always set to 256 GB in bytes (u64) - // storage_size: 256 * 1024 * 1024 * 1024, - // used_storage: 200 * 1024 * 1024 * 1024, - // device_model: HardwareModel::IPhone, - // }, - // ]; - debug!(?devices, "Listed devices"); Ok(devices) @@ -433,9 +338,7 @@ pub async fn register( } Ok(Response(State::End)) => { // Protocol completed successfully - 
Ok(SecretKey::new(export_key.as_slice().try_into().expect( - "Key mismatch between OPAQUE and crypto crate; this is a serious bug and should crash;", - ))) + Ok(SecretKey::from(export_key)) } Err(e) => { error!(?e, "Device register final response error;"); diff --git a/crates/crypto/src/cloud/secret_key.rs b/crates/crypto/src/cloud/secret_key.rs index 8d203d112..d816acb28 100644 --- a/crates/crypto/src/cloud/secret_key.rs +++ b/crates/crypto/src/cloud/secret_key.rs @@ -10,7 +10,7 @@ use aead::array::Array; use blake3::{Hash, Hasher}; use generic_array::GenericArray; use serde::{Deserialize, Serialize}; -use typenum::consts::U32; +use typenum::{consts::U32, U64}; use zeroize::{Zeroize, ZeroizeOnDrop}; /// This should be used for encrypting and decrypting data. @@ -139,6 +139,18 @@ impl From> for SecretKey { } } +/// We take only the first 32 bytes of the key, since the rest doesn't fit +impl From> for SecretKey { + fn from(key: GenericArray) -> Self { + Self(Array([ + key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7], key[8], key[9], + key[10], key[11], key[12], key[13], key[14], key[15], key[16], key[17], key[18], + key[19], key[20], key[21], key[22], key[23], key[24], key[25], key[26], key[27], + key[28], key[29], key[30], key[31], + ])) + } +} + #[cfg(test)] mod tests { use std::pin::pin; From 4d662b2ff65eb9e3ee7d60cab14ccd1c8a1d8839 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 13 Sep 2024 14:16:01 -0400 Subject: [PATCH 118/218] Get sd core to compile on mobile + deps update --- apps/mobile/package.json | 6 +++--- core/src/node/hardware.rs | 4 ++-- pnpm-lock.yaml | Bin 1053978 -> 1056367 bytes 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/apps/mobile/package.json b/apps/mobile/package.json index 6994e29c1..86c4055ae 100644 --- a/apps/mobile/package.json +++ b/apps/mobile/package.json @@ -37,12 +37,12 @@ "class-variance-authority": "^0.7.0", "dayjs": "^1.11.10", 
"event-target-polyfill": "^0.0.4", - "expo": "~51.0.28", - "expo-av": "^14.0.6", + "expo": "~51.0.32", + "expo-av": "^14.0.7", "expo-blur": "^13.0.2", "expo-build-properties": "~0.12.5", "expo-haptics": "~13.0.1", - "expo-image": "^1.12.13", + "expo-image": "^1.12.15", "expo-linking": "~6.3.1", "expo-media-library": "~16.0.4", "expo-splash-screen": "~0.27.5", diff --git a/core/src/node/hardware.rs b/core/src/node/hardware.rs index bf6a93ede..b0d6625cf 100644 --- a/core/src/node/hardware.rs +++ b/core/src/node/hardware.rs @@ -1,5 +1,4 @@ use std::io; -use std::io::Error; use serde::{Deserialize, Serialize}; use specta::Type; @@ -129,6 +128,7 @@ impl HardwareModel { #[cfg(target_os = "ios")] { use std::ffi::CString; + use std::io::Error; use std::ptr; extern "C" { @@ -186,7 +186,7 @@ impl HardwareModel { } if let Some(device_type) = get_device_type() { - let hardware_model = Self::from_display_name(&device_type.as_str()); + let hardware_model = HardwareModel::from(device_type.as_str()); Ok(hardware_model) } else { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 1dff34f49a07da04c1ca4a74d5d9df0b437e41a1..b2c05f7b372e509b745460b64a9178b78d3ac47a 100644 GIT binary patch delta 6145 zcmcH-X;2&2d315?+ttGcY;zjlCIq2d0>_B}0pbz}0S1y##3jTbByNat?WDCQwVPnC z@fRO)>;?zhaT**t-6TzIleSKev~gydv}v0OJ<>LrPHWmu+9ti;?n*+~*iGH(4~_PH z_xs-WzT+J`Z~4P3mYJV2?0nXCRz02x7piFG>?Wm_lIpaqrZf+#zKS*lzHxf`*0vE} zuf?EIsZ}DLy_i@UU|!E&PQb8@LnH9iD<~2kdkMv}A0{~OOwt-Q={YO=Mi)IhnWS4O z*Q!j}>|FU?@k5i7%BpqI>_e&@0szJzpbi94nQ)?sqS&3q4=>L(drhsDiYQ5m9-5{R z1!^r)fOiU~uyF%%?66MMb4dTApio}l3U;?S8e$$th)prP62x2xeyCXGwgGW*gP{T= zyI^Wb>eThPJcfQxyJ^I&*BLv!9^Ht^H`wmer^CThXgyJ%z@9hn3Cawr#cX%>t3lv@ z(g^BbBZ^IaE?;op=kHNVf}En)PEAI`{27X28<}zb>_XVtYxlY(5*<4i8yguoW7xA5 z7em@K6TzOgB(v4iaVz!Y@J%-d!u33c)#m00PMhVIS92@Em+g6li%S)|kQ*DPQE9X) zjbV9dhx4tA^bDJBEel^>wf2N9XFpePAh69P&cC)klASA>32l)jfZ3zAwg9c9(Y&!|2?}P2o#t3%88NpVTiNOJKDN0u= zd->~`Y`tr>OsCS#_Pg3qAkU=FuIP!02`qla|HH&Il@4Ajy;aBxf%uM?J+R9eCF0Lg 
zs@ddyw2qEd^KJc$L>8m9}uR0@H{BaI3xA)!tQ6Fj&}Eoo6*TbG3OoPeER`al~WJ7#TFQw$`|^T6{yJwfRoF zH^bHMF3YdBbu}9rM(k#H^Ps1>zQm*N*X+A=S;k$uEMGYdGHt}>n!=&dhLO?H3~iRb zCBMg)(WWV?F4gtx$86??3Qbjer_XO6=`iJ1bvre^HnXjApi7fG+K^k`YtRnZ#&n|& zT}`on#G$QkF3!#`Y}TudhHg0MQ|P!|ZiQ4P(SYXZ$Su_xkJdg|-{;YFj_R~yt=>GB z&6Ypv(hS=)qvg5%#TmN%{GQJ2v2Mp$cSpZ7JFj)PrBk~@*J5a^F%%Bjv>p9+$Edy{ zqe5f%mK%Gsd{!6CrAXEtd4h?6-*+fzP<1I@gFV~jn``>_67C5V2 z^(7tdR)1BQH>2DzG^p;j8tg^(R&80=NKui&)>l=lb34b{MjZ~Lxvr|C&uTZfl?-*Y zln!=QmX_31m-o5g;6Z7aHqUWp1+?YKH*xQDE50NJ|7rPVBBZUjP21Mu8SD0$y0u16 zz1iN=V#prTwrZ+s@(o@`!$5Jir=-_o7_=4C)Mkwh4w&2YV^w8^wcdtib7@6Oc}sz{ zzi-HuS7xwQdi|~KJ~#K5UPUL#l5kN@HfJm<=%^pERe499S;oB40f)<7)Ro`e@A5Yf z4(ASZ+jY6dUTeG8US3sN)-z_b7i9HiRCT(u$C@4PnhN8X$L()6s_Q!q-MMu-t+mH9 zQe_3#gVKb3ANdvJ7eRiJ+(-T>R}0Z8k;}K#`094F^tndL)q^9s&Wr)OI=3h*uTWb( z>hG~ts5SZqwMkvmUy|V~9I79za{A4sHnX*7htJzw*QWLrR9bCy4Yj($mfYr&E>CBP zq1RsE-2vLsyHj3C!Q=fSSFNQ?$i5h#V4SQVF9+6En`~tDL@mUmr?b(mwx%ZnXPadBR7J^yA2@g(DV*cVB zXQ;a*;j(G4a8xw-o<|Dq^|RDepborQi8A11782VHAd=UC#egu(oR!AHKkh@F+%+{i zml|eFQzjkmJAvh?OVVg)d|66?yAruYVj}t_;p>ooGn~34je-a3P#pJ$15JsR!Nn_R zHTOj)dN{Bq9C!b<&XsuK3hq*jY<*$Q}Q8hL|ssY3AVI*c9@ZCp5ko(|!HB{2%F zPoka>7@AHZ6?fqXDwibOvP*-X*U)SK4+aXY#BBnb*nBu->1{Uf%^=YYo?7DAn|&n& zc6``nUg9wRGArR<&mz(_vM2(+_ERatPG|4CL6;_HnFe@?;%NAvPW1Lx-o`5`m^q2W z&in>@{%{^$7q-FQ+7yoL@e_HVsr+vTAMS4#QKsb1m{)Oke+9)z?~EC9WaP@aL(LdG zFQKM8Q@n$;3S=B*A*7^wkC{OACWPcvcbC>Z&O+6WWJNg&SDp!;F> zt0WuEH&Y63_*Ha4oaThHxFK{M0nY*T^Uyul}g<~W) zsI~M~xKbcfK%#-(09iUZ5H+h?S0l2t?lnX3mzeQo_M zeIDO{#h?D$R5Ud}5QW;ATwpURtbPfgbpsU@bF& zg+h13O9k}rIFis~C1Pq1dJf(wrK16VMiv3{`E)Ih6t9AiglC?hqq*M|(ou`PBncwG zv66ecnEs0vcDYD7{J}Vl*i*4`)^a`ye!r7eErQ|bB^l-{s4 zCCrS{g!W$VfQ1amYL!NP(}*0hSeVeGjxuTR-J?uGn0~=O#OzWyc8sBwH&59i5evr| z_p*#vPcUo35NjcMLfABKE4inFlXr#@EUJ9>By$}OKA=p751wM`!bpDL`J}KN__QZP39YV`kP%G zR2d0r-;0a^`AU4?(&iTO9;(2=6v?kNZFl)2gB&p$yn?^!EqAZbtDC!7LcLSX31+uFb}?GlPLiQ6TK#f zwg?&jS}M-FRjJsz7K_0QH-||u%3cdI=Xx8aVSF7HU!`DicTGDOC|QrivbfxZSfquC z4R}ltNn?#f5tT-KlWc#IL91IOC 
zEdO>)W=o!BlZ+n}9p{*{ko6B`1neEBm0ZhqjABJG{!09T@I_2n#q~Sz)g%#dix`UI zBD{FYAhHDC$3XDU?i?}@k_YDf0+MVLw)_>Rf$j_z+Ya$HkvNrxpP#|vB*VMI^9){{ zCMvy^p9SMGU&DTdP(L4i9j7IV9D*C3c^@0dHhGnQejkquqw#ZS4fo=2aC7Fr0Uq$L A2LJ#7 delta 6456 zcmc&2X>?T8`OGAfnR)lUxk)BuCksm?M8eEuA;X$v!h~#)O!h(OWZxIaLI_F`Z7DTK z$}Rb%ZYdy?5XeAmpY(LW>e04tsmCfxq11x*xB(v9TJ>n}yKk0Ej8<*W>5q5Lz4yD{ z{=M&h-*^2NnLirKbY2u7$Hp5;cfXb#(jmCv;8l~QIuoZfWx$=kM4?b~4$XIOghdSl z-Cdb!#$-b>g_k7bXjrP@W69`-1&j{-Swul(b>uZS#@9#3F#04?lfSO2C-2^|iagZ8 zPu@HCLJ&;6je;R=oC~GQYS@kpr-zd$L#4Fg;Asc<9q%P@0@A?hb2<)ozNl8iy*1np zFVu&`WUtAf^uS_ZcpL>m<|?Fu{bR}~5|H+Q3_Neb8hF@%b>x|}v+RfpaKnQbDzV(- zyVb(_6R6jFHl1%GJakbRKn|NXMb>Q}=xxrCrVhYz2JgUORlv#*a*XDRGUtC-0Sr1q{-0R8o+#y+o&&5b6>C1C^6M9y?ZW*r3#hC0ZJM?+bWK}ux zI9J9PxgTjpE-8b^$%+{W6oN@j z+OATKoCq`LAb#@5UY_OG402<65UhBU=Eh}kbvhyKT|@Wn8M;qFKczTypI`D~aO~r` zJ-0pKC*Jmi|Mg?=uEQ3T3a(LDdhtx+X_v!>r+drD^u%>%#CfiSs88B zEyZcpfktC{SxS0)ecQS=(*{#}vDH>;&ClsC94N~u8%$c3Th`W<);PEgYVKFKl#1sJeo7d)~8FKOm>j(2Jg*HQTp>dhjp4C*5y0x|_HzlvKWYwxWIF4+whKNHw zsuG$d;pWGF3FWpVLvLw?!QPl=OUfBopIVvHQD|+i=&!EmZplxo@2%ci)REm#pJ&@< z$VyAy){tf>u@xCxOAWbOOxg8=O{VOaeLX%e3<-W}%VySANjyvGX9ZtWk2!fYKQHS@IC!x`& zd!PF_1eSilErm@(ycT-iQG~!74O|lgN{r4)D?Jk&`X1rqL|YS=m(9Q;HAR#sZLn;v z8#&2MNq=AlbicqYB!+{rFgVWL?fGlx&seo z#V|S_v&upb6cP-MjkNCI%h4A33Pm21SDx|#&Q~BUbl--8#hn%C7!xl-TwR6ka^)$C zlfvMzgX8sZs~yRP6EE1&NUDc!G(5N;jYwO@ItV^}MHvZ>14vK0cW|(O7|oZJ>VA@o zlJ2I0;|ItLKYW$~GL>j0Bpg5m5e83diLDuW^8hchdj zuUhcP5mEdgIw|v!Vsu8iGX3Fol{n6E)Xna*2nrlU_ewkY>2*C~!LZ@*%mAXGI~bZc zd_VfW;!El)h_BJcK=J!Z4RNMx;ra-=?|&s9d;7j>gv+n#S5XP5N~iZwh^eR**PcP|`%AUu*KdJD+5)Xqd^23BePaDjl=$w?(I@`}6{!j9 z2mUcM7e=4uVxjEUXva5~X$kAe;N;_I#WyD+H+JyKFpCt(82d2gaL4_=hfmLB8A~avN)Hsh2=~L@i!Ksb))2jtOyG@#)O5Ew>_OBm`(`s z(g|%0^WotW!W^g@6>RYR78MVH=Ma+JHwUVsbdfN8T#yccIoE|rof{3brU-+EQ6U#b z??V2Np27u3NfU}CHT{{!&$pyAWhsPRKNjXltY!!)wUzF(NGQ(aRPf?qAzYHdtHMug zoDM8U1Zg_ry?l?In-9fhTrhleM3&l(;Sa^%LJJ^pH-$}%3M%MpR|UdG`4&}&DH{#@ z>|DGr0jhQc?0p6^T>2NeRfW5c3iGBhcbm8X_8t}HN&pv89}7k>GtFgYYV7Ll%`}>5 
z?{4;KyWQ%zDa!3WBpwsu!oLt_HfB*0PB@rS=xhH-r%LZTK$^*>03wbHyv}z7@cB!B zJ1#`es`8&Zf-tO;(s8qpH#2#Nl*c2inwlJBpN98uF{Eqays$3rx*W`r8_Q<3Pk5)I#*#2afvts4!w`U#pIvs?oO03 zeS08ECkY0x>V72wy;Tl4uIc1DoiYC4nl3!Sm2TJ2$tHa`=fY7Qi&D-^7~yb$_)`I| z2>1dHiD37`w?JSw&(Bg(`SOCPxi}DRe^#xB#8B+&WoYt@8d)Om^zsclBbo!7!|-hL zDcOp&t`CF#b@Fs`hACwl4?M39gmaO2NS1pRE}uqWSCJ$cN<0^h{ao)L71M?0>k66j zvRHgpCQ6~kk;B?(C~iQTpde0n3|^>KgouuKT;nAb2%r6lwi3ffQKtvZ8wW0x4VRQ^ z*!+@OO}a8%hGD#KEWk%3tO&@QKp|vzO*C9uh`%Ef@^v>G?Ks+nIPkovaQYK$g-z@D za=3X5W@D<^;y?I#?r9wlNs~AfsuyF{>$s$(GcP@aCAnQUn|1gJ%tOPiSnh6N zy4@qhJD1=>W|&}+;vSWkYyc)oBWAfQ5oynOfGwG<8o;YCFYZdi78ksG4IX1lZwDvU zA>u?fp6>^R@6jfqNfVd=8+Or#Xr~GXK-Ft>R_7=P6(3!OKUT`Yp80!DBRaG30>6<2 zn#t0K(5*0ai4TC-zwis-3ePVXQPXmPzB^%ZCXcD-k{o_A+ a4;#Hi!Frvx_SA|TjPK@xMByf`UGi^sfJv|b From 9ccd9f415799c842269ee397c7565baba65a394a Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Fri, 13 Sep 2024 19:27:34 -0300 Subject: [PATCH 119/218] Initialize cloud sync --- Cargo.lock | Bin 331729 -> 331732 bytes Cargo.toml | 2 +- core/crates/cloud-services/src/error.rs | 6 + core/crates/cloud-services/src/lib.rs | 4 +- core/crates/cloud-services/src/p2p/mod.rs | 20 +++- core/crates/cloud-services/src/p2p/runner.rs | 52 +++++++-- core/src/api/cloud/devices.rs | 8 +- core/src/api/cloud/mod.rs | 113 ++++++++++++++++--- core/src/api/cloud/sync_groups.rs | 71 +++++++++++- core/src/library/config.rs | 18 ++- core/src/library/library.rs | 30 ++--- core/src/library/manager/error.rs | 2 + core/src/library/manager/mod.rs | 55 ++++----- core/src/util/debug_initializer.rs | 2 +- 14 files changed, 300 insertions(+), 83 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 660af10e749d86f55cd82600999447c4b5838ad1..23eb291d2656b11a3df905834a31af6fc9a2cc6a 100644 GIT binary patch delta 90 zcmcaOU*yVsk%kt=7N#xC3(C!kQp;?OOjAvgERE7qQj`%)OViXO1M}2GlcY3r)07nB mL__lwqeSz>l*D8Mv*{Z(7{#XVYGLAT|5DDp{YyE^-RS^_V;|uF delta 80 zcmcaIU*zI^k%kt=7N#xC3(Ad>iW2jZGi-AcGxL;9k}S+EQjIK 
kjLZ`)&C?7GlTFQx3{%q#QYHt=h_}BfXWss%oaM!I0CF%K6aWAK diff --git a/Cargo.toml b/Cargo.toml index c6e543c94..f3174c6b2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ rust-version = "1.80" [workspace.dependencies] # First party dependencies -sd-cloud-schema = { git = "https://github.com/spacedriveapp/cloud-services-schema", branch = "main" } +sd-cloud-schema = { git = "https://github.com/spacedriveapp/cloud-services-schema", rev = "25e4b92fdd" } # Third party dependencies used by one or more of our crates async-channel = "2.3" diff --git a/core/crates/cloud-services/src/error.rs b/core/crates/cloud-services/src/error.rs index c2f23ff2b..b4fa2b7de 100644 --- a/core/crates/cloud-services/src/error.rs +++ b/core/crates/cloud-services/src/error.rs @@ -150,3 +150,9 @@ impl From for rspc::Error { Self::with_cause(rspc::ErrorCode::InternalServerError, e.to_string(), e) } } + +impl From for rspc::Error { + fn from(e: GetTokenError) -> Self { + Self::with_cause(rspc::ErrorCode::InternalServerError, e.to_string(), e) + } +} diff --git a/core/crates/cloud-services/src/lib.rs b/core/crates/cloud-services/src/lib.rs index d4fa13741..a0b1583a3 100644 --- a/core/crates/cloud-services/src/lib.rs +++ b/core/crates/cloud-services/src/lib.rs @@ -39,7 +39,9 @@ mod token_refresher; pub use client::CloudServices; pub use error::{Error, GetTokenError}; pub use key_manager::KeyManager; -pub use p2p::{CloudP2P, JoinSyncGroupResponse, NotifyUser, Ticket, UserResponse}; +pub use p2p::{ + CloudP2P, JoinSyncGroupResponse, JoinedLibraryCreateArgs, NotifyUser, Ticket, UserResponse, +}; pub use sync::{ declare_actors as declare_cloud_sync, SyncActors as CloudSyncActors, SyncActorsState as CloudSyncActorsState, diff --git a/core/crates/cloud-services/src/p2p/mod.rs b/core/crates/cloud-services/src/p2p/mod.rs index f050110ed..cf266b174 100644 --- a/core/crates/cloud-services/src/p2p/mod.rs +++ b/core/crates/cloud-services/src/p2p/mod.rs @@ -3,6 +3,7 @@ use crate::{CloudServices, Error}; use 
sd_cloud_schema::{ cloud_p2p::{authorize_new_device_in_sync_group, CloudP2PALPN, CloudP2PError}, devices::{self, Device}, + libraries, sync::groups::GroupWithLibraryAndDevices, }; use sd_crypto::{CryptoRng, SeedableRng}; @@ -14,13 +15,20 @@ use iroh_net::{ Endpoint, NodeId, }; use serde::{Deserialize, Serialize}; -use tokio::spawn; +use tokio::{spawn, sync::oneshot}; use tracing::error; mod runner; use runner::Runner; +#[derive(Debug)] +pub struct JoinedLibraryCreateArgs { + pub pub_id: libraries::PubId, + pub name: String, + pub description: Option, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, specta::Type)] #[serde(transparent)] #[repr(transparent)] @@ -66,7 +74,13 @@ pub enum JoinSyncGroupResponse { #[derive(Debug, Deserialize, specta::Type)] #[serde(tag = "kind", content = "data")] pub enum UserResponse { - AcceptDeviceInSyncGroup { ticket: Ticket, accepted: bool }, + AcceptDeviceInSyncGroup { + ticket: Ticket, + accepted: bool, + library_pub_id: libraries::PubId, + library_name: String, + library_description: Option, + }, } #[derive(Debug, Clone)] pub struct CloudP2P { @@ -128,11 +142,13 @@ impl CloudP2P { &self, devices_in_group: Vec<(devices::PubId, NodeId)>, req: authorize_new_device_in_sync_group::Request, + tx: oneshot::Sender, ) { self.msgs_tx .send_async(runner::Message::Request(runner::Request::JoinSyncGroup { req, devices_in_group, + tx, })) .await .expect("Channel closed"); diff --git a/core/crates/cloud-services/src/p2p/runner.rs b/core/crates/cloud-services/src/p2p/runner.rs index d720f94e0..18fb2426b 100644 --- a/core/crates/cloud-services/src/p2p/runner.rs +++ b/core/crates/cloud-services/src/p2p/runner.rs @@ -7,6 +7,7 @@ use sd_cloud_schema::{ self, authorize_new_device_in_sync_group, Client, CloudP2PALPN, CloudP2PError, Service, }, devices::{self, Device}, + libraries, sync::groups, }; use sd_crypto::{CryptoRng, SeedableRng}; @@ -32,14 +33,14 @@ use quic_rpc::{ }; use tokio::{ spawn, - sync::Mutex, + 
sync::{oneshot, Mutex}, task::JoinHandle, time::{interval, Instant, MissedTickBehavior}, }; use tokio_stream::wrappers::IntervalStream; use tracing::{debug, error, warn}; -use super::{JoinSyncGroupResponse, NotifyUser, Ticket, UserResponse}; +use super::{JoinSyncGroupResponse, JoinedLibraryCreateArgs, NotifyUser, Ticket, UserResponse}; const TEN_SECONDS: Duration = Duration::from_secs(10); const FIVE_MINUTES: Duration = Duration::from_secs(60 * 5); @@ -54,6 +55,7 @@ pub enum Request { JoinSyncGroup { req: authorize_new_device_in_sync_group::Request, devices_in_group: Vec<(devices::PubId, NodeId)>, + tx: oneshot::Sender, }, } @@ -177,13 +179,24 @@ impl Runner { StreamMessage::Message(Message::Request(Request::JoinSyncGroup { req, devices_in_group, - })) => self.dispatch_join_requests(req, devices_in_group, &mut rng), + tx, + })) => self.dispatch_join_requests(req, devices_in_group, &mut rng, tx), StreamMessage::UserResponse(UserResponse::AcceptDeviceInSyncGroup { ticket, accepted, + library_pub_id, + library_name, + library_description, }) => { - self.handle_join_response(ticket, accepted).await; + self.handle_join_response( + ticket, + accepted, + library_pub_id, + library_name, + library_description, + ) + .await; } StreamMessage::Tick => self.tick().await, @@ -201,6 +214,7 @@ impl Runner { req: authorize_new_device_in_sync_group::Request, devices_in_group: Vec<(devices::PubId, NodeId)>, rng: &mut CryptoRng, + tx: oneshot::Sender, ) { async fn inner( key_manager: Arc, @@ -208,6 +222,7 @@ impl Runner { mut rng: CryptoRng, req: authorize_new_device_in_sync_group::Request, devices_in_group: Vec<(devices::PubId, NodeId)>, + tx: oneshot::Sender, ) -> Result { let group_pub_id = req.sync_group.pub_id; loop { @@ -226,6 +241,9 @@ impl Runner { Ok(authorize_new_device_in_sync_group::Response { authorizor_device, keys, + library_pub_id, + library_name, + library_description, }) => { key_manager .add_many_keys( @@ -239,7 +257,17 @@ impl Runner { ) .await?; - // 
TODO(@fogodev): Figure out a way to dispatch sync related actors now that we have the keys + if tx + .send(JoinedLibraryCreateArgs { + pub_id: library_pub_id, + name: library_name, + description: library_description, + }) + .is_err() + { + error!("Failed to handle library creation locally from received library data"); + return Ok(JoinSyncGroupResponse::CriticalError); + } return Ok(JoinSyncGroupResponse::Accepted { authorizor_device }); } @@ -260,7 +288,7 @@ impl Runner { if let Err(SendError(response)) = notify_user_tx .send_async(NotifyUser::ReceivedJoinSyncGroupResponse { - response: inner(key_manager, endpoint, rng, req, devices_in_group) + response: inner(key_manager, endpoint, rng, req, devices_in_group, tx) .await .unwrap_or_else(|e| { error!( @@ -326,7 +354,14 @@ impl Runner { } } - async fn handle_join_response(&self, ticket: Ticket, accepted: bool) { + async fn handle_join_response( + &self, + ticket: Ticket, + accepted: bool, + library_pub_id: libraries::PubId, + library_name: String, + library_description: Option, + ) { let Some(PendingSyncGroupJoin { channel, request, @@ -355,6 +390,9 @@ impl Runner { .into_iter() .map(Into::into) .collect(), + library_pub_id, + library_name, + library_description, }) } else { Err(CloudP2PError::Rejected) diff --git a/core/src/api/cloud/devices.rs b/core/src/api/cloud/devices.rs index d8631acdb..f4df3ec12 100644 --- a/core/src/api/cloud/devices.rs +++ b/core/src/api/cloud/devices.rs @@ -218,10 +218,10 @@ pub struct DeviceRegisterData { pub pub_id: PubId, pub name: String, pub os: DeviceOS, - pub storage_size: u64, - pub connection_id: NodeId, pub hardware_model: HardwareModel, + pub storage_size: u64, pub used_storage: u64, + pub connection_id: NodeId, } pub async fn register( @@ -231,10 +231,10 @@ pub async fn register( pub_id, name, os, - storage_size, - connection_id, hardware_model, + storage_size, used_storage, + connection_id, }: DeviceRegisterData, hashed_pub_id: Hash, rng: &mut CryptoRng, diff --git 
a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index 4456d3c2b..873bfc2eb 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -1,4 +1,5 @@ use crate::{ + library::LibraryManagerError, node::{config::NodeConfig, HardwareModel}, volume::get_volumes, Node, @@ -9,6 +10,7 @@ use sd_core_cloud_services::{CloudP2P, IrohSecretKey, KeyManager, QuinnConnectio use sd_cloud_schema::{ auth, error::{ClientSideError, Error}, + sync::groups, users, Client, Service, }; use sd_crypto::{CryptoRng, SeedableRng}; @@ -17,8 +19,9 @@ use std::pin::pin; use async_stream::stream; use futures::StreamExt; +use futures_concurrency::future::TryJoin; use rspc::alpha::AlphaRouter; -use tracing::{debug, error}; +use tracing::{debug, error, instrument}; use super::{Ctx, R}; @@ -51,7 +54,7 @@ pub(crate) fn mount() -> AlphaRouter { node.cloud_services .token_refresher - .init(access_token.clone(), refresh_token) + .init(access_token, refresh_token) .await?; let client = try_get_cloud_services_client(&node).await?; @@ -65,7 +68,11 @@ pub(crate) fn mount() -> AlphaRouter { client .users() .create(users::create::Request { - access_token: access_token.clone(), + access_token: node + .cloud_services + .token_refresher + .get_access_token() + .await?, }) .await, "Failed to create user;", @@ -82,7 +89,11 @@ pub(crate) fn mount() -> AlphaRouter { client .devices() .get(devices::get::Request { - access_token: access_token.clone(), + access_token: node + .cloud_services + .token_refresher + .get_access_token() + .await?, pub_id: device_pub_id, }) .await, @@ -92,7 +103,10 @@ pub(crate) fn mount() -> AlphaRouter { // Device registered, we execute a device hello flow let master_key = self::devices::hello( &client, - access_token, + node.cloud_services + .token_refresher + .get_access_token() + .await?, device_pub_id, hashed_pub_id, &mut rng, @@ -108,22 +122,31 @@ pub(crate) fn mount() -> AlphaRouter { HardwareModel::try_get().unwrap_or(HardwareModel::Other), ); + let (storage_size, 
used_storage) = get_volumes() + .await + .into_iter() + .fold((0, 0), |(storage_size, used_storage), volume| { + ( + storage_size + volume.total_capacity, + used_storage + + (volume.total_capacity - volume.available_capacity), + ) + }); + let master_key = self::devices::register( &client, - access_token, + node.cloud_services + .token_refresher + .get_access_token() + .await?, self::devices::DeviceRegisterData { pub_id: device_pub_id, name, os, - // TODO(@fogodev): We should use storage statistics from sqlite db - storage_size: get_volumes() - .await - .into_iter() - .map(|volume| volume.total_capacity) - .sum(), + storage_size, connection_id: iroh_secret_key.public(), hardware_model, - used_storage: 0, + used_storage, }, hashed_pub_id, &mut rng, @@ -156,7 +179,51 @@ pub(crate) fn mount() -> AlphaRouter { ) .await; - // TODO(@fogodev): Verify existing sync groups and dispatch sync related actors + let groups::list::Response(groups) = handle_comm_error( + client + .sync() + .groups() + .list(groups::list::Request { + access_token: node + .cloud_services + .token_refresher + .get_access_token() + .await?, + with_library: true, + }) + .await, + "Failed to list sync groups on bootstrap", + )??; + + groups + .into_iter() + .map( + |groups::Group { + pub_id, + library, + // TODO(@fogodev): We can use this latest key hash to check if we + // already have the latest key hash for this group locally + // issuing a ask for key hash request for other devices if we don't + latest_key_hash: _latest_key_hash, + .. 
+ }| { + let node = &node; + + async move { + initialize_cloud_sync( + pub_id, + library.expect( + "we asked backend to receive a library, this is a bug and should crash" + ), + node, + ) + .await + } + }, + ) + .collect::>() + .try_join() + .await?; Ok(()) }, @@ -194,3 +261,21 @@ fn handle_comm_error Result<(), LibraryManagerError> { + let library = node + .libraries + .get_library(&library_pub_id) + .await + .ok_or(LibraryManagerError::LibraryNotFound)?; + + library.init_cloud_sync(node, group_pub_id).await +} diff --git a/core/src/api/cloud/sync_groups.rs b/core/src/api/cloud/sync_groups.rs index f471333dd..27cf22002 100644 --- a/core/src/api/cloud/sync_groups.rs +++ b/core/src/api/cloud/sync_groups.rs @@ -1,4 +1,10 @@ -use crate::api::{utils::library, Ctx, R}; +use crate::{ + api::{utils::library, Ctx, R}, + library::LibraryName, + Node, +}; + +use sd_core_cloud_services::JoinedLibraryCreateArgs; use sd_cloud_schema::{ auth::AccessToken, @@ -6,11 +12,14 @@ use sd_cloud_schema::{ sync::{groups, KeyHash}, }; +use std::sync::Arc; + use futures_concurrency::future::TryJoin; use rspc::alpha::AlphaRouter; use sd_crypto::{cloud::secret_key::SecretKey, CryptoRng, SeedableRng}; use serde::Deserialize; -use tracing::debug; +use tokio::{spawn, sync::oneshot}; +use tracing::{debug, error}; pub fn mount() -> AlphaRouter { R.router() @@ -66,7 +75,7 @@ pub fn mount() -> AlphaRouter { return Err(e.into()); } - // TODO(@fogodev): use the group_pub_id to dispatch actors for syncing to this group + library.init_cloud_sync(&node, group_pub_id).await?; debug!(%group_pub_id, "Created sync group"); @@ -271,6 +280,8 @@ pub fn mount() -> AlphaRouter { "Failed to update library;", )??; + let (tx, rx) = oneshot::channel(); + cloud_p2p .request_join_sync_group( existing_devices, @@ -278,9 +289,17 @@ pub fn mount() -> AlphaRouter { sync_group, asking_device, }, + tx, ) .await; + JoinedSyncGroupReceiver { + node, + group_pub_id, + rx, + } + .dispatch(); + debug!(%group_pub_id, "Requested to 
join sync group"); Ok(()) @@ -288,3 +307,49 @@ pub fn mount() -> AlphaRouter { ) }) } + +struct JoinedSyncGroupReceiver { + node: Arc, + group_pub_id: groups::PubId, + rx: oneshot::Receiver, +} + +impl JoinedSyncGroupReceiver { + fn dispatch(self) { + spawn(async move { + let Self { + node, + group_pub_id, + rx, + } = self; + + if let Ok(JoinedLibraryCreateArgs { + pub_id: libraries::PubId(pub_id), + name, + description, + }) = rx.await + { + let Ok(name) = + LibraryName::new(name).map_err(|e| error!(?e, "Invalid library name")) + else { + return; + }; + + let Ok(library) = node + .libraries + .create_with_uuid(pub_id, name, description, true, None, &node) + .await + .map_err(|e| { + error!(?e, "Failed to create library from sync group join response") + }) + else { + return; + }; + + if let Err(e) = library.init_cloud_sync(&node, group_pub_id).await { + error!(?e, "Failed to initialize cloud sync for library"); + } + } + }); + } +} diff --git a/core/src/library/config.rs b/core/src/library/config.rs index 53390fad8..20c245d10 100644 --- a/core/src/library/config.rs +++ b/core/src/library/config.rs @@ -8,7 +8,7 @@ use sd_prisma::prisma::{file_path, indexer_rule, instance, location, PrismaClien use sd_utils::{db::maybe_missing, error::FileIOError}; use std::{ - path::Path, + path::{Path, PathBuf}, sync::{atomic::AtomicBool, Arc}, }; @@ -43,6 +43,9 @@ pub struct LibraryConfig { #[serde(default)] pub generate_sync_operations: Arc, version: LibraryConfigVersion, + + #[serde(skip, default)] + pub config_path: PathBuf, } #[derive( @@ -87,7 +90,6 @@ impl LibraryConfig { description: Option, instance_id: i32, path: impl AsRef, - generate_sync_operations: bool, ) -> Result { let this = Self { name, @@ -95,8 +97,8 @@ impl LibraryConfig { instance_id, version: Self::LATEST_VERSION, cloud_id: None, - // will always be `true` eventually - generate_sync_operations: Arc::new(AtomicBool::new(generate_sync_operations)), + generate_sync_operations: Arc::new(AtomicBool::new(false)), + 
config_path: path.as_ref().to_path_buf(), }; this.save(path).await.map(|()| this) @@ -109,7 +111,7 @@ impl LibraryConfig { ) -> Result { let path = path.as_ref(); - VersionManager::::migrate_and_load( + let mut loaded_config = VersionManager::::migrate_and_load( path, |current, next| async move { match (current, next) { @@ -407,7 +409,11 @@ impl LibraryConfig { Ok(()) }, ) - .await + .await?; + + loaded_config.config_path = path.to_path_buf(); + + Ok(loaded_config) } pub(crate) async fn save(&self, path: impl AsRef) -> Result<(), LibraryConfigError> { diff --git a/core/src/library/library.rs b/core/src/library/library.rs index 9b26367e2..bed66ea93 100644 --- a/core/src/library/library.rs +++ b/core/src/library/library.rs @@ -17,7 +17,7 @@ use std::{ collections::HashMap, fmt::{Debug, Formatter}, path::{Path, PathBuf}, - sync::Arc, + sync::{atomic::Ordering, Arc}, }; use futures_concurrency::future::Join; @@ -94,7 +94,7 @@ impl Library { &self, node: &Node, sync_group_pub_id: groups::PubId, - ) -> Result<(), sd_core_cloud_services::Error> { + ) -> Result<(), LibraryManagerError> { let rng = CryptoRng::from_seed(node.master_rng.lock().await.generate_fixed()); declare_cloud_sync( @@ -108,16 +108,21 @@ impl Library { ) .await?; - // TODO(@fogodev): Uncomment when they're ready - // ( - // self.cloud_sync_actors.start(CloudSyncActors::Sender), - // self.cloud_sync_actors.start(CloudSyncActors::Receiver), - // self.cloud_sync_actors.start(CloudSyncActors::Ingester), - // ) - // .join() - // .await; + ( + self.cloud_sync_actors.start(CloudSyncActors::Sender), + self.cloud_sync_actors.start(CloudSyncActors::Receiver), + self.cloud_sync_actors.start(CloudSyncActors::Ingester), + ) + .join() + .await; - Ok(()) + self.update_config(|config| { + config + .generate_sync_operations + .store(true, Ordering::Relaxed) + }) + .await + .map_err(Into::into) } pub async fn config(&self) -> LibraryConfig { @@ -127,13 +132,12 @@ impl Library { pub async fn update_config( &self, 
update_fn: impl FnOnce(&mut LibraryConfig), - config_path: impl AsRef, ) -> Result<(), LibraryManagerError> { let mut config = self.config.write().await; update_fn(&mut config); - config.save(config_path).await.map_err(Into::into) + config.save(&config.config_path).await.map_err(Into::into) } // TODO: Remove this once we replace the old invalidation system diff --git a/core/src/library/manager/error.rs b/core/src/library/manager/error.rs index 5a12ff221..4fc01dd4e 100644 --- a/core/src/library/manager/error.rs +++ b/core/src/library/manager/error.rs @@ -47,6 +47,8 @@ pub enum LibraryManagerError { #[error(transparent)] LibraryConfig(#[from] LibraryConfigError), #[error(transparent)] + CloudServices(#[from] sd_core_cloud_services::Error), + #[error(transparent)] Sync(#[from] sd_core_sync::Error), } diff --git a/core/src/library/manager/mod.rs b/core/src/library/manager/mod.rs index 5a4d66d7b..a83368f40 100644 --- a/core/src/library/manager/mod.rs +++ b/core/src/library/manager/mod.rs @@ -154,12 +154,11 @@ impl Libraries { description: Option, node: &Arc, ) -> Result, LibraryManagerError> { - self.create_with_uuid(Uuid::now_v7(), name, description, true, None, node, false) + self.create_with_uuid(Uuid::now_v7(), name, description, true, None, node) .await } #[instrument(skip(self, instance, node), err)] - #[allow(clippy::too_many_arguments)] pub(crate) async fn create_with_uuid( self: &Arc, id: Uuid, @@ -169,7 +168,6 @@ impl Libraries { // `None` will fallback to default as library must be created with at least one instance instance: Option, node: &Arc, - generate_sync_operations: bool, ) -> Result, LibraryManagerError> { if name.as_ref().is_empty() || name.as_ref().chars().all(|x| x.is_whitespace()) { return Err(LibraryManagerError::InvalidConfig( @@ -185,7 +183,6 @@ impl Libraries { // First instance will be zero 0, &config_path, - generate_sync_operations, ) .await?; @@ -274,33 +271,28 @@ impl Libraries { ); library - .update_config( - |config| { - // update the 
library - if let Some(name) = name { - config.name = name; - } - match description { - MaybeUndefined::Undefined => {} - MaybeUndefined::Null => config.description = None, - MaybeUndefined::Value(description) => { - config.description = Some(description) - } - } - match cloud_id { - MaybeUndefined::Undefined => {} - MaybeUndefined::Null => config.cloud_id = None, - MaybeUndefined::Value(cloud_id) => config.cloud_id = Some(cloud_id), - } - match enable_sync { - None => {} - Some(value) => config - .generate_sync_operations - .store(value, Ordering::SeqCst), - } - }, - self.libraries_dir.join(format!("{id}.sdlibrary")), - ) + .update_config(|config| { + // update the library + if let Some(name) = name { + config.name = name; + } + match description { + MaybeUndefined::Undefined => {} + MaybeUndefined::Null => config.description = None, + MaybeUndefined::Value(description) => config.description = Some(description), + } + match cloud_id { + MaybeUndefined::Undefined => {} + MaybeUndefined::Null => config.cloud_id = None, + MaybeUndefined::Value(cloud_id) => config.cloud_id = Some(cloud_id), + } + match enable_sync { + None => {} + Some(value) => config + .generate_sync_operations + .store(value, Ordering::SeqCst), + } + }) .await?; self.tx @@ -429,6 +421,7 @@ impl Libraries { self.libraries.read().await.get(library_id).is_some() } + #[allow(clippy::too_many_arguments)] // TODO: remove this when we remove instance stuff #[instrument( skip_all, fields( diff --git a/core/src/util/debug_initializer.rs b/core/src/util/debug_initializer.rs index 8221aa77e..562ca7b07 100644 --- a/core/src/util/debug_initializer.rs +++ b/core/src/util/debug_initializer.rs @@ -130,7 +130,7 @@ impl InitConfig { lib } else { let library = library_manager - .create_with_uuid(lib.id, lib.name, lib.description, true, None, node, false) + .create_with_uuid(lib.id, lib.name, lib.description, true, None, node) .await?; let Some(lib) = library_manager.get_library(&library.id).await else { From 
feab71f84adb939333a432f714fdbd27c9c14b31 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Sun, 15 Sep 2024 02:20:09 -0400 Subject: [PATCH 120/218] Disabled social logins on mobile --- .../settings/client/AccountSettings/AccountLogin.tsx | 10 ++++++---- packages/client/src/core.ts | 6 ++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/AccountLogin.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/AccountLogin.tsx index e49631736..0c0884b74 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/AccountLogin.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/AccountLogin.tsx @@ -1,15 +1,16 @@ +import { useNavigation } from '@react-navigation/native'; import { MotiView } from 'moti'; import { AppleLogo, GithubLogo, GoogleLogo, IconProps } from 'phosphor-react-native'; -import { useState } from 'react'; +import { useEffect, useState } from 'react'; import { Text, View } from 'react-native'; import { LinearTransition } from 'react-native-reanimated'; import { getAuthorisationURLWithQueryParamsAndSetState } from 'supertokens-web-js/recipe/thirdparty'; import Card from '~/components/layout/Card'; import ScreenContainer from '~/components/layout/ScreenContainer'; import { Button } from '~/components/primitive/Button'; -import { Divider } from '~/components/primitive/Divider'; import { toast } from '~/components/primitive/Toast'; import { tw, twStyle } from '~/lib/tailwind'; +import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; import Login from './Login'; import Register from './Register'; @@ -141,7 +142,8 @@ const AccountLogin = () => { {activeTab === 'Login' ? 
: } - + {/* Disabled for now */} + {/* OR @@ -157,7 +159,7 @@ const AccountLogin = () => { ))} - + */} diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index cd40e30e3..07b04edc0 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -5,7 +5,7 @@ export type Procedures = { queries: { key: "backups.getAll", input: never, result: GetAll } | { key: "buildInfo", input: never, result: BuildInfo } | - { key: "cloud.devices.get", input: never, result: null } | + { key: "cloud.devices.get", input: DeviceGetRequest, result: Device } | { key: "cloud.devices.list", input: DeviceListRequest, result: Device[] } | { key: "cloud.libraries.get", input: LibraryGetRequest, result: Library } | { key: "cloud.libraries.list", input: LibraryListRequest, result: Library[] } | @@ -241,6 +241,8 @@ export type Device = { pub_id: DevicePubId; name: string; os: DeviceOS; storage_ export type DeviceDeleteRequest = { access_token: AccessToken; pub_id: DevicePubId } +export type DeviceGetRequest = { access_token: AccessToken; pub_id: DevicePubId } + export type DeviceListRequest = { access_token: AccessToken } export type DeviceOS = "Linux" | "Windows" | "MacOS" | "iOS" | "Android" @@ -727,7 +729,7 @@ export type ThumbKey = { shard_hex: string; cas_id: CasId; base_directory_str: s export type UpdateThumbnailerPreferences = Record -export type UserResponse = { kind: "AcceptDeviceInSyncGroup"; data: { ticket: CloudP2PTicket; accepted: boolean } } +export type UserResponse = { kind: "AcceptDeviceInSyncGroup"; data: { ticket: CloudP2PTicket; accepted: boolean; library_pub_id: LibraryPubId; library_name: string; library_description: string | null } } export type VideoProps = { pixel_format: string | null; color_range: string | null; bits_per_channel: number | null; color_space: string | null; color_primaries: string | null; color_transfer: string | null; field_order: string | null; chroma_location: string | null; width: number; height: number; 
aspect_ratio_num: number | null; aspect_ratio_den: number | null; properties: string[] } From 7d34e88406672195d7a568d973cfcce3b2901618 Mon Sep 17 00:00:00 2001 From: myung03 Date: Mon, 16 Sep 2024 14:06:40 -0700 Subject: [PATCH 121/218] completed frontend for devices in settings --- interface/app/$libraryId/Explorer/util.ts | 9 ++ interface/app/$libraryId/overview/index.tsx | 8 +- .../node/libraries/DeleteDeviceDialog.tsx | 94 +++++++++++++++++++ .../settings/node/libraries/DeviceItem.tsx | 20 +++- .../settings/node/libraries/ListItem.tsx | 10 +- interface/hooks/index.ts | 1 + interface/hooks/useAccessToken.ts | 8 ++ interface/locales/en/common.json | 8 +- packages/ui/src/forms/Form.tsx | 4 +- 9 files changed, 143 insertions(+), 19 deletions(-) create mode 100644 interface/app/$libraryId/settings/node/libraries/DeleteDeviceDialog.tsx create mode 100644 interface/hooks/useAccessToken.ts diff --git a/interface/app/$libraryId/Explorer/util.ts b/interface/app/$libraryId/Explorer/util.ts index 8daccaf8e..373b27a03 100644 --- a/interface/app/$libraryId/Explorer/util.ts +++ b/interface/app/$libraryId/Explorer/util.ts @@ -185,3 +185,12 @@ export function translateKindName(kindName: string): string { return kindName; } } + +export function fetchAccessToken(): string { + const accessToken: string = + JSON.parse(window.localStorage.getItem('frontendCookies') ?? 
'[]') + .find((cookie: string) => cookie.startsWith('st-access-token')) + ?.split('=')[1] + .split(';')[0] || ''; + return accessToken; +} diff --git a/interface/app/$libraryId/overview/index.tsx b/interface/app/$libraryId/overview/index.tsx index 71569d20c..b4578d36c 100644 --- a/interface/app/$libraryId/overview/index.tsx +++ b/interface/app/$libraryId/overview/index.tsx @@ -1,7 +1,7 @@ import { Key } from 'react'; import { Link } from 'react-router-dom'; import { HardwareModel, useBridgeQuery, useLibraryQuery } from '@sd/client'; -import { useLocale, useOperatingSystem } from '~/hooks'; +import { useAccessToken, useLocale, useOperatingSystem } from '~/hooks'; import { useRouteTitle } from '~/hooks/useRouteTitle'; import { hardwareModelToIcon } from '~/util/hardware'; @@ -28,16 +28,14 @@ export const Component = () => { const os = useOperatingSystem(); const { t } = useLocale(); + const accessToken = useAccessToken(); const locationsQuery = useLibraryQuery(['locations.list'], { keepPreviousData: true }); const locations = locationsQuery.data ?? 
[]; // not sure if we'll need the node state in the future, as it should be returned with the cloud.devices.list query // const { data: node } = useBridgeQuery(['nodeState']); - const cloudDevicesList = useBridgeQuery(['cloud.devices.list'], { - suspense: true, - retry: false - }); + const cloudDevicesList = useBridgeQuery(['cloud.devices.list', { access_token: accessToken }]); const search = useSearchFromSearchParams({ defaultTarget: 'paths' }); diff --git a/interface/app/$libraryId/settings/node/libraries/DeleteDeviceDialog.tsx b/interface/app/$libraryId/settings/node/libraries/DeleteDeviceDialog.tsx new file mode 100644 index 000000000..db19933de --- /dev/null +++ b/interface/app/$libraryId/settings/node/libraries/DeleteDeviceDialog.tsx @@ -0,0 +1,94 @@ +import { useQueryClient } from '@tanstack/react-query'; +import { useEffect } from 'react'; +import { useNavigate } from 'react-router'; +import { HardwareModel, useBridgeMutation, useBridgeQuery, useZodForm } from '@sd/client'; +import { Dialog, ErrorMessage, useDialog, UseDialogProps } from '@sd/ui'; +import { Icon } from '~/components'; +import { useAccessToken, useLocale } from '~/hooks'; +import { hardwareModelToIcon } from '~/util/hardware'; +import { usePlatform } from '~/util/Platform'; + +interface Props extends UseDialogProps { + pubId: string; + name: string; + device_model: string; +} + +interface CorePubId { + Uuid: string; +} + +export default function DeleteLibraryDialog(props: Props) { + const { t } = useLocale(); + + const queryClient = useQueryClient(); + const platform = usePlatform(); + const navigate = useNavigate(); + const accessToken = useAccessToken(); + const { data: node } = useBridgeQuery(['nodeState']); + const deleteDevice = useBridgeMutation('cloud.devices.delete'); + const deviceAmount = useBridgeQuery(['cloud.devices.list', { access_token: accessToken }]).data + ?.length; + + const form = useZodForm(); + + // Check if the current device matches the UUID or if it's the only device 
+ useEffect(() => { + if (deviceAmount === 1) { + form.setError('pubId', { + type: 'manual', + message: t('error_only_device') + }); + } else if ((node?.id as CorePubId).Uuid === props.pubId) { + form.setError('pubId', { + type: 'manual', + message: t('error_current_device') + }); + } + }, [form, node, props.pubId, deviceAmount, t]); + + const onSubmit = form.handleSubmit(async () => { + try { + // Check for form errors before proceeding + if (form.formState.errors.pubId) { + return; + } + + await deleteDevice.mutateAsync({ + access_token: accessToken, + pub_id: props.pubId + }); + queryClient.invalidateQueries(['library.list']); + + platform.refreshMenuBar && platform.refreshMenuBar(); + navigate('/'); + } catch (e) { + alert(`Failed to delete device: ${e}`); + } + }); + + return ( + +
+ +

{props.name}

+ +
+
+ ); +} diff --git a/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx b/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx index 1fc9fca0e..8c0e734c4 100644 --- a/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx +++ b/interface/app/$libraryId/settings/node/libraries/DeviceItem.tsx @@ -2,11 +2,13 @@ import { Trash } from '@phosphor-icons/react'; import { iconNames } from '@sd/assets/util'; import { Key } from 'react'; import { HardwareModel, humanizeSize } from '@sd/client'; -import { Button, Card, Tooltip } from '@sd/ui'; +import { Button, Card, dialogManager, Tooltip } from '@sd/ui'; import { Icon } from '~/components'; -import { useLocale } from '~/hooks'; +import { useAccessToken, useLocale } from '~/hooks'; import { hardwareModelToIcon } from '~/util/hardware'; +import DeleteDeviceDialog from './DeleteDeviceDialog'; + interface DeviceItemProps { pub_id: Key | null | undefined; name: string; @@ -46,7 +48,19 @@ export default (props: DeviceItemProps) => { }} > - + { + dialogManager.create((dp) => ( + + )); + }} + className="size-4" + /> diff --git a/interface/app/$libraryId/settings/node/libraries/ListItem.tsx b/interface/app/$libraryId/settings/node/libraries/ListItem.tsx index b6ff11a8e..78bc8beec 100644 --- a/interface/app/$libraryId/settings/node/libraries/ListItem.tsx +++ b/interface/app/$libraryId/settings/node/libraries/ListItem.tsx @@ -4,7 +4,7 @@ import { Key, useState } from 'react'; import { LibraryConfigWrapped, useBridgeQuery } from '@sd/client'; import { Button, ButtonLink, Card, dialogManager, Tooltip } from '@sd/ui'; import { Icon } from '~/components'; -import { useLocale } from '~/hooks'; +import { useAccessToken, useLocale } from '~/hooks'; import DeleteDialog from './DeleteDialog'; import DeviceItem from './DeviceItem'; @@ -18,12 +18,8 @@ export default (props: Props) => { const { t } = useLocale(); const [isExpanded, setIsExpanded] = useState(false); - const cloudDevicesList = 
useBridgeQuery(['cloud.devices.list'], { - suspense: true, - retry: false - }); - console.log(cloudDevicesList); - + const accessToken = useAccessToken(); + const cloudDevicesList = useBridgeQuery(['cloud.devices.list', { access_token: accessToken }]); const toggleExpansion = () => { setIsExpanded((prev) => !prev); }; diff --git a/interface/hooks/index.ts b/interface/hooks/index.ts index 8d1b54ac1..fa9720835 100644 --- a/interface/hooks/index.ts +++ b/interface/hooks/index.ts @@ -32,3 +32,4 @@ export * from './useZodParams'; export * from './useZodRouteParams'; export * from './useZodSearchParams'; export * from './useDeeplinkEventHandler'; +export * from './useAccessToken'; diff --git a/interface/hooks/useAccessToken.ts b/interface/hooks/useAccessToken.ts new file mode 100644 index 000000000..d0a20a1e9 --- /dev/null +++ b/interface/hooks/useAccessToken.ts @@ -0,0 +1,8 @@ +export function useAccessToken(): string { + const accessToken: string = + JSON.parse(window.localStorage.getItem('frontendCookies') ?? '[]') + .find((cookie: string) => cookie.startsWith('st-access-token')) + ?.split('=')[1] + .split(';')[0] || ''; + return accessToken.trim(); +} diff --git a/interface/locales/en/common.json b/interface/locales/en/common.json index 3f5af2450..0f97989b7 100644 --- a/interface/locales/en/common.json +++ b/interface/locales/en/common.json @@ -168,11 +168,13 @@ "default": "Default", "default_settings": "Default Settings", "delete": "Delete", + "delete_device": "Delete device", + "delete_device_description": "This is permanent! This device will lose access to its the corresponding library and be removed. ", "delete_dialog_title": "Delete {{prefix}} {{type}}", "delete_forever": "Delete Forever", "delete_info": "This will not delete the actual folder on disk. Preview media will be deleted.", "delete_library": "Delete Library", - "delete_library_description": "This is permanent! 
Original files will not be deleted, only the Spacedrive library.", + "delete_library_description": "This is permanent! Only the Spacedrive library will be deleted, and original files will remain untouched.", "delete_location": "Delete Location", "delete_location_description": "Deleting a location will also remove all files associated with it from the Spacedrive database, the files themselves will not be deleted.", "delete_object": "Delete object", @@ -191,7 +193,6 @@ "dialog": "Dialog", "dialog_shortcut_description": "To perform actions and operations", "direction": "Direction", - "drop_files_here_to_send_with": "Drop files here to send with Spacedrop", "directory_one": "directory", "directory_other": "directories", "disabled": "Disabled", @@ -212,6 +213,7 @@ "download": "Download", "downloading_update": "Downloading Update", "drag_to_resize": "Drag to resize", + "drop_files_here_to_send_with": "Drop files here to send with Spacedrop", "duplicate": "Duplicate", "duplicate_object": "Duplicate object", "duplicate_success": "Items duplicated", @@ -242,8 +244,10 @@ "erase_a_file": "Erase a file", "erase_a_file_description": "Configure your erasure settings.", "error": "Error", + "error_current_device": "You are currently on this device, and cannot delete the device from the library. 
Please use another device if you'd like to remove this device.", "error_loading_original_file": "Error loading original file", "error_message": "Error: {{error}}.", + "error_only_device": "You cannot delete this device as it is the only device that belongs to this library.", "error_unknown": "An unknown error occurred.", "executable": "Executable", "executable_one": "Executable", diff --git a/packages/ui/src/forms/Form.tsx b/packages/ui/src/forms/Form.tsx index 00165b408..cb7c7bdb0 100644 --- a/packages/ui/src/forms/Form.tsx +++ b/packages/ui/src/forms/Form.tsx @@ -50,7 +50,7 @@ export const Form = ({ }; export const errorStyles = cva( - 'flex justify-center gap-2 break-all rounded border border-red-500/40 bg-red-800/40 px-3 py-2 text-white', + 'flex justify-center gap-2 whitespace-normal break-words rounded border border-red-500/40 bg-red-800/40 px-3 py-2 text-white', { variants: { variant: { @@ -89,7 +89,7 @@ export const ErrorMessage = ({ name, variant, className }: ErrorMessageProps) => return typeof message === 'string' ? ( -

{message}

+

{message}

) : null; })} From 688286b978e72398e2f049bfda3f1fa5de65b7c4 Mon Sep 17 00:00:00 2001 From: myung03 Date: Mon, 16 Sep 2024 16:31:11 -0700 Subject: [PATCH 122/218] update uuid --- interface/app/$libraryId/saved-search/$id.tsx | 1 - 1 file changed, 1 deletion(-) diff --git a/interface/app/$libraryId/saved-search/$id.tsx b/interface/app/$libraryId/saved-search/$id.tsx index 4562a96d1..d5f57c5b7 100644 --- a/interface/app/$libraryId/saved-search/$id.tsx +++ b/interface/app/$libraryId/saved-search/$id.tsx @@ -127,7 +127,6 @@ function SaveButton({ searchId }: { searchId: number }) { const updateSavedSearch = useLibraryMutation(['search.saved.update']); const search = useSearchContext(); - return ( diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx index b1788772f..913301b8a 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx @@ -1,3 +1,4 @@ +import AsyncStorage from '@react-native-async-storage/async-storage'; import { useNavigation } from '@react-navigation/native'; import { useState } from 'react'; import { Controller } from 'react-hook-form'; @@ -9,6 +10,7 @@ import { Input } from '~/components/primitive/Input'; import { toast } from '~/components/primitive/Toast'; import { tw } from '~/lib/tailwind'; import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; +import { AUTH_SERVER_URL } from '~/utils'; import ShowPassword from './ShowPassword'; @@ -18,7 +20,7 @@ async function signInClicked( navigator: SettingsStackScreenProps<'AccountProfile'>['navigation'] ) { try { - const req = await fetch('http://localhost:9420/api/auth/signin', { + const req = await fetch(`${AUTH_SERVER_URL}/api/auth/signin`, { method: 'POST', headers: { 'Content-Type': 'application/json; charset=utf-8' @@ -67,6 +69,8 @@ async function signInClicked( // sign in successful. 
The session tokens are automatically handled by // the frontend SDK. toast.success('Sign in successful'); + // Save the access token to AsyncStorage, because SuperTokens doesn't store it correctly. Thanks to the React Native SDK. + await AsyncStorage.setItem('access_token', req.headers.get('st-access-token')!); // Refresh the page to show the user is logged in navigator.navigate('AccountProfile'); } @@ -104,19 +108,21 @@ const Login = () => { control={form.control} name="email" render={({ field }) => ( - + + + {form.formState.errors.email && ( + + {form.formState.errors.email.message} + + )} + )} /> - {form.formState.errors.email && ( - - {form.formState.errors.email.message} - - )} { onChangeText={field.onChange} secureTextEntry={!showPassword} /> + {form.formState.errors.password && ( + + {form.formState.errors.password.message} + + )} { )} /> - {form.formState.errors.password && ( - - {form.formState.errors.password.message} - - )} + ); diff --git a/apps/mobile/src/utils/index.ts b/apps/mobile/src/utils/index.ts index 089835f23..97ee7bade 100644 --- a/apps/mobile/src/utils/index.ts +++ b/apps/mobile/src/utils/index.ts @@ -1,8 +1,12 @@ import AsyncStorage from '@react-native-async-storage/async-storage'; -export async function getAccessToken() { - const fetched = await AsyncStorage.getItem('access_token'); - return fetched; +export async function getTokens() { + const fetchedToken = await AsyncStorage.getItem('access_token'); + const fetchedRefreshToken = await AsyncStorage.getItem('refresh_token'); + return { + accessToken: fetchedToken ?? '', + refreshToken: fetchedRefreshToken ?? '' + }; } export const AUTH_SERVER_URL = __DEV__ ? 
'http://localhost:9420' : 'https://auth.spacedrive.com'; diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index 3903ca7c3..217bc0e19 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -1,5 +1,4 @@ import { Envelope } from '@phosphor-icons/react'; -import { getAccessToken } from 'supertokens-web-js/recipe/session'; import { useBridgeMutation, useBridgeQuery } from '@sd/client'; import { Button, Card } from '@sd/ui'; import StatCard from '~/app/$libraryId/overview/StatCard'; From d70adf7b75c7fd613c7860b81d2c323131716381 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Wed, 18 Sep 2024 22:26:52 -0300 Subject: [PATCH 128/218] Converting internal pub_id to schema pub_id --- Cargo.lock | Bin 331732 -> 331752 bytes core/crates/prisma-helpers/Cargo.toml | 5 +++-- core/crates/prisma-helpers/src/lib.rs | 6 ++++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 23eb291d2656b11a3df905834a31af6fc9a2cc6a..527f42843003b119956c025681524bc3317d5619 100644 GIT binary patch delta 50 zcmcaIU*yGnk%kt=7N#xCqHWVFtC>ZnbCxmlO%G^gmY-fSg;{a>hYDtv_VhO9?dfeS Gxt##qIubPi delta 25 hcmaDcU*yVsk%kt=7N#xCqHXP(ZOq#>+gSE@0sx453F`m= diff --git a/core/crates/prisma-helpers/Cargo.toml b/core/crates/prisma-helpers/Cargo.toml index 6a3a47a4c..8a4b490ea 100644 --- a/core/crates/prisma-helpers/Cargo.toml +++ b/core/crates/prisma-helpers/Cargo.toml @@ -9,8 +9,9 @@ repository.workspace = true [dependencies] # Spacedrive Sub-crates -sd-prisma = { path = "../../../crates/prisma" } -sd-utils = { path = "../../../crates/utils" } +sd-cloud-schema = { workspace = true } +sd-prisma = { path = "../../../crates/prisma" } +sd-utils = { path = "../../../crates/utils" } # Workspace dependencies prisma-client-rust = { workspace = true } diff --git a/core/crates/prisma-helpers/src/lib.rs 
b/core/crates/prisma-helpers/src/lib.rs index c9310aa44..4c65bbf2d 100644 --- a/core/crates/prisma-helpers/src/lib.rs +++ b/core/crates/prisma-helpers/src/lib.rs @@ -383,6 +383,12 @@ impl From<&CasId<'_>> for String { #[specta(rename = "CoreDevicePubId")] pub struct DevicePubId(PubId); +impl From for sd_cloud_schema::devices::PubId { + fn from(DevicePubId(pub_id): DevicePubId) -> Self { + Self(pub_id.into()) + } +} + #[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Clone, specta::Type)] #[serde(transparent)] #[repr(transparent)] From 60e1258efe2c75140ad7aa12c455206f43028b81 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Wed, 18 Sep 2024 22:28:38 -0400 Subject: [PATCH 129/218] Lots of changes - Constant for Auth server URL - Modify config to save Pretty Name of device - Properly list devices from Cloud Backend --- Cargo.lock | Bin 331752 -> 332209 bytes .../src-tauri/capabilities/default.json | 7 +- apps/desktop/src/App.tsx | 3 +- .../client/AccountSettings/Register.tsx | 4 +- core/Cargo.toml | 1 + core/crates/cloud-services/src/lib.rs | 3 + .../cloud-services/src/token_refresher.rs | 6 +- core/src/api/cloud/devices.rs | 6 +- core/src/custom_uri/utils.rs | 10 +-- core/src/lib.rs | 6 +- core/src/node/config.rs | 23 +------ interface/app/$libraryId/overview/index.tsx | 62 ++++++++++-------- .../settings/client/account/Profile.tsx | 11 +++- .../settings/client/account/index.tsx | 3 +- interface/util/index.tsx | 2 + 15 files changed, 86 insertions(+), 61 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 527f42843003b119956c025681524bc3317d5619..1c5c8333c008a92c01543ce749e6dd612b9d4506 100644 GIT binary patch delta 312 zcmYk0F-yZh7(mJ8irT@&S`-mu99)y!cbCf`1)bakaS@lh; zp`x>(qGWd|IQcJhba$xL$>s6h_CAND$Gg(yvo*-f{F}XO4z8`)?kKbJQHCx)3Wrs3 zl$vqR_RLqy#@$-m&TDPFy@=ALS-nLo{r9DUkO}L^SWh~(Vr_}hHXu<3ZaltqPJe9v14CQRkn#sraxP*SqOl&Q$Jm9N; z`Tc}@Xzp*(EuN{9Mp`{~HXPSCol~@1Az_eE$wN)Kih;(2gA^X2jK~QAl9+0d#3bTG 
o0Rb6s7WqU^9z`T2>(`rQ(Kc61HibY_$LNBw_^bZxx zEbY~G%-gH$SUe@CKbXzJxxIS^i;39u3A#)i)1Pc$QQ7`_9m_{XFn>Fczl8}drMms- OW|nTI?E%|a5~Bd~q%Rf# diff --git a/apps/desktop/src-tauri/capabilities/default.json b/apps/desktop/src-tauri/capabilities/default.json index 2e223b20b..db83c88f1 100644 --- a/apps/desktop/src-tauri/capabilities/default.json +++ b/apps/desktop/src-tauri/capabilities/default.json @@ -2,7 +2,9 @@ "$schema": "../gen/schemas/desktop-schema.json", "identifier": "default", "description": "Capability for the main window", - "windows": ["main"], + "windows": [ + "main" + ], "permissions": [ "core:app:default", "core:event:default", @@ -41,6 +43,9 @@ { "url": "http://localhost:9420" }, + { + "url": "https://auth.spacedrive.com" + }, { "url": "https://plausible.io" } diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx index c401ab356..632e3ba5b 100644 --- a/apps/desktop/src/App.tsx +++ b/apps/desktop/src/App.tsx @@ -35,6 +35,7 @@ import ThirdParty from 'supertokens-web-js/recipe/thirdparty'; import getCookieHandler from '@sd/interface/app/$libraryId/settings/client/account/handlers/cookieHandler'; import getWindowHandler from '@sd/interface/app/$libraryId/settings/client/account/handlers/windowHandler'; import { useLocale } from '@sd/interface/hooks'; +import { AUTH_SERVER_URL } from '@sd/interface/util'; import { commands } from './commands'; import { platform } from './platform'; @@ -44,7 +45,7 @@ import { createUpdater } from './updater'; SuperTokens.init({ appInfo: { - apiDomain: 'http://localhost:9420', + apiDomain: AUTH_SERVER_URL, apiBasePath: '/api/auth', appName: 'Spacedrive Auth Service' }, diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx index e43d355a3..e7a86a3b7 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx @@ -11,6 +11,7 @@ import 
{ Input } from '~/components/primitive/Input'; import { toast } from '~/components/primitive/Toast'; import { tw } from '~/lib/tailwind'; import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; +import { AUTH_SERVER_URL } from '~/utils'; import ShowPassword from './ShowPassword'; @@ -32,7 +33,7 @@ async function signUpClicked( navigator: SettingsStackScreenProps<'AccountProfile'>['navigation'] ) { try { - const req = await fetch('http://localhost:9000/api/auth/signup', { + const req = await fetch(`${AUTH_SERVER_URL}/api/auth/signup`, { method: 'POST', headers: { 'Content-Type': 'application/json; charset=utf-8' @@ -81,6 +82,7 @@ async function signUpClicked( // this may be a custom error message sent from the API by you. toast.error(err.message); } else { + console.error(err); toast.error('Oops! Something went wrong.'); } } diff --git a/core/Cargo.toml b/core/Cargo.toml index 50ab65f9a..76f6f4192 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -102,6 +102,7 @@ sysinfo = "0.29.11" # Update blocked due to API breaking changes tar = "0.4.41" tower-service = "0.3.2" tracing-appender = "0.2.3" +whoami = "1.5.2" [dependencies.tokio] features = ["io-util", "macros", "process", "rt-multi-thread", "sync", "time"] diff --git a/core/crates/cloud-services/src/lib.rs b/core/crates/cloud-services/src/lib.rs index a0b1583a3..21477d15b 100644 --- a/core/crates/cloud-services/src/lib.rs +++ b/core/crates/cloud-services/src/lib.rs @@ -50,3 +50,6 @@ pub use sync::{ // Re-exports pub use iroh_base::key::{NodeId, SecretKey as IrohSecretKey}; pub use quic_rpc::transport::quinn::QuinnConnection; + +// Export URL for the auth server +pub const AUTH_SERVER_URL: &str = "https://auth.spacedrive.com"; diff --git a/core/crates/cloud-services/src/token_refresher.rs b/core/crates/cloud-services/src/token_refresher.rs index 73ab7591c..3416f2a87 100644 --- a/core/crates/cloud-services/src/token_refresher.rs +++ b/core/crates/cloud-services/src/token_refresher.rs @@ -322,6 
+322,8 @@ mod tests { use reqwest::header; use serde_json::json; + use crate::AUTH_SERVER_URL; + use super::*; async fn get_tokens() -> (AccessToken, RefreshToken) { @@ -341,7 +343,7 @@ mod tests { }); let response = client - .post("http://localhost:9420/api/auth/public/signup") + .post(format!("{AUTH_SERVER_URL}/api/auth/public/signup")) .header("rid", "emailpassword") .header("st-auth-mode", "header") .json(&req_body) @@ -361,7 +363,7 @@ mod tests { ) } else { let response = client - .post("http://localhost:9420/api/auth/public/signin") + .post(format!("{AUTH_SERVER_URL}/api/auth/public/signin")) .header("rid", "emailpassword") .header("st-auth-mode", "header") .json(&req_body) diff --git a/core/src/api/cloud/devices.rs b/core/src/api/cloud/devices.rs index f4df3ec12..47506cd77 100644 --- a/core/src/api/cloud/devices.rs +++ b/core/src/api/cloud/devices.rs @@ -57,7 +57,7 @@ pub fn mount() -> AlphaRouter { }) .procedure("list", { R.query(|node, req: devices::list::Request| async move { - let devices::list::Response(devices) = super::handle_comm_error( + let devices::list::Response(mut devices) = super::handle_comm_error( try_get_cloud_services_client(&node) .await? 
.devices() @@ -68,6 +68,10 @@ pub fn mount() -> AlphaRouter { debug!(?devices, "Listed devices"); + let id = node.config.get().await.id.into(); + // Filter out the local device by matching pub_id + devices.retain(|device| device.pub_id != id); + Ok(devices) }) }) diff --git a/core/src/custom_uri/utils.rs b/core/src/custom_uri/utils.rs index 2dc5014b9..7a86a943d 100644 --- a/core/src/custom_uri/utils.rs +++ b/core/src/custom_uri/utils.rs @@ -1,5 +1,6 @@ use crate::util::InfallibleResponse; +use sd_core_cloud_services::AUTH_SERVER_URL; use std::{fmt::Debug, panic::Location}; use axum::{ @@ -52,7 +53,7 @@ pub(crate) async fn cors_middleware(req: Request, next: Next) -> Respon .header("Access-Control-Allow-Methods", "GET, HEAD, POST, OPTIONS") .header( "Access-Control-Allow-Origin", - "http://localhost:9420, http://ipc.localhost, http://tauri.localhost", + format!("{AUTH_SERVER_URL}, http://ipc.localhost, http://tauri.localhost"), ) .header("Access-Control-Allow-Headers", "*") .header("Access-Control-Max-Age", "86400") @@ -70,9 +71,10 @@ pub(crate) async fn cors_middleware(req: Request, next: Next) -> Respon headers.insert( "Access-Control-Allow-Origin", - HeaderValue::from_static( - "http://localhost:9420, http://ipc.localhost, http://tauri.localhost", - ), + HeaderValue::from_str( + format!("{AUTH_SERVER_URL}, http://ipc.localhost, http://tauri.localhost").as_str(), + ) + .expect("Invalid static response!"), ); headers.insert( diff --git a/core/src/lib.rs b/core/src/lib.rs index ee6881900..f4087ee29 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -55,6 +55,7 @@ use api::notifications::{Notification, NotificationData, NotificationId}; use context::{JobContext, NodeContext}; use node::config; use notifications::Notifications; +use sd_core_cloud_services::AUTH_SERVER_URL; use volume::save_storage_statistics; /// Represents a single running instance of the Spacedrive core. 
@@ -114,8 +115,9 @@ impl Node { #[cfg(debug_assertions)] { ( - std::env::var("SD_CLOUD_API_ADDRESS_URL") - .unwrap_or_else(|_| "http://localhost:9420/cloud-api-address".to_string()), + std::env::var("SD_CLOUD_API_ADDRESS_URL").unwrap_or_else(|_| { + format!("{AUTH_SERVER_URL}/cloud-api-address").to_string() + }), std::env::var("SD_CLOUD_P2P_RELAY_URL") .unwrap_or_else(|_| "http://relay.localhost:9999/".to_string()), std::env::var("SD_CLOUD_P2P_DNS_ORIGIN_NAME") diff --git a/core/src/node/config.rs b/core/src/node/config.rs index f8c38f207..6fa79879b 100644 --- a/core/src/node/config.rs +++ b/core/src/node/config.rs @@ -197,7 +197,7 @@ impl ManagedVersion for NodeConfig { type MigrationError = NodeConfigError; fn from_latest_version() -> Option { - let mut name = generate_device_name(); + let mut name = whoami::devicename(); name.truncate(255); let os = DeviceOS::from_env(); @@ -330,7 +330,7 @@ impl NodeConfig { config.remove("name"); config.insert( String::from("name"), - serde_json::to_value(generate_device_name()) + serde_json::to_value(whoami::devicename()) .map_err(VersionManagerError::SerdeJson)?, ); @@ -506,22 +506,3 @@ pub enum NodeConfigError { #[error(transparent)] FileIO(#[from] FileIOError), } - -fn generate_device_name() -> String { - #[cfg(target_os = "android")] - let name = "Android Spacedrive Device".into(); - #[cfg(not(target_os = "android"))] - let name = match hostname::get() { - Ok(hostname) => hostname.to_string_lossy().into_owned(), - Err(e) => { - error!( - ?e, - "Falling back to default node name as an error occurred getting your systems hostname;", - ); - - "my-spacedrive".into() - } - }; - - name -} diff --git a/interface/app/$libraryId/overview/index.tsx b/interface/app/$libraryId/overview/index.tsx index b4578d36c..44902b2ee 100644 --- a/interface/app/$libraryId/overview/index.tsx +++ b/interface/app/$libraryId/overview/index.tsx @@ -1,4 +1,4 @@ -import { Key } from 'react'; +import { Key, useEffect } from 'react'; import { Link } from 
'react-router-dom'; import { HardwareModel, useBridgeQuery, useLibraryQuery } from '@sd/client'; import { useAccessToken, useLocale, useOperatingSystem } from '~/hooks'; @@ -37,6 +37,15 @@ export const Component = () => { // const { data: node } = useBridgeQuery(['nodeState']); const cloudDevicesList = useBridgeQuery(['cloud.devices.list', { access_token: accessToken }]); + useEffect(() => { + const interval = setInterval(async () => { + await cloudDevicesList.refetch(); + }, 10000); + return () => clearInterval(interval); + }, []); + const { data: node } = useBridgeQuery(['nodeState']); + const stats = useLibraryQuery(['library.statistics']); + const search = useSearchFromSearchParams({ defaultTarget: 'paths' }); return ( @@ -59,32 +68,33 @@ export const Component = () => { - - {cloudDevicesList.data?.map( - ( - device: { - pub_id: Key | null | undefined; - name: string; - os: string; - storage_size: bigint; - used_storage: bigint; - created_at: string; - device_model: string; - }, - index: number - ) => ( - - ) + + {node && ( + )} + {cloudDevicesList.data?.map((device) => ( + + ))} diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index 217bc0e19..1b0765b7d 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -1,4 +1,5 @@ import { Envelope } from '@phosphor-icons/react'; +import { useEffect } from 'react'; import { useBridgeMutation, useBridgeQuery } from '@sd/client'; import { Button, Card } from '@sd/ui'; import StatCard from '~/app/$libraryId/overview/StatCard'; @@ -21,6 +22,14 @@ const Profile = ({ email }: { email?: string }) => { const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); const cloudDeleteDevice = useBridgeMutation('cloud.devices.delete'); const devices = useBridgeQuery(['cloud.devices.list', { access_token: accessToken.trim() }]); + + // Refetch every 10 seconds + 
useEffect(() => { + const interval = setInterval(async () => { + await devices.refetch(); + }, 10000); + return () => clearInterval(interval); + }, []); console.log(devices.data); return ( @@ -55,7 +64,7 @@ const Profile = ({ email }: { email?: string }) => { onClick={async () => { cloudDeleteDevice.mutate({ access_token: accessToken.trim(), - pub_id: '019196ed-5711-7843-a0d6-1d9f176db25a' + pub_id: '01920812-9bd2-7781-aee5-e19a01497296' }); }} > diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index 61a231934..63601083e 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -3,6 +3,7 @@ import Session, { signOut } from 'supertokens-web-js/recipe/session'; import { auth, useBridgeMutation, useBridgeQuery, useFeatureFlag } from '@sd/client'; import { Button, Input, toast } from '@sd/ui'; import { useLocale } from '~/hooks'; +import { AUTH_SERVER_URL } from '~/util'; import { Heading } from '../../Layout'; import Profile from './Profile'; @@ -20,7 +21,7 @@ export const Component = () => { const [userInfo, setUserInfo] = useState(null); useEffect(() => { async function _() { - const user_data = await fetch('http://localhost:9420/api/user', { + const user_data = await fetch(`${AUTH_SERVER_URL}/api/user`, { method: 'GET' }); const data = await user_data.json(); diff --git a/interface/util/index.tsx b/interface/util/index.tsx index 34fd7b43e..9a4ae85e8 100644 --- a/interface/util/index.tsx +++ b/interface/util/index.tsx @@ -8,3 +8,5 @@ export type NonEmptyArray = [T, ...T[]]; export const isNonEmpty = (input: T[]): input is NonEmptyArray => input.length > 0; export const isNonEmptyObject = (input: object) => Object.keys(input).length > 0; + +export const AUTH_SERVER_URL = 'https://auth.spacedrive.com'; From efc6704775aacecfe08446f482e50e4fb9e3a086 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty 
<11457760+Rocky43007@users.noreply.github.com> Date: Wed, 18 Sep 2024 23:47:50 -0400 Subject: [PATCH 130/218] Add SystemConfiguration to SDCore podspec --- Cargo.lock | Bin 332209 -> 331662 bytes .../mobile/modules/sd-core/ios/SDCore.podspec | 3 ++- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 1c5c8333c008a92c01543ce749e6dd612b9d4506..f0ce7f9937bfdb26c0ed94dac22f9e417cf4f091 100644 GIT binary patch delta 154 zcmdnESfp>h$Oa3y>0)XuD$}oRW)Yj*?It#Pf~@%D1@0`($K2YFxiM}(=Efuz&uBWC z@s{$mWlSnY=B9=gsm7MchDpi@ra_u%Qc7w{szIt*s-=-Znvq3{k%5V!sj;ECvFYT6 z`C`)(QkfN|&sfG3Io+O_nS&{-czXMNM!D^h%b8rw+C%R#Z4bT2{Quhaz-=rE(EzDd BG-3b% delta 410 zcmYMvJxJt090hPDi3sZl2QDk9hilZ%mCR%^`KJ&oQwV~V#~sLg#a&T%*N7jDAQpC7 z1f1)H6>U`zwzIObv$EP^+ba%1uoQ!h-K*aFz0-^0-DU9>;^Ibq&7CQf-Nv@w**R!z zXrAh<`#NqOo~%OlC!XmoyL;7|tY$}xKaX4SNh?O}R%{KgB5Iod(WL2ISyMzQz!;>* zVDdX+B_-no2o*pD3so5Sp%#j2j+3m}Q!*dDsNb*~XUHrmd%)DLtzU!wtoJ2aPb=v` zYR1;lNc-QlwZIDBLi6{<67xZViSGv#fRn@*9tU4wsufohl;jbFk<<_gqLsp)29M

AQcf_l6PyuA}p9>Oo65$_)KwMFeQMnK#N=x@}q|}Jny06 U`Ks+$wS6;j>5R53_YVL42^T1XumAu6 diff --git a/apps/mobile/modules/sd-core/ios/SDCore.podspec b/apps/mobile/modules/sd-core/ios/SDCore.podspec index 4061fb0a1..ad2e06d84 100644 --- a/apps/mobile/modules/sd-core/ios/SDCore.podspec +++ b/apps/mobile/modules/sd-core/ios/SDCore.podspec @@ -37,7 +37,8 @@ Pod::Spec.new do |s| ffmpeg_frameworks = [ "-framework AudioToolbox", "-framework VideoToolbox", - "-framework AVFoundation" + "-framework AVFoundation", + "-framework SystemConfiguration", ].join(' ') s.xcconfig = { From 16b1fd7e038831fa8933358c0f236f11c8784a1f Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Thu, 19 Sep 2024 00:36:16 -0400 Subject: [PATCH 131/218] Formatting --- apps/desktop/src-tauri/capabilities/default.json | 4 +--- core/Cargo.toml | 2 +- core/crates/cloud-services/Cargo.toml | 2 +- core/crates/prisma-helpers/src/lib.rs | 2 +- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/apps/desktop/src-tauri/capabilities/default.json b/apps/desktop/src-tauri/capabilities/default.json index db83c88f1..8580f0957 100644 --- a/apps/desktop/src-tauri/capabilities/default.json +++ b/apps/desktop/src-tauri/capabilities/default.json @@ -2,9 +2,7 @@ "$schema": "../gen/schemas/desktop-schema.json", "identifier": "default", "description": "Capability for the main window", - "windows": [ - "main" - ], + "windows": ["main"], "permissions": [ "core:app:default", "core:event:default", diff --git a/core/Cargo.toml b/core/Cargo.toml index 76f6f4192..224f8d6bf 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -102,7 +102,7 @@ sysinfo = "0.29.11" # Update blocked due to API breaking changes tar = "0.4.41" tower-service = "0.3.2" tracing-appender = "0.2.3" -whoami = "1.5.2" +whoami = "1.5.2" [dependencies.tokio] features = ["io-util", "macros", "process", "rt-multi-thread", "sync", "time"] diff --git a/core/crates/cloud-services/Cargo.toml 
b/core/crates/cloud-services/Cargo.toml index 3a52a29ec..70a673177 100644 --- a/core/crates/cloud-services/Cargo.toml +++ b/core/crates/cloud-services/Cargo.toml @@ -47,7 +47,7 @@ quinn = { package = "iroh-quinn", version = "0.11" } reqwest = { version = "0.12", features = ["json", "native-tls-vendored", "stream"] } reqwest-middleware = { version = "0.3", features = ["json"] } reqwest-retry = "0.6" -rustls = { version = "=0.23.13", default-features = false, features = ["ring", "std", "brotli"]} +rustls = { version = "=0.23.13", default-features = false, features = ["brotli", "ring", "std"] } rustls-platform-verifier = "0.3.3" diff --git a/core/crates/prisma-helpers/src/lib.rs b/core/crates/prisma-helpers/src/lib.rs index 4c65bbf2d..97ac004b6 100644 --- a/core/crates/prisma-helpers/src/lib.rs +++ b/core/crates/prisma-helpers/src/lib.rs @@ -387,7 +387,7 @@ impl From for sd_cloud_schema::devices::PubId { fn from(DevicePubId(pub_id): DevicePubId) -> Self { Self(pub_id.into()) } -} +} #[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Clone, specta::Type)] #[serde(transparent)] From fcdf854eeb427eff77924548c64d1760480af479 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Thu, 19 Sep 2024 22:02:32 -0300 Subject: [PATCH 132/218] Fix identifier bug --- .../src/file_identifier/tasks/identifier.rs | 54 +++++----- .../src/file_identifier/tasks/mod.rs | 83 ++++++++------- .../file_identifier/tasks/object_processor.rs | 64 ++++++----- core/crates/heavy-lifting/src/indexer/mod.rs | 6 +- .../heavy-lifting/src/indexer/tasks/saver.rs | 10 ++ .../src/indexer/tasks/updater.rs | 17 ++- core/src/api/files.rs | 61 ++++++----- core/src/api/search/saved.rs | 26 ++--- core/src/api/tags.rs | 49 +++++---- core/src/location/manager/watcher/utils.rs | 100 ++++++++++-------- crates/ai/src/old_image_labeler/process.rs | 72 +++++++------ 11 files changed, 300 insertions(+), 242 deletions(-) diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs 
b/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs index c1015b6e7..d9f57ceb8 100644 --- a/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs +++ b/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs @@ -394,33 +394,33 @@ async fn assign_cas_id_to_file_paths( db: &PrismaClient, sync: &SyncManager, ) -> Result<(), file_identifier::Error> { - // Assign cas_id to each file path - sync.write_ops( - db, - identified_files - .iter() - .map(|(pub_id, IdentifiedFile { cas_id, .. })| { - ( - sync.shared_update( - prisma_sync::file_path::SyncId { - pub_id: pub_id.to_db(), - }, - file_path::cas_id::NAME, - msgpack!(cas_id), - ), - db.file_path() - .update( - file_path::pub_id::equals(pub_id.to_db()), - vec![file_path::cas_id::set(cas_id.into())], - ) - // We don't need any data here, just the id avoids receiving the entire object - // as we can't pass an empty select macro call - .select(file_path::select!({ id })), - ) - }) - .unzip::<_, _, _, Vec<_>>(), - ) - .await?; + let (ops, queries) = identified_files + .iter() + .map(|(pub_id, IdentifiedFile { cas_id, .. 
})| { + ( + sync.shared_update( + prisma_sync::file_path::SyncId { + pub_id: pub_id.to_db(), + }, + file_path::cas_id::NAME, + msgpack!(cas_id), + ), + db.file_path() + .update( + file_path::pub_id::equals(pub_id.to_db()), + vec![file_path::cas_id::set(cas_id.into())], + ) + // We don't need any data here, just the id avoids receiving the entire object + // as we can't pass an empty select macro call + .select(file_path::select!({ id })), + ) + }) + .unzip::<_, _, Vec<_>, Vec<_>>(); + + if !ops.is_empty() && !queries.is_empty() { + // Assign cas_id to each file path + sync.write_ops(db, (ops, queries)).await?; + } Ok(()) } diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs b/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs index 406c320b9..589adae1b 100644 --- a/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs +++ b/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs @@ -127,48 +127,57 @@ async fn create_objects_and_update_file_paths( .unzip::<_, _, HashMap<_, _>, Vec<_>>( ); - trace!( - new_objects_count = object_create_args.len(), - "Creating new Objects!;", - ); + let new_objects_count = object_create_args.len(); + if new_objects_count > 0 { + trace!(new_objects_count, "Creating new Objects!;",); - // create new object records with assembled values - let created_objects_count = sync - .write_ops(db, { - let (sync, db_params) = object_create_args - .into_iter() - .unzip::<_, _, Vec<_>, Vec<_>>(); - - (sync, db.object().create_many(db_params)) - }) - .await?; - - trace!(%created_objects_count, "Created new Objects;"); - - if created_objects_count > 0 { - trace!("Updating file paths with created objects"); - - let updated_file_path_ids = sync - .write_ops( - db, - file_path_update_args + // create new object records with assembled values + let created_objects_count = sync + .write_ops(db, { + let (sync, db_params) = object_create_args .into_iter() - .unzip::<_, _, Vec<_>, Vec<_>>(), - ) - .await - .map(|file_paths| { 
- file_paths - .into_iter() - .map(|file_path_id::Data { id }| id) - .collect::>() - })?; + .unzip::<_, _, Vec<_>, Vec<_>>(); - object_pub_id_by_file_path_id - .retain(|file_path_id, _| updated_file_path_ids.contains(file_path_id)); + (sync, db.object().create_many(db_params)) + }) + .await?; - Ok(object_pub_id_by_file_path_id) + trace!(%created_objects_count, "Created new Objects;"); + + if created_objects_count > 0 { + let file_paths_to_update_count = file_path_update_args.len(); + if file_paths_to_update_count > 0 { + trace!( + file_paths_to_update_count, + "Updating file paths with created objects" + ); + + let updated_file_path_ids = sync + .write_ops( + db, + file_path_update_args + .into_iter() + .unzip::<_, _, Vec<_>, Vec<_>>(), + ) + .await + .map(|file_paths| { + file_paths + .into_iter() + .map(|file_path_id::Data { id }| id) + .collect::>() + })?; + + object_pub_id_by_file_path_id + .retain(|file_path_id, _| updated_file_path_ids.contains(file_path_id)); + } + + Ok(object_pub_id_by_file_path_id) + } else { + trace!("No objects created, skipping file path updates"); + Ok(HashMap::new()) + } } else { - trace!("No objects created, skipping file path updates"); + trace!("No objects to create, skipping file path updates"); Ok(HashMap::new()) } } diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs b/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs index 90aebff56..4efc96a3f 100644 --- a/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs +++ b/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs @@ -270,39 +270,37 @@ async fn assign_existing_objects_to_file_paths( db: &PrismaClient, sync: &SyncManager, ) -> Result, file_identifier::Error> { - sync.write_ops( - db, - objects_by_cas_id - .iter() - .flat_map(|(cas_id, object_pub_id)| { - file_paths_by_cas_id - .remove(cas_id) - .map(|file_paths| { - file_paths.into_iter().map( - |FilePathToCreateOrLinkObject { - 
file_path_pub_id, .. - }| { - connect_file_path_to_object( - &file_path_pub_id, - object_pub_id, - db, - sync, - ) - }, - ) - }) - .expect("must be here") - }) - .unzip::<_, _, Vec<_>, Vec<_>>(), - ) - .await - .map(|file_paths| { - file_paths - .into_iter() - .map(|file_path_id::Data { id }| id) - .collect() - }) - .map_err(Into::into) + let (ops, queries) = objects_by_cas_id + .iter() + .flat_map(|(cas_id, object_pub_id)| { + file_paths_by_cas_id + .remove(cas_id) + .map(|file_paths| { + file_paths.into_iter().map( + |FilePathToCreateOrLinkObject { + file_path_pub_id, .. + }| { + connect_file_path_to_object(&file_path_pub_id, object_pub_id, db, sync) + }, + ) + }) + .expect("must be here") + }) + .unzip::<_, _, Vec<_>, Vec<_>>(); + + if ops.is_empty() && queries.is_empty() { + return Ok(vec![]); + } + + sync.write_ops(db, (ops, queries)) + .await + .map(|file_paths| { + file_paths + .into_iter() + .map(|file_path_id::Data { id }| id) + .collect() + }) + .map_err(Into::into) } async fn assign_objects_to_duplicated_orphans( diff --git a/core/crates/heavy-lifting/src/indexer/mod.rs b/core/crates/heavy-lifting/src/indexer/mod.rs index dbf12b223..f5f515b68 100644 --- a/core/crates/heavy-lifting/src/indexer/mod.rs +++ b/core/crates/heavy-lifting/src/indexer/mod.rs @@ -136,7 +136,7 @@ async fn update_directory_sizes( db: &PrismaClient, sync: &SyncManager, ) -> Result<(), Error> { - let to_sync_and_update = db + let (ops, queries) = db ._batch(chunk_db_queries(iso_paths_and_sizes.keys(), db)) .await? 
.into_iter() @@ -167,7 +167,9 @@ async fn update_directory_sizes( .into_iter() .unzip::<_, _, Vec<_>, Vec<_>>(); - sync.write_ops(db, to_sync_and_update).await?; + if !ops.is_empty() && !queries.is_empty() { + sync.write_ops(db, (ops, queries)).await?; + } Ok(()) } diff --git a/core/crates/heavy-lifting/src/indexer/tasks/saver.rs b/core/crates/heavy-lifting/src/indexer/tasks/saver.rs index 2e8f6f61e..a4628d940 100644 --- a/core/crates/heavy-lifting/src/indexer/tasks/saver.rs +++ b/core/crates/heavy-lifting/src/indexer/tasks/saver.rs @@ -157,6 +157,16 @@ impl Task for Saver { ) .unzip(); + if create_crdt_ops.is_empty() && paths.is_empty() { + return Ok(ExecStatus::Done( + Output { + saved_count: 0, + save_duration: Duration::ZERO, + } + .into_output(), + )); + } + #[allow(clippy::cast_sign_loss)] let saved_count = sync .write_ops( diff --git a/core/crates/heavy-lifting/src/indexer/tasks/updater.rs b/core/crates/heavy-lifting/src/indexer/tasks/updater.rs index 26047d43f..91eb72899 100644 --- a/core/crates/heavy-lifting/src/indexer/tasks/updater.rs +++ b/core/crates/heavy-lifting/src/indexer/tasks/updater.rs @@ -159,11 +159,20 @@ impl Task for Updater { ) .unzip::<_, _, Vec<_>, Vec<_>>(); + let ops = sync_stuff.into_iter().flatten().collect::>(); + + if ops.is_empty() && paths_to_update.is_empty() { + return Ok(ExecStatus::Done( + Output { + updated_count: 0, + update_duration: Duration::ZERO, + } + .into_output(), + )); + } + let updated = sync - .write_ops( - db, - (sync_stuff.into_iter().flatten().collect(), paths_to_update), - ) + .write_ops(db, (ops, paths_to_update)) .await .map_err(indexer::Error::from)?; diff --git a/core/src/api/files.rs b/core/src/api/files.rs index 9a512fddc..8e0f29992 100644 --- a/core/src/api/files.rs +++ b/core/src/api/files.rs @@ -346,7 +346,7 @@ pub(crate) fn mount() -> AlphaRouter { let date_accessed = Utc::now().into(); - let (sync_params, db_params): (Vec<_>, Vec<_>) = objects + let (ops, object_ids): (Vec<_>, Vec<_>) = objects 
.into_iter() .map(|d| { ( @@ -360,20 +360,23 @@ pub(crate) fn mount() -> AlphaRouter { }) .unzip(); - sync.write_ops( - db, - ( - sync_params, - db.object().update_many( - vec![object::id::in_vec(db_params)], - vec![object::date_accessed::set(Some(date_accessed))], + if !ops.is_empty() && !object_ids.is_empty() { + sync.write_ops( + db, + ( + ops, + db.object().update_many( + vec![object::id::in_vec(object_ids)], + vec![object::date_accessed::set(Some(date_accessed))], + ), ), - ), - ) - .await?; + ) + .await?; + + invalidate_query!(library, "search.paths"); + invalidate_query!(library, "search.objects"); + } - invalidate_query!(library, "search.paths"); - invalidate_query!(library, "search.objects"); Ok(()) }) }) @@ -389,7 +392,7 @@ pub(crate) fn mount() -> AlphaRouter { .exec() .await?; - let (sync_params, db_params): (Vec<_>, Vec<_>) = objects + let (ops, object_ids): (Vec<_>, Vec<_>) = objects .into_iter() .map(|d| { ( @@ -402,20 +405,24 @@ pub(crate) fn mount() -> AlphaRouter { ) }) .unzip(); - sync.write_ops( - db, - ( - sync_params, - db.object().update_many( - vec![object::id::in_vec(db_params)], - vec![object::date_accessed::set(None)], - ), - ), - ) - .await?; - invalidate_query!(library, "search.objects"); - invalidate_query!(library, "search.paths"); + if !ops.is_empty() && !object_ids.is_empty() { + sync.write_ops( + db, + ( + ops, + db.object().update_many( + vec![object::id::in_vec(object_ids)], + vec![object::date_accessed::set(None)], + ), + ), + ) + .await?; + + invalidate_query!(library, "search.objects"); + invalidate_query!(library, "search.paths"); + } + Ok(()) }) }) diff --git a/core/src/api/search/saved.rs b/core/src/api/search/saved.rs index fd8e6dafa..37dec602e 100644 --- a/core/src/api/search/saved.rs +++ b/core/src/api/search/saved.rs @@ -162,7 +162,7 @@ pub(crate) fn mount() -> AlphaRouter { rspc::Error::new(rspc::ErrorCode::NotFound, "search not found".into()) })?; - let (sync_params, db_params): (Vec<_>, Vec<_>) = chain_optional_iter( 
+ let (ops, db_params): (Vec<_>, Vec<_>) = chain_optional_iter( [sync_db_entry!(updated_at, saved_search::date_modified)], [ option_sync_db_entry!(args.name.flatten(), saved_search::name), @@ -187,18 +187,20 @@ pub(crate) fn mount() -> AlphaRouter { }) .unzip(); - sync.write_ops( - db, - ( - sync_params, - db.saved_search() - .update_unchecked(saved_search::id::equals(id), db_params), - ), - ) - .await?; + if !ops.is_empty() && !db_params.is_empty() { + sync.write_ops( + db, + ( + ops, + db.saved_search() + .update_unchecked(saved_search::id::equals(id), db_params), + ), + ) + .await?; - invalidate_query!(library, "search.saved.list"); - invalidate_query!(library, "search.saved.get"); + invalidate_query!(library, "search.saved.list"); + invalidate_query!(library, "search.saved.get"); + } Ok(()) } diff --git a/core/src/api/tags.rs b/core/src/api/tags.rs index 257b886b3..426b9992a 100644 --- a/core/src/api/tags.rs +++ b/core/src/api/tags.rs @@ -197,25 +197,22 @@ pub(crate) fn mount() -> AlphaRouter { ), ]); - sync.write_ops( - db, - ( - objects + let ops = objects + .into_iter() + .map(|o| o.pub_id) + .chain( + file_paths .into_iter() - .map(|o| o.pub_id) - .chain( - file_paths - .into_iter() - .filter_map(|fp| fp.object.map(|o| o.pub_id)), - ) - .map(|pub_id| sync.relation_delete(sync_id!(pub_id))) - .collect(), - query, - ), - ) - .await?; + .filter_map(|fp| fp.object.map(|o| o.pub_id)), + ) + .map(|pub_id| sync.relation_delete(sync_id!(pub_id))) + .collect::>(); + + if !ops.is_empty() { + sync.write_ops(db, (ops, query)).await?; + } } else { - let mut sync_params = vec![]; + let mut ops = vec![]; let db_params: (Vec<_>, Vec<_>) = file_paths .iter() @@ -224,12 +221,12 @@ pub(crate) fn mount() -> AlphaRouter { let id = uuid_to_bytes(&Uuid::now_v7()); let device_pub_id = sync.device_pub_id.to_db(); - sync_params.push(sync.shared_create( + ops.push(sync.shared_create( prisma_sync::object::SyncId { pub_id: id.clone() }, [(object::device_pub_id::NAME, 
msgpack!(device_pub_id))], )); - sync_params.push(sync.shared_update( + ops.push(sync.shared_update( prisma_sync::file_path::SyncId { pub_id: fp.pub_id.clone(), }, @@ -252,7 +249,11 @@ pub(crate) fn mount() -> AlphaRouter { }) .unzip(); - let (new_objects, _) = sync.write_ops(db, (sync_params, db_params)).await?; + if ops.is_empty() { + return Ok(()); + } + + let (new_objects, _) = sync.write_ops(db, (ops, db_params)).await?; let (sync_ops, db_creates) = objects .into_iter() @@ -290,6 +291,10 @@ pub(crate) fn mount() -> AlphaRouter { }, ); + if sync_ops.is_empty() && db_creates.is_empty() { + return Ok(()); + } + sync.write_ops( db, ( @@ -346,6 +351,10 @@ pub(crate) fn mount() -> AlphaRouter { .flatten() .unzip(); + if sync_params.is_empty() && db_params.is_empty() { + return Ok(()); + } + sync.write_ops( db, ( diff --git a/core/src/location/manager/watcher/utils.rs b/core/src/location/manager/watcher/utils.rs index f75f554c7..bafd7004c 100644 --- a/core/src/location/manager/watcher/utils.rs +++ b/core/src/location/manager/watcher/utils.rs @@ -656,29 +656,33 @@ async fn inner_update_file( .unzip() }; - // file content changed - sync.write_ops( - db, - ( - sync_params - .into_iter() - .map(|(field, value)| { - sync.shared_update( - prisma_sync::file_path::SyncId { - pub_id: file_path.pub_id.clone(), - }, - field, - value, - ) - }) - .collect(), - db.file_path().update( - file_path::pub_id::equals(file_path.pub_id.clone()), - db_params, + let ops = sync_params + .into_iter() + .map(|(field, value)| { + sync.shared_update( + prisma_sync::file_path::SyncId { + pub_id: file_path.pub_id.clone(), + }, + field, + value, + ) + }) + .collect::>(); + + if !ops.is_empty() && !db_params.is_empty() { + // file content changed + sync.write_ops( + db, + ( + ops, + db.file_path().update( + file_path::pub_id::equals(file_path.pub_id.clone()), + db_params, + ), ), - ), - ) - .await?; + ) + .await?; + } if let Some(ref object) = file_path.object { let int_kind = kind as i32; @@ -981,7 
+985,9 @@ pub(super) async fn rename( }) .unzip(); - sync.write_ops(db, (sync_params, db_params)).await?; + if !sync_params.is_empty() && !db_params.is_empty() { + sync.write_ops(db, (sync_params, db_params)).await?; + } trace!(%total_paths_count, "Updated file_paths;"); } @@ -1018,29 +1024,33 @@ pub(super) async fn rename( .into_iter() .unzip(); - sync.write_ops( - db, - ( - sync_params - .into_iter() - .map(|(k, v)| { - sync.shared_update( - prisma_sync::file_path::SyncId { - pub_id: file_path.pub_id.clone(), - }, - k, - v, - ) - }) - .collect(), - db.file_path() - .update(file_path::pub_id::equals(file_path.pub_id), db_params), - ), - ) - .await?; + let ops = sync_params + .into_iter() + .map(|(k, v)| { + sync.shared_update( + prisma_sync::file_path::SyncId { + pub_id: file_path.pub_id.clone(), + }, + k, + v, + ) + }) + .collect::>(); - invalidate_query!(library, "search.paths"); - invalidate_query!(library, "search.objects"); + if !ops.is_empty() && !db_params.is_empty() { + sync.write_ops( + db, + ( + ops, + db.file_path() + .update(file_path::pub_id::equals(file_path.pub_id), db_params), + ), + ) + .await?; + + invalidate_query!(library, "search.paths"); + invalidate_query!(library, "search.objects"); + } } Ok(()) diff --git a/crates/ai/src/old_image_labeler/process.rs b/crates/ai/src/old_image_labeler/process.rs index beff95da8..553c3004e 100644 --- a/crates/ai/src/old_image_labeler/process.rs +++ b/crates/ai/src/old_image_labeler/process.rs @@ -456,44 +456,46 @@ pub async fn assign_labels( let mut sync_params = Vec::with_capacity(labels_ids.len() * 2); - let db_params: Vec<_> = labels_ids - .into_iter() - .map(|(label_id, name)| { - let device_pub_id = sync.device_pub_id.to_db(); - sync_params.push(sync.relation_create( - prisma_sync::label_on_object::SyncId { - label: prisma_sync::label::SyncId { name }, - object: prisma_sync::object::SyncId { - pub_id: object.pub_id.clone(), + if !labels_ids.is_empty() { + let db_params: Vec<_> = labels_ids + .into_iter() 
+ .map(|(label_id, name)| { + let device_pub_id = sync.device_pub_id.to_db(); + sync_params.push(sync.relation_create( + prisma_sync::label_on_object::SyncId { + label: prisma_sync::label::SyncId { name }, + object: prisma_sync::object::SyncId { + pub_id: object.pub_id.clone(), + }, }, - }, - [( - label_on_object::device_pub_id::NAME, - msgpack!(device_pub_id), - )], - )); + [( + label_on_object::device_pub_id::NAME, + msgpack!(device_pub_id), + )], + )); - label_on_object::create_unchecked( - label_id, - object_id, - vec![ - label_on_object::date_created::set(date_created), - label_on_object::device_pub_id::set(Some(device_pub_id)), - ], - ) - }) - .collect(); + label_on_object::create_unchecked( + label_id, + object_id, + vec![ + label_on_object::date_created::set(date_created), + label_on_object::device_pub_id::set(Some(device_pub_id)), + ], + ) + }) + .collect(); - sync.write_ops( - db, - ( - sync_params, - db.label_on_object() - .create_many(db_params) - .skip_duplicates(), - ), - ) - .await?; + sync.write_ops( + db, + ( + sync_params, + db.label_on_object() + .create_many(db_params) + .skip_duplicates(), + ), + ) + .await?; + } Ok(has_new_labels) } From cd897b7de857d07435accd92f3ba54c50b87e71e Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Fri, 20 Sep 2024 00:33:35 -0300 Subject: [PATCH 133/218] Fix prisma issue on exif data --- .../helpers/exif_media_data.rs | 12 +- .../migration.sql | 201 ++++++++++++++++++ core/prisma/migrations/migration_lock.toml | 2 +- core/prisma/schema.prisma | 2 +- 4 files changed, 212 insertions(+), 5 deletions(-) create mode 100644 core/prisma/migrations/20240920032950_adding_devices/migration.sql diff --git a/core/crates/heavy-lifting/src/media_processor/helpers/exif_media_data.rs b/core/crates/heavy-lifting/src/media_processor/helpers/exif_media_data.rs index 39c152b0d..fcdb719ef 100644 --- a/core/crates/heavy-lifting/src/media_processor/helpers/exif_media_data.rs +++ 
b/core/crates/heavy-lifting/src/media_processor/helpers/exif_media_data.rs @@ -6,10 +6,10 @@ use sd_core_sync::{DevicePubId, SyncManager}; use sd_file_ext::extensions::{Extension, ImageExtension, ALL_IMAGE_EXTENSIONS}; use sd_media_metadata::ExifMetadata; use sd_prisma::{ - prisma::{exif_data, object, PrismaClient}, + prisma::{device, exif_data, object, PrismaClient}, prisma_sync, }; -use sd_sync::{option_sync_db_entry, sync_db_entry, OperationFactory}; +use sd_sync::{option_sync_db_entry, sync_entry, OperationFactory}; use sd_utils::chain_optional_iter; use std::path::Path; @@ -57,7 +57,10 @@ fn to_query( let device_pub_id = device_pub_id.to_db(); let (sync_params, db_params) = chain_optional_iter( - [sync_db_entry!(device_pub_id, exif_data::device_pub_id)], + [( + sync_entry!(&device_pub_id, exif_data::device_pub_id), + exif_data::device::connect(device::pub_id::equals(device_pub_id)), + )], [ option_sync_db_entry!( serde_json::to_vec(&camera_data).ok(), @@ -82,6 +85,9 @@ fn to_query( .into_iter() .unzip(); + tracing::warn!(?sync_params); + tracing::warn!(?db_params); + ( sync_params, exif_data::Create { diff --git a/core/prisma/migrations/20240920032950_adding_devices/migration.sql b/core/prisma/migrations/20240920032950_adding_devices/migration.sql new file mode 100644 index 000000000..60d4fb62f --- /dev/null +++ b/core/prisma/migrations/20240920032950_adding_devices/migration.sql @@ -0,0 +1,201 @@ +G/* + Warnings: + + - You are about to drop the `node` table. If the table is not empty, all the data it contains will be lost. + - You are about to drop the column `instance_id` on the `cloud_crdt_operation` table. All the data in the column will be lost. + - You are about to drop the column `instance_id` on the `crdt_operation` table. All the data in the column will be lost. + - You are about to drop the column `instance_pub_id` on the `storage_statistics` table. All the data in the column will be lost. 
+ - Added the required column `device_pub_id` to the `cloud_crdt_operation` table without a default value. This is not possible if the table is not empty. + - Added the required column `device_pub_id` to the `crdt_operation` table without a default value. This is not possible if the table is not empty. + +*/ +-- DropIndex +DROP INDEX "node_pub_id_key"; + +-- DropTable +PRAGMA foreign_keys=off; +DROP TABLE "node"; +PRAGMA foreign_keys=on; + +-- CreateTable +CREATE TABLE "device" ( + "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + "pub_id" BLOB NOT NULL, + "name" TEXT, + "os" INTEGER, + "hardware_model" INTEGER, + "timestamp" BIGINT, + "date_created" DATETIME, + "date_deleted" DATETIME +); + +-- RedefineTables +PRAGMA defer_foreign_keys=ON; +PRAGMA foreign_keys=OFF; +CREATE TABLE "new_cloud_crdt_operation" ( + "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + "timestamp" BIGINT NOT NULL, + "model" INTEGER NOT NULL, + "record_id" BLOB NOT NULL, + "kind" TEXT NOT NULL, + "data" BLOB NOT NULL, + "device_pub_id" BLOB NOT NULL, + CONSTRAINT "cloud_crdt_operation_device_pub_id_fkey" FOREIGN KEY ("device_pub_id") REFERENCES "device" ("pub_id") ON DELETE RESTRICT ON UPDATE CASCADE +); +INSERT INTO "new_cloud_crdt_operation" ("data", "id", "kind", "model", "record_id", "timestamp") SELECT "data", "id", "kind", "model", "record_id", "timestamp" FROM "cloud_crdt_operation"; +DROP TABLE "cloud_crdt_operation"; +ALTER TABLE "new_cloud_crdt_operation" RENAME TO "cloud_crdt_operation"; +CREATE INDEX "cloud_crdt_operation_timestamp_idx" ON "cloud_crdt_operation"("timestamp"); +CREATE TABLE "new_crdt_operation" ( + "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + "timestamp" BIGINT NOT NULL, + "model" INTEGER NOT NULL, + "record_id" BLOB NOT NULL, + "kind" TEXT NOT NULL, + "data" BLOB NOT NULL, + "device_pub_id" BLOB NOT NULL, + CONSTRAINT "crdt_operation_device_pub_id_fkey" FOREIGN KEY ("device_pub_id") REFERENCES "device" ("pub_id") ON DELETE RESTRICT ON UPDATE CASCADE +); 
+INSERT INTO "new_crdt_operation" ("data", "id", "kind", "model", "record_id", "timestamp") SELECT "data", "id", "kind", "model", "record_id", "timestamp" FROM "crdt_operation"; +DROP TABLE "crdt_operation"; +ALTER TABLE "new_crdt_operation" RENAME TO "crdt_operation"; +CREATE INDEX "crdt_operation_timestamp_idx" ON "crdt_operation"("timestamp"); +CREATE TABLE "new_exif_data" ( + "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + "resolution" BLOB, + "media_date" BLOB, + "media_location" BLOB, + "camera_data" BLOB, + "artist" TEXT, + "description" TEXT, + "copyright" TEXT, + "exif_version" TEXT, + "epoch_time" BIGINT, + "object_id" INTEGER NOT NULL, + "device_pub_id" BLOB, + CONSTRAINT "exif_data_object_id_fkey" FOREIGN KEY ("object_id") REFERENCES "object" ("id") ON DELETE CASCADE ON UPDATE CASCADE, + CONSTRAINT "exif_data_device_pub_id_fkey" FOREIGN KEY ("device_pub_id") REFERENCES "device" ("pub_id") ON DELETE CASCADE ON UPDATE CASCADE +); +INSERT INTO "new_exif_data" ("artist", "camera_data", "copyright", "description", "epoch_time", "exif_version", "id", "media_date", "media_location", "object_id", "resolution") SELECT "artist", "camera_data", "copyright", "description", "epoch_time", "exif_version", "id", "media_date", "media_location", "object_id", "resolution" FROM "exif_data"; +DROP TABLE "exif_data"; +ALTER TABLE "new_exif_data" RENAME TO "exif_data"; +CREATE UNIQUE INDEX "exif_data_object_id_key" ON "exif_data"("object_id"); +CREATE TABLE "new_file_path" ( + "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + "pub_id" BLOB NOT NULL, + "is_dir" BOOLEAN, + "cas_id" TEXT, + "integrity_checksum" TEXT, + "location_id" INTEGER, + "materialized_path" TEXT, + "name" TEXT, + "extension" TEXT, + "hidden" BOOLEAN, + "size_in_bytes" TEXT, + "size_in_bytes_bytes" BLOB, + "inode" BLOB, + "object_id" INTEGER, + "key_id" INTEGER, + "date_created" DATETIME, + "date_modified" DATETIME, + "date_indexed" DATETIME, + "device_pub_id" BLOB, + CONSTRAINT 
"file_path_location_id_fkey" FOREIGN KEY ("location_id") REFERENCES "location" ("id") ON DELETE SET NULL ON UPDATE CASCADE, + CONSTRAINT "file_path_object_id_fkey" FOREIGN KEY ("object_id") REFERENCES "object" ("id") ON DELETE SET NULL ON UPDATE CASCADE, + CONSTRAINT "file_path_device_pub_id_fkey" FOREIGN KEY ("device_pub_id") REFERENCES "device" ("pub_id") ON DELETE CASCADE ON UPDATE CASCADE +); +INSERT INTO "new_file_path" ("cas_id", "date_created", "date_indexed", "date_modified", "extension", "hidden", "id", "inode", "integrity_checksum", "is_dir", "key_id", "location_id", "materialized_path", "name", "object_id", "pub_id", "size_in_bytes", "size_in_bytes_bytes") SELECT "cas_id", "date_created", "date_indexed", "date_modified", "extension", "hidden", "id", "inode", "integrity_checksum", "is_dir", "key_id", "location_id", "materialized_path", "name", "object_id", "pub_id", "size_in_bytes", "size_in_bytes_bytes" FROM "file_path"; +DROP TABLE "file_path"; +ALTER TABLE "new_file_path" RENAME TO "file_path"; +CREATE UNIQUE INDEX "file_path_pub_id_key" ON "file_path"("pub_id"); +CREATE INDEX "file_path_location_id_idx" ON "file_path"("location_id"); +CREATE INDEX "file_path_location_id_materialized_path_idx" ON "file_path"("location_id", "materialized_path"); +CREATE UNIQUE INDEX "file_path_location_id_materialized_path_name_extension_key" ON "file_path"("location_id", "materialized_path", "name", "extension"); +CREATE UNIQUE INDEX "file_path_location_id_inode_key" ON "file_path"("location_id", "inode"); +CREATE TABLE "new_label_on_object" ( + "date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "object_id" INTEGER NOT NULL, + "label_id" INTEGER NOT NULL, + "device_pub_id" BLOB, + + PRIMARY KEY ("label_id", "object_id"), + CONSTRAINT "label_on_object_object_id_fkey" FOREIGN KEY ("object_id") REFERENCES "object" ("id") ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT "label_on_object_label_id_fkey" FOREIGN KEY ("label_id") REFERENCES "label" ("id") ON 
DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT "label_on_object_device_pub_id_fkey" FOREIGN KEY ("device_pub_id") REFERENCES "device" ("pub_id") ON DELETE CASCADE ON UPDATE CASCADE +); +INSERT INTO "new_label_on_object" ("date_created", "label_id", "object_id") SELECT "date_created", "label_id", "object_id" FROM "label_on_object"; +DROP TABLE "label_on_object"; +ALTER TABLE "new_label_on_object" RENAME TO "label_on_object"; +CREATE TABLE "new_location" ( + "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + "pub_id" BLOB NOT NULL, + "name" TEXT, + "path" TEXT, + "total_capacity" INTEGER, + "available_capacity" INTEGER, + "size_in_bytes" BLOB, + "is_archived" BOOLEAN, + "generate_preview_media" BOOLEAN, + "sync_preview_media" BOOLEAN, + "hidden" BOOLEAN, + "date_created" DATETIME, + "scan_state" INTEGER NOT NULL DEFAULT 0, + "device_pub_id" BLOB, + "instance_id" INTEGER, + CONSTRAINT "location_device_pub_id_fkey" FOREIGN KEY ("device_pub_id") REFERENCES "device" ("pub_id") ON DELETE CASCADE ON UPDATE CASCADE, + CONSTRAINT "location_instance_id_fkey" FOREIGN KEY ("instance_id") REFERENCES "instance" ("id") ON DELETE SET NULL ON UPDATE CASCADE +); +INSERT INTO "new_location" ("available_capacity", "date_created", "generate_preview_media", "hidden", "id", "instance_id", "is_archived", "name", "path", "pub_id", "scan_state", "size_in_bytes", "sync_preview_media", "total_capacity") SELECT "available_capacity", "date_created", "generate_preview_media", "hidden", "id", "instance_id", "is_archived", "name", "path", "pub_id", "scan_state", "size_in_bytes", "sync_preview_media", "total_capacity" FROM "location"; +DROP TABLE "location"; +ALTER TABLE "new_location" RENAME TO "location"; +CREATE UNIQUE INDEX "location_pub_id_key" ON "location"("pub_id"); +CREATE TABLE "new_object" ( + "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + "pub_id" BLOB NOT NULL, + "kind" INTEGER, + "key_id" INTEGER, + "hidden" BOOLEAN, + "favorite" BOOLEAN, + "important" BOOLEAN, + "note" TEXT, + 
"date_created" DATETIME, + "date_accessed" DATETIME, + "device_pub_id" BLOB, + CONSTRAINT "object_device_pub_id_fkey" FOREIGN KEY ("device_pub_id") REFERENCES "device" ("pub_id") ON DELETE CASCADE ON UPDATE CASCADE +); +INSERT INTO "new_object" ("date_accessed", "date_created", "favorite", "hidden", "id", "important", "key_id", "kind", "note", "pub_id") SELECT "date_accessed", "date_created", "favorite", "hidden", "id", "important", "key_id", "kind", "note", "pub_id" FROM "object"; +DROP TABLE "object"; +ALTER TABLE "new_object" RENAME TO "object"; +CREATE UNIQUE INDEX "object_pub_id_key" ON "object"("pub_id"); +CREATE TABLE "new_storage_statistics" ( + "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + "pub_id" BLOB NOT NULL, + "total_capacity" BIGINT NOT NULL DEFAULT 0, + "available_capacity" BIGINT NOT NULL DEFAULT 0, + "device_pub_id" BLOB, + CONSTRAINT "storage_statistics_device_pub_id_fkey" FOREIGN KEY ("device_pub_id") REFERENCES "device" ("pub_id") ON DELETE CASCADE ON UPDATE CASCADE +); +INSERT INTO "new_storage_statistics" ("available_capacity", "id", "pub_id", "total_capacity") SELECT "available_capacity", "id", "pub_id", "total_capacity" FROM "storage_statistics"; +DROP TABLE "storage_statistics"; +ALTER TABLE "new_storage_statistics" RENAME TO "storage_statistics"; +CREATE UNIQUE INDEX "storage_statistics_pub_id_key" ON "storage_statistics"("pub_id"); +CREATE UNIQUE INDEX "storage_statistics_device_pub_id_key" ON "storage_statistics"("device_pub_id"); +CREATE TABLE "new_tag_on_object" ( + "object_id" INTEGER NOT NULL, + "tag_id" INTEGER NOT NULL, + "date_created" DATETIME, + "device_pub_id" BLOB, + + PRIMARY KEY ("tag_id", "object_id"), + CONSTRAINT "tag_on_object_object_id_fkey" FOREIGN KEY ("object_id") REFERENCES "object" ("id") ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT "tag_on_object_tag_id_fkey" FOREIGN KEY ("tag_id") REFERENCES "tag" ("id") ON DELETE RESTRICT ON UPDATE CASCADE, + CONSTRAINT "tag_on_object_device_pub_id_fkey" FOREIGN KEY 
("device_pub_id") REFERENCES "device" ("pub_id") ON DELETE CASCADE ON UPDATE CASCADE +); +INSERT INTO "new_tag_on_object" ("date_created", "object_id", "tag_id") SELECT "date_created", "object_id", "tag_id" FROM "tag_on_object"; +DROP TABLE "tag_on_object"; +ALTER TABLE "new_tag_on_object" RENAME TO "tag_on_object"; +PRAGMA foreign_keys=ON; +PRAGMA defer_foreign_keys=OFF; + +-- CreateIndex +CREATE UNIQUE INDEX "device_pub_id_key" ON "device"("pub_id"); diff --git a/core/prisma/migrations/migration_lock.toml b/core/prisma/migrations/migration_lock.toml index 6fcf33daf..e5e5c4705 100644 --- a/core/prisma/migrations/migration_lock.toml +++ b/core/prisma/migrations/migration_lock.toml @@ -1,3 +1,3 @@ # Please do not edit this file manually # It should be added in your version-control system (i.e. Git) -provider = "sqlite" +provider = "sqlite" \ No newline at end of file diff --git a/core/prisma/schema.prisma b/core/prisma/schema.prisma index 105e2edb0..3dc65d614 100644 --- a/core/prisma/schema.prisma +++ b/core/prisma/schema.prisma @@ -83,7 +83,7 @@ model Device { TagOnObject TagOnObject[] LabelOnObject LabelOnObject[] - @@map("node") + @@map("device") } // represents a single `.db` file (SQLite DB) that is paired to the current library. From 1d22a755aff58f1166197f1c221dc8b17b752974 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Fri, 20 Sep 2024 00:48:45 -0300 Subject: [PATCH 134/218] bruh --- .../migrations/20240920032950_adding_devices/migration.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/prisma/migrations/20240920032950_adding_devices/migration.sql b/core/prisma/migrations/20240920032950_adding_devices/migration.sql index 60d4fb62f..ca9765728 100644 --- a/core/prisma/migrations/20240920032950_adding_devices/migration.sql +++ b/core/prisma/migrations/20240920032950_adding_devices/migration.sql @@ -1,4 +1,4 @@ -G/* +/* Warnings: - You are about to drop the `node` table. 
If the table is not empty, all the data it contains will be lost. From 9658c5d0f0222b39e7718b172fdc6d7cab05ae58 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Fri, 20 Sep 2024 00:58:16 -0300 Subject: [PATCH 135/218] Comment old auth stuff on frontend --- apps/mobile/src/stores/auth.ts | 72 +++++++++---------- .../settings/client/SpacedriveAccount.tsx | 4 +- interface/app/onboarding/login.tsx | 4 +- packages/client/src/stores/auth.ts | 68 +++++++++--------- 4 files changed, 74 insertions(+), 74 deletions(-) diff --git a/apps/mobile/src/stores/auth.ts b/apps/mobile/src/stores/auth.ts index 3ef1079d8..c2851cf25 100644 --- a/apps/mobile/src/stores/auth.ts +++ b/apps/mobile/src/stores/auth.ts @@ -18,16 +18,16 @@ export function useAuthStateSnapshot() { return useSolidStore(store).state; } -nonLibraryClient - .query(['auth.me']) - .then(() => (store.state = { status: 'loggedIn' })) - .catch((e) => { - if (e instanceof RSPCError && e.code === 401) { - // TODO: handle error? - console.error('error', e); - } - store.state = { status: 'notLoggedIn' }; - }); +// nonLibraryClient +// .query(['auth.me']) +// .then(() => (store.state = { status: 'loggedIn' })) +// .catch((e) => { +// if (e instanceof RSPCError && e.code === 401) { +// // TODO: handle error? 
+// console.error('error', e); +// } +// store.state = { status: 'notLoggedIn' }; +// }); type CallbackStatus = 'success' | { error: string } | 'cancel'; const loginCallbacks = new Set<(status: CallbackStatus) => void>(); @@ -41,29 +41,29 @@ export function login() { store.state = { status: 'loggingIn' }; - let authCleanup = nonLibraryClient.addSubscription(['auth.loginSession'], { - onData(data) { - if (data === 'Complete') { - loginCallbacks.forEach((cb) => cb('success')); - } else if ('Error' in data) { - console.error('[auth] error: ', data.Error); - onError(data.Error); - } else { - console.log('[auth] verification url: ', data.Start.verification_url_complete); - Promise.resolve() - .then(() => Linking.openURL(data.Start.verification_url_complete)) - .then( - (res) => { - authCleanup = res; - }, - (e) => onError(e.message) - ); - } - }, - onError(e) { - onError(e.message); - } - }); + // let authCleanup = nonLibraryClient.addSubscription(['auth.loginSession'], { + // onData(data) { + // if (data === 'Complete') { + // loginCallbacks.forEach((cb) => cb('success')); + // } else if ('Error' in data) { + // console.error('[auth] error: ', data.Error); + // onError(data.Error); + // } else { + // console.log('[auth] verification url: ', data.Start.verification_url_complete); + // Promise.resolve() + // .then(() => Linking.openURL(data.Start.verification_url_complete)) + // .then( + // (res) => { + // authCleanup = res; + // }, + // (e) => onError(e.message) + // ); + // } + // }, + // onError(e) { + // onError(e.message); + // } + // }); return new Promise((res, rej) => { const cb = async (status: CallbackStatus) => { @@ -71,7 +71,7 @@ export function login() { if (status === 'success') { store.state = { status: 'loggedIn' }; - nonLibraryClient.query(['auth.me']); + // nonLibraryClient.query(['auth.me']); res(); } else { store.state = { status: 'notLoggedIn' }; @@ -88,8 +88,8 @@ export function set_logged_in() { export function logout() { store.state = { status: 
'loggingOut' }; - nonLibraryClient.mutation(['auth.logout']); - nonLibraryClient.query(['auth.me']); + // nonLibraryClient.mutation(['auth.logout']); + // nonLibraryClient.query(['auth.me']); store.state = { status: 'notLoggedIn' }; } diff --git a/interface/app/$libraryId/settings/client/SpacedriveAccount.tsx b/interface/app/$libraryId/settings/client/SpacedriveAccount.tsx index e05688945..4f5f67112 100644 --- a/interface/app/$libraryId/settings/client/SpacedriveAccount.tsx +++ b/interface/app/$libraryId/settings/client/SpacedriveAccount.tsx @@ -13,7 +13,7 @@ export function SpacedriveAccount() { } function Account() { - const me = useBridgeQuery(['auth.me'], { retry: false }); + // const me = useBridgeQuery(['auth.me'], { retry: false }); const { t } = useLocale(); return ( @@ -25,7 +25,7 @@ function Account() {


- {t('logged_in_as', { email: me.data?.email })} + {t('logged_in_as', "TODO")}
); } diff --git a/interface/app/onboarding/login.tsx b/interface/app/onboarding/login.tsx index 24c794bb9..f521cdd22 100644 --- a/interface/app/onboarding/login.tsx +++ b/interface/app/onboarding/login.tsx @@ -13,7 +13,7 @@ export default function OnboardingLogin() { const authState = auth.useStateSnapshot(); const navigate = useNavigate(); - const me = useBridgeQuery(['auth.me'], { retry: false }); + // const me = useBridgeQuery(['auth.me'], { retry: false }); return ( @@ -31,7 +31,7 @@ export default function OnboardingLogin() { className="mb-3" />

- Logged in as {me.data?.email} + Logged in as TODO

diff --git a/packages/client/src/stores/auth.ts b/packages/client/src/stores/auth.ts index d261fc845..1d3b6719d 100644 --- a/packages/client/src/stores/auth.ts +++ b/packages/client/src/stores/auth.ts @@ -24,15 +24,15 @@ export function useStateSnapshot() { return useSolidStore(store).state; } -nonLibraryClient - .query(['auth.me']) - .then(() => (store.state = { status: 'loggedIn' })) - .catch((e) => { - if (e instanceof RSPCError && e.code === 401) { - // TODO: handle error? - } - store.state = { status: 'notLoggedIn' }; - }); +// nonLibraryClient +// .query(['auth.me']) +// .then(() => (store.state = { status: 'loggedIn' })) +// .catch((e) => { +// if (e instanceof RSPCError && e.code === 401) { +// // TODO: handle error? +// } +// store.state = { status: 'notLoggedIn' }; +// }); type CallbackStatus = 'success' | { error: string } | 'cancel'; const loginCallbacks = new Set<(status: CallbackStatus) => void>(); @@ -46,28 +46,28 @@ export async function login(config: ProviderConfig) { store.state = { status: 'loggingIn' }; - let authCleanup = nonLibraryClient.addSubscription(['auth.loginSession'], { - onData(data) { - if (data === 'Complete') { - config.finish?.(authCleanup); - loginCallbacks.forEach((cb) => cb('success')); - } else if ('Error' in data) { - onError(data.Error); - } else { - Promise.resolve() - .then(() => config.start(data.Start.verification_url_complete)) - .then( - (res) => { - authCleanup = res; - }, - (e) => onError(e.message) - ); - } - }, - onError(e) { - onError(e.message); - } - }); + // let authCleanup = nonLibraryClient.addSubscription(['auth.loginSession'], { + // onData(data) { + // if (data === 'Complete') { + // config.finish?.(authCleanup); + // loginCallbacks.forEach((cb) => cb('success')); + // } else if ('Error' in data) { + // onError(data.Error); + // } else { + // Promise.resolve() + // .then(() => config.start(data.Start.verification_url_complete)) + // .then( + // (res) => { + // authCleanup = res; + // }, + // (e) => 
onError(e.message) + // ); + // } + // }, + // onError(e) { + // onError(e.message); + // } + // }); return new Promise((res, rej) => { const cb = async (status: CallbackStatus) => { @@ -75,7 +75,7 @@ export async function login(config: ProviderConfig) { if (status === 'success') { store.state = { status: 'loggedIn' }; - nonLibraryClient.query(['auth.me']); + // nonLibraryClient.query(['auth.me']); res(); } else { store.state = { status: 'notLoggedIn' }; @@ -88,8 +88,8 @@ export async function login(config: ProviderConfig) { export async function logout() { store.state = { status: 'loggingOut' }; - await nonLibraryClient.mutation(['auth.logout']); - await nonLibraryClient.query(['auth.me']); + // await nonLibraryClient.mutation(['auth.logout']); + // await nonLibraryClient.query(['auth.me']); store.state = { status: 'notLoggedIn' }; } From 5d99c80e3d8ad0f95af1e01aac1ea35e12fa5968 Mon Sep 17 00:00:00 2001 From: myung03 Date: Fri, 20 Sep 2024 18:16:15 -0700 Subject: [PATCH 136/218] abstracted out auth page and restyled --- .../settings/client/account/Login.tsx | 140 ------------ .../settings/client/account/Register.tsx | 178 --------------- .../settings/client/account/Tabs.tsx | 169 -------------- .../settings/client/account/index.tsx | 28 ++- interface/components/Authentication.tsx | 154 +++++++++++++ interface/components/Login.tsx | 167 ++++++++++++++ interface/components/Register.tsx | 211 ++++++++++++++++++ interface/components/ShowPassword.tsx | 27 +++ interface/components/index.ts | 1 + interface/locales/en/common.json | 6 +- packages/ui/tsconfig.json | 2 +- 11 files changed, 586 insertions(+), 497 deletions(-) delete mode 100644 interface/app/$libraryId/settings/client/account/Login.tsx delete mode 100644 interface/app/$libraryId/settings/client/account/Register.tsx delete mode 100644 interface/app/$libraryId/settings/client/account/Tabs.tsx create mode 100644 interface/components/Authentication.tsx create mode 100644 interface/components/Login.tsx create mode 
100644 interface/components/Register.tsx create mode 100644 interface/components/ShowPassword.tsx diff --git a/interface/app/$libraryId/settings/client/account/Login.tsx b/interface/app/$libraryId/settings/client/account/Login.tsx deleted file mode 100644 index b64ec4b6c..000000000 --- a/interface/app/$libraryId/settings/client/account/Login.tsx +++ /dev/null @@ -1,140 +0,0 @@ -import { useEffect, useState } from 'react'; -import { Controller } from 'react-hook-form'; -import { signIn } from 'supertokens-web-js/recipe/emailpassword'; -import { nonLibraryClient, useZodForm } from '@sd/client'; -import { Button, Form, Input, toast, z } from '@sd/ui'; - -import ShowPassword from './ShowPassword'; - -async function signInClicked(email: string, password: string) { - try { - const response = await signIn({ - formFields: [ - { - id: 'email', - value: email - }, - { - id: 'password', - value: password - } - ] - }); - - if (response.status === 'FIELD_ERROR') { - response.formFields.forEach((formField) => { - if (formField.id === 'email') { - // Email validation failed (for example incorrect email syntax). - toast.error(formField.error); - } - }); - } else if (response.status === 'WRONG_CREDENTIALS_ERROR') { - toast.error('Email & password combination is incorrect.'); - } else if (response.status === 'SIGN_IN_NOT_ALLOWED') { - // the reason string is a user friendly message - // about what went wrong. It can also contain a support code which users - // can tell you so you know why their sign in was not allowed. - toast.error(response.reason); - } else { - // sign in successful. The session tokens are automatically handled by - // the frontend SDK. - toast.success('Sign in successful'); - // Refresh the page to reflect the new session state. - // FIXME: This is a temporary workaround. We will provide a better way to handle this. 
- window.location.reload(); - } - } catch (err: any) { - if (err.isSuperTokensGeneralError === true) { - // this may be a custom error message sent from the API by you. - toast.error(err.message); - } else { - console.error(err); - toast.error('Oops! Something went wrong.'); - } - } -} - -const LoginSchema = z.object({ - email: z.string().email(), - password: z.string().min(6) -}); - -const Login = () => { - const [showPassword, setShowPassword] = useState(false); - const form = useZodForm({ - schema: LoginSchema, - defaultValues: { - email: '', - password: '' - } - }); - - return ( - { - // handle login submission - await signInClicked(data.email, data.password); - })} - form={form} - > -
- ( - - )} - /> - {form.formState.errors.email && ( -

{form.formState.errors.email.message}

- )} - ( -
- { - const pastedText = e.clipboardData.getData('text'); - field.onChange(pastedText); - }} - /> - -
- )} - /> - {form.formState.errors.password && ( -

{form.formState.errors.password.message}

- )} - -
- - ); -}; - -export default Login; diff --git a/interface/app/$libraryId/settings/client/account/Register.tsx b/interface/app/$libraryId/settings/client/account/Register.tsx deleted file mode 100644 index 6dbc4ef15..000000000 --- a/interface/app/$libraryId/settings/client/account/Register.tsx +++ /dev/null @@ -1,178 +0,0 @@ -import { zodResolver } from '@hookform/resolvers/zod'; -import { useState } from 'react'; -import { Controller, useForm } from 'react-hook-form'; -import { signUp } from 'supertokens-web-js/recipe/emailpassword'; -import { Button, Form, Input, toast, z } from '@sd/ui'; - -import ShowPassword from './ShowPassword'; - -const RegisterSchema = z - .object({ - email: z.string().email(), - password: z.string().min(6), - confirmPassword: z.string().min(6) - }) - .refine((data) => data.password === data.confirmPassword, { - message: 'Passwords do not match', - path: ['confirmPassword'] - }); -type RegisterType = z.infer; - -async function signUpClicked(email: string, password: string) { - try { - const response = await signUp({ - formFields: [ - { - id: 'email', - value: email - }, - { - id: 'password', - value: password - } - ] - }); - - if (response.status === 'FIELD_ERROR') { - // one of the input formFields failed validaiton - response.formFields.forEach((formField) => { - if (formField.id === 'email') { - // Email validation failed (for example incorrect email syntax), - // or the email is not unique. - toast.error(formField.error); - } else if (formField.id === 'password') { - // Password validation failed. - // Maybe it didn't match the password strength - toast.error(formField.error); - } - }); - } else if (response.status === 'SIGN_UP_NOT_ALLOWED') { - // the reason string is a user friendly message - // about what went wrong. It can also contain a support code which users - // can tell you so you know why their sign up was not allowed. - toast.error(response.reason); - } else { - // sign up successful. 
The session tokens are automatically handled by - // the frontend SDK. - toast.success('Sign up successful'); - // FIXME: This is a temporary workaround. We will provide a better way to handle this. - window.location.reload(); - } - } catch (err: any) { - if (err.isSuperTokensGeneralError === true) { - // this may be a custom error message sent from the API by you. - toast.error(err.message); - } else { - toast.error('Oops! Something went wrong.'); - } - } -} - -const Register = () => { - const [showPassword, setShowPassword] = useState(false); - // useZodForm seems to be out-dated or needs - //fixing as it does not support the schema using zod.refine - const form = useForm({ - resolver: zodResolver(RegisterSchema), - defaultValues: { - email: '', - password: '', - confirmPassword: '' - } - }); - return ( -
{ - // handle login submission - console.log(data); - await signUpClicked(data.email, data.password); - })} - form={form} - > -
- ( - - )} - /> - {form.formState.errors.email && ( -

{form.formState.errors.email.message}

- )} - ( -
- { - const pastedText = e.clipboardData.getData('text'); - field.onChange(pastedText); - }} - /> - -
- )} - /> - {form.formState.errors.password && ( -

{form.formState.errors.password.message}

- )} - ( -
- - -
- )} - /> - {form.formState.errors.confirmPassword && ( -

- {form.formState.errors.confirmPassword.message} -

- )} - -
-
- ); -}; - -export default Register; diff --git a/interface/app/$libraryId/settings/client/account/Tabs.tsx b/interface/app/$libraryId/settings/client/account/Tabs.tsx deleted file mode 100644 index 015d67d77..000000000 --- a/interface/app/$libraryId/settings/client/account/Tabs.tsx +++ /dev/null @@ -1,169 +0,0 @@ -import { GoogleLogo, Icon } from '@phosphor-icons/react'; -import { Apple, Github } from '@sd/assets/svgs/brands'; -import { open } from '@tauri-apps/plugin-shell'; -import clsx from 'clsx'; -import { motion } from 'framer-motion'; -import { useState } from 'react'; -import { getAuthorisationURLWithQueryParamsAndSetState } from 'supertokens-web-js/recipe/thirdparty'; -import { Button, Card, Divider, toast, Tooltip } from '@sd/ui'; - -import Login from './Login'; -import Register from './Register'; - -const AccountTabs = ['Login', 'Register'] as const; - -type SocialLogin = { - name: 'Github' | 'Google' | 'Apple'; - icon: Icon; -}; - -const SocialLogins: SocialLogin[] = [ - { name: 'Github', icon: Github }, - { name: 'Google', icon: GoogleLogo }, - { name: 'Apple', icon: Apple } -]; - -const Tabs = () => { - const [activeTab, setActiveTab] = useState<'Login' | 'Register'>('Login'); - - // Currently opens in App. - const socialLoginHandlers = (name: SocialLogin['name']) => { - return { - Github: async () => { - try { - const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ - thirdPartyId: 'github', - - // This is where Github should redirect the user back after login or error. - frontendRedirectURI: 'http://localhost:9420/api/auth/callback/github' - }); - - // we redirect the user to Github for auth. - await open(authUrl); - } catch (err: any) { - if (err.isSuperTokensGeneralError === true) { - // this may be a custom error message sent from the API by you. - toast.error(err.message); - } else { - toast.error('Oops! 
Something went wrong.'); - } - } - }, - Google: async () => { - try { - const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ - thirdPartyId: 'google', - - // This is where Google should redirect the user back after login or error. - // This URL goes on the Google's dashboard as well. - frontendRedirectURI: 'spacedrive://-/auth' - }); - - /* - Example value of authUrl: https://accounts.google.com/o/oauth2/v2/auth/oauthchooseaccount?scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fuserinfo.email&access_type=offline&include_granted_scopes=true&response_type=code&client_id=1060725074195-kmeum4crr01uirfl2op9kd5acmi9jutn.apps.googleusercontent.com&state=5a489996a28cafc83ddff&redirect_uri=https%3A%2F%2Fsupertokens.io%2Fdev%2Foauth%2Fredirect-to-app&flowName=GeneralOAuthFlow - */ - - // we redirect the user to google for auth. - await open(authUrl); - } catch (err: any) { - if (err.isSuperTokensGeneralError === true) { - // this may be a custom error message sent from the API by you. - toast.error(err.message); - } else { - toast.error('Oops! Something went wrong.'); - } - } - }, - Apple: async () => { - try { - const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ - thirdPartyId: 'apple', - - // This is where Apple should redirect the user back after login or error. - frontendRedirectURI: 'http://localhost:9420/api/auth/callback/apple' - }); - - // we redirect the user to Apple for auth. - await open(authUrl); - } catch (err: any) { - if (err.isSuperTokensGeneralError === true) { - // this may be a custom error message sent from the API by you. - toast.error(err.message); - } else { - toast.error('Oops! Something went wrong.'); - } - } - } - }[name](); - }; - - return ( - -
- {AccountTabs.map((text) => ( -
{ - setActiveTab(text); - }} - className={clsx( - 'relative flex-1 border-b border-app-line p-2.5 text-center', - text === 'Login' ? 'rounded-tl-md' : 'rounded-tr-md' - )} - > -

- {text} -

- {text === activeTab && ( - - )} -
- ))} -
-
- {activeTab === 'Login' ? : } - {/* Disabling for now for demo purposes. We need to figure out on the backend how the tokens are recieved so we can a) store them in the frontend and b) use them as auth tokens for our cloud services. - @Rocky43007 */} - {/*
- -

OR

- -
-
- {SocialLogins.map((social) => ( - - - - ))} -
*/} -
-
- ); -}; - -export default Tabs; diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index 63601083e..ff91e70fc 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -2,12 +2,12 @@ import { useEffect, useState } from 'react'; import Session, { signOut } from 'supertokens-web-js/recipe/session'; import { auth, useBridgeMutation, useBridgeQuery, useFeatureFlag } from '@sd/client'; import { Button, Input, toast } from '@sd/ui'; +import { Authentication } from '~/components'; import { useLocale } from '~/hooks'; import { AUTH_SERVER_URL } from '~/util'; import { Heading } from '../../Layout'; import Profile from './Profile'; -import Tabs from './Tabs'; type User = { email: string; @@ -19,6 +19,8 @@ type User = { export const Component = () => { const { t } = useLocale(); const [userInfo, setUserInfo] = useState(null); + const [reload, setReload] = useState(false); + useEffect(() => { async function _() { const user_data = await fetch(`${AUTH_SERVER_URL}/api/user`, { @@ -36,21 +38,22 @@ export const Component = () => { setUserInfo(null); } }); + setReload(false); // eslint-disable-next-line react-hooks/exhaustive-deps - }, []); + }, [reload]); return ( <> {userInfo !== null && ( -
+
+ + ))} +
*/} +
+ + ); +}; diff --git a/interface/components/Login.tsx b/interface/components/Login.tsx new file mode 100644 index 000000000..699e756f1 --- /dev/null +++ b/interface/components/Login.tsx @@ -0,0 +1,167 @@ +import clsx from 'clsx'; +import { Dispatch, SetStateAction, useEffect, useState } from 'react'; +import { Controller } from 'react-hook-form'; +import { useNavigate } from 'react-router'; +import { signIn } from 'supertokens-web-js/recipe/emailpassword'; +import { nonLibraryClient, useZodForm } from '@sd/client'; +import { Button, Form, Input, toast, z } from '@sd/ui'; +import { useIsDark, useLocale } from '~/hooks'; + +import ShowPassword from './ShowPassword'; + +async function signInClicked( + email: string, + password: string, + reload: Dispatch> +) { + try { + const response = await signIn({ + formFields: [ + { + id: 'email', + value: email + }, + { + id: 'password', + value: password + } + ] + }); + + if (response.status === 'FIELD_ERROR') { + response.formFields.forEach((formField) => { + if (formField.id === 'email') { + toast.error(formField.error); + } + }); + } else if (response.status === 'WRONG_CREDENTIALS_ERROR') { + toast.error('Email & password combination is incorrect.'); + } else if (response.status === 'SIGN_IN_NOT_ALLOWED') { + toast.error(response.reason); + } else { + toast.success('Sign in successful'); + reload(true); + } + } catch (err: any) { + if (err.isSuperTokensGeneralError === true) { + toast.error(err.message); + } else { + console.error(err); + toast.error('Oops! 
Something went wrong.'); + } + } +} + +const LoginSchema = z.object({ + email: z.string().email(), + password: z.string().min(6) +}); + +const Login = ({ reload }: { reload: Dispatch> }) => { + const { t } = useLocale(); + const isDark = useIsDark(); + const [showPassword, setShowPassword] = useState(false); + const navigate = useNavigate(); // useNavigate hook + const form = useZodForm({ + schema: LoginSchema, + defaultValues: { + email: '', + password: '' + } + }); + + return ( +
{ + await signInClicked(data.email, data.password, reload); + })} + form={form} + > +
+
+
+ + ( + + )} + /> + {form.formState.errors.email && ( +

+ {form.formState.errors.email.message} +

+ )} +
+ +
+ + ( +
+ { + const pastedText = e.clipboardData.getData('text'); + field.onChange(pastedText); + }} + /> + +
+ )} + /> + {form.formState.errors.password && ( +

+ {form.formState.errors.password.message} +

+ )} +
+
+ + {form.formState.errors.password && ( +

{form.formState.errors.password.message}

+ )} + +
+
+ ); +}; + +export default Login; diff --git a/interface/components/Register.tsx b/interface/components/Register.tsx new file mode 100644 index 000000000..89b4e19ab --- /dev/null +++ b/interface/components/Register.tsx @@ -0,0 +1,211 @@ +import { zodResolver } from '@hookform/resolvers/zod'; +import clsx from 'clsx'; +import { useState } from 'react'; +import { Controller, useForm } from 'react-hook-form'; +import { signUp } from 'supertokens-web-js/recipe/emailpassword'; +import { Button, Form, Input, toast, z } from '@sd/ui'; +import { useIsDark, useLocale } from '~/hooks'; + +import ShowPassword from './ShowPassword'; + +const RegisterSchema = z + .object({ + email: z.string().email(), + password: z.string().min(6), + confirmPassword: z.string().min(6) + }) + .refine((data) => data.password === data.confirmPassword, { + message: 'Passwords do not match', + path: ['confirmPassword'] + }); +type RegisterType = z.infer; + +async function signUpClicked(email: string, password: string) { + try { + const response = await signUp({ + formFields: [ + { + id: 'email', + value: email + }, + { + id: 'password', + value: password + } + ] + }); + + if (response.status === 'FIELD_ERROR') { + // one of the input formFields failed validaiton + response.formFields.forEach((formField) => { + if (formField.id === 'email') { + // Email validation failed (for example incorrect email syntax), + // or the email is not unique. + toast.error(formField.error); + } else if (formField.id === 'password') { + // Password validation failed. + // Maybe it didn't match the password strength + toast.error(formField.error); + } + }); + } else if (response.status === 'SIGN_UP_NOT_ALLOWED') { + // the reason string is a user friendly message + // about what went wrong. It can also contain a support code which users + // can tell you so you know why their sign up was not allowed. + toast.error(response.reason); + } else { + // sign up successful. 
The session tokens are automatically handled by + // the frontend SDK. + toast.success('Sign up successful'); + // FIXME: This is a temporary workaround. We will provide a better way to handle this. + window.location.reload(); + } + } catch (err: any) { + if (err.isSuperTokensGeneralError === true) { + // this may be a custom error message sent from the API by you. + toast.error(err.message); + } else { + toast.error('Oops! Something went wrong.'); + } + } +} + +const Register = () => { + const { t } = useLocale(); + const isDark = useIsDark(); + const [showPassword, setShowPassword] = useState(false); + // useZodForm seems to be out-dated or needs + //fixing as it does not support the schema using zod.refine + const form = useForm({ + resolver: zodResolver(RegisterSchema), + defaultValues: { + email: '', + password: '', + confirmPassword: '' + } + }); + return ( +
{ + // handle sign-up submission + console.log(data); + await signUpClicked(data.email, data.password); + })} + form={form} + > +
+
+
+ + ( + + )} + /> + {form.formState.errors.email && ( +

+ {form.formState.errors.email.message} +

+ )} +
+ +
+ + ( +
+ { + const pastedText = e.clipboardData.getData('text'); + field.onChange(pastedText); + }} + /> + +
+ )} + /> + {form.formState.errors.password && ( +

+ {form.formState.errors.password.message} +

+ )} +
+ +
+ ( +
+ + +
+ )} + /> + {form.formState.errors.confirmPassword && ( +

+ {form.formState.errors.confirmPassword.message} +

+ )} +
+
+ + +
+
+ ); +}; + +export default Register; diff --git a/interface/components/ShowPassword.tsx b/interface/components/ShowPassword.tsx new file mode 100644 index 000000000..d3e846da0 --- /dev/null +++ b/interface/components/ShowPassword.tsx @@ -0,0 +1,27 @@ +import { Eye, EyeClosed } from '@phosphor-icons/react'; +import { Button, Tooltip } from '@sd/ui'; + +interface Props { + showPassword: boolean; + setShowPassword: (value: boolean) => void; +} + +const ShowPassword = ({ showPassword, setShowPassword }: Props) => { + return ( + + + + ); +}; + +export default ShowPassword; diff --git a/interface/components/index.ts b/interface/components/index.ts index edc3d5dc5..2f6420216 100644 --- a/interface/components/index.ts +++ b/interface/components/index.ts @@ -13,3 +13,4 @@ export * from './TextViewer'; export * from './TrafficLights'; export * from './TruncatedText'; export * from './Accordion'; +export * from './Authentication'; diff --git a/interface/locales/en/common.json b/interface/locales/en/common.json index 0f97989b7..63e57208c 100644 --- a/interface/locales/en/common.json +++ b/interface/locales/en/common.json @@ -467,6 +467,7 @@ "log_out": "Log out", "logged_in_as": "Logged in as {{email}}", "logging_in": "Logging in...", + "login": "Login", "logout": "Logout", "manage_library": "Manage Library", "managed": "Managed", @@ -610,6 +611,7 @@ "regen_labels": "Regen Labels", "regen_thumbnails": "Regen Thumbnails", "regenerate_thumbs": "Regenerate Thumbs", + "register": "Register", "reindex": "Re-index", "reject": "Reject", "reject_files": "Reject files", @@ -692,9 +694,9 @@ "skip_login": "Skip login", "software": "Software", "sort_by": "Sort by", - "spacedrive_account": "Spacedrive Account", + "spacedrive_account": "Account", "spacedrive_cloud": "Spacedrive Cloud", - "spacedrive_cloud_description": "Spacedrive is always local first, but we will offer our own optional cloud services in the future. 
For now, authentication is only used for the Feedback feature, otherwise it is not required.", + "spacedrive_cloud_description": "Spacedrive is always local first, but to access our Cloud features, users must register for an account. Note that an account is not required to use any of the base features of Spacedrive, and you can still connect devices to your library without an account.", "spacedrop": "Spacedrop visibility", "spacedrop_a_file": "Spacedrop a File", "spacedrop_already_progress": "Spacedrop already in progress", diff --git a/packages/ui/tsconfig.json b/packages/ui/tsconfig.json index 8e080d56b..620b6ee89 100644 --- a/packages/ui/tsconfig.json +++ b/packages/ui/tsconfig.json @@ -4,5 +4,5 @@ "rootDir": "src", "declarationDir": "dist" }, - "include": ["src"] + "include": ["src", "../../interface/components/ShowPassword.tsx"] } From e581f79e83c1697db6c5f21d651bb220f8555e0f Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Sun, 22 Sep 2024 11:03:44 -0400 Subject: [PATCH 137/218] Work on getting `request_join` working Currently have debug routes on how the flow of joining a sync group works, and there a few bugs internally still giving us issues. 
--- .../src/screens/settings/info/Debug.tsx | 69 +++++++++++++--- apps/mobile/src/utils/index.ts | 3 +- core/crates/cloud-services/src/error.rs | 2 + core/src/api/cloud/devices.rs | 21 ++++- core/src/api/cloud/sync_groups.rs | 2 + core/src/lib.rs | 4 +- .../settings/client/account/Profile.tsx | 81 ++++++++++++++++++- packages/client/src/core.ts | 1 + .../client/src/hooks/useLibraryContext.tsx | 6 ++ 9 files changed, 171 insertions(+), 18 deletions(-) diff --git a/apps/mobile/src/screens/settings/info/Debug.tsx b/apps/mobile/src/screens/settings/info/Debug.tsx index b7f32516c..db8c9f662 100644 --- a/apps/mobile/src/screens/settings/info/Debug.tsx +++ b/apps/mobile/src/screens/settings/info/Debug.tsx @@ -2,12 +2,13 @@ import { useQueryClient } from '@tanstack/react-query'; import React from 'react'; import { Text, View } from 'react-native'; import { - auth, + SyncGroupWithLibraryAndDevices, toggleFeatureFlag, useBridgeMutation, useBridgeQuery, useDebugState, - useFeatureFlags + useFeatureFlags, + useLibraryMutation } from '@sd/client'; import Card from '~/components/layout/Card'; import { Button } from '~/components/primitive/Button'; @@ -18,11 +19,38 @@ import { getTokens } from '~/utils'; const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => { const debugState = useDebugState(); const featureFlags = useFeatureFlags(); - const origin = useBridgeQuery(['cloud.getApiOrigin']); - const setOrigin = useBridgeMutation(['cloud.setApiOrigin']); - const cloudBootstrap = useBridgeMutation(['cloud.bootstrap']); + const [tokens, setTokens] = React.useState({ accessToken: '', refreshToken: '' }); + const accessToken = tokens.accessToken; + const refreshToken = tokens.refreshToken; + // const origin = useBridgeQuery(['cloud.getApiOrigin']); + // const setOrigin = useBridgeMutation(['cloud.setApiOrigin']); - const queryClient = useQueryClient(); + React.useEffect(() => { + async function _() { + const _a = await getTokens(); + setTokens({ accessToken: 
_a.accessToken, refreshToken: _a.refreshToken }); + } + _(); + }, []); + + const cloudBootstrap = useBridgeMutation(['cloud.bootstrap']); + const requestJoinSyncGroup = useBridgeMutation('cloud.syncGroups.request_join'); + const getGroup = useBridgeQuery([ + 'cloud.syncGroups.get', + { + access_token: accessToken.trim(), + pub_id: '0192123b-5d01-7341-aa9d-4a08571052ee', + with_library: true, + with_devices: true, + with_used_storage: true + } + ]); + // console.log(getGroup.data); + const currentDevice = useBridgeQuery(['cloud.devices.get_current_device', accessToken.trim()]); + // console.log('Current Device: ', currentDevice.data); + const createSyncGroup = useLibraryMutation('cloud.syncGroups.create'); + + // const queryClient = useQueryClient(); return ( @@ -42,7 +70,7 @@ const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => { > Disable Debug Mode - + */} - + */} + + ); diff --git a/apps/mobile/src/utils/index.ts b/apps/mobile/src/utils/index.ts index 97ee7bade..8d4e4077b 100644 --- a/apps/mobile/src/utils/index.ts +++ b/apps/mobile/src/utils/index.ts @@ -9,4 +9,5 @@ export async function getTokens() { }; } -export const AUTH_SERVER_URL = __DEV__ ? 'http://localhost:9420' : 'https://auth.spacedrive.com'; +// export const AUTH_SERVER_URL = __DEV__ ? 
'http://localhost:9420' : 'https://auth.spacedrive.com'; +export const AUTH_SERVER_URL = 'https://auth.spacedrive.com'; diff --git a/core/crates/cloud-services/src/error.rs b/core/crates/cloud-services/src/error.rs index b4fa2b7de..12c93393c 100644 --- a/core/crates/cloud-services/src/error.rs +++ b/core/crates/cloud-services/src/error.rs @@ -13,6 +13,8 @@ pub enum Error { // Setup errors #[error("Couldn't parse Cloud Services API address URL: {0}")] InvalidUrl(reqwest::Error), + #[error("Failed to parse Cloud Services API address URL")] + FailedToParseRelayUrl, #[error("Failed to initialize http client: {0}")] HttpClientInit(reqwest::Error), #[error("Failed to request Cloud Services API address from Auth Server route: {0}")] diff --git a/core/src/api/cloud/devices.rs b/core/src/api/cloud/devices.rs index 47506cd77..00e1549ca 100644 --- a/core/src/api/cloud/devices.rs +++ b/core/src/api/cloud/devices.rs @@ -75,6 +75,23 @@ pub fn mount() -> AlphaRouter { Ok(devices) }) }) + .procedure("get_current_device", { + R.query(|node, access_token: AccessToken| async move { + let id = node.config.get().await.id; + let devices::get::Response(device) = super::handle_comm_error( + try_get_cloud_services_client(&node) + .await? 
+ .devices() + .get(devices::get::Request { + pub_id: id.into(), + access_token, + }) + .await, + "Failed to get current device;", + )??; + Ok(device) + }) + }) .procedure("delete", { R.mutation(|node, req: devices::delete::Request| async move { super::handle_comm_error( @@ -207,9 +224,7 @@ pub async fn hello( } Ok(Response(State::End)) => { // Protocol completed successfully - Ok(SecretKey::new(export_key.as_slice().try_into().expect( - "Key mismatch between OPAQUE and crypto crate; this is a serious bug and should crash;", - ))) + Ok(SecretKey::from(export_key)) } Err(e) => { error!(?e, "Device hello final response error;"); diff --git a/core/src/api/cloud/sync_groups.rs b/core/src/api/cloud/sync_groups.rs index 27cf22002..a47684d85 100644 --- a/core/src/api/cloud/sync_groups.rs +++ b/core/src/api/cloud/sync_groups.rs @@ -259,6 +259,8 @@ pub fn mount() -> AlphaRouter { let group_pub_id = sync_group.pub_id; + debug!("My pub id: {:?}", current_device_pub_id); + debug!("Asking device pub id: {:?}", asking_device.pub_id); if asking_device.pub_id != current_device_pub_id { return Err(rspc::Error::new( rspc::ErrorCode::BadRequest, diff --git a/core/src/lib.rs b/core/src/lib.rs index f4087ee29..b89c62117 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -119,9 +119,9 @@ impl Node { format!("{AUTH_SERVER_URL}/cloud-api-address").to_string() }), std::env::var("SD_CLOUD_P2P_RELAY_URL") - .unwrap_or_else(|_| "http://relay.localhost:9999/".to_string()), + .unwrap_or_else(|_| "https://use1-1.relay.iroh.network".to_string()), std::env::var("SD_CLOUD_P2P_DNS_ORIGIN_NAME") - .unwrap_or_else(|_| "dnf.localhost:9999".to_string()), + .unwrap_or_else(|_| "staging-dns.iroh.link".to_string()), std::env::var("SD_CLOUD_API_DOMAIN_NAME") .unwrap_or_else(|_| "localhost".to_string()), ) diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index 1b0765b7d..9d40b77e4 100644 --- 
a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -1,6 +1,12 @@ import { Envelope } from '@phosphor-icons/react'; import { useEffect } from 'react'; -import { useBridgeMutation, useBridgeQuery } from '@sd/client'; +import { + SyncGroup, + SyncGroupWithLibraryAndDevices, + useBridgeMutation, + useBridgeQuery, + useLibraryMutation +} from '@sd/client'; import { Button, Card } from '@sd/ui'; import StatCard from '~/app/$libraryId/overview/StatCard'; import { TruncatedText } from '~/components'; @@ -22,6 +28,30 @@ const Profile = ({ email }: { email?: string }) => { const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); const cloudDeleteDevice = useBridgeMutation('cloud.devices.delete'); const devices = useBridgeQuery(['cloud.devices.list', { access_token: accessToken.trim() }]); + const addLibraryToCloud = useLibraryMutation('cloud.libraries.create'); + const listLibraries = useBridgeQuery([ + 'cloud.libraries.list', + { access_token: accessToken.trim(), with_device: true } + ]); + const createSyncGroup = useLibraryMutation('cloud.syncGroups.create'); + const listSyncGroups = useBridgeQuery([ + 'cloud.syncGroups.list', + { access_token: accessToken.trim(), with_library: true } + ]); + const requestJoinSyncGroup = useBridgeMutation('cloud.syncGroups.request_join'); + const getGroup = useBridgeQuery([ + 'cloud.syncGroups.get', + { + access_token: accessToken.trim(), + pub_id: '0192123b-5d01-7341-aa9d-4a08571052ee', + with_library: true, + with_devices: true, + with_used_storage: true + } + ]); + console.log(getGroup.data); + const currentDevice = useBridgeQuery(['cloud.devices.get_current_device', accessToken.trim()]); + console.log('Current Device: ', currentDevice.data); // Refetch every 10 seconds useEffect(() => { @@ -30,7 +60,7 @@ const Profile = ({ email }: { email?: string }) => { }, 10000); return () => clearInterval(interval); }, []); - console.log(devices.data); + // 
console.log(devices.data); return (
@@ -70,6 +100,53 @@ const Profile = ({ email }: { email?: string }) => { > Delete Device + + + + + + {/* List all devices from const devices */} {devices.data?.map((device) => ( // { + console.log('cloud.listenCloudServicesNotifications', d); + } + }); + return ( {children} ); From 0150107c2e2f76ff25f5d615364336e2f42fd11d Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Sun, 22 Sep 2024 23:37:43 -0300 Subject: [PATCH 138/218] Trying to fix iroh discovery --- Cargo.lock | Bin 331662 -> 337426 bytes Cargo.toml | 2 +- core/crates/cloud-services/Cargo.toml | 4 +- core/crates/cloud-services/src/client.rs | 8 ++++ core/crates/cloud-services/src/error.rs | 4 ++ core/crates/cloud-services/src/p2p/mod.rs | 37 +++++++++++++++---- core/crates/cloud-services/src/p2p/runner.rs | 34 ++++++++--------- core/src/api/cloud/mod.rs | 1 + core/src/lib.rs | 4 ++ 9 files changed, 64 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f0ce7f9937bfdb26c0ed94dac22f9e417cf4f091..4d4b3d19a46199b420411c555360a14a8e0c007f 100644 GIT binary patch delta 3015 zcmZuzX^fp^70$PG+D@05=`5WtKo`WQoyof|cDWr;DL)iCCRKyjzAL@$%-reR=`u)c zD*^%mTW{_Ak12_k#E@7W;e(T6*%q(o`sRR1z+g3yfgyT+}Q$8Yx#iS}Tl3Brxfbv`DZR zmEqhgj$*VN5uOf|*+6pND+kkk=d;eVE)`4C{o~n+j`7;q?)r4PG?U@EZP~u%jUV6E zpNYo$E29;`0%XF*;;3{q5Go>O1ZAEFtOW@gNfwD@ID{y?mXSIZC?Sjp&q><(!D8U3 znW#_CjD-!GI@fKu_B=Fcrs_AI95@)JW^VZAaCO4fCfY}?HddQ*duvrU|EFqw_jH&V zuTGe;-IKMknOGegYkzofI;!ij@9XTm_ufg9ettFY?wC)U?)pt;E7%5yeGrjr6MR(E zny6#cL_4h_F3x{3mS(XnhyjFjD>1w`q+4W|%Ta)fP z21$gcB05f-jKV3QHReHUhKa@`5`r}4NP45Z4w?}c4AdP1v(jP42x;`ZGmy}ivQ5jc z$DVI;df?gIY~?me7`PQdODvG~RyiY$c0sl-E`>4xv%myn*l`JH6ApUV>SN&P*em7W zf_B(+?0mU4?fFcxa_(%`_^lo5&@dflbG_`LY#}_eVNBC^c9b7TM>iKM8x`7{%p>J! 
zMMXv-bO>H(6gdTCVbeMb&O=&*u=50An1%HbVX2IV>j45yv~bmQ>9^U+^hQ1F>M=(S zjt{xoRA^^o8#bk$k+Q#WH|tEZr?QQS{V*eqe?HvR_%?eceOwm<{nNGm)mj@t)3w84 z3jW83ajVE3bFc9)WR2JOVB^L5XoUx1m4#A>AzXQwvNk>%qO67yw?M+QHzH~!9Jict z5(Lq}mLM2#r*UAxKst9N>rQY^`we@A)89FkwI z^p2Uy4V{O=RJ~f8ShscE1~$yotF{=e(7+ABRx!}Pb2g-3-Bqlf`|hC=ix#x?Ed6!1=uNXvmUmY?jUHp| zC9{em5LuuvF~9_6tw$cf#1I5Dm{I5}mmp6-Ov(^6mnfH>`Exn=(3YdkEl0B}oh_n? zVS<{Q9?yQ=erwULnSCRB;-<#7BY{oF$}}>a4>Yzv*VVjzF7tUiq>dmDR-q`JgLW%q1CUcNN|6eMEG=M~4gRHYWBy+I<0*~gM>9fygZ?-~iV+Lt$1mH?Jt0@M(qmqNy0qA06*ce1T zRt!-P0;6#d2pkGH$pqM7I{8k%GM!scEa_}X0<=U9i}a^vHdMAo+U$BE`%0dkJ)f;e zJKoHD(}}@yQTplmtdcIPVpUpvA-g%9Tazy+4_Bx6ru7%HJG$zJ&D8jiuhv}+*f^7R z{wwQC$GYKvU%Zg5PqVt{7#K2ceBP(pvNAM}?R4@&_Q`Z{dAU68`gh)!PCQa{H{ZID zRXaAmpPRjxYj{{TcD_1Vk(yCZec>DjNd!1K0{#*Z76gOuN27uz1{*>_zC%#h1*#-s zpyyTvmo6>O2b-fm$tM4>oDQQlr}ou{>NEAUc3-x-`PSL|)GrB`iqKeaqBs8UkuvBl zEFOSPtOAV|O7P0C;F)rbk^KYBmk$*WwB@~xy6s1bJ>BnO@XS|>;icE#(&Xb=fBM1e zd3W>gr;4B9h4cHzR2m&EY$byGI>n{42B1MCh$@22iW@1=6QC(ANzK9b$m zT=S1&=O;R+4%Vl~>Md9_CckDY0Xz;a2+9mjEh0b|1xhXQPy+{6codL@{mp8m!2%T% zVB&}|2BuG`Zf^?nE`lG=deh(P#X$Pcr}M=<;Ci-te|2KIUE5?BZ(O?4pU$4oc)EB3 zK>l)nxjKF77uk|Dc_F(k^&Bl%rtLohrQ5X&*23A%Wk+-Tqh;Qa++Xs0wh53Iuo4cU zO|A2YArEw<#)bg<2e;T`W~j6b1{1kam_ic+c1|PE*OWa0(f91id(u;^SdnfT%DPfz zZPt@!U&t1x-YQi7&69b5dU#8@G3~y+>}j4JDdWQQ+?{1t`rS}Ix{WB1S*ieHP=|}Q zwXI3cgm)TtPS9H=U`dM_IR>t*v{qm?4y-U(BW$|#I>^bDO_1#Allh`_&n?-abcPj+ z)7g{2^*LUwYV6}no6qkkb!FkTy)nJMl4K8rZt0ibjMiELea!6C5mH ehD%U+=R#{K7Xm>k;08z$SP%!BZ$Da|dGxO&oLR7m@80bWcq##2Lq99xpfzc*{TIjA#11{SAR^Jc4Z~lF8cB!~hLG|(AYD@ij zY3xYQbpn3hLhbd+=`mEhT^WJ1MKn}f@7=Atb5oW2%AE&I)4{9{X3?*_-(5tX^DtG7 zgl~F`hCAUEM?)3Ku{O-bGINGF9qWuFlBaQISZoukl&~p@1r{{XoTge6B}6Q_Gg!xr z)#lEWYkvO$-}VNXU=PV+5o1O$c1(&yX(})wnIXnWL4{(5Yp1NC#wjHwi)kXIR>Ug8 zxgxUm`e+$W{S5Qa{T6-SLt<$%nqX^iMq(~ilDd>~lBJ2Z)-p^Oai(#Z$wX4cX@lpq z=9XqUVba0&^{5>Nt3d%a*HE|ryM`tL=-vs7u+<+?c(IOp;Qn}63T{}y1#}3+2ExDx zq6C&VP!7Ispz|>CC@jFxNYw7LO*EYIcR!)=AF!GWo1m{BwLr5E^6=~rYVw^vSonYw 
z=BSoQX6@nsQ?iES)G4a7lvzs@PHm!6WtE9xu4`WzHWq?r7=^IamuA8$f{$yc!_VIf zFC6sSi(yXzT9;ADAAb^l!T#M=c;%$8=Aw&zzCI9zIT(Ez4E8o`aovbT(i9sil}VYh zu~Z!MBx6Kbszoe>PPsBXcA^oJGNuVuSjY?}MOcQ(LeL7=Mx$f!bUN&Wfu5)XzFm&m Ve0V9!_WRXpwA1TvK8%)T{sG`<+9?14 diff --git a/Cargo.toml b/Cargo.toml index f3174c6b2..f1ad9c7b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ rust-version = "1.80" [workspace.dependencies] # First party dependencies -sd-cloud-schema = { git = "https://github.com/spacedriveapp/cloud-services-schema", rev = "25e4b92fdd" } +sd-cloud-schema = { git = "https://github.com/spacedriveapp/cloud-services-schema", rev = "6996916529" } # Third party dependencies used by one or more of our crates async-channel = "2.3" diff --git a/core/crates/cloud-services/Cargo.toml b/core/crates/cloud-services/Cargo.toml index 70a673177..ae1d41aca 100644 --- a/core/crates/cloud-services/Cargo.toml +++ b/core/crates/cloud-services/Cargo.toml @@ -37,8 +37,8 @@ zeroize = { workspace = true } # External dependencies anyhow = "1.0.86" -iroh-base = { version = "0.24", features = ["key"] } -iroh-net = "0.24" +iroh-base = { version = "0.25", features = ["key"] } +iroh-net = { version = "0.25", features = ["discovery-local-network", "iroh-relay"] } paste = "=1.0.15" postcard = { version = "1.0.8", features = ["use-std"] } quic-rpc = { version = "0.12.0", features = ["quinn-transport"] } diff --git a/core/crates/cloud-services/src/client.rs b/core/crates/cloud-services/src/client.rs index d5c0b14f6..b067aa0ee 100644 --- a/core/crates/cloud-services/src/client.rs +++ b/core/crates/cloud-services/src/client.rs @@ -41,6 +41,7 @@ pub struct CloudServices { domain_name: String, pub cloud_p2p_dns_origin_name: String, pub cloud_p2p_relay_url: RelayUrl, + pub cloud_p2p_dns_pkarr_url: Url, pub token_refresher: TokenRefresher, key_manager: Arc>>>, cloud_p2p: Arc>>>, @@ -58,6 +59,7 @@ impl CloudServices { pub async fn new( get_cloud_api_address: impl IntoUrl + Send, cloud_p2p_relay_url: impl IntoUrl + 
Send, + cloud_p2p_dns_pkarr_url: impl IntoUrl + Send, cloud_p2p_dns_origin_name: String, domain_name: String, ) -> Result { @@ -73,6 +75,10 @@ impl CloudServices { .map_err(Error::InvalidUrl)? .into(); + let cloud_p2p_dns_pkarr_url = cloud_p2p_dns_pkarr_url + .into_url() + .map_err(Error::InvalidUrl)?; + let http_client = ClientBuilder::new(http_client_builder.build().map_err(Error::HttpClientInit)?) .with(RetryTransientMiddleware::new_with_policy( @@ -114,6 +120,7 @@ impl CloudServices { http_client, cloud_p2p_dns_origin_name, cloud_p2p_relay_url, + cloud_p2p_dns_pkarr_url, domain_name, key_manager: Arc::default(), cloud_p2p: Arc::default(), @@ -321,6 +328,7 @@ mod tests { let response = CloudServices::new( "http://localhost:9420/cloud-api-address", "http://relay.localhost:9999/", + "http://pkarr.localhost:9999/", "dns.localhost:9999".to_string(), "localhost".to_string(), ) diff --git a/core/crates/cloud-services/src/error.rs b/core/crates/cloud-services/src/error.rs index 12c93393c..15dd4925e 100644 --- a/core/crates/cloud-services/src/error.rs +++ b/core/crates/cloud-services/src/error.rs @@ -69,6 +69,10 @@ pub enum Error { CloudP2PRpcCommunication(#[from] rpc::Error>), #[error("Cloud P2P not initialized")] CloudP2PNotInitialized, + #[error("Failed to initialize LocalSwarmDiscovery: {0}")] + LocalSwarmDiscoveryInit(anyhow::Error), + #[error("Failed to initialize DhtDiscovery: {0}")] + DhtDiscoveryInit(anyhow::Error), // Communication errors #[error("Failed to communicate with RPC backend: {0}")] diff --git a/core/crates/cloud-services/src/p2p/mod.rs b/core/crates/cloud-services/src/p2p/mod.rs index cf266b174..701bb4969 100644 --- a/core/crates/cloud-services/src/p2p/mod.rs +++ b/core/crates/cloud-services/src/p2p/mod.rs @@ -10,10 +10,14 @@ use sd_crypto::{CryptoRng, SeedableRng}; use iroh_base::key::SecretKey as IrohSecretKey; use iroh_net::{ - discovery::dns::DnsDiscovery, + discovery::{ + dns::DnsDiscovery, local_swarm_discovery::LocalSwarmDiscovery, 
pkarr::dht::DhtDiscovery, + ConcurrentDiscovery, + }, relay::{RelayMap, RelayMode, RelayUrl}, Endpoint, NodeId, }; +use reqwest::Url; use serde::{Deserialize, Serialize}; use tokio::{spawn, sync::oneshot}; use tracing::error; @@ -71,15 +75,19 @@ pub enum JoinSyncGroupResponse { CriticalError, } +#[derive(Debug, Clone, Serialize, Deserialize, specta::Type)] +pub struct BasicLibraryCreationArgs { + pub id: libraries::PubId, + pub name: String, + pub description: Option, +} + #[derive(Debug, Deserialize, specta::Type)] #[serde(tag = "kind", content = "data")] pub enum UserResponse { AcceptDeviceInSyncGroup { ticket: Ticket, - accepted: bool, - library_pub_id: libraries::PubId, - library_name: String, - library_description: Option, + accepted: Option, }, } #[derive(Debug, Clone)] @@ -94,15 +102,28 @@ impl CloudP2P { mut rng: CryptoRng, iroh_secret_key: IrohSecretKey, dns_origin_domain: String, + dns_pkarr_url: Url, relay_url: RelayUrl, ) -> Result { let endpoint = Endpoint::builder() .alpns(vec![CloudP2PALPN::LATEST.to_vec()]) + .discovery(Box::new(ConcurrentDiscovery::from_services(vec![ + Box::new(DnsDiscovery::new(dns_origin_domain)), + Box::new( + LocalSwarmDiscovery::new(iroh_secret_key.public()) + .map_err(Error::LocalSwarmDiscoveryInit)?, + ), + Box::new( + DhtDiscovery::builder() + .secret_key(iroh_secret_key.clone()) + .pkarr_relay(dns_pkarr_url) + .build() + .map_err(Error::DhtDiscoveryInit)?, + ), + ]))) .secret_key(iroh_secret_key) .relay_mode(RelayMode::Custom(RelayMap::from_url(relay_url))) - .discovery(Box::new(DnsDiscovery::new(dns_origin_domain))) - // Using 0 as port will bind to a random available port chosen by the OS. 
- .bind(0) + .bind() .await .map_err(Error::CreateCloudP2PEndpoint)?; diff --git a/core/crates/cloud-services/src/p2p/runner.rs b/core/crates/cloud-services/src/p2p/runner.rs index 18fb2426b..a4478f1aa 100644 --- a/core/crates/cloud-services/src/p2p/runner.rs +++ b/core/crates/cloud-services/src/p2p/runner.rs @@ -7,7 +7,6 @@ use sd_cloud_schema::{ self, authorize_new_device_in_sync_group, Client, CloudP2PALPN, CloudP2PError, Service, }, devices::{self, Device}, - libraries, sync::groups, }; use sd_crypto::{CryptoRng, SeedableRng}; @@ -40,7 +39,10 @@ use tokio::{ use tokio_stream::wrappers::IntervalStream; use tracing::{debug, error, warn}; -use super::{JoinSyncGroupResponse, JoinedLibraryCreateArgs, NotifyUser, Ticket, UserResponse}; +use super::{ + BasicLibraryCreationArgs, JoinSyncGroupResponse, JoinedLibraryCreateArgs, NotifyUser, Ticket, + UserResponse, +}; const TEN_SECONDS: Duration = Duration::from_secs(10); const FIVE_MINUTES: Duration = Duration::from_secs(60 * 5); @@ -185,18 +187,8 @@ impl Runner { StreamMessage::UserResponse(UserResponse::AcceptDeviceInSyncGroup { ticket, accepted, - library_pub_id, - library_name, - library_description, }) => { - self.handle_join_response( - ticket, - accepted, - library_pub_id, - library_name, - library_description, - ) - .await; + self.handle_join_response(ticket, accepted).await; } StreamMessage::Tick => self.tick().await, @@ -357,10 +349,7 @@ impl Runner { async fn handle_join_response( &self, ticket: Ticket, - accepted: bool, - library_pub_id: libraries::PubId, - library_name: String, - library_description: Option, + accepted: Option, ) { let Some(PendingSyncGroupJoin { channel, @@ -380,7 +369,14 @@ impl Runner { let sync_group = request.sync_group.clone(); let asking_device_pub_id = request.asking_device.pub_id; - let response = if accepted { + let was_accepted = accepted.is_some(); + + let response = if let Some(BasicLibraryCreationArgs { + id: library_pub_id, + name: library_name, + description: 
library_description, + }) = accepted + { Ok(authorize_new_device_in_sync_group::Response { authorizor_device: this_device, keys: self @@ -409,7 +405,7 @@ impl Runner { return; } - if accepted { + if was_accepted { let Ok(access_token) = self .token_refresher .get_access_token() diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index 873bfc2eb..c3c3655ee 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -173,6 +173,7 @@ pub(crate) fn mount() -> AlphaRouter { rng, iroh_secret_key, node.cloud_services.cloud_p2p_dns_origin_name.clone(), + node.cloud_services.cloud_p2p_dns_pkarr_url.clone(), node.cloud_services.cloud_p2p_relay_url.clone(), ) .await?, diff --git a/core/src/lib.rs b/core/src/lib.rs index b89c62117..47de0a4a8 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -110,6 +110,7 @@ impl Node { get_cloud_api_address, cloud_p2p_relay_url, cloud_p2p_dns_origin_name, + cloud_p2p_dns_pkarr_url, cloud_services_domain_name, ) = { #[cfg(debug_assertions)] @@ -122,6 +123,8 @@ impl Node { .unwrap_or_else(|_| "https://use1-1.relay.iroh.network".to_string()), std::env::var("SD_CLOUD_P2P_DNS_ORIGIN_NAME") .unwrap_or_else(|_| "staging-dns.iroh.link".to_string()), + std::env::var("SD_CLOUD_P2P_DNS_PKARR_URL") + .unwrap_or_else(|_| "https://staging-dns.iroh.link/pkarr".to_string()), std::env::var("SD_CLOUD_API_DOMAIN_NAME") .unwrap_or_else(|_| "localhost".to_string()), ) @@ -157,6 +160,7 @@ impl Node { CloudServices::new( &get_cloud_api_address, cloud_p2p_relay_url, + cloud_p2p_dns_pkarr_url, cloud_p2p_dns_origin_name, cloud_services_domain_name, ) From 9d52280c08053badd64c4ab3434be36528a04014 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Mon, 23 Sep 2024 00:02:00 -0400 Subject: [PATCH 139/218] Working on p2p stuff --- apps/mobile/src/screens/settings/info/Debug.tsx | 4 ++-- core/src/lib.rs | 7 ++++--- interface/index.tsx | 14 ++++++++++++++ packages/client/src/core.ts | 4 +++- 
packages/client/src/hooks/useLibraryContext.tsx | 6 ------ 5 files changed, 23 insertions(+), 12 deletions(-) diff --git a/apps/mobile/src/screens/settings/info/Debug.tsx b/apps/mobile/src/screens/settings/info/Debug.tsx index db8c9f662..5dd423b9f 100644 --- a/apps/mobile/src/screens/settings/info/Debug.tsx +++ b/apps/mobile/src/screens/settings/info/Debug.tsx @@ -61,7 +61,7 @@ const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => { {JSON.stringify(featureFlags)} {JSON.stringify(debugState)} - + */} {/*
))}
-
+
-

+

Spacedrive

{activeTab === 'Login' ? : } {/* Optionally, uncomment the social login block when ready */} - {/*
+ {/*

OR

@@ -137,7 +132,7 @@ export const Authentication = ({ reload }: { reload: Dispatch await socialLoginHandlers(social.name)} - className="rounded-full border border-app-line bg-app-input p-3" + className="p-3 border rounded-full border-app-line bg-app-input" > > }) => { const { t } = useLocale(); - const isDark = useIsDark(); const [showPassword, setShowPassword] = useState(false); - const navigate = useNavigate(); // useNavigate hook const form = useZodForm({ schema: LoginSchema, defaultValues: { @@ -75,91 +72,77 @@ const Login = ({ reload }: { reload: Dispatch> }) => { onSubmit={form.handleSubmit(async (data) => { await signInClicked(data.email, data.password, reload); })} + className="w-full" form={form} > -
-
-
- - ( - - )} - /> - {form.formState.errors.email && ( -

- {form.formState.errors.email.message} -

+
+
+ + ( + )} -
- -
- - ( -
- { - const pastedText = e.clipboardData.getData('text'); - field.onChange(pastedText); - }} - /> - -
- )} - /> - {form.formState.errors.password && ( -

- {form.formState.errors.password.message} -

- )} -
+ /> + {form.formState.errors.email && ( +

+ {form.formState.errors.email.message} +

+ )}
- {form.formState.errors.password && ( -

{form.formState.errors.password.message}

- )} - +
+ ); }; diff --git a/interface/components/Register.tsx b/interface/components/Register.tsx index 89b4e19ab..2ca342cab 100644 --- a/interface/components/Register.tsx +++ b/interface/components/Register.tsx @@ -4,7 +4,7 @@ import { useState } from 'react'; import { Controller, useForm } from 'react-hook-form'; import { signUp } from 'supertokens-web-js/recipe/emailpassword'; import { Button, Form, Input, toast, z } from '@sd/ui'; -import { useIsDark, useLocale } from '~/hooks'; +import { useLocale } from '~/hooks'; import ShowPassword from './ShowPassword'; @@ -72,7 +72,6 @@ async function signUpClicked(email: string, password: string) { const Register = () => { const { t } = useLocale(); - const isDark = useIsDark(); const [showPassword, setShowPassword] = useState(false); // useZodForm seems to be out-dated or needs //fixing as it does not support the schema using zod.refine @@ -91,119 +90,103 @@ const Register = () => { console.log(data); await signUpClicked(data.email, data.password); })} + className="w-full" form={form} > -
-
-
- - ( - - )} - /> - {form.formState.errors.email && ( -

- {form.formState.errors.email.message} -

+
+
+ + ( + )} -
- -
- - ( -
- { - const pastedText = e.clipboardData.getData('text'); - field.onChange(pastedText); - }} - /> - -
- )} - /> - {form.formState.errors.password && ( -

- {form.formState.errors.password.message} -

- )} -
- -
- ( -
- - -
- )} - /> - {form.formState.errors.confirmPassword && ( -

- {form.formState.errors.confirmPassword.message} -

- )} -
+ /> + {form.formState.errors.email && ( +

+ {form.formState.errors.email.message} +

+ )}
- + ( +
+ + +
+ )} + /> + {form.formState.errors.confirmPassword && ( +

+ {form.formState.errors.confirmPassword.message} +

+ )} +
+ ); }; From bc2ddc620fcc001e562632695e8eb3a3dc2e9064 Mon Sep 17 00:00:00 2001 From: ameer2468 <33054370+ameer2468@users.noreply.github.com> Date: Mon, 23 Sep 2024 22:16:55 +0300 Subject: [PATCH 141/218] improve auth ui --- .../client/AccountSettings/AccountLogin.tsx | 24 ++++------- .../client/AccountSettings/AccountProfile.tsx | 1 + .../settings/client/AccountSettings/Login.tsx | 22 ++++++---- .../client/AccountSettings/Register.tsx | 42 +++++++++++-------- .../client/AccountSettings/ShowPassword.tsx | 28 ++++++------- 5 files changed, 62 insertions(+), 55 deletions(-) diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/AccountLogin.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/AccountLogin.tsx index 19bfe2374..ec36ee3c9 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/AccountLogin.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/AccountLogin.tsx @@ -1,16 +1,12 @@ -import { useNavigation } from '@react-navigation/native'; import { MotiView } from 'moti'; import { AppleLogo, GithubLogo, GoogleLogo, IconProps } from 'phosphor-react-native'; -import { useEffect, useState } from 'react'; +import { useState } from 'react'; import { Text, View } from 'react-native'; import { LinearTransition } from 'react-native-reanimated'; -import { getAuthorisationURLWithQueryParamsAndSetState } from 'supertokens-web-js/recipe/thirdparty'; import Card from '~/components/layout/Card'; import ScreenContainer from '~/components/layout/ScreenContainer'; import { Button } from '~/components/primitive/Button'; -import { toast } from '~/components/primitive/Toast'; import { tw, twStyle } from '~/lib/tailwind'; -import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; import Login from './Login'; import Register from './Register'; @@ -106,7 +102,7 @@ const AccountLogin = () => { - + {AccountTabs.map((text) => ( ))} - + {activeTab === 'Login' ? 
: } {/* Disabled for now */} - {/* + {/* OR @@ -154,7 +148,7 @@ const AccountLogin = () => { variant="outline" onPress={async () => await socialLoginHandlers(social.name)} key={social.name} - style={tw`rounded-full border border-app-line bg-app-input p-3`} + style={tw`p-3 border rounded-full border-app-line bg-app-input`} > diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx index 9f670227b..3bf33edab 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx @@ -30,6 +30,7 @@ const AccountProfile = () => { const emailName = userInfo ? userInfo.email.split('@')[0] : ''; const capitalizedEmailName = (emailName?.charAt(0).toUpperCase() ?? '') + emailName?.slice(1); const navigator = useNavigation['navigation']>(); + function signOut() { fetch(`${AUTH_SERVER_URL}/api/auth/signout`, { method: 'POST' diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx index 91f9d931f..3f9829cd9 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx @@ -8,7 +8,7 @@ import { useZodForm } from '@sd/client'; import { Button } from '~/components/primitive/Button'; import { Input } from '~/components/primitive/Input'; import { toast } from '~/components/primitive/Toast'; -import { tw } from '~/lib/tailwind'; +import { tw, twStyle } from '~/lib/tailwind'; import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; import { AUTH_SERVER_URL } from '~/utils'; @@ -109,15 +109,18 @@ const Login = () => { control={form.control} name="email" render={({ field }) => ( - + {form.formState.errors.email && ( - + {form.formState.errors.email.message} )} @@ -128,16 +131,19 @@ const 
Login = () => { control={form.control} name="password" render={({ field }) => ( - + {form.formState.errors.password && ( - + {form.formState.errors.password.message} )} @@ -156,7 +162,7 @@ const Login = () => { })} disabled={form.formState.isSubmitting} > - Submit + Submit diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx index e7a86a3b7..baf77d784 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Register.tsx @@ -3,13 +3,11 @@ import { useNavigation } from '@react-navigation/native'; import { useState } from 'react'; import { Controller, useForm } from 'react-hook-form'; import { Text, View } from 'react-native'; -import { signUp } from 'supertokens-web-js/recipe/emailpassword'; import { z } from 'zod'; -import { telemetryState } from '@sd/client'; import { Button } from '~/components/primitive/Button'; import { Input } from '~/components/primitive/Input'; import { toast } from '~/components/primitive/Toast'; -import { tw } from '~/lib/tailwind'; +import { tw, twStyle } from '~/lib/tailwind'; import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; import { AUTH_SERVER_URL } from '~/utils'; @@ -108,7 +106,12 @@ const Register = () => { control={form.control} name="email" render={({ field }) => ( - + )} /> {form.formState.errors.email && ( @@ -122,7 +125,10 @@ const Register = () => { @@ -138,14 +144,22 @@ const Register = () => { control={form.control} name="confirmPassword" render={({ field }) => ( - + + {form.formState.errors.confirmPassword && ( + + {form.formState.errors.confirmPassword.message} + + )} { )} /> - {form.formState.errors.confirmPassword && ( - - {form.formState.errors.confirmPassword.message} - - )} ); diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx 
b/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx index 5da608d46..ea33539dd 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/ShowPassword.tsx @@ -1,5 +1,5 @@ import { Eye, EyeClosed } from 'phosphor-react-native'; -import { Text, View } from 'react-native'; +import { Text } from 'react-native'; import { Button } from '~/components/primitive/Button'; import { tw } from '~/lib/tailwind'; @@ -11,20 +11,18 @@ interface Props { const ShowPassword = ({ showPassword, setShowPassword, plural }: Props) => { return ( - - - + ); }; From c08c3d4bb7a6f8ab2374cf817cf98c1ab16694e8 Mon Sep 17 00:00:00 2001 From: myung03 Date: Mon, 23 Sep 2024 16:38:04 -0700 Subject: [PATCH 142/218] styled profile page --- .../settings/client/account/Profile.tsx | 84 ++++++++++++++----- .../settings/client/account/index.tsx | 30 ++----- 2 files changed, 72 insertions(+), 42 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index 9d40b77e4..2e82c8d82 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -1,5 +1,7 @@ -import { Envelope } from '@phosphor-icons/react'; -import { useEffect } from 'react'; +import { Envelope, UserCircle } from '@phosphor-icons/react'; +import { t } from 'i18next'; +import { Dispatch, SetStateAction, useEffect } from 'react'; +import { signOut } from 'supertokens-web-js/recipe/session'; import { SyncGroup, SyncGroupWithLibraryAndDevices, @@ -7,13 +9,28 @@ import { useBridgeQuery, useLibraryMutation } from '@sd/client'; -import { Button, Card } from '@sd/ui'; +import { Button, Card, tw } from '@sd/ui'; import StatCard from '~/app/$libraryId/overview/StatCard'; import { TruncatedText } from '~/components'; import { hardwareModelToIcon } from '~/util/hardware'; 
-const Profile = ({ email }: { email?: string }) => { - const emailName = email?.split('@')[0]; +type User = { + email: string; + id: string; + timejoined: number; + roles: string[]; +}; + +const Pill = tw.div`px-1.5 py-[1px] rounded text-tiny font-medium text-ink-dull bg-app-box border border-app-line`; + +const Profile = ({ + user, + setReload +}: { + user: User; + setReload: Dispatch>; +}) => { + const emailName = user.email?.split('@')[0]; const capitalizedEmailName = (emailName?.charAt(0).toUpperCase() ?? '') + emailName?.slice(1); const refreshToken: string = JSON.parse(window.localStorage.getItem('frontendCookies') ?? '[]') @@ -64,23 +81,50 @@ const Profile = ({ email }: { email?: string }) => { return (
- - {/* */} -
-

- Welcome {capitalizedEmailName}, -

-
- -
- -
- {email} -
+ {/* Top Section with Welcome and Logout */} +
+ +

Profile Information

+
+ + {user.email}
+
+

Joined on:

+

+ {new Date(user.timejoined * 1000).toLocaleDateString()} +

+

User ID:

+

{user.id}

+

Roles:

+
+ {user.roles.map((role) => ( + {role.toLocaleUpperCase()} + ))} +
+
+
+ +
+ {/* User Circle Icon */} + + + {/* Logout Button */} +
- -

DEBUG

+
+ + {/* MT is added to hide */} +

DEBUG

-
- )} - - } title={t('spacedrive_account')} description={t('spacedrive_cloud_description')} /> -
-
+
+
{userInfo === null ? ( <> ) : ( <> -

{t('profile')}

- + )}
From 845c02676242ac63be4274aa3367b2a55d7ad908 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Mon, 23 Sep 2024 22:05:48 -0400 Subject: [PATCH 143/218] Fix joined in date & remove roles --- .../app/$libraryId/settings/client/account/Profile.tsx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index 2e82c8d82..aa5d3ddc5 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -92,16 +92,17 @@ const Profile = ({

Joined on:

- {new Date(user.timejoined * 1000).toLocaleDateString()} + {new Date(user.timejoined).toLocaleDateString()}

User ID:

{user.id}

-

Roles:

+ {/* Account Stats (for future services) */} + {/*

Roles:

// We don't use roles, at least currently.
{user.roles.map((role) => ( {role.toLocaleUpperCase()} ))} -
+
*/}
From 564e9acfb4a97db3832396b841d470816b150ac2 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Mon, 23 Sep 2024 22:56:59 -0400 Subject: [PATCH 144/218] Fix cors issue & start auto bootstrapping cloud --- .../src-tauri/capabilities/default.json | 7 ++++- apps/desktop/src-tauri/tauri.conf.json | 28 ++++++++++++++----- apps/desktop/src/App.tsx | 14 ++++++++-- core/src/api/cloud/mod.rs | 2 ++ core/src/custom_uri/utils.rs | 13 ++------- .../settings/client/account/index.tsx | 7 +++-- interface/components/Authentication.tsx | 17 +++++++++-- interface/components/Login.tsx | 21 +++++++++++--- interface/util/index.tsx | 25 +++++++++++++++++ 9 files changed, 105 insertions(+), 29 deletions(-) diff --git a/apps/desktop/src-tauri/capabilities/default.json b/apps/desktop/src-tauri/capabilities/default.json index 8580f0957..62a572b16 100644 --- a/apps/desktop/src-tauri/capabilities/default.json +++ b/apps/desktop/src-tauri/capabilities/default.json @@ -2,7 +2,9 @@ "$schema": "../gen/schemas/desktop-schema.json", "identifier": "default", "description": "Capability for the main window", - "windows": ["main"], + "windows": [ + "main" + ], "permissions": [ "core:app:default", "core:event:default", @@ -35,6 +37,9 @@ { "url": "http://asset.localhost" }, + { + "url": "http://localhost:8001" + }, { "url": "http://tauri.localhost" }, diff --git a/apps/desktop/src-tauri/tauri.conf.json b/apps/desktop/src-tauri/tauri.conf.json index cff417501..f89f76850 100644 --- a/apps/desktop/src-tauri/tauri.conf.json +++ b/apps/desktop/src-tauri/tauri.conf.json @@ -29,7 +29,9 @@ "transparent": true, "center": true, "windowEffects": { - "effects": ["sidebar"], + "effects": [ + "sidebar" + ], "state": "followsWindowActiveState", "radius": 9 } @@ -37,16 +39,20 @@ ], "security": { "csp": { - "default-src": "'self' webkit-pdfjs-viewer: asset: https://asset.localhost blob: data: filesystem: http: https: tauri:", + "default-src": "'self' 
webkit-pdfjs-viewer: asset: http://asset.localhost blob: data: filesystem: http: https: tauri:", "connect-src": "'self' ipc: http://ipc.localhost ws: wss: http: https: tauri:", - "img-src": "'self' asset: https://asset.localhost blob: data: filesystem: http: https: tauri:", + "img-src": "'self' asset: http://asset.localhost blob: data: filesystem: http: https: tauri:", "style-src": "'self' 'unsafe-inline' http: https: tauri:" } } }, "bundle": { "active": true, - "targets": ["deb", "msi", "dmg"], + "targets": [ + "deb", + "msi", + "dmg" + ], "publisher": "Spacedrive Technology Inc.", "copyright": "Spacedrive Technology Inc.", "category": "Productivity", @@ -65,14 +71,20 @@ "files": { "/usr/share/spacedrive/models/yolov8s.onnx": "../../.deps/models/yolov8s.onnx" }, - "depends": ["libc6", "libxdo3", "dbus"] + "depends": [ + "libc6", + "libxdo3", + "dbus" + ] } }, "macOS": { "minimumSystemVersion": "10.15", "exceptionDomain": null, "entitlements": null, - "frameworks": ["../../.deps/Spacedrive.framework"], + "frameworks": [ + "../../.deps/Spacedrive.framework" + ], "dmg": { "background": "dmg-background.png", "appPosition": { @@ -109,7 +121,9 @@ "deep-link": { "mobile": [], "desktop": { - "schemes": ["spacedrive"] + "schemes": [ + "spacedrive" + ] } } } diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx index 632e3ba5b..184587be7 100644 --- a/apps/desktop/src/App.tsx +++ b/apps/desktop/src/App.tsx @@ -3,7 +3,7 @@ import { QueryClientProvider } from '@tanstack/react-query'; import { listen } from '@tauri-apps/api/event'; import { PropsWithChildren, startTransition, useEffect, useMemo, useRef, useState } from 'react'; import { createPortal } from 'react-dom'; -import { RspcProvider } from '@sd/client'; +import { RspcProvider, useBridgeMutation } from '@sd/client'; import { createRoutes, DeeplinkEvent, @@ -35,7 +35,7 @@ import ThirdParty from 'supertokens-web-js/recipe/thirdparty'; import getCookieHandler from 
'@sd/interface/app/$libraryId/settings/client/account/handlers/cookieHandler'; import getWindowHandler from '@sd/interface/app/$libraryId/settings/client/account/handlers/windowHandler'; import { useLocale } from '@sd/interface/hooks'; -import { AUTH_SERVER_URL } from '@sd/interface/util'; +import { AUTH_SERVER_URL, getTokens } from '@sd/interface/util'; import { commands } from './commands'; import { platform } from './platform'; @@ -117,6 +117,16 @@ type RedirectPath = { pathname: string; search: string | undefined }; function AppInner() { const [tabs, setTabs] = useState(() => [createTab()]); const [selectedTabIndex, setSelectedTabIndex] = useState(0); + const tokens = getTokens(); + const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); + + useEffect(() => { + // If the access token and/or refresh token are missing, we need to skip the cloud bootstrap + if (tokens.accessToken.length === 0 || tokens.refreshToken.length === 0) return; + console.log('Bootstrapping cloud'); + cloudBootstrap.mutate([tokens.accessToken, tokens.refreshToken]); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []); const selectedTab = tabs[selectedTabIndex]!; diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index c3c3655ee..93d36d208 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -113,6 +113,8 @@ pub(crate) fn mount() -> AlphaRouter { ) .await?; + debug!("Device hello successful"); + KeyManager::load(master_key, data_directory).await? 
} Err(Error::Client(ClientSideError::NotFound(_))) => { diff --git a/core/src/custom_uri/utils.rs b/core/src/custom_uri/utils.rs index 7a86a943d..52ac9084a 100644 --- a/core/src/custom_uri/utils.rs +++ b/core/src/custom_uri/utils.rs @@ -51,10 +51,7 @@ pub(crate) async fn cors_middleware(req: Request, next: Next) -> Respon if req.method() == Method::OPTIONS { return Response::builder() .header("Access-Control-Allow-Methods", "GET, HEAD, POST, OPTIONS") - .header( - "Access-Control-Allow-Origin", - format!("{AUTH_SERVER_URL}, http://ipc.localhost, http://tauri.localhost"), - ) + .header("Access-Control-Allow-Origin", "*") .header("Access-Control-Allow-Headers", "*") .header("Access-Control-Max-Age", "86400") .status(StatusCode::OK) @@ -69,13 +66,7 @@ pub(crate) async fn cors_middleware(req: Request, next: Next) -> Respon { let headers = response.headers_mut(); - headers.insert( - "Access-Control-Allow-Origin", - HeaderValue::from_str( - format!("{AUTH_SERVER_URL}, http://ipc.localhost, http://tauri.localhost").as_str(), - ) - .expect("Invalid static response!"), - ); + headers.insert("Access-Control-Allow-Origin", HeaderValue::from_static("*")); headers.insert( "Access-Control-Allow-Headers", diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index cfa7e09e2..2f002a710 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -5,7 +5,7 @@ import { auth, useBridgeMutation, useBridgeQuery, useFeatureFlag } from '@sd/cli import { Button, Input, toast } from '@sd/ui'; import { Authentication } from '~/components'; import { useLocale } from '~/hooks'; -import { AUTH_SERVER_URL } from '~/util'; +import { AUTH_SERVER_URL, getTokens } from '~/util'; import { Heading } from '../../Layout'; import Profile from './Profile'; @@ -42,6 +42,9 @@ export const Component = () => { setReload(false); // 
eslint-disable-next-line react-hooks/exhaustive-deps }, [reload]); + const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); + const tokens = getTokens(); + return ( <> { > {userInfo === null ? ( <> - + ) : ( <> diff --git a/interface/components/Authentication.tsx b/interface/components/Authentication.tsx index c4d31d8a6..c1697c987 100644 --- a/interface/components/Authentication.tsx +++ b/interface/components/Authentication.tsx @@ -1,5 +1,7 @@ +import { AlphaRSPCError } from '@oscartbeaumont-sd/rspc-client/v2'; import { GoogleLogo, Icon } from '@phosphor-icons/react'; import { Apple, Github } from '@sd/assets/svgs/brands'; +import { UseMutationResult } from '@tanstack/react-query'; import { open } from '@tauri-apps/plugin-shell'; import clsx from 'clsx'; import { motion } from 'framer-motion'; @@ -25,10 +27,17 @@ export const SocialLogins: SocialLogin[] = [ { name: 'Apple', icon: Apple } ]; -export const Authentication = ({ reload }: { reload: Dispatch> }) => { +export const Authentication = ({ + reload, + cloudBootstrap +}: { + reload: Dispatch>; + cloudBootstrap: UseMutationResult; // Cloud bootstrap mutation +}) => { const [activeTab, setActiveTab] = useState<'Login' | 'Register'>('Login'); const isDark = useIsDark(); + // Currently not in use due to backend issues - @Rocky43007 const socialLoginHandlers = (name: SocialLogin['name']) => { return { Github: async () => { @@ -119,7 +128,11 @@ export const Authentication = ({ reload }: { reload: Dispatch
- {activeTab === 'Login' ? : } + {activeTab === 'Login' ? ( + + ) : ( + + )} {/* Optionally, uncomment the social login block when ready */} {/*
diff --git a/interface/components/Login.tsx b/interface/components/Login.tsx index 25e764916..5c58df4e1 100644 --- a/interface/components/Login.tsx +++ b/interface/components/Login.tsx @@ -1,3 +1,5 @@ +import { AlphaRSPCError } from '@oscartbeaumont-sd/rspc-client/v2'; +import { UseMutationResult } from '@tanstack/react-query'; import clsx from 'clsx'; import { Dispatch, SetStateAction, useState } from 'react'; import { Controller } from 'react-hook-form'; @@ -5,13 +7,15 @@ import { signIn } from 'supertokens-web-js/recipe/emailpassword'; import { useZodForm } from '@sd/client'; import { Button, Form, Input, toast, z } from '@sd/ui'; import { useLocale } from '~/hooks'; +import { getTokens } from '~/util'; import ShowPassword from './ShowPassword'; async function signInClicked( email: string, password: string, - reload: Dispatch> + reload: Dispatch>, + cloudBootstrap: UseMutationResult // Cloud bootstrap mutation ) { try { const response = await signIn({ @@ -38,6 +42,9 @@ async function signInClicked( } else if (response.status === 'SIGN_IN_NOT_ALLOWED') { toast.error(response.reason); } else { + const tokens = getTokens(); + console.log(cloudBootstrap); + cloudBootstrap.mutate([tokens.accessToken, tokens.refreshToken]); toast.success('Sign in successful'); reload(true); } @@ -56,7 +63,13 @@ const LoginSchema = z.object({ password: z.string().min(6) }); -const Login = ({ reload }: { reload: Dispatch> }) => { +const Login = ({ + reload, + cloudBootstrap +}: { + reload: Dispatch>; + cloudBootstrap: UseMutationResult; // Cloud bootstrap mutation +}) => { const { t } = useLocale(); const [showPassword, setShowPassword] = useState(false); const form = useZodForm({ @@ -70,7 +83,7 @@ const Login = ({ reload }: { reload: Dispatch> }) => { return (
{ - await signInClicked(data.email, data.password, reload); + await signInClicked(data.email, data.password, reload, cloudBootstrap); })} className="w-full" form={form} @@ -137,7 +150,7 @@ const Login = ({ reload }: { reload: Dispatch> }) => { className={clsx('mx-auto mt-3 w-full border-none')} variant="accent" onClick={form.handleSubmit(async (data) => { - await signInClicked(data.email, data.password, reload); + await signInClicked(data.email, data.password, reload, cloudBootstrap); })} disabled={form.formState.isSubmitting} > diff --git a/interface/util/index.tsx b/interface/util/index.tsx index 9a4ae85e8..1026c9c9c 100644 --- a/interface/util/index.tsx +++ b/interface/util/index.tsx @@ -10,3 +10,28 @@ export const isNonEmpty = (input: T[]): input is NonEmptyArray => input.l export const isNonEmptyObject = (input: object) => Object.keys(input).length > 0; export const AUTH_SERVER_URL = 'https://auth.spacedrive.com'; + +export function getTokens() { + if (typeof window === 'undefined') { + return { + refreshToken: '', + accessToken: '' + }; + } + + const refreshToken: string = + JSON.parse(window.localStorage.getItem('frontendCookies') ?? '[]') + .find((cookie: string) => cookie.startsWith('st-refresh-token')) + ?.split('=')[1] + .split(';')[0] || ''; + const accessToken: string = + JSON.parse(window.localStorage.getItem('frontendCookies') ?? 
'[]') + .find((cookie: string) => cookie.startsWith('st-access-token')) + ?.split('=')[1] + .split(';')[0] || ''; + + return { + refreshToken, + accessToken + }; +} From 38003f7b613d370ab03924a1575b2da8f6809a72 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Thu, 26 Sep 2024 00:06:14 -0300 Subject: [PATCH 145/218] Refactor out access_token from all routes except bootstrap --- core/crates/cloud-services/src/lib.rs | 3 +- core/src/api/cloud/devices.rs | 173 +++++++++++++-------- core/src/api/cloud/libraries.rs | 98 +++++++----- core/src/api/cloud/locations.rs | 112 ++++++++++---- core/src/api/cloud/mod.rs | 17 ++- core/src/api/cloud/sync_groups.rs | 208 +++++++++++++++----------- core/src/custom_uri/utils.rs | 1 - core/src/lib.rs | 12 +- interface/util/index.tsx | 3 +- 9 files changed, 394 insertions(+), 233 deletions(-) diff --git a/core/crates/cloud-services/src/lib.rs b/core/crates/cloud-services/src/lib.rs index 21477d15b..e5edc271d 100644 --- a/core/crates/cloud-services/src/lib.rs +++ b/core/crates/cloud-services/src/lib.rs @@ -52,4 +52,5 @@ pub use iroh_base::key::{NodeId, SecretKey as IrohSecretKey}; pub use quic_rpc::transport::quinn::QuinnConnection; // Export URL for the auth server -pub const AUTH_SERVER_URL: &str = "https://auth.spacedrive.com"; +// pub const AUTH_SERVER_URL: &str = "https://auth.spacedrive.com"; +pub const AUTH_SERVER_URL: &str = "http://localhost:9420"; diff --git a/core/src/api/cloud/devices.rs b/core/src/api/cloud/devices.rs index 00e1549ca..d7c03a98b 100644 --- a/core/src/api/cloud/devices.rs +++ b/core/src/api/cloud/devices.rs @@ -2,11 +2,7 @@ use crate::api::{Ctx, R}; use sd_cloud_schema::{ auth::AccessToken, - devices::{ - self, - register::{Request, RequestUpdate, Response, State}, - DeviceOS, HardwareModel, PubId, - }, + devices::{self, DeviceOS, HardwareModel, PubId}, opaque_ke::{ ClientLogin, ClientLoginFinishParameters, ClientLoginFinishResult, ClientLoginStartResult, ClientRegistration, 
ClientRegistrationFinishParameters, ClientRegistrationFinishResult, @@ -18,34 +14,27 @@ use sd_core_cloud_services::{NodeId, QuinnConnection}; use sd_crypto::{cloud::secret_key::SecretKey, CryptoRng}; use blake3::Hash; -use chrono::DateTime; -use futures::{SinkExt, StreamExt}; +use futures::{FutureExt, SinkExt, StreamExt}; +use futures_concurrency::future::TryJoin; use rspc::alpha::AlphaRouter; +use serde::Deserialize; use tracing::{debug, error}; -use super::{handle_comm_error, try_get_cloud_services_client}; - -#[derive(Debug, serde::Serialize, serde::Deserialize, specta::Type)] -struct MockDevice { - pub_id: PubId, - name: String, - os: DeviceOS, - used_storage: u64, - storage_size: u64, - created_at: DateTime, - updated_at: DateTime, - device_model: HardwareModel, -} - pub fn mount() -> AlphaRouter { R.router() .procedure("get", { - R.query(|node, req: devices::get::Request| async move { - let devices::get::Response(device) = super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? + R.query(|node, pub_id: devices::PubId| async move { + use devices::get::{Request, Response}; + + let (client, access_token) = super::get_client_and_access_token(&node).await?; + + let Response(device) = super::handle_comm_error( + client .devices() - .get(req) + .get(Request { + pub_id, + access_token, + }) .await, "Failed to get device;", )??; @@ -56,34 +45,45 @@ pub fn mount() -> AlphaRouter { }) }) .procedure("list", { - R.query(|node, req: devices::list::Request| async move { - let devices::list::Response(mut devices) = super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? 
- .devices() - .list(req) - .await, + R.query(|node, _: ()| async move { + use devices::list::{Request, Response}; + + let ((client, access_token), pub_id) = ( + super::get_client_and_access_token(&node), + node.config.get().map(|config| Ok(config.id.into())), + ) + .try_join() + .await?; + + let Response(mut devices) = super::handle_comm_error( + client.devices().list(Request { access_token }).await, "Failed to list devices;", )??; - debug!(?devices, "Listed devices"); - - let id = node.config.get().await.id.into(); // Filter out the local device by matching pub_id - devices.retain(|device| device.pub_id != id); + devices.retain(|device| device.pub_id != pub_id); + + debug!(?devices, "Listed devices"); Ok(devices) }) }) .procedure("get_current_device", { - R.query(|node, access_token: AccessToken| async move { - let id = node.config.get().await.id; - let devices::get::Response(device) = super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? + R.query(|node, _: ()| async move { + use devices::get::{Request, Response}; + + let ((client, access_token), pub_id) = ( + super::get_client_and_access_token(&node), + node.config.get().map(|config| Ok(config.id.into())), + ) + .try_join() + .await?; + + let Response(device) = super::handle_comm_error( + client .devices() - .get(devices::get::Request { - pub_id: id.into(), + .get(Request { + pub_id, access_token, }) .await, @@ -93,12 +93,18 @@ pub fn mount() -> AlphaRouter { }) }) .procedure("delete", { - R.mutation(|node, req: devices::delete::Request| async move { + R.mutation(|node, pub_id: devices::PubId| async move { + use devices::delete::Request; + + let (client, access_token) = super::get_client_and_access_token(&node).await?; + super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? 
+ client .devices() - .delete(req) + .delete(Request { + pub_id, + access_token, + }) .await, "Failed to delete device;", )??; @@ -109,20 +115,45 @@ pub fn mount() -> AlphaRouter { }) }) .procedure("update", { - R.mutation(|node, req: devices::update::Request| async move { - super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? - .devices() - .update(req) - .await, - "Failed to update device;", - )??; + #[derive(Deserialize, specta::Type)] + struct CloudUpdateDeviceArgs { + pub_id: devices::PubId, + name: String, + storage_size: u64, + used_storage: u64, + } - debug!("Updated device"); + R.mutation( + |node, + CloudUpdateDeviceArgs { + pub_id, + name, + storage_size, + used_storage, + }: CloudUpdateDeviceArgs| async move { + use devices::update::Request; - Ok(()) - }) + let (client, access_token) = super::get_client_and_access_token(&node).await?; + + super::handle_comm_error( + client + .devices() + .update(Request { + access_token, + pub_id, + name, + storage_size, + used_storage, + }) + .await, + "Failed to update device;", + )??; + + debug!("Updated device"); + + Ok(()) + }, + ) }) } @@ -145,7 +176,7 @@ pub async fn hello( ) })?; - let (mut hello_continuation, mut res_stream) = handle_comm_error( + let (mut hello_continuation, mut res_stream) = super::handle_comm_error( client .devices() .hello(Request { @@ -167,11 +198,13 @@ pub async fn hello( }; let credential_response = - match handle_comm_error(res, "Communication error on device hello response;")? { + match super::handle_comm_error(res, "Communication error on device hello response;")? 
{ Ok(Response(State::LoginResponse(credential_response))) => credential_response, + Ok(Response(State::End)) => { unreachable!("Device hello response MUST not be End here, this is a serious bug and should crash;"); } + Err(e) => { error!(?e, "Device hello response error;"); return Err(e.into()); @@ -218,14 +251,16 @@ pub async fn hello( )); }; - match handle_comm_error(res, "Communication error on device hello response;")? { + match super::handle_comm_error(res, "Communication error on device hello response;")? { Ok(Response(State::LoginResponse(_))) => { unreachable!("Device hello final response MUST be End here, this is a serious bug and should crash;"); } + Ok(Response(State::End)) => { // Protocol completed successfully Ok(SecretKey::from(export_key)) } + Err(e) => { error!(?e, "Device hello final response error;"); Err(e.into()) @@ -258,6 +293,8 @@ pub async fn register( hashed_pub_id: Hash, rng: &mut CryptoRng, ) -> Result { + use devices::register::{Request, RequestUpdate, Response, State}; + let ClientRegistrationStartResult { message, state } = ClientRegistration::::start( rng, @@ -271,7 +308,7 @@ pub async fn register( ) })?; - let (mut register_continuation, mut res_stream) = handle_comm_error( + let (mut register_continuation, mut res_stream) = super::handle_comm_error( client .devices() .register(Request { @@ -299,11 +336,13 @@ pub async fn register( }; let registration_response = - match handle_comm_error(res, "Communication error on device register response;")? { + match super::handle_comm_error(res, "Communication error on device register response;")? 
{ Ok(Response(State::RegistrationResponse(res))) => res, + Ok(Response(State::End)) => { unreachable!("Device hello response MUST not be End here, this is a serious bug and should crash;"); } + Err(e) => { error!(?e, "Device hello response error;"); return Err(e.into()); @@ -351,14 +390,16 @@ pub async fn register( )); }; - match handle_comm_error(res, "Communication error on device register response;")? { + match super::handle_comm_error(res, "Communication error on device register response;")? { Ok(Response(State::RegistrationResponse(_))) => { unreachable!("Device register final response MUST be End here, this is a serious bug and should crash;"); } + Ok(Response(State::End)) => { // Protocol completed successfully Ok(SecretKey::from(export_key)) } + Err(e) => { error!(?e, "Device register final response error;"); Err(e.into()) diff --git a/core/src/api/cloud/libraries.rs b/core/src/api/cloud/libraries.rs index cabe76090..884e5e21b 100644 --- a/core/src/api/cloud/libraries.rs +++ b/core/src/api/cloud/libraries.rs @@ -1,39 +1,63 @@ use crate::api::{utils::library, Ctx, R}; -use sd_cloud_schema::{auth::AccessToken, devices, libraries}; +use sd_cloud_schema::libraries; +use futures::FutureExt; use futures_concurrency::future::TryJoin; use rspc::alpha::AlphaRouter; use serde::Deserialize; use tracing::debug; -use super::try_get_cloud_services_client; - pub fn mount() -> AlphaRouter { R.router() .procedure("get", { - R.query(|node, req: libraries::get::Request| async move { - let libraries::get::Response(library) = super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? 
- .libraries() - .get(req) - .await, - "Failed to get library;", - )??; + #[derive(Deserialize, specta::Type)] + struct CloudGetLibraryArgs { + pub_id: libraries::PubId, + with_device: bool, + } - debug!(?library, "Got library"); + R.query( + |node, + CloudGetLibraryArgs { + pub_id, + with_device, + }: CloudGetLibraryArgs| async move { + use libraries::get::{Request, Response}; - Ok(library) - }) + let (client, access_token) = super::get_client_and_access_token(&node).await?; + + let Response(library) = super::handle_comm_error( + client + .libraries() + .get(Request { + access_token, + pub_id, + with_device, + }) + .await, + "Failed to get library;", + )??; + + debug!(?library, "Got library"); + + Ok(library) + }, + ) }) .procedure("list", { - R.query(|node, req: libraries::list::Request| async move { - let libraries::list::Response(libraries) = super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? + R.query(|node, with_device: bool| async move { + use libraries::list::{Request, Response}; + + let (client, access_token) = super::get_client_and_access_token(&node).await?; + + let Response(libraries) = super::handle_comm_error( + client .libraries() - .list(req) + .list(Request { + access_token, + with_device, + }) .await, "Failed to list libraries;", )??; @@ -45,11 +69,11 @@ pub fn mount() -> AlphaRouter { }) .procedure("create", { R.with2(library()) - .mutation(|(node, library), access_token: AccessToken| async move { - let (client, name, device_pub_id) = ( - try_get_cloud_services_client(&node), - async { Ok(library.config().await.name.to_string()) }, - async { Ok(devices::PubId(node.config.get().await.id.into())) }, + .mutation(|(node, library), _: ()| async move { + let ((client, access_token), name, device_pub_id) = ( + super::get_client_and_access_token(&node), + library.config().map(|config| Ok(config.name.to_string())), + node.config.get().map(|config| Ok(config.id.into())), ) .try_join() .await?; @@ -72,10 +96,11 @@ pub fn mount() -> 
AlphaRouter { }) .procedure("delete", { R.with2(library()) - .mutation(|(node, library), access_token: AccessToken| async move { + .mutation(|(node, library), _: ()| async move { + let (client, access_token) = super::get_client_and_access_token(&node).await?; + super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? + client .libraries() .delete(libraries::delete::Request { access_token, @@ -91,18 +116,12 @@ pub fn mount() -> AlphaRouter { }) }) .procedure("update", { - #[derive(Deserialize, specta::Type)] - struct LibrariesUpdateArgs { - access_token: AccessToken, - name: String, - } + R.with2(library()) + .mutation(|(node, library), name: String| async move { + let (client, access_token) = super::get_client_and_access_token(&node).await?; - R.with2(library()).mutation( - |(node, library), - LibrariesUpdateArgs { access_token, name }: LibrariesUpdateArgs| async move { super::handle_comm_error( - try_get_cloud_services_client(&node) - .await? + client .libraries() .update(libraries::update::Request { access_token, @@ -116,7 +135,6 @@ pub fn mount() -> AlphaRouter { debug!("Updated library"); Ok(()) - }, - ) + }) }) } diff --git a/core/src/api/cloud/locations.rs b/core/src/api/cloud/locations.rs index e7320d315..e41e3a865 100644 --- a/core/src/api/cloud/locations.rs +++ b/core/src/api/cloud/locations.rs @@ -1,51 +1,105 @@ use crate::api::{Ctx, R}; -use sd_cloud_schema::locations; +use sd_cloud_schema::{devices, libraries, locations}; use rspc::alpha::AlphaRouter; +use serde::Deserialize; use tracing::debug; pub fn mount() -> AlphaRouter { R.router() .procedure("list", { - R.query(|node, req: locations::list::Request| async move { - let locations::list::Response(locations) = super::handle_comm_error( - super::try_get_cloud_services_client(&node) - .await? 
- .locations() - .list(req) - .await, - "Failed to list locations;", - )??; + #[derive(Deserialize, specta::Type)] + struct CloudListLocationsArgs { + pub library_pub_id: libraries::PubId, + pub with_library: bool, + pub with_device: bool, + } - debug!(?locations, "Got locations"); + R.query( + |node, + CloudListLocationsArgs { + library_pub_id, + with_library, + with_device, + }: CloudListLocationsArgs| async move { + use locations::list::{Request, Response}; - Ok(locations) - }) + let (client, access_token) = super::get_client_and_access_token(&node).await?; + + let Response(locations) = super::handle_comm_error( + client + .locations() + .list(Request { + access_token, + library_pub_id, + with_library, + with_device, + }) + .await, + "Failed to list locations;", + )??; + + debug!(?locations, "Got locations"); + + Ok(locations) + }, + ) }) .procedure("create", { - R.mutation(|node, req: locations::create::Request| async move { - super::handle_comm_error( - super::try_get_cloud_services_client(&node) - .await? 
- .locations() - .create(req) - .await, - "Failed to list locations;", - )??; + #[derive(Deserialize, specta::Type)] + struct CloudCreateLocationArgs { + pub pub_id: locations::PubId, + pub name: String, + pub library_pub_id: libraries::PubId, + pub device_pub_id: devices::PubId, + } - debug!("Created cloud location"); + R.mutation( + |node, + CloudCreateLocationArgs { + pub_id, + name, + library_pub_id, + device_pub_id, + }: CloudCreateLocationArgs| async move { + use locations::create::Request; - Ok(()) - }) + let (client, access_token) = super::get_client_and_access_token(&node).await?; + + super::handle_comm_error( + client + .locations() + .create(Request { + access_token, + pub_id, + name, + library_pub_id, + device_pub_id, + }) + .await, + "Failed to list locations;", + )??; + + debug!("Created cloud location"); + + Ok(()) + }, + ) }) .procedure("delete", { - R.mutation(|node, req: locations::delete::Request| async move { + R.mutation(|node, pub_id: locations::PubId| async move { + use locations::delete::Request; + + let (client, access_token) = super::get_client_and_access_token(&node).await?; + super::handle_comm_error( - super::try_get_cloud_services_client(&node) - .await? 
+ client .locations() - .delete(req) + .delete(Request { + access_token, + pub_id, + }) .await, "Failed to list locations;", )??; diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index 93d36d208..27348122a 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -18,7 +18,7 @@ use sd_crypto::{CryptoRng, SeedableRng}; use std::pin::pin; use async_stream::stream; -use futures::StreamExt; +use futures::{FutureExt, StreamExt}; use futures_concurrency::future::TryJoin; use rspc::alpha::AlphaRouter; use tracing::{debug, error, instrument}; @@ -282,3 +282,18 @@ async fn initialize_cloud_sync( library.init_cloud_sync(node, group_pub_id).await } + +async fn get_client_and_access_token( + node: &Node, +) -> Result<(Client, Service>, auth::AccessToken), rspc::Error> { + ( + try_get_cloud_services_client(node), + node.cloud_services + .token_refresher + .get_access_token() + .map(|res| res.map_err(Into::into)), + ) + .try_join() + .await + .map_err(Into::into) +} diff --git a/core/src/api/cloud/sync_groups.rs b/core/src/api/cloud/sync_groups.rs index a47684d85..844763efb 100644 --- a/core/src/api/cloud/sync_groups.rs +++ b/core/src/api/cloud/sync_groups.rs @@ -7,13 +7,13 @@ use crate::{ use sd_core_cloud_services::JoinedLibraryCreateArgs; use sd_cloud_schema::{ - auth::AccessToken, cloud_p2p, devices, libraries, sync::{groups, KeyHash}, }; use std::sync::Arc; +use futures::FutureExt; use futures_concurrency::future::TryJoin; use rspc::alpha::AlphaRouter; use sd_crypto::{cloud::secret_key::SecretKey, CryptoRng, SeedableRng}; @@ -25,16 +25,18 @@ pub fn mount() -> AlphaRouter { R.router() .procedure("create", { R.with2(library()) - .mutation(|(node, library), access_token: AccessToken| async move { - let (client, device_pub_id, mut rng, key_manager) = ( - super::try_get_cloud_services_client(&node), - async { Ok(devices::PubId(node.config.get().await.id.into())) }, - async { - Ok(CryptoRng::from_seed( - 
node.master_rng.lock().await.generate_fixed(), - )) - }, - node.cloud_services.key_manager(), + .mutation(|(node, library), _: ()| async move { + use groups::create::{Request, Response}; + + let ((client, access_token), device_pub_id, mut rng, key_manager) = ( + super::get_client_and_access_token(&node), + node.config.get().map(|config| Ok(config.id.into())), + node.master_rng + .lock() + .map(|mut rng| Ok(CryptoRng::from_seed(rng.generate_fixed()))), + node.cloud_services + .key_manager() + .map(|res| res.map_err(Into::into)), ) .try_join() .await?; @@ -42,11 +44,11 @@ pub fn mount() -> AlphaRouter { let new_key = SecretKey::generate(&mut rng); let key_hash = KeyHash(blake3::hash(new_key.as_ref()).to_hex().to_string()); - let groups::create::Response(group_pub_id) = super::handle_comm_error( + let Response(group_pub_id) = super::handle_comm_error( client .sync() .groups() - .create(groups::create::Request { + .create(Request { access_token: access_token.clone(), key_hash: key_hash.clone(), library_pub_id: libraries::PubId(library.id), @@ -83,93 +85,119 @@ pub fn mount() -> AlphaRouter { }) }) .procedure("delete", { - R.mutation(|node, req: groups::delete::Request| async move { - let group_pub_id = req.pub_id; + R.mutation(|node, pub_id: groups::PubId| async move { + use groups::delete::Request; + + let (client, access_token) = super::get_client_and_access_token(&node).await?; + super::handle_comm_error( - super::try_get_cloud_services_client(&node) - .await? + client .sync() .groups() - .delete(req) + .delete(Request { + access_token, + pub_id, + }) .await, "Failed to delete sync group;", )??; - debug!(%group_pub_id, "Deleted sync group"); + debug!(%pub_id, "Deleted sync group"); Ok(()) }) }) .procedure("get", { - R.query(|node, req: groups::get::Request| async move { - let groups::get::Response(group) = super::handle_comm_error( - super::try_get_cloud_services_client(&node) - .await? 
- .sync() - .groups() - .get(req) - .await, - "Failed to get sync group;", - )??; - - debug!(?group, "Got sync group"); - - Ok(group) - }) - }) - .procedure("leave", { #[derive(Deserialize, specta::Type)] - struct SyncGroupsLeaveArgs { - access_token: AccessToken, - group_pub_id: groups::PubId, + struct CloudGetSyncGroupArgs { + pub pub_id: groups::PubId, + pub with_library: bool, + pub with_devices: bool, + pub with_used_storage: bool, } R.query( |node, - SyncGroupsLeaveArgs { - access_token, - group_pub_id, - }: SyncGroupsLeaveArgs| async move { - let (device_pub_id, client, key_manager) = ( - async { Ok(node.config.get().await.id) }, - super::try_get_cloud_services_client(&node), - node.cloud_services.key_manager(), - ) - .try_join() - .await?; + CloudGetSyncGroupArgs { + pub_id, + with_library, + with_devices, + with_used_storage, + }: CloudGetSyncGroupArgs| async move { + use groups::get::{Request, Response}; - super::handle_comm_error( + let (client, access_token) = super::get_client_and_access_token(&node).await?; + + let Response(group) = super::handle_comm_error( client .sync() .groups() - .leave(groups::leave::Request { + .get(Request { access_token, - pub_id: group_pub_id, - current_device_pub_id: devices::PubId(device_pub_id.into()), + pub_id, + with_library, + with_devices, + with_used_storage, }) .await, - "Failed to leave sync group;", + "Failed to get sync group;", )??; - let mut rng = - CryptoRng::from_seed(node.master_rng.lock().await.generate_fixed()); + debug!(?group, "Got sync group"); - key_manager.remove_group(group_pub_id, &mut rng).await?; - - debug!(%group_pub_id, "Left sync group"); - - Ok(()) + Ok(group) }, ) }) - .procedure("list", { - R.query(|node, req: groups::list::Request| async move { - let groups::list::Response(groups) = super::handle_comm_error( - super::try_get_cloud_services_client(&node) - .await? 
+ .procedure("leave", { + R.query(|node, pub_id: groups::PubId| async move { + let ((client, access_token), current_device_pub_id, mut rng, key_manager) = ( + super::get_client_and_access_token(&node), + node.config.get().map(|config| Ok(config.id.into())), + node.master_rng + .lock() + .map(|mut rng| Ok(CryptoRng::from_seed(rng.generate_fixed()))), + node.cloud_services + .key_manager() + .map(|res| res.map_err(Into::into)), + ) + .try_join() + .await?; + + super::handle_comm_error( + client .sync() .groups() - .list(req) + .leave(groups::leave::Request { + access_token, + pub_id, + current_device_pub_id, + }) + .await, + "Failed to leave sync group;", + )??; + + key_manager.remove_group(pub_id, &mut rng).await?; + + debug!(%pub_id, "Left sync group"); + + Ok(()) + }) + }) + .procedure("list", { + R.query(|node, with_library: bool| async move { + use groups::list::{Request, Response}; + + let (client, access_token) = super::get_client_and_access_token(&node).await?; + + let Response(groups) = super::handle_comm_error( + client + .sync() + .groups() + .list(Request { + access_token, + with_library, + }) .await, "Failed to list groups;", )??; @@ -181,27 +209,27 @@ pub fn mount() -> AlphaRouter { }) .procedure("remove_device", { #[derive(Deserialize, specta::Type)] - struct SyncGroupsRemoveDeviceArgs { - access_token: AccessToken, + struct CloudSyncGroupsRemoveDeviceArgs { group_pub_id: groups::PubId, to_remove_device_pub_id: devices::PubId, } R.query( |node, - SyncGroupsRemoveDeviceArgs { - access_token, + CloudSyncGroupsRemoveDeviceArgs { group_pub_id, to_remove_device_pub_id, - }: SyncGroupsRemoveDeviceArgs| async move { - let (client, current_device_pub_id, mut rng, key_manager) = ( - super::try_get_cloud_services_client(&node), - async { Ok(devices::PubId(node.config.get().await.id.into())) }, - async { - Ok(CryptoRng::from_seed( - node.master_rng.lock().await.generate_fixed(), - )) - }, - node.cloud_services.key_manager(), + }: CloudSyncGroupsRemoveDeviceArgs| 
async move { + use groups::remove_device::Request; + + let ((client, access_token), current_device_pub_id, mut rng, key_manager) = ( + super::get_client_and_access_token(&node), + node.config.get().map(|config| Ok(config.id.into())), + node.master_rng + .lock() + .map(|mut rng| Ok(CryptoRng::from_seed(rng.generate_fixed()))), + node.cloud_services + .key_manager() + .map(|res| res.map_err(Into::into)), ) .try_join() .await?; @@ -217,7 +245,7 @@ pub fn mount() -> AlphaRouter { client .sync() .groups() - .remove_device(groups::remove_device::Request { + .remove_device(Request { access_token, group_pub_id, new_key_hash, @@ -225,7 +253,7 @@ pub fn mount() -> AlphaRouter { to_remove_device_pub_id, }) .await, - "Failed to list libraries;", + "Failed to remove device from sync group;", )??; debug!(%to_remove_device_pub_id, %group_pub_id, "Removed device"); @@ -237,7 +265,6 @@ pub fn mount() -> AlphaRouter { .procedure("request_join", { #[derive(Deserialize, specta::Type)] struct SyncGroupsRequestJoinArgs { - access_token: AccessToken, sync_group: groups::GroupWithLibraryAndDevices, asking_device: devices::Device, } @@ -245,14 +272,15 @@ pub fn mount() -> AlphaRouter { R.mutation( |node, SyncGroupsRequestJoinArgs { - access_token, sync_group, asking_device, }: SyncGroupsRequestJoinArgs| async move { - let (client, current_device_pub_id, cloud_p2p) = ( - super::try_get_cloud_services_client(&node), - async { Ok(devices::PubId(node.config.get().await.id.into())) }, - node.cloud_services.cloud_p2p(), + let ((client, access_token), current_device_pub_id, cloud_p2p) = ( + super::get_client_and_access_token(&node), + node.config.get().map(|config| Ok(config.id.into())), + node.cloud_services + .cloud_p2p() + .map(|res| res.map_err(Into::into)), ) .try_join() .await?; diff --git a/core/src/custom_uri/utils.rs b/core/src/custom_uri/utils.rs index 52ac9084a..645da5106 100644 --- a/core/src/custom_uri/utils.rs +++ b/core/src/custom_uri/utils.rs @@ -1,6 +1,5 @@ use 
crate::util::InfallibleResponse; -use sd_core_cloud_services::AUTH_SERVER_URL; use std::{fmt::Debug, panic::Location}; use axum::{ diff --git a/core/src/lib.rs b/core/src/lib.rs index df37a8f9d..e90e6ed2c 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -120,11 +120,14 @@ impl Node { format!("{AUTH_SERVER_URL}/cloud-api-address").to_string() }), std::env::var("SD_CLOUD_P2P_RELAY_URL") - .unwrap_or_else(|_| "https://use1-1.relay.iroh.network/".to_string()), + // .unwrap_or_else(|_| "https://use1-1.relay.iroh.network/".to_string()), + .unwrap_or_else(|_| "http://localhost:8081/".to_string()), std::env::var("SD_CLOUD_P2P_DNS_ORIGIN_NAME") - .unwrap_or_else(|_| "dns.iroh.link/".to_string()), + // .unwrap_or_else(|_| "dns.iroh.link/".to_string()), + .unwrap_or_else(|_| "irohdns.localhost".to_string()), std::env::var("SD_CLOUD_P2P_DNS_PKARR_URL") - .unwrap_or_else(|_| "https://dns.iroh.link/pkarr".to_string()), + // .unwrap_or_else(|_| "https://dns.iroh.link/pkarr".to_string()), + .unwrap_or_else(|_| "http://localhost:8080/pkarr".to_string()), std::env::var("SD_CLOUD_API_DOMAIN_NAME") .unwrap_or_else(|_| "localhost".to_string()), ) @@ -134,7 +137,8 @@ impl Node { ( "https://auth.spacedrive.com/cloud-api-address".to_string(), "https://relay.spacedrive.com/".to_string(), - "dns.spacedrive.com".to_string(), + "irohdns.spacedrive.com".to_string(), + "irohdns.spacedrive.com/pkarr".to_string(), "api.spacedrive.com".to_string(), ) } diff --git a/interface/util/index.tsx b/interface/util/index.tsx index 1026c9c9c..e5f955986 100644 --- a/interface/util/index.tsx +++ b/interface/util/index.tsx @@ -9,7 +9,8 @@ export type NonEmptyArray = [T, ...T[]]; export const isNonEmpty = (input: T[]): input is NonEmptyArray => input.length > 0; export const isNonEmptyObject = (input: object) => Object.keys(input).length > 0; -export const AUTH_SERVER_URL = 'https://auth.spacedrive.com'; +// export const AUTH_SERVER_URL = 'https://auth.spacedrive.com'; +export const AUTH_SERVER_URL = 
'http://localhost:9420'; export function getTokens() { if (typeof window === 'undefined') { From 798707179505abdcd38424e407db926dbe98936d Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Thu, 26 Sep 2024 22:47:41 -0300 Subject: [PATCH 146/218] Some specta renaming on cloud services routes --- core/crates/cloud-services/src/p2p/mod.rs | 2 + packages/client/src/core.ts | 94 +++++++++-------------- 2 files changed, 40 insertions(+), 56 deletions(-) diff --git a/core/crates/cloud-services/src/p2p/mod.rs b/core/crates/cloud-services/src/p2p/mod.rs index 701bb4969..bdefe97ec 100644 --- a/core/crates/cloud-services/src/p2p/mod.rs +++ b/core/crates/cloud-services/src/p2p/mod.rs @@ -41,6 +41,7 @@ pub struct Ticket(u64); #[derive(Debug, Serialize, specta::Type)] #[serde(tag = "kind", content = "data")] +#[specta(rename = "CloudP2PNotifyUser")] pub enum NotifyUser { ReceivedJoinSyncGroupRequest { ticket: Ticket, @@ -84,6 +85,7 @@ pub struct BasicLibraryCreationArgs { #[derive(Debug, Deserialize, specta::Type)] #[serde(tag = "kind", content = "data")] +#[specta(rename = "CloudP2PUserResponse")] pub enum UserResponse { AcceptDeviceInSyncGroup { ticket: Ticket, diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts index 11776426f..9a5a3277f 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -5,16 +5,16 @@ export type Procedures = { queries: { key: "backups.getAll", input: never, result: GetAll } | { key: "buildInfo", input: never, result: BuildInfo } | - { key: "cloud.devices.get", input: DeviceGetRequest, result: Device } | - { key: "cloud.devices.get_current_device", input: AccessToken, result: Device } | - { key: "cloud.devices.list", input: DeviceListRequest, result: Device[] } | - { key: "cloud.libraries.get", input: LibraryGetRequest, result: Library } | - { key: "cloud.libraries.list", input: LibraryListRequest, result: Library[] } | - { key: "cloud.locations.list", input: LocationListRequest, result: CloudLocation[] } | - { 
key: "cloud.syncGroups.get", input: SyncGroupGetRequest, result: SyncGroup } | - { key: "cloud.syncGroups.leave", input: SyncGroupsLeaveArgs, result: null } | - { key: "cloud.syncGroups.list", input: SyncGroupListRequest, result: SyncGroup[] } | - { key: "cloud.syncGroups.remove_device", input: SyncGroupsRemoveDeviceArgs, result: null } | + { key: "cloud.devices.get", input: DevicePubId, result: Device } | + { key: "cloud.devices.get_current_device", input: never, result: Device } | + { key: "cloud.devices.list", input: never, result: Device[] } | + { key: "cloud.libraries.get", input: CloudGetLibraryArgs, result: Library } | + { key: "cloud.libraries.list", input: boolean, result: Library[] } | + { key: "cloud.locations.list", input: CloudListLocationsArgs, result: CloudLocation[] } | + { key: "cloud.syncGroups.get", input: CloudGetSyncGroupArgs, result: SyncGroup } | + { key: "cloud.syncGroups.leave", input: SyncGroupPubId, result: null } | + { key: "cloud.syncGroups.list", input: boolean, result: SyncGroup[] } | + { key: "cloud.syncGroups.remove_device", input: CloudSyncGroupsRemoveDeviceArgs, result: null } | { key: "ephemeralFiles.getMediaData", input: string, result: MediaData | null } | { key: "files.get", input: LibraryArgs, result: ObjectWithFilePaths2 | null } | { key: "files.getConvertibleImageExtensions", input: never, result: string[] } | @@ -66,17 +66,17 @@ export type Procedures = { { key: "backups.delete", input: string, result: null } | { key: "backups.restore", input: string, result: null } | { key: "cloud.bootstrap", input: [AccessToken, RefreshToken], result: null } | - { key: "cloud.devices.delete", input: DeviceDeleteRequest, result: null } | - { key: "cloud.devices.update", input: DeviceUpdateRequest, result: null } | - { key: "cloud.libraries.create", input: LibraryArgs, result: null } | - { key: "cloud.libraries.delete", input: LibraryArgs, result: null } | - { key: "cloud.libraries.update", input: LibraryArgs, result: null } | - { key: 
"cloud.locations.create", input: LocationCreateRequest, result: null } | - { key: "cloud.locations.delete", input: LocationDeleteRequest, result: null } | - { key: "cloud.syncGroups.create", input: LibraryArgs, result: null } | - { key: "cloud.syncGroups.delete", input: SyncGroupDeleteRequest, result: null } | + { key: "cloud.devices.delete", input: DevicePubId, result: null } | + { key: "cloud.devices.update", input: CloudUpdateDeviceArgs, result: null } | + { key: "cloud.libraries.create", input: LibraryArgs, result: null } | + { key: "cloud.libraries.delete", input: LibraryArgs, result: null } | + { key: "cloud.libraries.update", input: LibraryArgs, result: null } | + { key: "cloud.locations.create", input: CloudCreateLocationArgs, result: null } | + { key: "cloud.locations.delete", input: LocationPubId, result: null } | + { key: "cloud.syncGroups.create", input: LibraryArgs, result: null } | + { key: "cloud.syncGroups.delete", input: SyncGroupPubId, result: null } | { key: "cloud.syncGroups.request_join", input: SyncGroupsRequestJoinArgs, result: null } | - { key: "cloud.userResponse", input: UserResponse, result: null } | + { key: "cloud.userResponse", input: CloudP2PUserResponse, result: null } | { key: "ephemeralFiles.copyFiles", input: LibraryArgs, result: null } | { key: "ephemeralFiles.createFile", input: LibraryArgs, result: string } | { key: "ephemeralFiles.createFolder", input: LibraryArgs, result: string } | @@ -137,7 +137,7 @@ export type Procedures = { { key: "tags.update", input: LibraryArgs, result: null } | { key: "toggleFeatureFlag", input: BackendFeature, result: null }, subscriptions: - { key: "cloud.listenCloudServicesNotifications", input: never, result: NotifyUser } | + { key: "cloud.listenCloudServicesNotifications", input: never, result: CloudP2PNotifyUser } | { key: "invalidation.listen", input: never, result: InvalidateOperationEvent[] } | { key: "jobs.newFilePathIdentified", input: LibraryArgs, result: number[] } | { key: 
"jobs.newThumbnail", input: LibraryArgs, result: ThumbKey } | @@ -182,12 +182,28 @@ export type ChangeNodeNameArgs = { name: string | null; p2p_port: Port | null; p export type Chapter = { id: number; start: [number, number]; end: [number, number]; time_base_den: number; time_base_num: number; metadata: Metadata } +export type CloudCreateLocationArgs = { pub_id: LocationPubId; name: string; library_pub_id: LibraryPubId; device_pub_id: DevicePubId } + +export type CloudGetLibraryArgs = { pub_id: LibraryPubId; with_device: boolean } + +export type CloudGetSyncGroupArgs = { pub_id: SyncGroupPubId; with_library: boolean; with_devices: boolean; with_used_storage: boolean } + +export type CloudListLocationsArgs = { library_pub_id: LibraryPubId; with_library: boolean; with_device: boolean } + export type CloudLocation = { pub_id: LocationPubId; name: string; device: Device | null; library: Library | null; created_at: string; updated_at: string } export type CloudP2PError = "Rejected" | "UnableToConnect" | "TimedOut" +export type CloudP2PNotifyUser = { kind: "ReceivedJoinSyncGroupRequest"; data: { ticket: CloudP2PTicket; asking_device: Device; sync_group: SyncGroupWithLibraryAndDevices } } | { kind: "ReceivedJoinSyncGroupResponse"; data: { response: JoinSyncGroupResponse; sync_group: SyncGroupWithLibraryAndDevices } } | { kind: "SendingJoinSyncGroupResponseError"; data: { error: JoinSyncGroupError; sync_group: SyncGroupWithLibraryAndDevices } } | { kind: "TimedOutJoinRequest"; data: { device: Device; succeeded: boolean } } + export type CloudP2PTicket = bigint +export type CloudP2PUserResponse = { kind: "AcceptDeviceInSyncGroup"; data: { ticket: CloudP2PTicket; accepted: BasicLibraryCreationArgs | null } } + +export type CloudSyncGroupsRemoveDeviceArgs = { group_pub_id: SyncGroupPubId; to_remove_device_pub_id: DevicePubId } + +export type CloudUpdateDeviceArgs = { pub_id: DevicePubId; name: string; storage_size: bigint; used_storage: bigint } + export type Codec = { kind: 
string | null; sub_kind: string | null; tag: string | null; name: string | null; profile: string | null; bit_rate: number; props: Props | null } export type ColorProfile = "Normal" | "Custom" | "HDRNoOriginal" | "HDRWithOriginal" | "OriginalForHDR" | "Panorama" | "PortraitHDR" | "Portrait" @@ -242,18 +258,10 @@ export type DefaultLocations = { desktop: boolean; documents: boolean; downloads export type Device = { pub_id: DevicePubId; name: string; os: DeviceOS; storage_size: bigint; used_storage: bigint; connection_id: string; created_at: string; updated_at: string; hardware_model: HardwareModel } -export type DeviceDeleteRequest = { access_token: AccessToken; pub_id: DevicePubId } - -export type DeviceGetRequest = { access_token: AccessToken; pub_id: DevicePubId } - -export type DeviceListRequest = { access_token: AccessToken } - export type DeviceOS = "Linux" | "Windows" | "MacOS" | "iOS" | "Android" export type DevicePubId = string -export type DeviceUpdateRequest = { access_token: AccessToken; pub_id: DevicePubId; name: string; storage_size: bigint; used_storage: bigint } - /** * The method used for the discovery of this peer. * *Technically* you can have multiple under the hood but this simplifies things for the UX. 
@@ -424,8 +432,6 @@ export type Label = { id: number; name: string; date_created: string | null; dat export type LabelWithObjects = { id: number; name: string; date_created: string | null; date_modified: string | null; label_objects: { object: { id: number; file_paths: FilePath[] } }[] } -export type LibrariesUpdateArgs = { access_token: AccessToken; name: string } - export type Library = { pub_id: LibraryPubId; name: string; original_device: Device | null; created_at: string; updated_at: string } /** @@ -459,10 +465,6 @@ export type LibraryConfigVersion = "V0" | "V1" | "V2" | "V3" | "V4" | "V5" | "V6 export type LibraryConfigWrapped = { uuid: string; instance_id: string; instance_public_key: RemoteIdentity; config: LibraryConfig } -export type LibraryGetRequest = { access_token: AccessToken; pub_id: LibraryPubId; with_device: boolean } - -export type LibraryListRequest = { access_token: AccessToken; with_device: boolean } - export type LibraryName = string export type LibraryPreferences = { location?: { [key in string]: LocationSettings }; tag?: { [key in string]: TagSettings } } @@ -484,12 +486,6 @@ export type Location = { id: number; pub_id: number[]; name: string | null; path */ export type LocationCreateArgs = { path: string; dry_run: boolean; indexer_rules_ids: number[] } -export type LocationCreateRequest = { access_token: AccessToken; pub_id: LocationPubId; name: string; library_pub_id: LibraryPubId; device_pub_id: DevicePubId } - -export type LocationDeleteRequest = { access_token: AccessToken; pub_id: LocationPubId } - -export type LocationListRequest = { access_token: AccessToken; library_pub_id: LibraryPubId; with_library: boolean; with_device: boolean } - export type LocationPubId = string export type LocationSettings = { explorer: ExplorerSettings } @@ -575,8 +571,6 @@ export type NotificationId = { type: "library"; id: [string, number] } | { type: export type NotificationKind = "info" | "success" | "error" | "warning" -export type NotifyUser = { 
kind: "ReceivedJoinSyncGroupRequest"; data: { ticket: CloudP2PTicket; asking_device: Device; sync_group: SyncGroupWithLibraryAndDevices } } | { kind: "ReceivedJoinSyncGroupResponse"; data: { response: JoinSyncGroupResponse; sync_group: SyncGroupWithLibraryAndDevices } } | { kind: "SendingJoinSyncGroupResponseError"; data: { error: JoinSyncGroupError; sync_group: SyncGroupWithLibraryAndDevices } } | { kind: "TimedOutJoinRequest"; data: { device: Device; succeeded: boolean } } - export type Object = { id: number; pub_id: number[]; kind: number | null; key_id: number | null; hidden: boolean | null; favorite: boolean | null; important: boolean | null; note: string | null; date_created: string | null; date_accessed: string | null; device_pub_id: number[] | null } export type ObjectCursor = "none" | { dateAccessed: CursorOrderItem } | { kind: CursorOrderItem } @@ -692,21 +686,11 @@ export type SubtitleProps = { width: number; height: number } export type SyncGroup = { pub_id: SyncGroupPubId; latest_key_hash: KeyHash; library: Library | null; devices: Device[] | null; total_sync_messages_bytes: bigint | null; total_space_files_bytes: bigint | null; created_at: string; updated_at: string } -export type SyncGroupDeleteRequest = { access_token: AccessToken; pub_id: SyncGroupPubId } - -export type SyncGroupGetRequest = { access_token: AccessToken; pub_id: SyncGroupPubId; with_library: boolean; with_devices: boolean; with_used_storage: boolean } - -export type SyncGroupListRequest = { access_token: AccessToken; with_library: boolean } - export type SyncGroupPubId = string export type SyncGroupWithLibraryAndDevices = { pub_id: SyncGroupPubId; latest_key_hash: KeyHash; library: Library; devices: Device[]; created_at: string; updated_at: string } -export type SyncGroupsLeaveArgs = { access_token: AccessToken; group_pub_id: SyncGroupPubId } - -export type SyncGroupsRemoveDeviceArgs = { access_token: AccessToken; group_pub_id: SyncGroupPubId; to_remove_device_pub_id: DevicePubId } 
- -export type SyncGroupsRequestJoinArgs = { access_token: AccessToken; sync_group: SyncGroupWithLibraryAndDevices; asking_device: Device } +export type SyncGroupsRequestJoinArgs = { sync_group: SyncGroupWithLibraryAndDevices; asking_device: Device } export type SyncStatus = { ingest: boolean; cloud_send: boolean; cloud_receive: boolean; cloud_ingest: boolean } @@ -732,8 +716,6 @@ export type ThumbKey = { shard_hex: string; cas_id: CasId; base_directory_str: s export type UpdateThumbnailerPreferences = Record -export type UserResponse = { kind: "AcceptDeviceInSyncGroup"; data: { ticket: CloudP2PTicket; accepted: BasicLibraryCreationArgs | null } } - export type VideoProps = { pixel_format: string | null; color_range: string | null; bits_per_channel: number | null; color_space: string | null; color_primaries: string | null; color_transfer: string | null; field_order: string | null; chroma_location: string | null; width: number; height: number; aspect_ratio_num: number | null; aspect_ratio_den: number | null; properties: string[] } export type Volume = { name: string; mount_points: string[]; total_capacity: string; available_capacity: string; disk_type: DiskType; file_system: string | null; is_root_filesystem: boolean } From fdff1390861897d9fae165e9a33871dad9889fb7 Mon Sep 17 00:00:00 2001 From: "Ericson \"Fogo\" Soares" Date: Fri, 27 Sep 2024 00:38:48 -0300 Subject: [PATCH 147/218] Update toolchain and fix a bunch of warnings --- Cargo.lock | Bin 337906 -> 337706 bytes core/Cargo.toml | 1 - core/crates/cloud-services/src/client.rs | 12 +- .../src/key_manager/key_store.rs | 2 +- core/crates/cloud-services/src/sync/ingest.rs | 2 +- .../heavy-lifting/src/job_system/report.rs | 2 +- core/src/library/statistics.rs | 2 +- core/src/location/manager/watcher/mod.rs | 2 + crates/actors/src/lib.rs | 6 +- crates/cloud-api/Cargo.toml | 21 - crates/cloud-api/src/auth.rs | 17 - crates/cloud-api/src/lib.rs | 636 ------------------ crates/p2p/crates/tunnel/src/lib.rs | 2 +- 
crates/task-system/src/task.rs | 6 +- rust-toolchain.toml | 2 +- 15 files changed, 22 insertions(+), 691 deletions(-) delete mode 100644 crates/cloud-api/Cargo.toml delete mode 100644 crates/cloud-api/src/auth.rs delete mode 100644 crates/cloud-api/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index e68b5755859f9b4ca3f837e3a2ee416845e1b96c..a6fe702d8278bf9a8209de7b6e5e7ee86af72b67 100644 GIT binary patch delta 36 scmex#US!odk%kt=7N#xCS?${uJD5#art?l`=4&tMVcuTS!%{aJ01z_`+5i9m delta 88 zcmV-e0H^<|&J^;`6o7;QgaWh!YKH@1aA}tjp93eCa)tvH4s&p0A}k6ZB6DOda58Y0 uA$|iNmk*Q!3YWm20~nY4n*#}#zo`Qlw_=9_JOlz^aEFhJ1GkTh1e=>|^B-0K diff --git a/core/Cargo.toml b/core/Cargo.toml index 224f8d6bf..6ed9d6eea 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -30,7 +30,6 @@ sd-core-sync = { path = "./crates/sync" } # Spacedrive Sub-crates sd-actors = { path = "../crates/actors" } sd-ai = { path = "../crates/ai", optional = true } -sd-cloud-api = { path = "../crates/cloud-api" } sd-crypto = { path = "../crates/crypto" } sd-ffmpeg = { path = "../crates/ffmpeg", optional = true } sd-file-ext = { path = "../crates/file-ext" } diff --git a/core/crates/cloud-services/src/client.rs b/core/crates/cloud-services/src/client.rs index b067aa0ee..cfb66784c 100644 --- a/core/crates/cloud-services/src/client.rs +++ b/core/crates/cloud-services/src/client.rs @@ -18,7 +18,7 @@ use super::{ error::Error, key_manager::KeyManager, p2p::CloudP2P, token_refresher::TokenRefresher, }; -#[derive(Debug, Default)] +#[derive(Debug, Default, Clone)] enum ClientState { #[default] NotConnected, @@ -190,7 +190,8 @@ impl CloudServices { _message: &[u8], _cert: &rustls::pki_types::CertificateDer<'_>, _dss: &rustls::DigitallySignedStruct, - ) -> Result { + ) -> Result + { Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) } @@ -199,7 +200,8 @@ impl CloudServices { _message: &[u8], _cert: &rustls::pki_types::CertificateDer<'_>, _dss: &rustls::DigitallySignedStruct, - ) -> Result { + ) -> Result + { 
Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) } @@ -268,8 +270,8 @@ impl CloudServices { /// Available routes documented in /// [`sd_cloud_schema::Service`](https://github.com/spacedriveapp/cloud-services-schema). pub async fn client(&self) -> Result, Service>, Error> { - if let ClientState::Connected(client) = &*self.client_state.read().await { - return Ok(client.clone()); + if let ClientState::Connected(client) = { self.client_state.read().await.clone() } { + return Ok(client); } // If we're not connected, we need to try to connect. diff --git a/core/crates/cloud-services/src/key_manager/key_store.rs b/core/crates/cloud-services/src/key_manager/key_store.rs index 087eff70b..922597a73 100644 --- a/core/crates/cloud-services/src/key_manager/key_store.rs +++ b/core/crates/cloud-services/src/key_manager/key_store.rs @@ -33,7 +33,7 @@ pub struct KeyStore { } impl KeyStore { - pub fn new(iroh_secret_key: IrohSecretKey) -> Self { + pub const fn new(iroh_secret_key: IrohSecretKey) -> Self { Self { iroh_secret_key, keys: BTreeMap::new(), diff --git a/core/crates/cloud-services/src/sync/ingest.rs b/core/crates/cloud-services/src/sync/ingest.rs index 07be4f76a..cabede505 100644 --- a/core/crates/cloud-services/src/sync/ingest.rs +++ b/core/crates/cloud-services/src/sync/ingest.rs @@ -82,7 +82,7 @@ enum IngestStatus { } impl Ingester { - pub fn new( + pub const fn new( sync: SyncManager, ingest_notify: Arc, active: Arc, diff --git a/core/crates/heavy-lifting/src/job_system/report.rs b/core/crates/heavy-lifting/src/job_system/report.rs index 359bc4496..d0e5f0108 100644 --- a/core/crates/heavy-lifting/src/job_system/report.rs +++ b/core/crates/heavy-lifting/src/job_system/report.rs @@ -402,7 +402,7 @@ impl ReportBuilder { } #[must_use] - pub fn new(id: JobId, name: JobName) -> Self { + pub const fn new(id: JobId, name: JobName) -> Self { Self { id, name, diff --git a/core/src/library/statistics.rs b/core/src/library/statistics.rs index e8c757eba..f6fc4422e 
100644 --- a/core/src/library/statistics.rs +++ b/core/src/library/statistics.rs @@ -47,7 +47,7 @@ pub async fn update_library_statistics( node.config .data_directory() .join("libraries") - .join(&format!("{}.db", library.id)), + .join(format!("{}.db", library.id)), ) .await .unwrap_or(0); diff --git a/core/src/location/manager/watcher/mod.rs b/core/src/location/manager/watcher/mod.rs index 48935f4d3..81b70ef87 100644 --- a/core/src/location/manager/watcher/mod.rs +++ b/core/src/location/manager/watcher/mod.rs @@ -67,6 +67,8 @@ type Handler = ios::EventHandler; pub(super) type IgnorePath = (PathBuf, bool); type INode = u64; + +#[cfg(any(target_os = "ios", target_os = "macos", target_os = "windows"))] type InstantAndPath = (Instant, PathBuf); const ONE_SECOND: Duration = Duration::from_secs(1); diff --git a/crates/actors/src/lib.rs b/crates/actors/src/lib.rs index 1666c974d..0eaa0724c 100644 --- a/crates/actors/src/lib.rs +++ b/crates/actors/src/lib.rs @@ -169,7 +169,8 @@ impl ActorsCollection { #[instrument(skip(self))] pub async fn start(&self, identifier: Id) { - if let Some(actor) = self.actors_map.write().await.get_mut(&identifier) { + let mut actors_map = self.actors_map.write().await; + if let Some(actor) = actors_map.get_mut(&identifier) { if actor.is_running.load(Ordering::Acquire) { warn!("Actor already running!"); return; @@ -223,7 +224,8 @@ impl ActorsCollection { #[instrument(skip(self))] pub async fn stop(&self, identifier: Id) { - if let Some(actor) = self.actors_map.write().await.get_mut(&identifier) { + let mut actors_map = self.actors_map.write().await; + if let Some(actor) = actors_map.get_mut(&identifier) { if !actor.is_running.load(Ordering::Acquire) { warn!("Actor already stopped!"); return; diff --git a/crates/cloud-api/Cargo.toml b/crates/cloud-api/Cargo.toml deleted file mode 100644 index 491b2fe7c..000000000 --- a/crates/cloud-api/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "sd-cloud-api" -version = "0.1.0" - 
-edition.workspace = true -license.workspace = true -repository.workspace = true - -[dependencies] -# Spacedrive Sub-crates -sd-p2p = { path = "../p2p" } - -# Workspace dependencies -reqwest = { workspace = true, features = ["native-tls-vendored"] } -rspc = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -specta = { workspace = true } -thiserror = { workspace = true } -tracing = { workspace = true } -uuid = { workspace = true } diff --git a/crates/cloud-api/src/auth.rs b/crates/cloud-api/src/auth.rs deleted file mode 100644 index f8d879641..000000000 --- a/crates/cloud-api/src/auth.rs +++ /dev/null @@ -1,17 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct OAuthToken { - pub access_token: String, - pub refresh_token: String, - pub token_type: String, - pub expires_in: i32, -} - -impl OAuthToken { - pub fn to_header(&self) -> String { - format!("{} {}", self.token_type, self.access_token) - } -} - -pub const DEVICE_CODE_URN: &str = "urn:ietf:params:oauth:grant-type:device_code"; diff --git a/crates/cloud-api/src/lib.rs b/crates/cloud-api/src/lib.rs deleted file mode 100644 index 6880f4ab6..000000000 --- a/crates/cloud-api/src/lib.rs +++ /dev/null @@ -1,636 +0,0 @@ -pub mod auth; - -use std::{collections::HashMap, future::Future, sync::Arc}; - -use auth::OAuthToken; -use sd_p2p::RemoteIdentity; -use serde::{Deserialize, Serialize}; -use serde_json::json; -use specta::Type; -use uuid::Uuid; - -pub struct RequestConfig { - pub client: reqwest::Client, - pub api_url: String, - pub auth_token: Option, -} - -pub trait RequestConfigProvider { - fn get_request_config(self: &Arc) -> impl Future + Send; -} - -#[derive(thiserror::Error, Debug)] -#[error("{0}")] -pub struct Error(String); - -impl From for rspc::Error { - fn from(e: Error) -> rspc::Error { - rspc::Error::new(rspc::ErrorCode::InternalServerError, e.0) - } -} - -#[derive(Serialize, Deserialize, Debug, Type)] 
-#[serde(rename_all = "camelCase")] -#[specta(rename = "CloudLibrary")] -pub struct Library { - pub id: String, - pub uuid: Uuid, - pub name: String, - pub instances: Vec, - pub owner_id: String, -} - -#[derive(Serialize, Deserialize, Debug, Type)] -#[serde(rename_all = "camelCase")] -#[specta(rename = "CloudInstance")] -pub struct Instance { - pub id: String, - pub uuid: Uuid, - pub identity: RemoteIdentity, - #[serde(rename = "nodeId")] - pub node_id: Uuid, - pub node_remote_identity: String, - pub metadata: HashMap, -} - -#[derive(Serialize, Deserialize, Debug, Type)] -#[serde(rename_all = "camelCase")] -#[specta(rename = "CloudMessageCollection")] -pub struct MessageCollection { - pub instance_uuid: Uuid, - pub start_time: String, - pub end_time: String, - pub contents: String, -} - -trait WithAuth { - fn with_auth(self, token: OAuthToken) -> Self; -} - -impl WithAuth for reqwest::RequestBuilder { - fn with_auth(self, token: OAuthToken) -> Self { - self.header( - "authorization", - format!("{} {}", token.token_type, token.access_token), - ) - } -} - -pub mod feedback { - use super::*; - - pub use send::exec as send; - pub mod send { - use super::*; - - pub async fn exec(config: RequestConfig, message: String, emoji: u8) -> Result<(), Error> { - let mut req = config - .client - .post(format!("{}/api/v1/feedback", config.api_url)) - .json(&json!({ - "message": message, - "emoji": emoji, - })); - - if let Some(auth_token) = config.auth_token { - req = req.with_auth(auth_token); - } - - req.send() - .await - .and_then(|r| r.error_for_status()) - .map_err(|e| Error(e.to_string()))?; - - Ok(()) - } - } -} - -pub mod user { - use super::*; - - pub use me::exec as me; - pub mod me { - use super::*; - - #[derive(Serialize, Deserialize, Type)] - #[specta(inline)] - pub struct Response { - id: String, - email: String, - } - - pub async fn exec(config: RequestConfig) -> Result { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication 
required".to_string())); - }; - - config - .client - .get(&format!("{}/api/v1/user/me", config.api_url)) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string()))? - .json() - .await - .map_err(|e| Error(e.to_string())) - } - } -} - -pub mod library { - use super::*; - - pub use get::exec as get; - pub mod get { - use super::*; - - pub async fn exec(config: RequestConfig, library_id: Uuid) -> Result { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .get(&format!( - "{}/api/v1/libraries/{}", - config.api_url, library_id - )) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string()))? - .json() - .await - .map_err(|e| Error(e.to_string())) - } - - pub type Response = Option; - } - - pub use list::exec as list; - pub mod list { - use super::*; - - pub async fn exec(config: RequestConfig) -> Result { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .get(&format!("{}/api/v1/libraries", config.api_url)) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string()))? 
- .json() - .await - .map_err(|e| Error(e.to_string())) - } - - pub type Response = Vec; - } - - pub use create::exec as create; - pub mod create { - use super::*; - - #[derive(Debug, Deserialize)] - pub struct CreateResult { - pub id: String, - } - - #[allow(clippy::too_many_arguments)] - pub async fn exec( - config: RequestConfig, - library_id: Uuid, - name: &str, - instance_uuid: Uuid, - instance_identity: RemoteIdentity, - node_id: Uuid, - node_remote_identity: RemoteIdentity, - metadata: &HashMap, - ) -> Result { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .post(&format!( - "{}/api/v1/libraries/{}", - config.api_url, library_id - )) - .json(&json!({ - "name":name, - "instanceUuid": instance_uuid, - "instanceIdentity": instance_identity, - "nodeId": node_id, - "nodeRemoteIdentity": node_remote_identity, - "metadata": metadata, - })) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string()))? 
- .json() - .await - .map_err(|e| Error(e.to_string())) - } - } - - pub use update::exec as update; - pub mod update { - use super::*; - - pub async fn exec( - config: RequestConfig, - library_id: Uuid, - name: Option, - ) -> Result<(), Error> { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .patch(&format!( - "{}/api/v1/libraries/{}", - config.api_url, library_id - )) - .json(&json!({ - "name":name - })) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string())) - .map(|_| ()) - } - } - - pub use update_instance::exec as update_instance; - pub mod update_instance { - use super::*; - - pub async fn exec( - config: RequestConfig, - library_id: Uuid, - instance_id: Uuid, - node_id: Option, - node_remote_identity: Option, - metadata: Option>, - ) -> Result<(), Error> { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .patch(&format!( - "{}/api/v1/libraries/{}/{}", - config.api_url, library_id, instance_id - )) - .json(&json!({ - "nodeId": node_id, - "nodeRemoteIdentity": node_remote_identity, - "metadata": metadata, - })) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string())) - .map(|_| ()) - } - } - - pub use join::exec as join; - pub mod join { - use super::*; - - pub async fn exec( - config: RequestConfig, - library_id: Uuid, - instance_uuid: Uuid, - instance_identity: RemoteIdentity, - node_id: Uuid, - node_remote_identity: RemoteIdentity, - metadata: HashMap, - ) -> Result, Error> { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .post(&format!( - "{}/api/v1/libraries/{library_id}/instances/{instance_uuid}", - config.api_url - )) - .json(&json!({ - "instanceIdentity": instance_identity, - "nodeId": node_id, - "nodeRemoteIdentity": 
node_remote_identity, - "metadata": metadata, - })) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string()))? - .json() - .await - .map_err(|e| Error(e.to_string())) - } - } - - pub mod message_collections { - use super::*; - - pub use get::exec as get; - pub mod get { - use super::*; - use tracing::debug; - - #[derive(Serialize)] - #[serde(rename_all = "camelCase")] - pub struct InstanceTimestamp { - pub instance_uuid: Uuid, - pub from_time: String, - } - - pub async fn exec( - config: RequestConfig, - library_id: Uuid, - this_instance_uuid: Uuid, - timestamps: Vec, - ) -> Result { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - let res = config - .client - .post(&format!( - "{}/api/v1/libraries/{}/messageCollections/get", - config.api_url, library_id - )) - .json(&json!({ - "instanceUuid": this_instance_uuid, - "timestamps": timestamps - })) - .with_auth(auth_token) - .send() - .await; - - debug!("get message collections response: {:?}", res); - - match res { - Ok(response) => { - let status = response.status(); - let body = response.text().await.map_err(|e| Error(e.to_string()))?; - debug!("Response status: {}", status); - debug!("Response body: {}", body); - - // Attempt to parse the body as JSON - match serde_json::from_str::(&body) { - Ok(json) => Ok(json), - Err(e) => Err(Error(format!( - "error decoding response body: {}. 
Body: {}", - e, body - ))), - } - } - Err(e) => Err(Error(e.to_string())), - } - } - - pub type Response = Vec; - } - - pub use request_add::exec as request_add; - pub mod request_add { - use super::*; - use tracing::debug; - - #[derive(Deserialize, Debug)] - #[serde(rename_all = "camelCase")] - pub struct RequestAdd { - pub instance_uuid: Uuid, - pub from_time: Option, - // mutex key on the instance - pub key: String, - } - - pub async fn exec( - config: RequestConfig, - library_id: Uuid, - instances: Vec, - ) -> Result { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - let instances = instances - .into_iter() - .map(|i| json!({"instanceUuid": i })) - .collect::>(); - - let res = config - .client - .post(&format!( - "{}/api/v1/libraries/{}/messageCollections/requestAdd", - config.api_url, library_id - )) - .json(&json!({ "instances": instances })) - .with_auth(auth_token) - .send() - .await; - - debug!("request add response: {:?}", res); - - match res { - Ok(response) => { - let status = response.status(); - let body = response.text().await.map_err(|e| Error(e.to_string()))?; - debug!("Response status: {}", status); - debug!("Response body: {}", body); - - // Attempt to parse the body as JSON - match serde_json::from_str::(&body) { - Ok(json) => Ok(json), - Err(e) => Err(Error(format!( - "error decoding response body: {}. 
Body: {}", - e, body - ))), - } - } - Err(e) => Err(Error(e.to_string())), - } - } - - pub type Response = Vec; - } - - pub use do_add::exec as do_add; - pub mod do_add { - use super::*; - - #[derive(Serialize, Debug)] - #[serde(rename_all = "camelCase")] - pub struct Input { - pub uuid: Uuid, - pub key: String, - pub start_time: String, - pub end_time: String, - pub contents: String, - pub ops_count: usize, - } - - pub async fn exec( - config: RequestConfig, - library_id: Uuid, - instances: Vec, - ) -> Result<(), Error> { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .post(&format!( - "{}/api/v1/libraries/{}/messageCollections/doAdd", - config.api_url, library_id - )) - .json(&json!({ "instances": instances })) - .with_auth(auth_token) - .send() - .await - .and_then(|r| r.error_for_status()) - .map_err(|e| Error(e.to_string()))?; - - Ok(()) - } - } - } -} - -#[derive(Type, Serialize, Deserialize)] -#[specta(rename = "Core_CloudLocation")] -pub struct CloudLocation { - id: String, - name: String, -} - -pub mod locations { - use super::*; - - pub use list::exec as list; - pub mod list { - use super::*; - - pub async fn exec(config: RequestConfig) -> Result { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .get(&format!("{}/api/v1/locations", config.api_url)) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string()))? 
- .json() - .await - .map_err(|e| Error(e.to_string())) - } - - pub type Response = Vec; - } - - pub use create::exec as create; - pub mod create { - use super::*; - - pub async fn exec(config: RequestConfig, name: String) -> Result { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .post(&format!("{}/api/v1/locations", config.api_url)) - .json(&json!({ - "name": name, - })) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string()))? - .json() - .await - .map_err(|e| Error(e.to_string())) - } - - pub type Response = CloudLocation; - } - - pub use remove::exec as remove; - pub mod remove { - use super::*; - - pub async fn exec(config: RequestConfig, id: String) -> Result { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .post(&format!("{}/api/v1/locations/delete", config.api_url)) - .json(&json!({ - "id": id, - })) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string()))? - .json() - .await - .map_err(|e| Error(e.to_string())) - } - - pub type Response = CloudLocation; - } - - pub use authorize::exec as authorize; - pub mod authorize { - use super::*; - - pub async fn exec(config: RequestConfig, id: String) -> Result { - let Some(auth_token) = config.auth_token else { - return Err(Error("Authentication required".to_string())); - }; - - config - .client - .post(&format!("{}/api/v1/locations/authorize", config.api_url)) - .json(&json!({ "id": id })) - .with_auth(auth_token) - .send() - .await - .map_err(|e| Error(e.to_string()))? 
- .json() - .await - .map_err(|e| Error(e.to_string())) - } - - #[derive(Debug, Clone, Type, Deserialize)] - pub struct Response { - pub access_key_id: String, - pub secret_access_key: String, - pub session_token: String, - } - } -} diff --git a/crates/p2p/crates/tunnel/src/lib.rs b/crates/p2p/crates/tunnel/src/lib.rs index df7706255..482c736c7 100644 --- a/crates/p2p/crates/tunnel/src/lib.rs +++ b/crates/p2p/crates/tunnel/src/lib.rs @@ -52,7 +52,7 @@ impl Tunnel { library_identity: &Identity, ) -> Result { stream - .write_all(&[b'T']) + .write_all(b"T") .await .map_err(|_| TunnelError::DiscriminatorWriteError)?; diff --git a/crates/task-system/src/task.rs b/crates/task-system/src/task.rs index 8b0ed2956..1b345e812 100644 --- a/crates/task-system/src/task.rs +++ b/crates/task-system/src/task.rs @@ -258,7 +258,7 @@ impl Drop for Interrupter { } impl Interrupter { - pub(crate) fn new(interrupt_tx: chan::Receiver) -> Self { + pub(crate) const fn new(interrupt_tx: chan::Receiver) -> Self { Self { interrupt_rx: interrupt_tx, } @@ -659,7 +659,7 @@ pub struct TaskWorktable { } impl TaskWorktable { - pub fn new(worker_id: WorkerId, interrupt_tx: chan::Sender) -> Self { + pub const fn new(worker_id: WorkerId, interrupt_tx: chan::Sender) -> Self { Self { started: AtomicBool::new(false), is_running: AtomicBool::new(false), @@ -899,7 +899,7 @@ pub struct PanicOnSenderDrop { } impl PanicOnSenderDrop { - pub fn new( + pub const fn new( task_id: TaskId, done_tx: oneshot::Sender, SystemError>>, ) -> Self { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index a56a283d2..1de01fa45 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.80.1" +channel = "1.81.0" From d180261ca5a93388486742e8f921e895e9ec26a4 Mon Sep 17 00:00:00 2001 From: ameer2468 <33054370+ameer2468@users.noreply.github.com> Date: Fri, 27 Sep 2024 15:44:40 +0300 Subject: [PATCH 148/218] email login ui --- .../settings/client/account/index.tsx | 4 +- 
interface/components/Authentication.tsx | 4 +- interface/components/Login.tsx | 155 +- interface/components/Register.tsx | 12 +- interface/locales/ar/common.json | 13 +- interface/locales/be/common.json | 3 + interface/locales/cs/common.json | 1615 +++++++++-------- interface/locales/de/common.json | 13 +- interface/locales/en/common.json | 3 + interface/locales/es/common.json | 13 +- interface/locales/fr/common.json | 13 +- interface/locales/it/common.json | 13 +- interface/locales/ja/common.json | 15 +- interface/locales/nl/common.json | 13 +- interface/locales/ru/common.json | 3 + interface/locales/tr/common.json | 13 +- interface/locales/zh-CN/common.json | 1467 +++++++-------- interface/locales/zh-TW/common.json | 13 +- 18 files changed, 1790 insertions(+), 1595 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index 2f002a710..b4aff9ee8 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -1,8 +1,6 @@ import clsx from 'clsx'; import { useEffect, useState } from 'react'; -import Session, { signOut } from 'supertokens-web-js/recipe/session'; -import { auth, useBridgeMutation, useBridgeQuery, useFeatureFlag } from '@sd/client'; -import { Button, Input, toast } from '@sd/ui'; +import { useBridgeMutation } from '@sd/client'; import { Authentication } from '~/components'; import { useLocale } from '~/hooks'; import { AUTH_SERVER_URL, getTokens } from '~/util'; diff --git a/interface/components/Authentication.tsx b/interface/components/Authentication.tsx index c1697c987..89a4c6415 100644 --- a/interface/components/Authentication.tsx +++ b/interface/components/Authentication.tsx @@ -89,7 +89,7 @@ export const Authentication = ({ }; return ( - +
{AccountTabs.map((text) => (
>; cloudBootstrap: UseMutationResult; // Cloud bootstrap mutation }) => { + const [continueWithEmail, setContinueWithEmail] = useState(false); + + return ( + <> + {continueWithEmail ? ( + + ) : ( + + )} + + ); +}; + +interface LoginProps { + reload: Dispatch>; + cloudBootstrap: UseMutationResult; // Cloud bootstrap mutation + setContinueWithEmail: Dispatch>; +} + +const LoginForm = ({ reload, cloudBootstrap, setContinueWithEmail }: LoginProps) => { const { t } = useLocale(); const [showPassword, setShowPassword] = useState(false); const form = useZodForm({ @@ -145,6 +184,7 @@ const Login = ({ )}
+ + +
+ +

Or

+ +
+ + + + ); +}; + +interface Props { + setContinueWithEmail: Dispatch>; + reload: Dispatch>; + cloudBootstrap: UseMutationResult; // Cloud bootstrap mutation +} + +const ContinueWithEmail = ({ setContinueWithEmail, reload, cloudBootstrap }: Props) => { + const { t } = useLocale(); + const ContinueWithEmailForm = useZodForm({ + schema: ContinueWithEmailSchema, + defaultValues: { + email: '' + } + }); + const [step, setStep] = useState(1); + + return ( +
{ + //await code here to send email + setStep((step) => step + 1); + })} + className="w-full" + form={ContinueWithEmailForm} + > + {step === 1 ? ( + <> +
+ + ( + + )} + /> + {ContinueWithEmailForm.formState.errors.email && ( +

+ {ContinueWithEmailForm.formState.errors.email.message} +

+ )} +
+ + + ) : ( +
+

Check your email

+
+

{t('login_link_sent')}

+

+ {t('check_your_inbox')}{' '} + + {ContinueWithEmailForm.getValues().email} + +

+
+
+ )} +
); }; diff --git a/interface/components/Register.tsx b/interface/components/Register.tsx index 2ca342cab..0053f03b0 100644 --- a/interface/components/Register.tsx +++ b/interface/components/Register.tsx @@ -10,9 +10,15 @@ import ShowPassword from './ShowPassword'; const RegisterSchema = z .object({ - email: z.string().email(), - password: z.string().min(6), - confirmPassword: z.string().min(6) + email: z.string().email({ + message: 'Email is required' + }), + password: z.string().min(6, { + message: 'Password must be at least 6 characters' + }), + confirmPassword: z.string().min(6, { + message: 'Password must be at least 6 characters' + }) }) .refine((data) => data.password === data.confirmPassword, { message: 'Passwords do not match', diff --git a/interface/locales/ar/common.json b/interface/locales/ar/common.json index 9e6415cf9..9eab6a5d4 100644 --- a/interface/locales/ar/common.json +++ b/interface/locales/ar/common.json @@ -1,4 +1,6 @@ { + "Connect": "اتصل", + "Connecting": "يتم التوصيل", "about": "حول", "about_vision_text": "العديد منا لديه حسابات سحابية متعددة، ومحركات أقراص غير محفوظة وبيانات معرضة للخطر من الفقدان. نعتمد على خدمات السحاب مثل Google Photos و iCloud ، ولكننا مقيدين بسعة محدودة وتقريبًا لا توجد توافقية بين الخدمات وأنظمة التشغيل. لا ينبغي أن تكون ألبومات الصور محبوسة في نظام الجهاز أو تستخدم لجمع البيانات الإعلانية. يجب أن تكون غير معتمدة على نظام التشغيل ، دائمة ومملوكة شخصيًا. 
البيانات التي ننشئها هي إرثنا ، والذي سيستمر لفترة طويلة بعد وفاتنا - التكنولوجيا مفتوحة المصدر هي الطريقة الوحيدة لضمان السيطرة المطلقة على البيانات التي تحدد حياتنا ، بحجم غير محدود.", "about_vision_title": "الرؤية", @@ -43,6 +45,7 @@ "audio_preview_not_supported": "معاينة الصوت غير مدعومة.", "auto": "آلي", "back": "رجوع", + "back_to_login": "العودة لتسجيل الدخول", "backfill_sync": "عمليات مزامنة الردم", "backfill_sync_description": "تم إيقاف المكتبة مؤقتًا حتى اكتمال عملية الردم", "backups": "نسخ احتياطية", @@ -60,6 +63,7 @@ "changelog": "سجل التغييرات", "changelog_page_description": "انظر إلى الميزات الجديدة الرائعة التي نقوم بإضافتها", "changelog_page_title": "سجل التغييرات", + "check_your_inbox": "يرجى التحقق من صندوق الوارد الخاص بك على", "checksum": "التحقق من الصحة", "clear_finished_jobs": "مسح الوظائف المنتهية", "click_to_hide": "انقر لإخفاء", @@ -86,14 +90,12 @@ "config": "Config", "configure_location": "تكوين الموقع", "confirm": "Confirm", - "Connect": "اتصل", "connect_cloud": "Connect a cloud", "connect_cloud_description": "Connect your cloud accounts to Spacedrive.", "connect_device": "Connect a device", "connect_device_description": "Spacedrive works best on all your devices.", "connect_library_to_cloud": "قم بتوصيل المكتبة بـ Spacedrive Cloud", "connected": "متصل", - "Connecting": "يتم التوصيل", "connecting_library_to_cloud": "جارٍ توصيل المكتبة بـ Spacedrive Cloud...", "contacts": "جهات الاتصال", "contacts_description": "إدارة جهات الاتصال الخاصة بك في Spacedrive.", @@ -293,8 +295,8 @@ "general_settings": "إعدادات عامة", "general_settings_description": "إعدادات عامة متعلقة بهذا العميل.", "general_shortcut_description": "اختصارات الاستخدام العام", - "generate_checksums": "إنشاء التحقق من الصحة", "generatePreviewMedia_label": "إنشاء وسائط المعاينة لهذا الموقع", + "generate_checksums": "إنشاء التحقق من الصحة", "gitignore": "جيت تجاهل", "glob_description": "الكرة الأرضية (على سبيل المثال، **/.git)", "go_back": "العودة", @@ -439,6 +441,7 @@ "log_out": 
"تسجيل الخروج", "logged_in_as": "تم تسجيل الدخول كـ {{email}}", "logging_in": "جار تسجيل الدخول...", + "login_link_sent": "لقد أرسلنا رابط تسجيل دخول مؤقت.", "logout": "تسجيل الخروج", "manage_library": "إدارة المكتبة", "managed": "مُدار", @@ -682,10 +685,10 @@ "switch_to_next_tab": "التبديل إلى التبويب التالي", "switch_to_previous_tab": "التبديل إلى التبويب السابق", "sync": "مزامنة", + "syncPreviewMedia_label": "مزامنة وسائط المعاينة لهذا الموقع مع أجهزتك", "sync_description": "إدارة كيفية مزامنة Spacedrive.", "sync_with_library": "مزامنة مع المكتبة", "sync_with_library_description": "إذا تم تمكينها ، ستتم مزامنة اختصارات المفاتيح الخاصة بك مع المكتبة ، وإلا فسيتم تطبيقها فقط على هذا العميل.", - "syncPreviewMedia_label": "مزامنة وسائط المعاينة لهذا الموقع مع أجهزتك", "system": "System", "tag": "Tag", "tag_few": "Tags", @@ -764,4 +767,4 @@ "zoom": "تكبير", "zoom_in": "تكبير", "zoom_out": "تصغير" -} \ No newline at end of file +} diff --git a/interface/locales/be/common.json b/interface/locales/be/common.json index 02288ebd4..9bceff856 100644 --- a/interface/locales/be/common.json +++ b/interface/locales/be/common.json @@ -53,6 +53,7 @@ "audio_preview_not_supported": "Папярэдні прагляд аўдыя не падтрымваецца.", "auto": "Аўто", "back": "Назад", + "back_to_login": "Вярнуцца да ўваходу", "backfill_sync": "Аперацыі поўнай сінхранізацыі", "backfill_sync_description": "Праца бібліятэкі прыпынена да завяршэння сінхранізацыі", "backups": "Рэз. 
копіі", @@ -75,6 +76,7 @@ "changelog": "Што новага", "changelog_page_description": "Даведайцеся, якія новыя магчымасці мы дадалі", "changelog_page_title": "Спіс змен", + "check_your_inbox": "Праверце паштовую скрыню па адрасе", "checksum": "Кантрольная сума", "clear_finished_jobs": "Ачысціць скончаныя заданні", "click_to_hide": "Націсніце, каб схаваць", @@ -484,6 +486,7 @@ "log_out": "Выйсці з сістэмы", "logged_in_as": "Увайшлі ў сістэму як {{email}}", "logging_in": "Уваход у сістэму...", + "login_link_sent": "Мы адправілі часовую спасылку для ўваходу.", "logout": "Выйсці", "manage_library": "Кіраванне бібліятэкай", "managed": "Кіраваны", diff --git a/interface/locales/cs/common.json b/interface/locales/cs/common.json index c455b88b9..c5873a0fa 100644 --- a/interface/locales/cs/common.json +++ b/interface/locales/cs/common.json @@ -1,806 +1,809 @@ -{ - "about": "O nás", - "about_vision_text": "Mnozí z nás mají více cloudových účtů, disky, které nejsou zálohovány, a data ohrožená ztrátou. Závisíme na cloudových službách jako Google Photos a iCloud, ale jsme uzamčeni s omezenou kapacitou a téměř nulovou interoperabilitou mezi službami a operačními systémy. Fotoalba by neměla být uvězněna v ekosystému zařízení nebo využívána pro reklamní data. Měla by být nezávislá na OS, trvalá a osobně vlastněná. 
Data, která vytváříme, jsou naším odkazem, který nás daleko přežije – open source technologie je jediným způsobem, jak zajistit, že si udržíme absolutní kontrolu nad daty, která definují naše životy, v neomezeném měřítku.", - "about_vision_title": "Vize", - "accept": "Přijmout", - "accept_files": "Přijmout soubory", - "accessed": "Přistoupeno", - "account": "Účet", - "actions": "Akce", - "add": "Přidat", - "Add Device Description": "Naskenujte QR kód nebo ověřte UUID svého zařízení, abyste přidali zařízení.", - "add_device": "Přidat zařízení", - "add_file_extension_rule": "Přidat příponu souboru k aktuálnímu pravidlu", - "add_filter": "Přidat filtr", - "add_library": "Přidat knihovnu", - "add_location": "Přidat umístění", - "add_location_description": "Zlepšete svůj zážitek ze Spacedrive přidáním svých oblíbených umístění do osobní knihovny, pro bezproblémovou a efektivní správu souborů.", - "add_location_overview_description": "Připojte místní cestu, svazek nebo síťové umístění ke Spacedrive.", - "add_location_tooltip": "Přidat cestu jako indexované umístění", - "add_locations": "Přidat umístění", - "add_tag": "Přidat štítek", - "added_location": "Přidáno umístění {{name}}", - "adding_location": "Přidávání umístění {{name}}", - "advanced": "Pokročilé", - "advanced_settings": "Pokročilé nastavení", - "album": "Album", - "album_one": "Album", - "album_other": "Alba", - "alias": "Přezdívka", - "all_jobs_have_been_cleared": "Všechny dokončené úkoly byly vymazány.", - "alpha_release_description": "Jsme potěšeni, že můžete vyzkoušet Spacedrive, nyní v alfa verzi, která představuje vzrušující nové funkce. Jak je tomu u každého počátečního vydání, tato verze může obsahovat nějaké chyby. Laskavě vás žádáme, abyste nám hlásili jakékoli problémy, na které narazíte, na našem Discord kanálu. 
Vaše cenná zpětná vazba výrazně přispěje ke zlepšení uživatelského zážitku.", - "alpha_release_title": "Alfa verze", - "app_crashed": "APLIKACE SELHALA", - "app_crashed_description": "Jsme za horizontem událostí...", - "appearance": "Vzhled", - "appearance_description": "Změňte vzhled svého klienta.", - "apply": "Použít", - "archive": "Archiv", - "archive_coming_soon": "Archivování umístění přichází brzy...", - "archive_info": "Extrahujte data z knihovny jako archiv, užitečné pro zachování struktury složek umístění.", - "archive_one": "Archiv", - "archive_other": "Archivy", - "are_you_sure": "Jste si jisti?", - "ascending": "Vzestupně", - "ask_spacedrive": "Zeptejte se Spacedrive", - "assign_tag": "Přiřadit štítek", - "assign_tags": "Přiřadit štítky", - "audio": "Audio", - "audio_preview_not_supported": "Náhled zvuku není podporován.", - "auto": "Automaticky", - "back": "Zpět", - "backfill_sync": "Doplňování synchronizačních operací", - "backfill_sync_description": "Knihovna je pozastavena, dokud se nedokončí doplňování", - "backups": "Zálohy", - "backups_description": "Spravujte své zálohy databáze Spacedrive.", - "bar_graph_info": "Přejeďte myší přes každý pruh a zobrazí se typ souboru. 
Dvojitým kliknutím přejděte.", - "bitrate": "Bitrate", - "blur_effects": "Efekty rozmazání", - "blur_effects_description": "Na některé komponenty bude aplikován efekt rozmazání.", - "book": "Kniha", - "book_one": "Kniha", - "book_other": "Knihy", - "calculating_library_statistics": "Výpočet statistik knihovny...", - "cancel": "Zrušit", - "cancel_selection": "Zrušit výběr", - "canceled": "Zrušeno", - "celcius": "Celsius", - "change": "Změnit", - "change_view_setting_description": "Změňte výchozí zobrazení průzkumníka", - "changelog": "Protokol změn", - "changelog_page_description": "Podívejte se, jaké skvělé nové funkce děláme", - "changelog_page_title": "Protokol změn", - "checksum": "Kontrolní součet", - "clear_finished_jobs": "Vyčistit dokončené úkoly", - "click_to_hide": "Klikněte pro skrytí", - "click_to_lock": "Klikněte pro uzamčení", - "client": "Klient", - "close": "Zavřít", - "close_command_palette": "Zavřít příkazovou paletu", - "close_current_tab": "Zavřít aktuální záložku", - "cloud": "Cloud", - "cloud_connect_description": "Chcete připojit svou knihovnu ke cloudu?", - "cloud_drives": "Cloudové disky", - "cloud_sync": "Cloudová synchronizace", - "cloud_sync_description": "Spravujte procesy, které synchronizují vaši knihovnu s Spacedrive Cloud", - "clouds": "Cloudy", - "code": "Kód", - "code_one": "Kód", - "code_other": "Kódy", - "collection": "Sbírka", - "collection_one": "Sbírka", - "collection_other": "Sbírky", - "color": "Barva", - "color_profile": "Barevný profil", - "color_space": "Barevný prostor", - "coming_soon": "Již brzy", - "completed": "Dokončeno", - "completed_with_errors": "Dokončeno s chybami", - "compress": "Komprimovat", - "config": "Konfigurace", - "config_one": "Konfigurace", - "config_other": "Konfigurace", - "configure_location": "Konfigurovat umístění", - "confirm": "Potvrdit", - "Connect": "Připojit", - "connect_cloud": "Připojit cloud", - "connect_cloud_description": "Připojte své cloudové účty ke Spacedrive.", - "connect_device": 
"Připojit zařízení", - "connect_device_description": "Spacedrive funguje nejlépe na všech vašich zařízeních.", - "connect_library_to_cloud": "Připojte knihovnu ke Spacedrive Cloud", - "connected": "Připojeno", - "Connecting": "Připojování", - "connecting_library_to_cloud": "Připojování knihovny ke Spacedrive Cloud...", - "contacts": "Kontakty", - "contacts_description": "Spravujte své kontakty ve Spacedrive.", - "contains": "obsahuje", - "content_id": "ID obsahu", - "continue": "Pokračovat", - "convert_to": "Převést na", - "coordinates": "Souřadnice", - "copied": "Zkopírováno", - "copy": "Kopírovat", - "copy_as_path": "Kopírovat jako cestu", - "copy_object": "Kopírovat objekt", - "copy_path_to_clipboard": "Kopírovat cestu do schránky", - "copy_success": "Položky zkopírovány", - "create": "Vytvořit", - "create_file_error": "Chyba při vytváření souboru", - "create_file_success": "Vytvořen nový soubor: {{name}}", - "create_folder_error": "Chyba při vytváření složky", - "create_folder_success": "Vytvořena nová složka: {{name}}", - "create_library": "Vytvořit knihovnu", - "create_library_description": "Knihovny jsou bezpečná databáze na zařízení. Vaše soubory zůstanou tam, kde jsou, knihovna je katalogizuje a ukládá všechna data související se Spacedrive.", - "create_location": "Vytvořit umístění", - "create_new_library": "Vytvořit novou knihovnu", - "create_new_library_description": "Knihovny jsou bezpečná databáze na zařízení. 
Vaše soubory zůstanou tam, kde jsou, knihovna je katalogizuje a ukládá všechna data související se Spacedrive.", - "create_new_tag": "Vytvořit nový štítek", - "create_new_tag_description": "Vyberte název a barvu.", - "create_tag": "Vytvořit štítek", - "created": "Vytvořeno", - "creating_library": "Vytváření knihovny...", - "creating_your_library": "Vytváření vaší knihovny", - "current": "Aktuální", - "current_directory": "Aktuální adresář", - "current_directory_with_descendants": "Aktuální adresář s podadresáři", - "custom": "Vlastní", - "cut": "Vyjmout", - "cut_object": "Vyjmout objekt", - "cut_success": "Položky vyjmuty", - "dark": "Tmavý", - "data_folder": "Složka s daty", - "database": "Databáze", - "database_one": "Databáze", - "database_other": "Databáze", - "date": "Datum", - "date_accessed": "Datum přístupu", - "date_created": "Datum vytvoření", - "date_indexed": "Datum indexování", - "date_modified": "Datum změny", - "date_taken": "Datum pořízení", - "date_time_format": "Formát data a času", - "date_time_format_description": "Vyberte formát data zobrazený ve Spacedrive", - "debug_mode": "Režim ladění", - "debug_mode_description": "Povolte další ladicí funkce v aplikaci.", - "default": "Výchozí", - "default_settings": "Výchozí nastavení", - "delete": "Smazat", - "delete_dialog_title": "Smazat {{prefix}} {{type}}", - "delete_forever": "Smazat navždy", - "delete_info": "To neodstraní skutečnou složku na disku. 
Náhledová média budou smazána.", - "delete_library": "Smazat knihovnu", - "delete_library_description": "To je trvalé, vaše soubory nebudou smazány, pouze knihovna Spacedrive.", - "delete_location": "Smazat umístění", - "delete_location_description": "Smazání umístění také odstraní všechny soubory s ním spojené z databáze Spacedrive, samotné soubory nebudou smazány.", - "delete_object": "Smazat objekt", - "delete_rule": "Smazat pravidlo", - "delete_rule_confirmation": "Opravdu chcete smazat toto pravidlo?", - "delete_tag": "Smazat štítek", - "delete_tag_description": "Opravdu chcete smazat tento štítek? To nelze vrátit a označené soubory budou odpojeny.", - "delete_warning": "To smaže váš {{type}}. Tento krok nelze vrátit zpět. Pokud jej přesunete do koše, můžete jej později obnovit. Pokud jej smažete navždy, bude navždy pryč.", - "descending": "Sestupně", - "description": "Popis", - "deselect": "Zrušit výběr", - "details": "Detaily", - "device": "Zařízení", - "devices": "Zařízení", - "devices_coming_soon_tooltip": "Již brzy! 
Tato alfa verze neobsahuje synchronizaci knihoven, bude brzy připravena.", - "dialog": "Dialog", - "dialog_shortcut_description": "K provádění akcí a operací", - "direction": "Směr", - "directories": "adresáře", - "directory": "adresář", - "disabled": "Zakázáno", - "disconnected": "Odpojeno", - "display_formats": "Formáty zobrazení", - "display_name": "Zobrazované jméno", - "distance": "Vzdálenost", - "do_the_thing": "Udělějte to", - "docker": "Docker", - "document": "Dokument", - "done": "Hotovo", - "dont_have_any": "Vypadá to, že žádné nemáte!", - "dont_show_again": "Nezobrazovat znovu", - "dotfile": "Skrytý soubor", - "dotfile_one": "Skrytý soubor", - "dotfile_other": "Skryté soubory", - "double_click_action": "Akce při dvojkliku", - "download": "Stáhnout", - "downloading_update": "Stahování aktualizace", - "drag_to_resize": "Přetáhněte pro změnu velikosti", - "duplicate": "Duplikovat", - "duplicate_object": "Duplikovat objekt", - "duplicate_success": "Položky duplikovány", - "edit": "Upravit", - "edit_library": "Upravit knihovnu", - "edit_location": "Upravit umístění", - "empty_file": "Prázdný soubor", - "enable_networking": "Povolit síť", - "enable_networking_description": "Umožněte vašemu uzlu komunikovat s dalšími uzly Spacedrive kolem vás.", - "enable_networking_description_required": "Požadováno pro synchronizaci knihoven nebo Spacedrop!", - "enable_relay": "Povolit relé", - "enable_relay_description": "Povolte reléový server, aby vaše zařízení mohla komunikovat přes veřejný internet.", - "enable_sync": "Povolit synchronizaci", - "enable_sync_description": "Generujte synchronizační operace pro všechna stávající data v této knihovně a nakonfigurujte Spacedrive tak, aby generoval synchronizační operace, když se něco stane v budoucnu.", - "enabled": "Povoleno", - "encrypt": "Šifrovat", - "encrypt_library": "Šifrovat knihovnu", - "encrypt_library_coming_soon": "Šifrování knihoven přichází brzy", - "encrypt_library_description": "Povolit šifrování této 
knihovny, to zašifruje pouze databázi Spacedrive, nikoli samotné soubory.", - "encrypted": "Šifrované", - "encrypted_one": "Šifrovaný", - "encrypted_other": "Šifrované", - "ends_with": "končí s", - "ephemeral_notice_browse": "Procházejte své soubory a složky přímo ze svého zařízení.", - "ephemeral_notice_consider_indexing": "Zvažte indexování svých místních umístění pro rychlejší a efektivnější průzkum.", - "equals": "rovná se", - "erase": "Vymazat", - "erase_a_file": "Vymazat soubor", - "erase_a_file_description": "Nastavte své nastavení vymazání.", - "error": "Chyba", - "error_loading_original_file": "Chyba při načítání původního souboru", - "error_message": "Chyba: {{error}}.", - "error_unknown": "Došlo k neznámé chybě.", - "executable": "Spustitelný soubor", - "executable_one": "Spustitelný soubor", - "executable_other": "Spustitelné soubory", - "expand": "Rozšířit", - "explorer": "Průzkumník", - "explorer_settings": "Nastavení průzkumníka", - "explorer_shortcut_description": "Pro navigaci a interakci se souborovým systémem", - "explorer_view": "Zobrazení průzkumníka", - "export": "Exportovat", - "export_library": "Exportovat knihovnu", - "export_library_coming_soon": "Export knihovny již brzy", - "export_library_description": "Exportujte tuto knihovnu do souboru.", - "extension": "Přípona", - "extensions": "Rozšíření", - "extensions_description": "Instalujte rozšíření, abyste rozšířili funkčnost tohoto klienta.", - "fahrenheit": "Fahrenheit", - "failed": "Neúspěšné", - "failed_to_add_location": "Nepodařilo se přidat umístění", - "failed_to_cancel_job": "Nepodařilo se zrušit úkol.", - "failed_to_clear_all_jobs": "Nepodařilo se vyčistit všechny úkoly.", - "failed_to_copy_file": "Nepodařilo se zkopírovat soubor", - "failed_to_copy_file_path": "Nepodařilo se zkopírovat cestu souboru", - "failed_to_cut_file": "Nepodařilo se vyjmout soubor", - "failed_to_delete_rule": "Nepodařilo se smazat pravidlo", - "failed_to_download_update": "Nepodařilo se stáhnout 
aktualizaci", - "failed_to_duplicate_file": "Nepodařilo se duplikovat soubor", - "failed_to_generate_checksum": "Nepodařilo se vygenerovat kontrolní součet", - "failed_to_generate_labels": "Nepodařilo se vygenerovat štítky", - "failed_to_generate_thumbnails": "Nepodařilo se vygenerovat miniatury", - "failed_to_load_tags": "Nepodařilo se načíst štítky", - "failed_to_open_file_body": "Nepodařilo se otevřít soubor kvůli chybě: {{error}}", - "failed_to_open_file_title": "Nepodařilo se otevřít soubor", - "failed_to_open_file_with": "Nepodařilo se otevřít soubor, s: {{data}}", - "failed_to_pause_job": "Nepodařilo se pozastavit úkol.", - "failed_to_reindex_location": "Nepodařilo se přeindexovat umístění", - "failed_to_remove_file_from_recents": "Nepodařilo se odstranit soubor z nedávných", - "failed_to_remove_job": "Nepodařilo se odstranit úkol.", - "failed_to_rename_file": "Nepodařilo se přejmenovat {{oldName}} na {{newName}}", - "failed_to_rescan_location": "Nepodařilo se znovu prohledat umístění", - "failed_to_resume_job": "Nepodařilo se obnovit úkol.", - "failed_to_update_location_settings": "Nepodařilo se aktualizovat nastavení umístění", - "favorite": "Oblíbené", - "favorites": "Oblíbené", - "feedback": "Zpětná vazba", - "feedback_is_required": "Zpětná vazba je povinná", - "feedback_login_description": "Přihlášení nám umožňuje reagovat na vaši zpětnou vazbu", - "feedback_placeholder": "Vaše zpětná vazba...", - "feedback_toast_error_message": "Při odesílání vaší zpětné vazby došlo k chybě. 
Prosím zkuste to znovu.", - "fetching_file_kind_statistics": "Načítání statistik typů souborů...", - "file_already_exist_in_this_location": "Soubor již v tomto umístění existuje", - "file_directory_name": "Název souboru/adresáře", - "file_extension_description": "Přípona souboru (např., .mp4, .jpg, .txt)", - "file_from": "Soubor {{file}} z {{name}}", - "file_indexing_rules": "Pravidla indexování souborů", - "file_many": "soubory", - "file_one": "soubor", - "file_other": "soubory", - "file_picker_not_supported": "Výběr souboru není na této platformě podporován", - "file_two": "soubory", - "file_zero": "žádné soubory", - "filter": "Filtr", - "filters": "Filtry", - "flash": "Blesk", - "folder": "Složka", - "folder_one": "Složka", - "folder_other": "Složky", - "font": "Písmo", - "font_one": "Písmo", - "font_other": "Písma", - "for_library": "Pro knihovnu {{name}}", - "forced": "Vynuceno", - "forward": "Vpřed", - "free_of": "volné z", - "from": "od", - "full_disk_access": "Plný přístup k disku", - "full_disk_access_description": "Abychom vám poskytli nejlepší zážitek, potřebujeme přístup k vašemu disku, abychom mohli indexovat vaše soubory. 
Vaše soubory jsou dostupné pouze vám.", - "full_reindex": "Plný reindex", - "full_reindex_info": "Proveďte úplné opětovné skenování tohoto umístění.", - "general": "Obecné", - "general_settings": "Obecná nastavení", - "general_settings_description": "Obecná nastavení týkající se tohoto klienta.", - "general_shortcut_description": "Obecné klávesové zkratky", - "generate_checksums": "Generovat kontrolní součty", - "generatePreviewMedia_label": "Generovat náhledová média pro toto umístění", - "gitignore": "Git Ignore", - "glob_description": "Glob (např., **/.git)", - "go_back": "Jít zpět", - "go_to_labels": "Přejít na štítky", - "go_to_location": "Přejít na umístění", - "go_to_overview": "Přejít na přehled", - "go_to_recents": "Přejít na nedávné", - "go_to_settings": "Přejít na nastavení", - "go_to_tag": "Přejít na štítek", - "got_it": "Rozumím", - "grid_gap": "Mezera", - "grid_view": "Zobrazení mřížky", - "grid_view_notice_description": "Získejte vizuální přehled o svých souborech pomocí zobrazení mřížky. 
Toto zobrazení zobrazuje vaše soubory a složky jako miniatury, což usnadňuje rychlé nalezení požadovaného souboru.", - "hidden": "Skrytý", - "hidden_label": "Zabraňuje zobrazení umístění a jeho obsahu v souhrnných kategoriích, vyhledávání a štítcích, pokud není povoleno \"Zobrazit skryté položky\".", - "hide_in_library_search": "Skrýt při vyhledávání v knihovně", - "hide_in_library_search_description": "Skrýt soubory s tímto štítkem z výsledků při hledání v celé knihovně.", - "hide_in_sidebar": "Skrýt v postranním panelu", - "hide_in_sidebar_description": "Zabraňte zobrazení tohoto štítku v postranním panelu aplikace.", - "hide_location_from_view": "Skrýt umístění a obsah z pohledu", - "hide_sidebar": "Skrýt postranní panel", - "home": "Domů", - "hosted_locations": "Hostovaná umístění", - "hosted_locations_description": "Rozšiřte své místní úložiště pomocí našeho cloudu!", - "icon_size": "Velikost ikony", - "image": "Obrázek", - "image_labeler_ai_model": "Model AI pro rozpoznávání štítků na obrázcích", - "image_labeler_ai_model_description": "Model používaný k rozpoznávání objektů na obrázcích. Větší modely jsou přesnější, ale pomalejší.", - "image_one": "Obrázek", - "image_other": "Obrázky", - "import": "Importovat", - "incoming_spacedrop": "Příchozí Spacedrop", - "indexed": "Indexováno", - "indexed_new_files": "Indexovány nové soubory {{name}}", - "indexer_rule_reject_allow_label": "Ve výchozím nastavení funguje pravidlo indexeru jako seznam odmítnutí, což má za následek vyloučení jakýchkoli souborů, které odpovídají jeho kritériím. 
Povolíte-li tuto možnost, změní se na seznam povolení, což umožní umístění indexovat pouze soubory, které splňují jeho specifikovaná pravidla.", - "indexer_rules": "Pravidla indexeru", - "indexer_rules_error": "Chyba při načítání pravidel indexeru", - "indexer_rules_info": "Pravidla indexeru vám umožňují specifikovat cesty k ignorování pomocí globů.", - "indexer_rules_not_available": "Žádná pravidla indexeru nejsou k dispozici", - "ingester": "Ingester", - "ingester_description": "Tento proces přijímá cloudové operace a odesílá je hlavnímu synchronizačnímu ingesteru.", - "injester_description": "Tento proces přijímá synchronizační operace z P2P připojení a Spacedrive Cloud a aplikuje je na knihovnu.", - "install": "Instalovat", - "install_update": "Instalovat aktualizaci", - "installed": "Nainstalováno", - "invalid_extension": "Neplatná přípona", - "invalid_glob": "Neplatný glob", - "invalid_name": "Neplatné jméno", - "invalid_path": "Neplatná cesta", - "ipv4_ipv6_listeners_error": "Chyba při vytváření posluchačů IPv4 a IPv6. Zkontrolujte nastavení firewallu!", - "ipv4_listeners_error": "Chyba při vytváření posluchačů IPv4. Zkontrolujte nastavení firewallu!", - "ipv6": "IPv6 networking", - "ipv6_description": "Povolit peer-to-peer komunikaci pomocí sítě IPv6", - "ipv6_listeners_error": "Chyba při vytváření posluchačů IPv6. Zkontrolujte nastavení firewallu!", - "is": "je", - "is_not": "není", - "item": "položka", - "item_size": "Velikost položky", - "item_with_count_one": "{{count}} položka", - "item_with_count_other": "{{count}} položky", - "items": "položky", - "job_error_description": "Úloha byla dokončena s chybami. Podrobnosti naleznete níže v protokolu chyb. 
Pokud potřebujete pomoc, kontaktujte podporu a poskytněte tuto chybu.", - "job_has_been_canceled": "Úloha byla zrušena.", - "job_has_been_paused": "Úloha byla pozastavena.", - "job_has_been_removed": "Úloha byla odstraněna.", - "job_has_been_resumed": "Úloha byla obnovena.", - "join": "Připojit se", - "join_discord": "Připojit se k Discordu", - "join_library": "Připojit se ke knihovně", - "join_library_description": "Knihovny jsou bezpečná databáze na zařízení. Vaše soubory zůstanou tam, kde jsou, knihovna je katalogizuje a ukládá všechna data související se Spacedrive.", - "joining": "Připojování", - "key": "Klíč", - "key_manager": "Správce klíčů", - "key_manager_description": "Vytvářejte šifrovací klíče, připojujte a odpojujte své klíče, abyste viděli soubory dešifrované v reálném čase.", - "key_one": "Klíč", - "key_other": "Klíče", - "keybinds": "Klávesové zkratky", - "keybinds_description": "Zobrazit a spravovat klávesové zkratky klienta", - "keys": "Klíče", - "kilometers": "Kilometry", - "kind": "Druh", - "kind_one": "Druh", - "kind_other": "Druhy", - "label": "Štítek", - "labels": "Štítky", - "language": "Jazyk", - "language_description": "Změňte jazyk rozhraní Spacedrive", - "learn_more": "Zjistit více", - "learn_more_about_telemetry": "Zjistit více o telemetrii", - "less": "méně", - "libraries": "Knihovny", - "libraries_description": "Databáze obsahuje všechna data knihovny a metadata souborů.", - "library": "Knihovna", - "library_bytes": "Velikost knihovny", - "library_bytes_description": "Celková velikost všech umístění ve vaší knihovně.", - "library_db_size": "Velikost indexu", - "library_db_size_description": "Velikost databáze knihovny.", - "library_name": "Název knihovny", - "library_overview": "Přehled knihovny", - "library_settings": "Nastavení knihovny", - "library_settings_description": "Obecná nastavení týkající se aktuálně aktivní knihovny.", - "light": "Světlo", - "link": "Odkaz", - "link_one": "Odkaz", - "link_other": "Odkazy", - "list_view": 
"Zobrazení seznamu", - "list_view_notice_description": "Snadno navigujte mezi svými soubory a složkami pomocí zobrazení seznamu. Toto zobrazení zobrazuje vaše soubory v jednoduchém, organizovaném seznamu, což vám umožňuje rychle najít a přistupovat k souborům, které potřebujete.", - "loading": "Načítání", - "local": "Místní", - "local_locations": "Místní umístění", - "local_node": "Místní uzel", - "location": "Umístění", - "location_added_successfully": "Umístění úspěšně přidáno.", - "location_connected_tooltip": "Umístění je sledováno pro změny", - "location_deleted_successfully": "Umístění úspěšně smazáno.", - "location_disconnected_tooltip": "Umístění není sledováno pro změny", - "location_display_name_info": "Název tohoto umístění, toto je, co bude zobrazeno v postranním panelu. Nepřejmenuje skutečnou složku na disku.", - "location_empty_notice_message": "Zde nebyly nalezeny žádné soubory", - "location_is_already_linked": "Umístění je již propojeno", - "location_one": "Umístění", - "location_other": "Umístění", - "location_path_info": "Cesta k tomuto umístění, zde budou soubory uloženy na disku.", - "location_type": "Typ umístění", - "location_type_managed": "Spacedrive pro vás soubory třídí. 
Pokud umístění není prázdné, bude vytvořena složka \"spacedrive\".", - "location_type_normal": "Obsah bude indexován, nové soubory nebudou automaticky tříděny.", - "location_type_replica": "Toto umístění je replikou jiného, jeho obsah bude automaticky synchronizován.", - "locations": "Umístění", - "locations_description": "Spravujte svá úložná umístění.", - "lock": "Zamknout", - "lock_sidebar": "Zamknout postranní panel", - "log_in": "Přihlásit se", - "log_in_with_browser": "Přihlásit se pomocí prohlížeče", - "log_out": "Odhlásit se", - "logged_in_as": "Přihlášen jako {{email}}", - "logging_in": "Přihlašování...", - "logout": "Odhlásit se", - "manage_library": "Spravovat knihovnu", - "managed": "Spravováno", - "manual_peers": "Ruční přidání peerů", - "manual_peers_description": "Přidejte peery ručně zadáním jejich IP adresy a portu.\nTo je užitečné, když automatické zjištění není možné.", - "media": "Média", - "media_view": "Zobrazení médií", - "media_view_context": "Kontext zobrazení médií", - "media_view_notice_description": "Snadno objevujte fotografie a videa, zobrazení médií zobrazí výsledky počínaje aktuálním umístěním včetně podadresářů.", - "meet_contributors_behind_spacedrive": "Seznamte se s přispěvateli za Spacedrive", - "meet_title": "Seznamte se s {{title}}", - "mesh": "Mesh", - "mesh_one": "Mesh", - "mesh_other": "Meshes", - "miles": "Míle", - "mode": "Režim", - "model": "Model", - "modified": "Změněno", - "more": "Více", - "more_actions": "Více akcí...", - "more_info": "Více informací", - "move_back_within_quick_preview": "Posunout zpět v rychlém náhledu", - "move_files": "Přesunout soubory", - "move_forward_within_quick_preview": "Posunout vpřed v rychlém náhledu", - "move_to_trash": "Přesunout do koše", - "my_sick_location": "Moje skvělé umístění", - "name": "Název", - "navigate_back": "Navigovat zpět", - "navigate_backwards": "Navigovat zpět", - "navigate_files_downwards": "Navigovat soubory směrem dolů", - "navigate_files_leftwards": "Navigovat 
soubory směrem doleva", - "navigate_files_rightwards": "Navigovat soubory směrem doprava", - "navigate_files_upwards": "Navigovat soubory směrem nahoru", - "navigate_forward": "Navigovat vpřed", - "navigate_forwards": "Navigovat vpřed", - "navigate_to_settings_page": "Navigovat na stránku nastavení", - "network": "Síť", - "network_page_description": "Další uzly Spacedrive ve vaší síti LAN se zde objeví spolu s vašimi výchozími síťovými připojeními OS.", - "network_settings": "Nastavení sítě", - "network_settings_advanced": "Pokročilý přehled sítě", - "network_settings_advanced_description": "Pokročilé informace o vašem aktuálním nastavení sítě.", - "network_settings_description": "Nastavení týkající se síťování a konektivity.", - "networking": "Síťování", - "networking_error": "Chyba při spuštění síťování!", - "networking_port": "Síťový port", - "networking_port_description": "Port pro peer-to-peer síťování Spacedrive ke komunikaci. Měli byste to nechat zakázáno, pokud nemáte restriktivní firewall. 
Nevystavujte na internet!", - "new": "Nový", - "new_folder": "Složka", - "new_library": "Nová knihovna", - "new_location": "Nové umístění", - "new_location_web_description": "Protože používáte webovou verzi Spacedrive, budete muset zadat absolutní URL adresu adresáře lokálního pro vzdálený uzel.", - "new_tab": "Nová záložka", - "new_tag": "Nový štítek", - "new_update_available": "Nová aktualizace k dispozici!", - "no_apps_available": "Žádné aplikace nejsou k dispozici", - "no_favorite_items": "Žádné oblíbené položky", - "no_git_files": "Žádné Git soubory", - "no_hidden_files": "Žádné skryté soubory", - "no_items_found": "Nebyly nalezeny žádné položky", - "no_jobs": "Žádné úlohy.", - "no_labels": "Žádné štítky", - "no_nodes_found": "Nebyly nalezeny žádné uzly Spacedrive.", - "no_search_selected": "Nebylo vybráno žádné hledání", - "no_system_files": "Žádné systémové soubory", - "no_tag_selected": "Nebyl vybrán žádný štítek", - "no_tags": "Žádné štítky", - "no_tags_description": "Nevytvořili jste žádné štítky", - "node_name": "Název uzlu", - "nodes": "Uzly", - "nodes_description": "Spravujte uzly připojené k této knihovně. Uzel je instance backendu Spacedrive, běžící na zařízení nebo serveru. 
Každý uzel nese kopii databáze a synchronizuje se prostřednictvím peer-to-peer připojení v reálném čase.", - "none": "Žádné", - "normal": "Normální", - "not_you": "Nejste to vy?", - "note": "Poznámka", - "nothing_selected": "Nic nevybráno", - "number_of_passes": "# průchodů", - "object": "Objekt", - "object_id": "ID objektu", - "off": "Vypnuto", - "offline": "Offline", - "on": "Zapnuto", - "online": "Online", - "only_images": "Pouze obrázky", - "open": "Otevřít", - "open_file": "Otevřít soubor", - "open_in_new_tab": "Otevřít v nové záložce", - "open_logs": "Otevřít protokoly", - "open_new_location_once_added": "Otevřít nové umístění po přidání", - "open_new_tab": "Otevřít novou záložku", - "open_object": "Otevřít objekt", - "open_object_from_quick_preview_in_native_file_manager": "Otevřít objekt z rychlého náhledu v nativním správci souborů", - "open_settings": "Otevřít nastavení", - "open_with": "Otevřít pomocí", - "opening_trash": "Otevírání koše", - "or": "NEBO", - "other": "Ostatní", - "overview": "Přehled", - "p2p_visibility": "Viditelnost P2P", - "p2p_visibility_contacts_only": "Pouze kontakty", - "p2p_visibility_description": "Nastavte, kdo může vidět vaše instalace Spacedrive.", - "p2p_visibility_disabled": "Zakázáno", - "p2p_visibility_everyone": "Všichni", - "package": "Balíček", - "package_one": "Balíček", - "package_other": "Balíčky", - "page": "Stránka", - "page_shortcut_description": "Různé stránky v aplikaci", - "pair": "Spárovat", - "pairing_with_node": "Párování s {{node}}", - "paste": "Vložit", - "paste_object": "Vložit objekt", - "paste_success": "Položky vloženy", - "path": "Cesta", - "path_copied_to_clipboard_description": "Cesta pro umístění {{location}} zkopírována do schránky.", - "path_copied_to_clipboard_title": "Cesta zkopírována do schránky", - "path_to_save_do_the_thing": "Cesta k uložení při kliknutí na 'Udělej věc':", - "paths": "Cesty", - "pause": "Pozastavit", - "paused": "Pozastaveno", - "peers": "Peerové", - "people": "Lidé", - 
"pin": "Připnout", - "please_select_emoji": "Prosím, vyberte emoji", - "prefix_a": "a", - "preview_media_bytes": "Náhled médií", - "preview_media_bytes_description": "Celková velikost všech náhledových mediálních souborů, jako jsou miniatury.", - "privacy": "Soukromí", - "privacy_description": "Spacedrive je postaven pro ochranu soukromí, proto jsme open source a místní. Takže jasně uvedeme, která data jsou s námi sdílena.", - "queued": "Ve frontě", - "quick_preview": "Rychlý náhled", - "quick_rescan_started": "Rychlé opětovné skenování spuštěno", - "quick_view": "Rychlý pohled", - "quickpreview_thumbnail_error_message": "Nepodařilo se načíst obrázek v plném rozlišení", - "quickpreview_thumbnail_error_tip": "Obrázek nebyl nalezen. Proto je zobrazena miniatura.", - "random": "Náhodně", - "receiver": "Přijímač", - "receiver_description": "Tento proces přijímá a ukládá operace ze Spacedrive Cloud.", - "recent_jobs": "Nedávné úlohy", - "recents": "Nedávné", - "recents_notice_message": "Nedávné jsou vytvořeny, když otevřete soubor.", - "regen_labels": "Obnovit štítky", - "regen_thumbnails": "Obnovit miniatury", - "regenerate_thumbs": "Obnovit miniatury", - "reindex": "Přeindexovat", - "reject": "Odmítnout", - "reject_files": "Odmítnout soubory", - "relay_listeners_error": "Chyba při vytváření posluchače relé. 
Zkontrolujte prosím nastavení firewallu!", - "reload": "Načíst znovu", - "remote_access": "Povolit vzdálený přístup", - "remote_access_description": "Povolit ostatním uzlům přímé připojení k tomuto uzlu.", - "remote_identity": "Vzdálená identita", - "remove": "Odstranit", - "remove_from_recents": "Odstranit z nedávných", - "rename": "Přejmenovat", - "rename_object": "Přejmenovat objekt", - "replica": "Replika", - "rescan": "Opětovné skenování", - "rescan_directory": "Opětovné skenování adresáře", - "rescan_location": "Opětovné skenování umístění", - "reset": "Resetovat", - "reset_and_quit": "Resetovat a ukončit aplikaci", - "reset_confirmation": "Opravdu chcete resetovat Spacedrive? Vaše databáze bude smazána.", - "reset_to_continue": "Zjistili jsme, že jste možná vytvořili svou knihovnu se starší verzí Spacedrive. Pro pokračování v používání aplikace ji prosím resetujte!", - "reset_warning": "ZTRATÍTE VŠECHNA EXISTUJÍCÍ DATA SPACEDRIVE!", - "resolution": "Rozlišení", - "resources": "Zdroje", - "restore": "Obnovit", - "resume": "Pokračovat", - "retry": "Zkusit znovu", - "reveal_in_native_file_manager": "Zobrazit v nativním správci souborů", - "revel_in_browser": "Zobrazit v {{browser}}", - "rules": "Pravidla", - "running": "Běží", - "save": "Uložit", - "save_changes": "Uložit změny", - "save_search": "Uložit hledání", - "save_spacedrop": "Uložit Spacedrop", - "saved_searches": "Uložená hledání", - "screenshot": "Snímek obrazovky", - "screenshot_one": "Snímek obrazovky", - "screenshot_other": "Snímky obrazovky", - "search": "Vyhledat", - "search_extensions": "Vyhledávání rozšíření", - "search_for_files_and_actions": "Hledat soubory a akce...", - "search_locations": "Vyhledávání umístění", - "secure_delete": "Bezpečně smazat", - "security": "Zabezpečení", - "security_description": "Udržujte svůj klient v bezpečí.", - "see_less": "Zobrazit méně", - "see_more": "Zobrazit více", - "select_library": "Vyberte Cloud knihovnu", - "send": "Odeslat", - "send_report": "Odeslat 
zprávu", - "sender": "Odesílatel", - "sender_description": "Tento proces odesílá synchronizační operace do Spacedrive Cloud.", - "settings": "Nastavení", - "setup": "Nastavit", - "share": "Sdílet", - "share_anonymous_usage": "Sdílet anonymní používání", - "share_anonymous_usage_description": "Sdílejte zcela anonymní telemetrická data, která pomáhají vývojářům zlepšovat aplikaci", - "share_bare_minimum": "Sdílet pouze minimum", - "share_bare_minimum_description": "Sdílet pouze to, že jsem aktivním uživatelem Spacedrive a několik technických detailů", - "sharing": "Sdílení", - "sharing_description": "Spravujte, kdo má přístup k vašim knihovnám.", - "show_details": "Zobrazit podrobnosti", - "show_hidden_files": "Zobrazit skryté soubory", - "show_inspector": "Zobrazit inspektor", - "show_object_size": "Zobrazit velikost objektu", - "show_path_bar": "Zobrazit panel cesty", - "show_slider": "Zobrazit posuvník", - "show_tags": "Zobrazit štítky", - "size": "Velikost", - "size_b": "B", - "size_bs": "B", - "size_gb": "GB", - "size_gbs": "GB", - "size_kb": "kB", - "size_kbs": "kB", - "size_mb": "MB", - "size_mbs": "MB", - "size_tb": "TB", - "size_tbs": "TB", - "skip_login": "Přeskočit přihlášení", - "software": "Software", - "sort_by": "Řadit podle", - "spacedrive_account": "Účet Spacedrive", - "spacedrive_cloud": "Spacedrive Cloud", - "spacedrive_cloud_description": "Spacedrive je vždy primárně lokální, ale v budoucnu nabídneme vlastní volitelné cloudové služby. 
Prozatím je ověřování použito pouze pro funkci zpětné vazby, jinak není vyžadováno.", - "spacedrop": "Viditelnost Spacedrop", - "spacedrop_a_file": "Spacedrop soubor", - "spacedrop_already_progress": "Spacedrop již probíhá", - "spacedrop_contacts_only": "Pouze kontakty", - "spacedrop_description": "Okamžité sdílení se zařízeními běžícími na Spacedrive ve vaší síti.", - "spacedrop_disabled": "Zakázáno", - "spacedrop_everyone": "Všichni", - "spacedrop_rejected": "Spacedrop odmítnut", - "square_thumbnails": "Čtvercové miniatury", - "star_on_github": "Dát hvězdu na GitHubu", - "start": "Start", - "starting": "Spouštění...", - "starts_with": "začíná na", - "stop": "Zastavit", - "stopping": "Zastavování...", - "submit": "Odeslat", - "success": "Úspěch", - "support": "Podpora", - "switch_to_grid_view": "Přepnout na zobrazení mřížky", - "switch_to_list_view": "Přepnout na zobrazení seznamu", - "switch_to_media_view": "Přepnout na zobrazení médií", - "switch_to_next_tab": "Přepnout na další záložku", - "switch_to_previous_tab": "Přepnout na předchozí záložku", - "sync": "Synchronizovat", - "sync_description": "Spravujte, jak Spacedrive synchronizuje.", - "sync_with_library": "Synchronizovat s knihovnou", - "sync_with_library_description": "Pokud je povoleno, vaše klávesové zkratky budou synchronizovány s knihovnou, jinak se použijí pouze na tomto klientu.", - "syncPreviewMedia_label": "Synchronizovat náhledová média pro toto umístění s vašimi zařízeními", - "system": "Systém", - "tag": "Štítek", - "tag_one": "Štítek", - "tag_other": "Štítky", - "tags": "Štítky", - "tags_bulk_assigned": "Přiřazený štítek \"{{tag_name}}\" k {{file_count}} $t(soubor, { \"count\": {{file_count}} }).", - "tags_bulk_failed_with_tag": "Nepodařilo se přiřadit štítek \"{{tag_name}}\" k {{file_count}} $t(soubor, { \"count\": {{file_count}} }): {{error_message}}", - "tags_bulk_failed_without_tag": "Nepodařilo se označit {{file_count}} $t(soubor, { \"count\": {{file_count}} }): {{error_message}}", - 
"tags_bulk_instructions": "Vyberte jeden nebo více souborů a stiskněte číslici pro přiřazení/odebrání odpovídajícího štítku.", - "tags_bulk_mode_active": "Režim přiřazení štítků je povolen.", - "tags_bulk_unassigned": "Odebraný štítek \"{{tag_name}}\" z {{file_count}} $t(soubor, { \"count\": {{file_count}} }).", - "tags_description": "Spravujte své štítky.", - "tags_notice_message": "K tomuto štítku nejsou přiřazeny žádné položky.", - "task": "úkol", - "task_one": "úkol", - "task_other": "úkoly", - "telemetry_description": "Přepněte na ON, aby vývojáři mohli získat podrobné údaje o používání a telemetrii pro vylepšení aplikace. Přepněte na OFF, abyste poslali pouze základní údaje: váš stav aktivity, verzi aplikace, verzi jádra a platformu (např. mobilní, webová nebo desktopová).", - "telemetry_title": "Sdílet další telemetrické a uživatelské údaje", - "temperature": "Teplota", - "text": "Text", - "text_file": "Textový soubor", - "text_one": "Text", - "text_other": "Texty", - "text_size": "Velikost textu", - "thank_you_for_your_feedback": "Děkujeme za vaši zpětnou vazbu!", - "thumbnailer_cpu_usage": "Využití CPU thumbnaileru", - "thumbnailer_cpu_usage_description": "Omezení, kolik CPU může thumbnailer použít pro zpracování na pozadí.", - "to": "do", - "toggle_all": "Přepnout vše", - "toggle_command_palette": "Přepnout paletu příkazů", - "toggle_hidden_files": "Přepnout skryté soubory", - "toggle_image_slider_within_quick_preview": "Přepnout posuvník obrázků v rychlém náhledu", - "toggle_inspector": "Přepnout inspektor", - "toggle_job_manager": "Přepnout správce úloh", - "toggle_metadata": "Přepnout metadata", - "toggle_path_bar": "Přepnout lištu cesty", - "toggle_quick_preview": "Přepnout rychlý náhled", - "toggle_sidebar": "Přepnout boční panel", - "tools": "Nástroje", - "total_bytes_capacity": "Celková kapacita", - "total_bytes_capacity_description": "Celková kapacita všech uzlů připojených k knihovně. 
Může zobrazovat nesprávné hodnoty během alfa.", - "total_bytes_free": "Volné místo", - "total_bytes_free_description": "Volné místo dostupné na všech uzlech připojených k knihovně.", - "total_bytes_used": "Celkové použité místo", - "total_bytes_used_description": "Celkové místo použité na všech uzlech připojených k knihovně.", - "total_files": "Celkový počet souborů", - "trash": "Koš", - "type": "Typ", - "ui_animations": "UI Animace", - "ui_animations_description": "Dialogy a další prvky UI budou animovány při otevírání a zavírání.", - "unidentified_files": "neidentifikované soubory", - "unidentified_files_info": "Soubory, které Spacedrive nebyl schopen identifikovat.", - "unknown": "Neznámé", - "unknown_one": "Neznámé", - "unknown_other": "Neznámé", - "unnamed_location": "Nepojmenované místo", - "update": "Aktualizace", - "update_downloaded": "Aktualizace stažena. Restartujte Spacedrive pro instalaci", - "updated_successfully": "Úspěšně aktualizováno, jste na verzi {{version}}", - "uploaded_file": "Soubor nahrán!", - "usage": "Použití", - "usage_description": "Vaše využití knihovny a informace o hardwaru", - "vacuum": "Vakuum", - "vacuum_library": "Vakuum Knihovna", - "vacuum_library_description": "Znovu zabalte svou databázi, abyste uvolnili zbytečný prostor.", - "value": "Hodnota", - "value_required": "Hodnota je vyžadována", - "version": "Verze {{version}}", - "video": "Video", - "video_preview_not_supported": "Náhled videa není podporován.", - "view_changes": "Zobrazit změny", - "want_to_do_this_later": "Chcete to udělat později?", - "web_page_archive": "Archiv webových stránek", - "web_page_archive_one": "Archiv webových stránek", - "web_page_archive_other": "Archivy webových stránek", - "website": "Webová stránka", - "widget": "Widget", - "widget_one": "Widget", - "widget_other": "Widgety", - "with_descendants": "S potomky", - "your_account": "Váš účet", - "your_account_description": "Účet Spacedrive a informace.", - "your_local_network": "Vaše místní síť", 
- "your_privacy": "Vaše soukromí", - "zoom": "Přiblížit", - "zoom_in": "Přiblížit", - "zoom_out": "Oddálit" - } \ No newline at end of file +{ + "Add Device Description": "Naskenujte QR kód nebo ověřte UUID svého zařízení, abyste přidali zařízení.", + "Connect": "Připojit", + "Connecting": "Připojování", + "about": "O nás", + "about_vision_text": "Mnozí z nás mají více cloudových účtů, disky, které nejsou zálohovány, a data ohrožená ztrátou. Závisíme na cloudových službách jako Google Photos a iCloud, ale jsme uzamčeni s omezenou kapacitou a téměř nulovou interoperabilitou mezi službami a operačními systémy. Fotoalba by neměla být uvězněna v ekosystému zařízení nebo využívána pro reklamní data. Měla by být nezávislá na OS, trvalá a osobně vlastněná. Data, která vytváříme, jsou naším odkazem, který nás daleko přežije – open source technologie je jediným způsobem, jak zajistit, že si udržíme absolutní kontrolu nad daty, která definují naše životy, v neomezeném měřítku.", + "about_vision_title": "Vize", + "accept": "Přijmout", + "accept_files": "Přijmout soubory", + "accessed": "Přistoupeno", + "account": "Účet", + "actions": "Akce", + "add": "Přidat", + "add_device": "Přidat zařízení", + "add_file_extension_rule": "Přidat příponu souboru k aktuálnímu pravidlu", + "add_filter": "Přidat filtr", + "add_library": "Přidat knihovnu", + "add_location": "Přidat umístění", + "add_location_description": "Zlepšete svůj zážitek ze Spacedrive přidáním svých oblíbených umístění do osobní knihovny, pro bezproblémovou a efektivní správu souborů.", + "add_location_overview_description": "Připojte místní cestu, svazek nebo síťové umístění ke Spacedrive.", + "add_location_tooltip": "Přidat cestu jako indexované umístění", + "add_locations": "Přidat umístění", + "add_tag": "Přidat štítek", + "added_location": "Přidáno umístění {{name}}", + "adding_location": "Přidávání umístění {{name}}", + "advanced": "Pokročilé", + "advanced_settings": "Pokročilé nastavení", + "album": "Album", + 
"album_one": "Album", + "album_other": "Alba", + "alias": "Přezdívka", + "all_jobs_have_been_cleared": "Všechny dokončené úkoly byly vymazány.", + "alpha_release_description": "Jsme potěšeni, že můžete vyzkoušet Spacedrive, nyní v alfa verzi, která představuje vzrušující nové funkce. Jak je tomu u každého počátečního vydání, tato verze může obsahovat nějaké chyby. Laskavě vás žádáme, abyste nám hlásili jakékoli problémy, na které narazíte, na našem Discord kanálu. Vaše cenná zpětná vazba výrazně přispěje ke zlepšení uživatelského zážitku.", + "alpha_release_title": "Alfa verze", + "app_crashed": "APLIKACE SELHALA", + "app_crashed_description": "Jsme za horizontem událostí...", + "appearance": "Vzhled", + "appearance_description": "Změňte vzhled svého klienta.", + "apply": "Použít", + "archive": "Archiv", + "archive_coming_soon": "Archivování umístění přichází brzy...", + "archive_info": "Extrahujte data z knihovny jako archiv, užitečné pro zachování struktury složek umístění.", + "archive_one": "Archiv", + "archive_other": "Archivy", + "are_you_sure": "Jste si jisti?", + "ascending": "Vzestupně", + "ask_spacedrive": "Zeptejte se Spacedrive", + "assign_tag": "Přiřadit štítek", + "assign_tags": "Přiřadit štítky", + "audio": "Audio", + "audio_preview_not_supported": "Náhled zvuku není podporován.", + "auto": "Automaticky", + "back": "Zpět", + "back_to_login": "Zpět k přihlášení", + "backfill_sync": "Doplňování synchronizačních operací", + "backfill_sync_description": "Knihovna je pozastavena, dokud se nedokončí doplňování", + "backups": "Zálohy", + "backups_description": "Spravujte své zálohy databáze Spacedrive.", + "bar_graph_info": "Přejeďte myší přes každý pruh a zobrazí se typ souboru. 
Dvojitým kliknutím přejděte.", + "bitrate": "Bitrate", + "blur_effects": "Efekty rozmazání", + "blur_effects_description": "Na některé komponenty bude aplikován efekt rozmazání.", + "book": "Kniha", + "book_one": "Kniha", + "book_other": "Knihy", + "calculating_library_statistics": "Výpočet statistik knihovny...", + "cancel": "Zrušit", + "cancel_selection": "Zrušit výběr", + "canceled": "Zrušeno", + "celcius": "Celsius", + "change": "Změnit", + "change_view_setting_description": "Změňte výchozí zobrazení průzkumníka", + "changelog": "Protokol změn", + "changelog_page_description": "Podívejte se, jaké skvělé nové funkce děláme", + "changelog_page_title": "Protokol změn", + "check_your_inbox": "Zkontrolujte si svou doručenou poštu na adrese", + "checksum": "Kontrolní součet", + "clear_finished_jobs": "Vyčistit dokončené úkoly", + "click_to_hide": "Klikněte pro skrytí", + "click_to_lock": "Klikněte pro uzamčení", + "client": "Klient", + "close": "Zavřít", + "close_command_palette": "Zavřít příkazovou paletu", + "close_current_tab": "Zavřít aktuální záložku", + "cloud": "Cloud", + "cloud_connect_description": "Chcete připojit svou knihovnu ke cloudu?", + "cloud_drives": "Cloudové disky", + "cloud_sync": "Cloudová synchronizace", + "cloud_sync_description": "Spravujte procesy, které synchronizují vaši knihovnu s Spacedrive Cloud", + "clouds": "Cloudy", + "code": "Kód", + "code_one": "Kód", + "code_other": "Kódy", + "collection": "Sbírka", + "collection_one": "Sbírka", + "collection_other": "Sbírky", + "color": "Barva", + "color_profile": "Barevný profil", + "color_space": "Barevný prostor", + "coming_soon": "Již brzy", + "completed": "Dokončeno", + "completed_with_errors": "Dokončeno s chybami", + "compress": "Komprimovat", + "config": "Konfigurace", + "config_one": "Konfigurace", + "config_other": "Konfigurace", + "configure_location": "Konfigurovat umístění", + "confirm": "Potvrdit", + "connect_cloud": "Připojit cloud", + "connect_cloud_description": "Připojte své 
cloudové účty ke Spacedrive.", + "connect_device": "Připojit zařízení", + "connect_device_description": "Spacedrive funguje nejlépe na všech vašich zařízeních.", + "connect_library_to_cloud": "Připojte knihovnu ke Spacedrive Cloud", + "connected": "Připojeno", + "connecting_library_to_cloud": "Připojování knihovny ke Spacedrive Cloud...", + "contacts": "Kontakty", + "contacts_description": "Spravujte své kontakty ve Spacedrive.", + "contains": "obsahuje", + "content_id": "ID obsahu", + "continue": "Pokračovat", + "convert_to": "Převést na", + "coordinates": "Souřadnice", + "copied": "Zkopírováno", + "copy": "Kopírovat", + "copy_as_path": "Kopírovat jako cestu", + "copy_object": "Kopírovat objekt", + "copy_path_to_clipboard": "Kopírovat cestu do schránky", + "copy_success": "Položky zkopírovány", + "create": "Vytvořit", + "create_file_error": "Chyba při vytváření souboru", + "create_file_success": "Vytvořen nový soubor: {{name}}", + "create_folder_error": "Chyba při vytváření složky", + "create_folder_success": "Vytvořena nová složka: {{name}}", + "create_library": "Vytvořit knihovnu", + "create_library_description": "Knihovny jsou bezpečná databáze na zařízení. Vaše soubory zůstanou tam, kde jsou, knihovna je katalogizuje a ukládá všechna data související se Spacedrive.", + "create_location": "Vytvořit umístění", + "create_new_library": "Vytvořit novou knihovnu", + "create_new_library_description": "Knihovny jsou bezpečná databáze na zařízení. 
Vaše soubory zůstanou tam, kde jsou, knihovna je katalogizuje a ukládá všechna data související se Spacedrive.", + "create_new_tag": "Vytvořit nový štítek", + "create_new_tag_description": "Vyberte název a barvu.", + "create_tag": "Vytvořit štítek", + "created": "Vytvořeno", + "creating_library": "Vytváření knihovny...", + "creating_your_library": "Vytváření vaší knihovny", + "current": "Aktuální", + "current_directory": "Aktuální adresář", + "current_directory_with_descendants": "Aktuální adresář s podadresáři", + "custom": "Vlastní", + "cut": "Vyjmout", + "cut_object": "Vyjmout objekt", + "cut_success": "Položky vyjmuty", + "dark": "Tmavý", + "data_folder": "Složka s daty", + "database": "Databáze", + "database_one": "Databáze", + "database_other": "Databáze", + "date": "Datum", + "date_accessed": "Datum přístupu", + "date_created": "Datum vytvoření", + "date_indexed": "Datum indexování", + "date_modified": "Datum změny", + "date_taken": "Datum pořízení", + "date_time_format": "Formát data a času", + "date_time_format_description": "Vyberte formát data zobrazený ve Spacedrive", + "debug_mode": "Režim ladění", + "debug_mode_description": "Povolte další ladicí funkce v aplikaci.", + "default": "Výchozí", + "default_settings": "Výchozí nastavení", + "delete": "Smazat", + "delete_dialog_title": "Smazat {{prefix}} {{type}}", + "delete_forever": "Smazat navždy", + "delete_info": "To neodstraní skutečnou složku na disku. 
Náhledová média budou smazána.", + "delete_library": "Smazat knihovnu", + "delete_library_description": "To je trvalé, vaše soubory nebudou smazány, pouze knihovna Spacedrive.", + "delete_location": "Smazat umístění", + "delete_location_description": "Smazání umístění také odstraní všechny soubory s ním spojené z databáze Spacedrive, samotné soubory nebudou smazány.", + "delete_object": "Smazat objekt", + "delete_rule": "Smazat pravidlo", + "delete_rule_confirmation": "Opravdu chcete smazat toto pravidlo?", + "delete_tag": "Smazat štítek", + "delete_tag_description": "Opravdu chcete smazat tento štítek? To nelze vrátit a označené soubory budou odpojeny.", + "delete_warning": "To smaže váš {{type}}. Tento krok nelze vrátit zpět. Pokud jej přesunete do koše, můžete jej později obnovit. Pokud jej smažete navždy, bude navždy pryč.", + "descending": "Sestupně", + "description": "Popis", + "deselect": "Zrušit výběr", + "details": "Detaily", + "device": "Zařízení", + "devices": "Zařízení", + "devices_coming_soon_tooltip": "Již brzy! 
Tato alfa verze neobsahuje synchronizaci knihoven, bude brzy připravena.", + "dialog": "Dialog", + "dialog_shortcut_description": "K provádění akcí a operací", + "direction": "Směr", + "directories": "adresáře", + "directory": "adresář", + "disabled": "Zakázáno", + "disconnected": "Odpojeno", + "display_formats": "Formáty zobrazení", + "display_name": "Zobrazované jméno", + "distance": "Vzdálenost", + "do_the_thing": "Udělejte to", + "docker": "Docker", + "document": "Dokument", + "done": "Hotovo", + "dont_have_any": "Vypadá to, že žádné nemáte!", + "dont_show_again": "Nezobrazovat znovu", + "dotfile": "Skrytý soubor", + "dotfile_one": "Skrytý soubor", + "dotfile_other": "Skryté soubory", + "double_click_action": "Akce při dvojkliku", + "download": "Stáhnout", + "downloading_update": "Stahování aktualizace", + "drag_to_resize": "Přetáhněte pro změnu velikosti", + "duplicate": "Duplikovat", + "duplicate_object": "Duplikovat objekt", + "duplicate_success": "Položky duplikovány", + "edit": "Upravit", + "edit_library": "Upravit knihovnu", + "edit_location": "Upravit umístění", + "empty_file": "Prázdný soubor", + "enable_networking": "Povolit síť", + "enable_networking_description": "Umožněte vašemu uzlu komunikovat s dalšími uzly Spacedrive kolem vás.", + "enable_networking_description_required": "Požadováno pro synchronizaci knihoven nebo Spacedrop!", + "enable_relay": "Povolit relé", + "enable_relay_description": "Povolte reléový server, aby vaše zařízení mohla komunikovat přes veřejný internet.", + "enable_sync": "Povolit synchronizaci", + "enable_sync_description": "Generujte synchronizační operace pro všechna stávající data v této knihovně a nakonfigurujte Spacedrive tak, aby generoval synchronizační operace, když se něco stane v budoucnu.", + "enabled": "Povoleno", + "encrypt": "Šifrovat", + "encrypt_library": "Šifrovat knihovnu", + "encrypt_library_coming_soon": "Šifrování knihoven přichází brzy", + "encrypt_library_description": "Povolit šifrování této 
knihovny, to zašifruje pouze databázi Spacedrive, nikoli samotné soubory.", + "encrypted": "Šifrované", + "encrypted_one": "Šifrovaný", + "encrypted_other": "Šifrované", + "ends_with": "končí s", + "ephemeral_notice_browse": "Procházejte své soubory a složky přímo ze svého zařízení.", + "ephemeral_notice_consider_indexing": "Zvažte indexování svých místních umístění pro rychlejší a efektivnější průzkum.", + "equals": "rovná se", + "erase": "Vymazat", + "erase_a_file": "Vymazat soubor", + "erase_a_file_description": "Nastavte své nastavení vymazání.", + "error": "Chyba", + "error_loading_original_file": "Chyba při načítání původního souboru", + "error_message": "Chyba: {{error}}.", + "error_unknown": "Došlo k neznámé chybě.", + "executable": "Spustitelný soubor", + "executable_one": "Spustitelný soubor", + "executable_other": "Spustitelné soubory", + "expand": "Rozšířit", + "explorer": "Průzkumník", + "explorer_settings": "Nastavení průzkumníka", + "explorer_shortcut_description": "Pro navigaci a interakci se souborovým systémem", + "explorer_view": "Zobrazení průzkumníka", + "export": "Exportovat", + "export_library": "Exportovat knihovnu", + "export_library_coming_soon": "Export knihovny již brzy", + "export_library_description": "Exportujte tuto knihovnu do souboru.", + "extension": "Přípona", + "extensions": "Rozšíření", + "extensions_description": "Instalujte rozšíření, abyste rozšířili funkčnost tohoto klienta.", + "fahrenheit": "Fahrenheit", + "failed": "Neúspěšné", + "failed_to_add_location": "Nepodařilo se přidat umístění", + "failed_to_cancel_job": "Nepodařilo se zrušit úkol.", + "failed_to_clear_all_jobs": "Nepodařilo se vyčistit všechny úkoly.", + "failed_to_copy_file": "Nepodařilo se zkopírovat soubor", + "failed_to_copy_file_path": "Nepodařilo se zkopírovat cestu souboru", + "failed_to_cut_file": "Nepodařilo se vyjmout soubor", + "failed_to_delete_rule": "Nepodařilo se smazat pravidlo", + "failed_to_download_update": "Nepodařilo se stáhnout 
aktualizaci", + "failed_to_duplicate_file": "Nepodařilo se duplikovat soubor", + "failed_to_generate_checksum": "Nepodařilo se vygenerovat kontrolní součet", + "failed_to_generate_labels": "Nepodařilo se vygenerovat štítky", + "failed_to_generate_thumbnails": "Nepodařilo se vygenerovat miniatury", + "failed_to_load_tags": "Nepodařilo se načíst štítky", + "failed_to_open_file_body": "Nepodařilo se otevřít soubor kvůli chybě: {{error}}", + "failed_to_open_file_title": "Nepodařilo se otevřít soubor", + "failed_to_open_file_with": "Nepodařilo se otevřít soubor, s: {{data}}", + "failed_to_pause_job": "Nepodařilo se pozastavit úkol.", + "failed_to_reindex_location": "Nepodařilo se přeindexovat umístění", + "failed_to_remove_file_from_recents": "Nepodařilo se odstranit soubor z nedávných", + "failed_to_remove_job": "Nepodařilo se odstranit úkol.", + "failed_to_rename_file": "Nepodařilo se přejmenovat {{oldName}} na {{newName}}", + "failed_to_rescan_location": "Nepodařilo se znovu prohledat umístění", + "failed_to_resume_job": "Nepodařilo se obnovit úkol.", + "failed_to_update_location_settings": "Nepodařilo se aktualizovat nastavení umístění", + "favorite": "Oblíbené", + "favorites": "Oblíbené", + "feedback": "Zpětná vazba", + "feedback_is_required": "Zpětná vazba je povinná", + "feedback_login_description": "Přihlášení nám umožňuje reagovat na vaši zpětnou vazbu", + "feedback_placeholder": "Vaše zpětná vazba...", + "feedback_toast_error_message": "Při odesílání vaší zpětné vazby došlo k chybě. 
Prosím zkuste to znovu.", + "fetching_file_kind_statistics": "Načítání statistik typů souborů...", + "file_already_exist_in_this_location": "Soubor již v tomto umístění existuje", + "file_directory_name": "Název souboru/adresáře", + "file_extension_description": "Přípona souboru (např., .mp4, .jpg, .txt)", + "file_from": "Soubor {{file}} z {{name}}", + "file_indexing_rules": "Pravidla indexování souborů", + "file_many": "soubory", + "file_one": "soubor", + "file_other": "soubory", + "file_picker_not_supported": "Výběr souboru není na této platformě podporován", + "file_two": "soubory", + "file_zero": "žádné soubory", + "filter": "Filtr", + "filters": "Filtry", + "flash": "Blesk", + "folder": "Složka", + "folder_one": "Složka", + "folder_other": "Složky", + "font": "Písmo", + "font_one": "Písmo", + "font_other": "Písma", + "for_library": "Pro knihovnu {{name}}", + "forced": "Vynuceno", + "forward": "Vpřed", + "free_of": "volné z", + "from": "od", + "full_disk_access": "Plný přístup k disku", + "full_disk_access_description": "Abychom vám poskytli nejlepší zážitek, potřebujeme přístup k vašemu disku, abychom mohli indexovat vaše soubory. 
Vaše soubory jsou dostupné pouze vám.", + "full_reindex": "Plný reindex", + "full_reindex_info": "Proveďte úplné opětovné skenování tohoto umístění.", + "general": "Obecné", + "general_settings": "Obecná nastavení", + "general_settings_description": "Obecná nastavení týkající se tohoto klienta.", + "general_shortcut_description": "Obecné klávesové zkratky", + "generatePreviewMedia_label": "Generovat náhledová média pro toto umístění", + "generate_checksums": "Generovat kontrolní součty", + "gitignore": "Git Ignore", + "glob_description": "Glob (např., **/.git)", + "go_back": "Jít zpět", + "go_to_labels": "Přejít na štítky", + "go_to_location": "Přejít na umístění", + "go_to_overview": "Přejít na přehled", + "go_to_recents": "Přejít na nedávné", + "go_to_settings": "Přejít na nastavení", + "go_to_tag": "Přejít na štítek", + "got_it": "Rozumím", + "grid_gap": "Mezera", + "grid_view": "Zobrazení mřížky", + "grid_view_notice_description": "Získejte vizuální přehled o svých souborech pomocí zobrazení mřížky. 
Toto zobrazení zobrazuje vaše soubory a složky jako miniatury, což usnadňuje rychlé nalezení požadovaného souboru.", + "hidden": "Skrytý", + "hidden_label": "Zabraňuje zobrazení umístění a jeho obsahu v souhrnných kategoriích, vyhledávání a štítcích, pokud není povoleno \"Zobrazit skryté položky\".", + "hide_in_library_search": "Skrýt při vyhledávání v knihovně", + "hide_in_library_search_description": "Skrýt soubory s tímto štítkem z výsledků při hledání v celé knihovně.", + "hide_in_sidebar": "Skrýt v postranním panelu", + "hide_in_sidebar_description": "Zabraňte zobrazení tohoto štítku v postranním panelu aplikace.", + "hide_location_from_view": "Skrýt umístění a obsah z pohledu", + "hide_sidebar": "Skrýt postranní panel", + "home": "Domů", + "hosted_locations": "Hostovaná umístění", + "hosted_locations_description": "Rozšiřte své místní úložiště pomocí našeho cloudu!", + "icon_size": "Velikost ikony", + "image": "Obrázek", + "image_labeler_ai_model": "Model AI pro rozpoznávání štítků na obrázcích", + "image_labeler_ai_model_description": "Model používaný k rozpoznávání objektů na obrázcích. Větší modely jsou přesnější, ale pomalejší.", + "image_one": "Obrázek", + "image_other": "Obrázky", + "import": "Importovat", + "incoming_spacedrop": "Příchozí Spacedrop", + "indexed": "Indexováno", + "indexed_new_files": "Indexovány nové soubory {{name}}", + "indexer_rule_reject_allow_label": "Ve výchozím nastavení funguje pravidlo indexeru jako seznam odmítnutí, což má za následek vyloučení jakýchkoli souborů, které odpovídají jeho kritériím. 
Povolíte-li tuto možnost, změní se na seznam povolení, což umožní umístění indexovat pouze soubory, které splňují jeho specifikovaná pravidla.", + "indexer_rules": "Pravidla indexeru", + "indexer_rules_error": "Chyba při načítání pravidel indexeru", + "indexer_rules_info": "Pravidla indexeru vám umožňují specifikovat cesty k ignorování pomocí globů.", + "indexer_rules_not_available": "Žádná pravidla indexeru nejsou k dispozici", + "ingester": "Ingester", + "ingester_description": "Tento proces přijímá cloudové operace a odesílá je hlavnímu synchronizačnímu ingesteru.", + "injester_description": "Tento proces přijímá synchronizační operace z P2P připojení a Spacedrive Cloud a aplikuje je na knihovnu.", + "install": "Instalovat", + "install_update": "Instalovat aktualizaci", + "installed": "Nainstalováno", + "invalid_extension": "Neplatná přípona", + "invalid_glob": "Neplatný glob", + "invalid_name": "Neplatné jméno", + "invalid_path": "Neplatná cesta", + "ipv4_ipv6_listeners_error": "Chyba při vytváření posluchačů IPv4 a IPv6. Zkontrolujte nastavení firewallu!", + "ipv4_listeners_error": "Chyba při vytváření posluchačů IPv4. Zkontrolujte nastavení firewallu!", + "ipv6": "IPv6 networking", + "ipv6_description": "Povolit peer-to-peer komunikaci pomocí sítě IPv6", + "ipv6_listeners_error": "Chyba při vytváření posluchačů IPv6. Zkontrolujte nastavení firewallu!", + "is": "je", + "is_not": "není", + "item": "položka", + "item_size": "Velikost položky", + "item_with_count_one": "{{count}} položka", + "item_with_count_other": "{{count}} položky", + "items": "položky", + "job_error_description": "Úloha byla dokončena s chybami. Podrobnosti naleznete níže v protokolu chyb. 
Pokud potřebujete pomoc, kontaktujte podporu a poskytněte tuto chybu.", + "job_has_been_canceled": "Úloha byla zrušena.", + "job_has_been_paused": "Úloha byla pozastavena.", + "job_has_been_removed": "Úloha byla odstraněna.", + "job_has_been_resumed": "Úloha byla obnovena.", + "join": "Připojit se", + "join_discord": "Připojit se k Discordu", + "join_library": "Připojit se ke knihovně", + "join_library_description": "Knihovny jsou bezpečná databáze na zařízení. Vaše soubory zůstanou tam, kde jsou, knihovna je katalogizuje a ukládá všechna data související se Spacedrive.", + "joining": "Připojování", + "key": "Klíč", + "key_manager": "Správce klíčů", + "key_manager_description": "Vytvářejte šifrovací klíče, připojujte a odpojujte své klíče, abyste viděli soubory dešifrované v reálném čase.", + "key_one": "Klíč", + "key_other": "Klíče", + "keybinds": "Klávesové zkratky", + "keybinds_description": "Zobrazit a spravovat klávesové zkratky klienta", + "keys": "Klíče", + "kilometers": "Kilometry", + "kind": "Druh", + "kind_one": "Druh", + "kind_other": "Druhy", + "label": "Štítek", + "labels": "Štítky", + "language": "Jazyk", + "language_description": "Změňte jazyk rozhraní Spacedrive", + "learn_more": "Zjistit více", + "learn_more_about_telemetry": "Zjistit více o telemetrii", + "less": "méně", + "libraries": "Knihovny", + "libraries_description": "Databáze obsahuje všechna data knihovny a metadata souborů.", + "library": "Knihovna", + "library_bytes": "Velikost knihovny", + "library_bytes_description": "Celková velikost všech umístění ve vaší knihovně.", + "library_db_size": "Velikost indexu", + "library_db_size_description": "Velikost databáze knihovny.", + "library_name": "Název knihovny", + "library_overview": "Přehled knihovny", + "library_settings": "Nastavení knihovny", + "library_settings_description": "Obecná nastavení týkající se aktuálně aktivní knihovny.", + "light": "Světlo", + "link": "Odkaz", + "link_one": "Odkaz", + "link_other": "Odkazy", + "list_view": 
"Zobrazení seznamu", + "list_view_notice_description": "Snadno navigujte mezi svými soubory a složkami pomocí zobrazení seznamu. Toto zobrazení zobrazuje vaše soubory v jednoduchém, organizovaném seznamu, což vám umožňuje rychle najít a přistupovat k souborům, které potřebujete.", + "loading": "Načítání", + "local": "Místní", + "local_locations": "Místní umístění", + "local_node": "Místní uzel", + "location": "Umístění", + "location_added_successfully": "Umístění úspěšně přidáno.", + "location_connected_tooltip": "Umístění je sledováno pro změny", + "location_deleted_successfully": "Umístění úspěšně smazáno.", + "location_disconnected_tooltip": "Umístění není sledováno pro změny", + "location_display_name_info": "Název tohoto umístění, toto je, co bude zobrazeno v postranním panelu. Nepřejmenuje skutečnou složku na disku.", + "location_empty_notice_message": "Zde nebyly nalezeny žádné soubory", + "location_is_already_linked": "Umístění je již propojeno", + "location_one": "Umístění", + "location_other": "Umístění", + "location_path_info": "Cesta k tomuto umístění, zde budou soubory uloženy na disku.", + "location_type": "Typ umístění", + "location_type_managed": "Spacedrive pro vás soubory třídí. 
Pokud umístění není prázdné, bude vytvořena složka \"spacedrive\".", + "location_type_normal": "Obsah bude indexován, nové soubory nebudou automaticky tříděny.", + "location_type_replica": "Toto umístění je replikou jiného, jeho obsah bude automaticky synchronizován.", + "locations": "Umístění", + "locations_description": "Spravujte svá úložná umístění.", + "lock": "Zamknout", + "lock_sidebar": "Zamknout postranní panel", + "log_in": "Přihlásit se", + "log_in_with_browser": "Přihlásit se pomocí prohlížeče", + "log_out": "Odhlásit se", + "logged_in_as": "Přihlášen jako {{email}}", + "logging_in": "Přihlašování...", + "login_link_sent": "Odeslali jsme dočasný přihlašovací odkaz.", + "logout": "Odhlásit se", + "manage_library": "Spravovat knihovnu", + "managed": "Spravováno", + "manual_peers": "Ruční přidání peerů", + "manual_peers_description": "Přidejte peery ručně zadáním jejich IP adresy a portu.\nTo je užitečné, když automatické zjištění není možné.", + "media": "Média", + "media_view": "Zobrazení médií", + "media_view_context": "Kontext zobrazení médií", + "media_view_notice_description": "Snadno objevujte fotografie a videa, zobrazení médií zobrazí výsledky počínaje aktuálním umístěním včetně podadresářů.", + "meet_contributors_behind_spacedrive": "Seznamte se s přispěvateli za Spacedrive", + "meet_title": "Seznamte se s {{title}}", + "mesh": "Mesh", + "mesh_one": "Mesh", + "mesh_other": "Meshes", + "miles": "Míle", + "mode": "Režim", + "model": "Model", + "modified": "Změněno", + "more": "Více", + "more_actions": "Více akcí...", + "more_info": "Více informací", + "move_back_within_quick_preview": "Posunout zpět v rychlém náhledu", + "move_files": "Přesunout soubory", + "move_forward_within_quick_preview": "Posunout vpřed v rychlém náhledu", + "move_to_trash": "Přesunout do koše", + "my_sick_location": "Moje skvělé umístění", + "name": "Název", + "navigate_back": "Navigovat zpět", + "navigate_backwards": "Navigovat zpět", + "navigate_files_downwards": 
"Navigovat soubory směrem dolů", + "navigate_files_leftwards": "Navigovat soubory směrem doleva", + "navigate_files_rightwards": "Navigovat soubory směrem doprava", + "navigate_files_upwards": "Navigovat soubory směrem nahoru", + "navigate_forward": "Navigovat vpřed", + "navigate_forwards": "Navigovat vpřed", + "navigate_to_settings_page": "Navigovat na stránku nastavení", + "network": "Síť", + "network_page_description": "Další uzly Spacedrive ve vaší síti LAN se zde objeví spolu s vašimi výchozími síťovými připojeními OS.", + "network_settings": "Nastavení sítě", + "network_settings_advanced": "Pokročilý přehled sítě", + "network_settings_advanced_description": "Pokročilé informace o vašem aktuálním nastavení sítě.", + "network_settings_description": "Nastavení týkající se síťování a konektivity.", + "networking": "Síťování", + "networking_error": "Chyba při spuštění síťování!", + "networking_port": "Síťový port", + "networking_port_description": "Port pro peer-to-peer síťování Spacedrive ke komunikaci. Měli byste to nechat zakázáno, pokud nemáte restriktivní firewall. 
Nevystavujte na internet!", + "new": "Nový", + "new_folder": "Složka", + "new_library": "Nová knihovna", + "new_location": "Nové umístění", + "new_location_web_description": "Protože používáte webovou verzi Spacedrive, budete muset zadat absolutní URL adresu adresáře lokálního pro vzdálený uzel.", + "new_tab": "Nová záložka", + "new_tag": "Nový štítek", + "new_update_available": "Nová aktualizace k dispozici!", + "no_apps_available": "Žádné aplikace nejsou k dispozici", + "no_favorite_items": "Žádné oblíbené položky", + "no_git_files": "Žádné Git soubory", + "no_hidden_files": "Žádné skryté soubory", + "no_items_found": "Nebyly nalezeny žádné položky", + "no_jobs": "Žádné úlohy.", + "no_labels": "Žádné štítky", + "no_nodes_found": "Nebyly nalezeny žádné uzly Spacedrive.", + "no_search_selected": "Nebylo vybráno žádné hledání", + "no_system_files": "Žádné systémové soubory", + "no_tag_selected": "Nebyl vybrán žádný štítek", + "no_tags": "Žádné štítky", + "no_tags_description": "Nevytvořili jste žádné štítky", + "node_name": "Název uzlu", + "nodes": "Uzly", + "nodes_description": "Spravujte uzly připojené k této knihovně. Uzel je instance backendu Spacedrive, běžící na zařízení nebo serveru. 
Každý uzel nese kopii databáze a synchronizuje se prostřednictvím peer-to-peer připojení v reálném čase.", + "none": "Žádné", + "normal": "Normální", + "not_you": "Nejste to vy?", + "note": "Poznámka", + "nothing_selected": "Nic nevybráno", + "number_of_passes": "# průchodů", + "object": "Objekt", + "object_id": "ID objektu", + "off": "Vypnuto", + "offline": "Offline", + "on": "Zapnuto", + "online": "Online", + "only_images": "Pouze obrázky", + "open": "Otevřít", + "open_file": "Otevřít soubor", + "open_in_new_tab": "Otevřít v nové záložce", + "open_logs": "Otevřít protokoly", + "open_new_location_once_added": "Otevřít nové umístění po přidání", + "open_new_tab": "Otevřít novou záložku", + "open_object": "Otevřít objekt", + "open_object_from_quick_preview_in_native_file_manager": "Otevřít objekt z rychlého náhledu v nativním správci souborů", + "open_settings": "Otevřít nastavení", + "open_with": "Otevřít pomocí", + "opening_trash": "Otevírání koše", + "or": "NEBO", + "other": "Ostatní", + "overview": "Přehled", + "p2p_visibility": "Viditelnost P2P", + "p2p_visibility_contacts_only": "Pouze kontakty", + "p2p_visibility_description": "Nastavte, kdo může vidět vaše instalace Spacedrive.", + "p2p_visibility_disabled": "Zakázáno", + "p2p_visibility_everyone": "Všichni", + "package": "Balíček", + "package_one": "Balíček", + "package_other": "Balíčky", + "page": "Stránka", + "page_shortcut_description": "Různé stránky v aplikaci", + "pair": "Spárovat", + "pairing_with_node": "Párování s {{node}}", + "paste": "Vložit", + "paste_object": "Vložit objekt", + "paste_success": "Položky vloženy", + "path": "Cesta", + "path_copied_to_clipboard_description": "Cesta pro umístění {{location}} zkopírována do schránky.", + "path_copied_to_clipboard_title": "Cesta zkopírována do schránky", + "path_to_save_do_the_thing": "Cesta k uložení při kliknutí na 'Udělej věc':", + "paths": "Cesty", + "pause": "Pozastavit", + "paused": "Pozastaveno", + "peers": "Peerové", + "people": "Lidé", + 
"pin": "Připnout", + "please_select_emoji": "Prosím, vyberte emoji", + "prefix_a": "a", + "preview_media_bytes": "Náhled médií", + "preview_media_bytes_description": "Celková velikost všech náhledových mediálních souborů, jako jsou miniatury.", + "privacy": "Soukromí", + "privacy_description": "Spacedrive je postaven pro ochranu soukromí, proto jsme open source a místní. Takže jasně uvedeme, která data jsou s námi sdílena.", + "queued": "Ve frontě", + "quick_preview": "Rychlý náhled", + "quick_rescan_started": "Rychlé opětovné skenování spuštěno", + "quick_view": "Rychlý pohled", + "quickpreview_thumbnail_error_message": "Nepodařilo se načíst obrázek v plném rozlišení", + "quickpreview_thumbnail_error_tip": "Obrázek nebyl nalezen. Proto je zobrazena miniatura.", + "random": "Náhodně", + "receiver": "Přijímač", + "receiver_description": "Tento proces přijímá a ukládá operace ze Spacedrive Cloud.", + "recent_jobs": "Nedávné úlohy", + "recents": "Nedávné", + "recents_notice_message": "Nedávné jsou vytvořeny, když otevřete soubor.", + "regen_labels": "Obnovit štítky", + "regen_thumbnails": "Obnovit miniatury", + "regenerate_thumbs": "Obnovit miniatury", + "reindex": "Přeindexovat", + "reject": "Odmítnout", + "reject_files": "Odmítnout soubory", + "relay_listeners_error": "Chyba při vytváření posluchače relé. 
Zkontrolujte prosím nastavení firewallu!", + "reload": "Načíst znovu", + "remote_access": "Povolit vzdálený přístup", + "remote_access_description": "Povolit ostatním uzlům přímé připojení k tomuto uzlu.", + "remote_identity": "Vzdálená identita", + "remove": "Odstranit", + "remove_from_recents": "Odstranit z nedávných", + "rename": "Přejmenovat", + "rename_object": "Přejmenovat objekt", + "replica": "Replika", + "rescan": "Opětovné skenování", + "rescan_directory": "Opětovné skenování adresáře", + "rescan_location": "Opětovné skenování umístění", + "reset": "Resetovat", + "reset_and_quit": "Resetovat a ukončit aplikaci", + "reset_confirmation": "Opravdu chcete resetovat Spacedrive? Vaše databáze bude smazána.", + "reset_to_continue": "Zjistili jsme, že jste možná vytvořili svou knihovnu se starší verzí Spacedrive. Pro pokračování v používání aplikace ji prosím resetujte!", + "reset_warning": "ZTRATÍTE VŠECHNA EXISTUJÍCÍ DATA SPACEDRIVE!", + "resolution": "Rozlišení", + "resources": "Zdroje", + "restore": "Obnovit", + "resume": "Pokračovat", + "retry": "Zkusit znovu", + "reveal_in_native_file_manager": "Zobrazit v nativním správci souborů", + "revel_in_browser": "Zobrazit v {{browser}}", + "rules": "Pravidla", + "running": "Běží", + "save": "Uložit", + "save_changes": "Uložit změny", + "save_search": "Uložit hledání", + "save_spacedrop": "Uložit Spacedrop", + "saved_searches": "Uložená hledání", + "screenshot": "Snímek obrazovky", + "screenshot_one": "Snímek obrazovky", + "screenshot_other": "Snímky obrazovky", + "search": "Vyhledat", + "search_extensions": "Vyhledávání rozšíření", + "search_for_files_and_actions": "Hledat soubory a akce...", + "search_locations": "Vyhledávání umístění", + "secure_delete": "Bezpečně smazat", + "security": "Zabezpečení", + "security_description": "Udržujte svůj klient v bezpečí.", + "see_less": "Zobrazit méně", + "see_more": "Zobrazit více", + "select_library": "Vyberte Cloud knihovnu", + "send": "Odeslat", + "send_report": "Odeslat 
zprávu", + "sender": "Odesílatel", + "sender_description": "Tento proces odesílá synchronizační operace do Spacedrive Cloud.", + "settings": "Nastavení", + "setup": "Nastavit", + "share": "Sdílet", + "share_anonymous_usage": "Sdílet anonymní používání", + "share_anonymous_usage_description": "Sdílejte zcela anonymní telemetrická data, která pomáhají vývojářům zlepšovat aplikaci", + "share_bare_minimum": "Sdílet pouze minimum", + "share_bare_minimum_description": "Sdílet pouze to, že jsem aktivním uživatelem Spacedrive a několik technických detailů", + "sharing": "Sdílení", + "sharing_description": "Spravujte, kdo má přístup k vašim knihovnám.", + "show_details": "Zobrazit podrobnosti", + "show_hidden_files": "Zobrazit skryté soubory", + "show_inspector": "Zobrazit inspektor", + "show_object_size": "Zobrazit velikost objektu", + "show_path_bar": "Zobrazit panel cesty", + "show_slider": "Zobrazit posuvník", + "show_tags": "Zobrazit štítky", + "size": "Velikost", + "size_b": "B", + "size_bs": "B", + "size_gb": "GB", + "size_gbs": "GB", + "size_kb": "kB", + "size_kbs": "kB", + "size_mb": "MB", + "size_mbs": "MB", + "size_tb": "TB", + "size_tbs": "TB", + "skip_login": "Přeskočit přihlášení", + "software": "Software", + "sort_by": "Řadit podle", + "spacedrive_account": "Účet Spacedrive", + "spacedrive_cloud": "Spacedrive Cloud", + "spacedrive_cloud_description": "Spacedrive je vždy primárně lokální, ale v budoucnu nabídneme vlastní volitelné cloudové služby. 
Prozatím je ověřování použito pouze pro funkci zpětné vazby, jinak není vyžadováno.", + "spacedrop": "Viditelnost Spacedrop", + "spacedrop_a_file": "Spacedrop soubor", + "spacedrop_already_progress": "Spacedrop již probíhá", + "spacedrop_contacts_only": "Pouze kontakty", + "spacedrop_description": "Okamžité sdílení se zařízeními běžícími na Spacedrive ve vaší síti.", + "spacedrop_disabled": "Zakázáno", + "spacedrop_everyone": "Všichni", + "spacedrop_rejected": "Spacedrop odmítnut", + "square_thumbnails": "Čtvercové miniatury", + "star_on_github": "Dát hvězdu na GitHubu", + "start": "Start", + "starting": "Spouštění...", + "starts_with": "začíná na", + "stop": "Zastavit", + "stopping": "Zastavování...", + "submit": "Odeslat", + "success": "Úspěch", + "support": "Podpora", + "switch_to_grid_view": "Přepnout na zobrazení mřížky", + "switch_to_list_view": "Přepnout na zobrazení seznamu", + "switch_to_media_view": "Přepnout na zobrazení médií", + "switch_to_next_tab": "Přepnout na další záložku", + "switch_to_previous_tab": "Přepnout na předchozí záložku", + "sync": "Synchronizovat", + "syncPreviewMedia_label": "Synchronizovat náhledová média pro toto umístění s vašimi zařízeními", + "sync_description": "Spravujte, jak Spacedrive synchronizuje.", + "sync_with_library": "Synchronizovat s knihovnou", + "sync_with_library_description": "Pokud je povoleno, vaše klávesové zkratky budou synchronizovány s knihovnou, jinak se použijí pouze na tomto klientu.", + "system": "Systém", + "tag": "Štítek", + "tag_one": "Štítek", + "tag_other": "Štítky", + "tags": "Štítky", + "tags_bulk_assigned": "Přiřazený štítek \"{{tag_name}}\" k {{file_count}} $t(file, { \"count\": {{file_count}} }).", + "tags_bulk_failed_with_tag": "Nepodařilo se přiřadit štítek \"{{tag_name}}\" k {{file_count}} $t(file, { \"count\": {{file_count}} }): {{error_message}}", + "tags_bulk_failed_without_tag": "Nepodařilo se označit {{file_count}} $t(file, { \"count\": {{file_count}} }): {{error_message}}", + 
"tags_bulk_instructions": "Vyberte jeden nebo více souborů a stiskněte číslici pro přiřazení/odebrání odpovídajícího štítku.", + "tags_bulk_mode_active": "Režim přiřazení štítků je povolen.", + "tags_bulk_unassigned": "Odebraný štítek \"{{tag_name}}\" z {{file_count}} $t(file, { \"count\": {{file_count}} }).", + "tags_description": "Spravujte své štítky.", + "tags_notice_message": "K tomuto štítku nejsou přiřazeny žádné položky.", + "task": "úkol", + "task_one": "úkol", + "task_other": "úkoly", + "telemetry_description": "Přepněte na ON, aby vývojáři mohli získat podrobné údaje o používání a telemetrii pro vylepšení aplikace. Přepněte na OFF, abyste poslali pouze základní údaje: váš stav aktivity, verzi aplikace, verzi jádra a platformu (např. mobilní, webová nebo desktopová).", + "telemetry_title": "Sdílet další telemetrické a uživatelské údaje", + "temperature": "Teplota", + "text": "Text", + "text_file": "Textový soubor", + "text_one": "Text", + "text_other": "Texty", + "text_size": "Velikost textu", + "thank_you_for_your_feedback": "Děkujeme za vaši zpětnou vazbu!", + "thumbnailer_cpu_usage": "Využití CPU thumbnaileru", + "thumbnailer_cpu_usage_description": "Omezení, kolik CPU může thumbnailer použít pro zpracování na pozadí.", + "to": "do", + "toggle_all": "Přepnout vše", + "toggle_command_palette": "Přepnout paletu příkazů", + "toggle_hidden_files": "Přepnout skryté soubory", + "toggle_image_slider_within_quick_preview": "Přepnout posuvník obrázků v rychlém náhledu", + "toggle_inspector": "Přepnout inspektor", + "toggle_job_manager": "Přepnout správce úloh", + "toggle_metadata": "Přepnout metadata", + "toggle_path_bar": "Přepnout lištu cesty", + "toggle_quick_preview": "Přepnout rychlý náhled", + "toggle_sidebar": "Přepnout boční panel", + "tools": "Nástroje", + "total_bytes_capacity": "Celková kapacita", + "total_bytes_capacity_description": "Celková kapacita všech uzlů připojených k knihovně. 
Může zobrazovat nesprávné hodnoty během alfa.", + "total_bytes_free": "Volné místo", + "total_bytes_free_description": "Volné místo dostupné na všech uzlech připojených k knihovně.", + "total_bytes_used": "Celkové použité místo", + "total_bytes_used_description": "Celkové místo použité na všech uzlech připojených k knihovně.", + "total_files": "Celkový počet souborů", + "trash": "Koš", + "type": "Typ", + "ui_animations": "UI Animace", + "ui_animations_description": "Dialogy a další prvky UI budou animovány při otevírání a zavírání.", + "unidentified_files": "neidentifikované soubory", + "unidentified_files_info": "Soubory, které Spacedrive nebyl schopen identifikovat.", + "unknown": "Neznámé", + "unknown_one": "Neznámé", + "unknown_other": "Neznámé", + "unnamed_location": "Nepojmenované místo", + "update": "Aktualizace", + "update_downloaded": "Aktualizace stažena. Restartujte Spacedrive pro instalaci", + "updated_successfully": "Úspěšně aktualizováno, jste na verzi {{version}}", + "uploaded_file": "Soubor nahrán!", + "usage": "Použití", + "usage_description": "Vaše využití knihovny a informace o hardwaru", + "vacuum": "Vakuum", + "vacuum_library": "Vakuum Knihovna", + "vacuum_library_description": "Znovu zabalte svou databázi, abyste uvolnili zbytečný prostor.", + "value": "Hodnota", + "value_required": "Hodnota je vyžadována", + "version": "Verze {{version}}", + "video": "Video", + "video_preview_not_supported": "Náhled videa není podporován.", + "view_changes": "Zobrazit změny", + "want_to_do_this_later": "Chcete to udělat později?", + "web_page_archive": "Archiv webových stránek", + "web_page_archive_one": "Archiv webových stránek", + "web_page_archive_other": "Archivy webových stránek", + "website": "Webová stránka", + "widget": "Widget", + "widget_one": "Widget", + "widget_other": "Widgety", + "with_descendants": "S potomky", + "your_account": "Váš účet", + "your_account_description": "Účet Spacedrive a informace.", + "your_local_network": "Vaše místní síť", 
+ "your_privacy": "Vaše soukromí", + "zoom": "Přiblížit", + "zoom_in": "Přiblížit", + "zoom_out": "Oddálit" +} diff --git a/interface/locales/de/common.json b/interface/locales/de/common.json index 26c6a19ad..3251d9948 100644 --- a/interface/locales/de/common.json +++ b/interface/locales/de/common.json @@ -1,4 +1,6 @@ { + "Connect": "Verbinden", + "Connecting": "Verbinden", "about": "Über", "about_vision_text": "Viele von uns haben mehrere Cloud-Konten, Laufwerke, die nicht gesichert sind, und Daten, die von Verlust bedroht sind. Wir verlassen uns auf Cloud-Dienste wie Google Fotos und iCloud, sind aber mit begrenzter Kapazität eingesperrt und haben fast keine Interoperabilität zwischen Diensten und Betriebssystemen. Fotoalben sollten nicht in einem Geräte-Ökosystem feststecken oder für Werbedaten geerntet werden. Du solltest betriebssystemunabhängig, dauerhaft und persönlich besessen sein. Daten, die wir erstellen, sind unser Erbe, das uns lange überleben wird - Open-Source-Technologie ist der einzige Weg, um sicherzustellen, dass wir absolute Kontrolle über die Daten behalten, die unser Leben definieren, in unbegrenztem Maßstab.", "about_vision_title": "Vision", @@ -43,6 +45,7 @@ "audio_preview_not_supported": "Audio-Vorschau wird nicht unterstützt.", "auto": "Auto", "back": "Zurück", + "back_to_login": "Zurück zum Login", "backfill_sync": "Synchronisierungsvorgänge auffüllen", "backfill_sync_description": "Die Bibliothek wird angehalten, bis der Backfill abgeschlossen ist", "backups": "Backups", @@ -60,6 +63,7 @@ "changelog": "Änderungsverlauf", "changelog_page_description": "Sehe, welche coolen neuen Funktionen wir machen", "changelog_page_title": "Änderungsprotokoll", + "check_your_inbox": "Bitte überprüfen Sie Ihren Posteingang unter", "checksum": "Prüfsumme", "clear_finished_jobs": "Beendete Aufgaben entfernen", "click_to_hide": "Zum Ausblenden klicken", @@ -86,14 +90,12 @@ "config": "Konfigurieren", "configure_location": "Standort konfigurieren", "confirm": 
"Bestätigen", - "Connect": "Verbinden", "connect_cloud": "Eine Cloud Verbinden", "connect_cloud_description": "Verbinde deine Cloud-Konten mit Spacedrive.", "connect_device": "Ein Gerät verbinden", "connect_device_description": "Spacedrive funktioniert am besten auf all deinen Geräten.", "connect_library_to_cloud": "Bibliothek mit Spacedrive Cloud verbinden", "connected": "Verbunden", - "Connecting": "Verbinden", "connecting_library_to_cloud": "Bibliothek mit Spacedrive Cloud verbinden …", "contacts": "Kontakte", "contacts_description": "Verwalte deine Kontakte in Spacedrive.", @@ -289,8 +291,8 @@ "general_settings": "Allgemeine Einstellungen", "general_settings_description": "Allgemeine Einstellungen in Bezug auf diesen Client.", "general_shortcut_description": "Allgemeine Verknüpfungen", - "generate_checksums": "Prüfsummen erstellen", "generatePreviewMedia_label": "Vorschaumedien für diesen Standort generieren", + "generate_checksums": "Prüfsummen erstellen", "gitignore": "Git ignorieren", "glob_description": "Glob (z. B. 
**/.git)", "go_back": "Zurück gehen", @@ -421,6 +423,7 @@ "log_out": "Abmelden", "logged_in_as": "Angemeldet als {{email}}", "logging_in": "Einloggen...", + "login_link_sent": "Wir haben einen temporären Login-Link gesendet.", "logout": "Abmelden", "manage_library": "Bibliothek verwalten", "managed": "Verwaltet", @@ -664,10 +667,10 @@ "switch_to_next_tab": "Zum nächsten Tab wechseln", "switch_to_previous_tab": "Zum vorherigen Tab wechseln", "sync": "Synchronisieren", + "syncPreviewMedia_label": "Vorschaumedien dieses Standorts mit Deinen Geräten synchronisieren", "sync_description": "Verwaltung der Synchronisierung in Spacedrive.", "sync_with_library": "Mit Bibliothek synchronisieren", "sync_with_library_description": "Wenn aktiviert, werden Deine Tastenkombinationen mit der Bibliothek synchronisiert, ansonsten gelten sie nur für diesen Client.", - "syncPreviewMedia_label": "Vorschaumedien dieses Standorts mit Deinen Geräten synchronisieren", "system": "System", "tag": "Tag", "tag_one": "Tag", @@ -738,4 +741,4 @@ "zoom": "Zoomen", "zoom_in": "Hereinzoomen", "zoom_out": "Hinauszoomen" -} \ No newline at end of file +} diff --git a/interface/locales/en/common.json b/interface/locales/en/common.json index 63e57208c..eeee053db 100644 --- a/interface/locales/en/common.json +++ b/interface/locales/en/common.json @@ -53,6 +53,7 @@ "audio_preview_not_supported": "Audio preview is not supported.", "auto": "Auto", "back": "Back", + "back_to_login": "Back to login", "backfill_sync": "Backfilling Sync Operations", "backfill_sync_description": "Library is paused until backfill completes", "backups": "Backups", @@ -74,6 +75,7 @@ "changelog": "Changelog", "changelog_page_description": "See what cool new features we're making", "changelog_page_title": "Changelog", + "check_your_inbox": "Please check your inbox at", "checksum": "Checksum", "clear_finished_jobs": "Clear out finished jobs", "click_to_hide": "Click to hide", @@ -468,6 +470,7 @@ "logged_in_as": "Logged in as 
{{email}}", "logging_in": "Logging in...", "login": "Login", + "login_link_sent": "We've sent a temporary login link.", "logout": "Logout", "manage_library": "Manage Library", "managed": "Managed", diff --git a/interface/locales/es/common.json b/interface/locales/es/common.json index de4b9d115..facd2e3ac 100644 --- a/interface/locales/es/common.json +++ b/interface/locales/es/common.json @@ -1,4 +1,6 @@ { + "Connect": "Conectar", + "Connecting": "Conectando", "about": "Acerca de", "about_vision_text": "Muchos de nosotros tenemos múltiples cuentas en la nube, discos que no tienen copias de seguridad y datos en riesgo de ser perdidos. Dependemos de servicios en la nube como Google Photos e iCloud, pero estamos limitados con una capacidad reducida y casi cero interoperabilidad entre servicios y sistemas operativos. Los álbumes de fotos no deberían estar atascados en un ecosistema de dispositivos o ser utilizados para datos publicitarios. Deberían ser independientes del SO, permanentes y de propiedad personal. 
Los datos que creamos son nuestro legado, que nos sobrevivirá mucho tiempo: la tecnología de código abierto es la única forma de asegurarnos de mantener el control absoluto sobre los datos que definen nuestras vidas, en una escala ilimitada.", "about_vision_title": "Visión", @@ -43,6 +45,7 @@ "audio_preview_not_supported": "La previsualización de audio no está soportada.", "auto": "Auto", "back": "Atrás", + "back_to_login": "Volver a iniciar sesión", "backfill_sync": "Operaciones de sincronización de reabastecimiento", "backfill_sync_description": "La biblioteca está en pausa hasta que se complete el reabastecimiento", "backups": "Copias de seguridad", @@ -60,6 +63,7 @@ "changelog": "Registro de cambios", "changelog_page_description": "Mira qué nuevas funciones geniales estamos creando", "changelog_page_title": "Registro de cambios", + "check_your_inbox": "Por favor revisa tu bandeja de entrada en", "checksum": "Suma de verificación", "clear_finished_jobs": "Eliminar trabajos finalizados", "click_to_hide": "Clic para ocultar", @@ -86,14 +90,12 @@ "config": "Config", "configure_location": "Configurar Ubicación", "confirm": "Confirm", - "Connect": "Conectar", "connect_cloud": "Conectar una nube", "connect_cloud_description": "Conecta tus cuentas en la nube a Spacedrive.", "connect_device": "Conectar un dispositivo", "connect_device_description": "Spacedrive funciona mejor en todos tus dispositivos.", "connect_library_to_cloud": "Conecte la biblioteca a Spacedrive Cloud", "connected": "Conectado", - "Connecting": "Conectando", "connecting_library_to_cloud": "Conectando la biblioteca a Spacedrive Cloud...", "contacts": "Contactos", "contacts_description": "Administra tus contactos en Spacedrive.", @@ -289,8 +291,8 @@ "general_settings": "Configuraciones Generales", "general_settings_description": "Configuraciones generales relacionadas con este cliente.", "general_shortcut_description": "Atajos de uso general", - "generate_checksums": "Generar Sumas de Verificación", 
"generatePreviewMedia_label": "Generar medios de vista previa para esta Ubicación", + "generate_checksums": "Generar Sumas de Verificación", "gitignore": "Git ignorar", "glob_description": "Globo (por ejemplo, **/.git)", "go_back": "Regresar", @@ -423,6 +425,7 @@ "log_out": "Cerrar sesión", "logged_in_as": "Conectado como {{email}}", "logging_in": "Iniciando sesión...", + "login_link_sent": "Hemos enviado un enlace de inicio de sesión temporal.", "logout": "Cerrar sesión", "manage_library": "Administrar Biblioteca", "managed": "Gestionado", @@ -666,10 +669,10 @@ "switch_to_next_tab": "Cambiar a la siguiente pestaña", "switch_to_previous_tab": "Cambiar a la pestaña anterior", "sync": "Sincronizar", + "syncPreviewMedia_label": "Sincronizar medios de vista previa para esta Ubicación con tus dispositivos", "sync_description": "Administra cómo se sincroniza Spacedrive.", "sync_with_library": "Sincronizar con la Biblioteca", "sync_with_library_description": "Si se habilita, tus atajos de teclado se sincronizarán con la biblioteca, de lo contrario solo se aplicarán a este cliente.", - "syncPreviewMedia_label": "Sincronizar medios de vista previa para esta Ubicación con tus dispositivos", "system": "Sistema", "tag": "Etiqueta", "tag_one": "Etiqueta", @@ -740,4 +743,4 @@ "zoom": "Zoom", "zoom_in": "Acercar", "zoom_out": "Alejar" -} \ No newline at end of file +} diff --git a/interface/locales/fr/common.json b/interface/locales/fr/common.json index 3bb5d3f9b..b5114518c 100644 --- a/interface/locales/fr/common.json +++ b/interface/locales/fr/common.json @@ -1,4 +1,6 @@ { + "Connect": "Connecter", + "Connecting": "De liaison", "about": "À propos", "about_vision_text": "Beaucoup d'entre nous ont plusieurs comptes cloud, des disques qui ne sont pas sauvegardés et des données à risque de perte. 
Nous dépendons de services cloud comme Google Photos et iCloud, mais sommes enfermés avec une capacité limitée et presque zéro interopérabilité entre les services et les systèmes d'exploitation. Les albums photo ne devraient pas être coincés dans un écosystème d'appareils, ou récoltés pour des données publicitaires. Ils devraient être indépendants du système d'exploitation, permanents et personnels. Les données que nous créons sont notre héritage, qui nous survivra longtemps - la technologie open source est le seul moyen d'assurer que nous conservons un contrôle absolu sur les données qui définissent nos vies, à une échelle illimitée.", "about_vision_title": "Vision", @@ -43,6 +45,7 @@ "audio_preview_not_supported": "L'aperçu audio n'est pas pris en charge.", "auto": "Auto", "back": "Retour", + "back_to_login": "Retour à la connexion", "backfill_sync": "Opérations de synchronisation de remplissage", "backfill_sync_description": "La bibliothèque est suspendue jusqu'à ce que le remplissage soit terminé", "backups": "Sauvegardes", @@ -60,6 +63,7 @@ "changelog": "Journal des modifications", "changelog_page_description": "Découvrez les nouvelles fonctionnalités cool que nous développons", "changelog_page_title": "Changelog", + "check_your_inbox": "Veuillez vérifier votre boîte de réception à", "checksum": "Somme de contrôle", "clear_finished_jobs": "Effacer les travaux terminés", "click_to_hide": "Cliquez pour masquer", @@ -86,14 +90,12 @@ "config": "Config", "configure_location": "Configurer l'emplacement", "confirm": "Confirm", - "Connect": "Connecter", "connect_cloud": "Connecter un nuage", "connect_cloud_description": "Connectez vos comptes cloud à Spacedrive.", "connect_device": "Connecter un appareil", "connect_device_description": "Spacedrive fonctionne mieux sur tous vos appareils.", "connect_library_to_cloud": "Connecter la bibliothèque à Spacedrive Cloud", "connected": "Connecté", - "Connecting": "De liaison", "connecting_library_to_cloud": "Connexion de la 
bibliothèque à Spacedrive Cloud...", "contacts": "Contacts", "contacts_description": "Gérez vos contacts dans Spacedrive.", @@ -289,8 +291,8 @@ "general_settings": "Paramètres généraux", "general_settings_description": "Paramètres généraux liés à ce client.", "general_shortcut_description": "Raccourcis d'utilisation générale", - "generate_checksums": "Générer des sommes de contrôle", "generatePreviewMedia_label": "Générer des médias d'aperçu pour cet emplacement", + "generate_checksums": "Générer des sommes de contrôle", "gitignore": "Git Ignorer", "glob_description": "Glob (par exemple, **/.git)", "go_back": "Revenir", @@ -423,6 +425,7 @@ "log_out": "Se déconnecter", "logged_in_as": "Connecté en tant que {{email}}", "logging_in": "Se connecter...", + "login_link_sent": "Nous avons envoyé un lien de connexion temporaire.", "logout": "Déconnexion", "manage_library": "Gérer la bibliothèque", "managed": "Géré", @@ -666,10 +669,10 @@ "switch_to_next_tab": "Passer à l'onglet suivant", "switch_to_previous_tab": "Passer à l'onglet précédent", "sync": "Synchroniser", + "syncPreviewMedia_label": "Synchroniser les médias d'aperçu pour cet emplacement avec vos appareils", "sync_description": "Gérer la manière dont Spacedrive se synchronise.", "sync_with_library": "Synchroniser avec la bibliothèque", "sync_with_library_description": "Si activé, vos raccourcis seront synchronisés avec la bibliothèque, sinon ils s'appliqueront uniquement à ce client.", - "syncPreviewMedia_label": "Synchroniser les médias d'aperçu pour cet emplacement avec vos appareils", "system": "Système", "tag": "Étiquette", "tag_one": "Étiquette", @@ -739,4 +742,4 @@ "zoom": "Zoom", "zoom_in": "Zoom avant", "zoom_out": "Zoom arrière" -} \ No newline at end of file +} diff --git a/interface/locales/it/common.json b/interface/locales/it/common.json index 168fe87de..1650bc7f8 100644 --- a/interface/locales/it/common.json +++ b/interface/locales/it/common.json @@ -1,4 +1,6 @@ { + "Connect": "Collegare", + 
"Connecting": "Connessione", "about": "Su di noi", "about_vision_text": "Molti di noi hanno più account cloud, dischi di cui non è stato eseguito il backup e dati a rischio di essere persi. Dipendiamo da servizi cloud come Google Photos e iCloud, che sono limitati dalla loro capacità e scarsissima interoperabilità tra servizi e sistemi operativi. Gli album fotografici non dovrebbero essere bloccati in un ecosistema di dispositivi, o essere usati per raccogliere dati. Dovrebbero essere agnostici dal sistema operativo, e detenuti permanentemente e personalmente. I dati che creiamo sono la nostra eredità, che sopravviverà molto più a lungo di noi. La tecnologia open source è l'unico modo per assicurarci di conservare il controllo assoluto sui dati che definisce le nostre vite, a scala illimitata.", "about_vision_title": "Visione", @@ -43,6 +45,7 @@ "audio_preview_not_supported": "L'anteprima audio non è disponibile.", "auto": "Auto", "back": "Indietro", + "back_to_login": "Torna al login", "backfill_sync": "Operazioni di sincronizzazione del backfill", "backfill_sync_description": "La raccolta viene sospesa fino al completamento del recupero", "backups": "Backups", @@ -60,6 +63,7 @@ "changelog": "Changelog", "changelog_page_description": "Scopri quali nuove fantastiche funzionalità stiamo realizzando", "changelog_page_title": "Changelog", + "check_your_inbox": "Controlla la tua casella di posta all'indirizzo", "checksum": "Checksum", "clear_finished_jobs": "Cancella i lavori completati", "click_to_hide": "Clicca per nascondere", @@ -86,14 +90,12 @@ "config": "Config", "configure_location": "Configura posizione", "confirm": "Confirm", - "Connect": "Collegare", "connect_cloud": "Collegare un cloud", "connect_cloud_description": "Collegate i vostri account cloud a Spacedrive.", "connect_device": "Collegare un dispositivo", "connect_device_description": "Spacedrive funziona al meglio su tutti i dispositivi.", "connect_library_to_cloud": "Connetti la libreria a Spacedrive 
Cloud", "connected": "Connesso", - "Connecting": "Connessione", "connecting_library_to_cloud": "Collegamento della libreria a Spacedrive Cloud...", "contacts": "Contatti", "contacts_description": "Gestisci i tuoi contatti su Spacedrive.", @@ -289,8 +291,8 @@ "general_settings": "Impostazioni Generali", "general_settings_description": "Impostazioni generali relative a questo client.", "general_shortcut_description": "Scorciatoie d'uso generale", - "generate_checksums": "Genera i checksum", "generatePreviewMedia_label": "Genera anteprime per questa posizione", + "generate_checksums": "Genera i checksum", "gitignore": "Git Ignora", "glob_description": "Glob (ad esempio, **/.git)", "go_back": "Indietro", @@ -423,6 +425,7 @@ "log_out": "Disconnettiti", "logged_in_as": "Accesso effettuato come {{email}}", "logging_in": "Entrando...", + "login_link_sent": "Abbiamo inviato un collegamento di accesso temporaneo.", "logout": "Esci", "manage_library": "Gestisci la Libreria", "managed": "Gestito", @@ -666,10 +669,10 @@ "switch_to_next_tab": "Passa alla scheda successiva", "switch_to_previous_tab": "Passa alla scheda precedente", "sync": "Sincronizza", + "syncPreviewMedia_label": "Sincronizza l'anteprima multimediale per questa posizione con i tuoi dispositivi", "sync_description": "Gestisci la modalità di sincronizzazione di Spacedrive.", "sync_with_library": "Sincronizza con la Libreria", "sync_with_library_description": "Se abilitato, le combinazioni di tasti verranno sincronizzate con la libreria, altrimenti verranno applicate solo a questo client.", - "syncPreviewMedia_label": "Sincronizza l'anteprima multimediale per questa posizione con i tuoi dispositivi", "system": "Sistema", "tag": "Tag", "tag_one": "Tag", @@ -739,4 +742,4 @@ "zoom": "Ingrandisci", "zoom_in": "Ingrandire", "zoom_out": "Ridurre" -} \ No newline at end of file +} diff --git a/interface/locales/ja/common.json b/interface/locales/ja/common.json index ca4e7578f..e1e32c889 100644 --- 
a/interface/locales/ja/common.json +++ b/interface/locales/ja/common.json @@ -1,4 +1,6 @@ { + "Connect": "接続する", + "Connecting": "接続中", "about": "概要", "about_vision_text": "私達は通常、複数のクラウドのアカウントを持ち、バックアップのないドライブを利用し、データを失う危険にさらされています。またGoogle PhotosやiCloudのようなクラウドサービスに依存していますが、それらは容量に制限があり、サービスやOS間に互換性はほとんどありません。フォトアルバムは、デバイスのエコシステムに縛られたり広告データとして利用されたりすべきではなく、OSにとらわれず、永続的で、個人所有のものであるべきです。私達が作成したデータは私達の遺産であり、私達よりもずっと長生きします。オープンソース・テクノロジーは、無制限のスケールで、私達の生活を定義するデータの絶対的なコントロールを確実に保持する唯一の方法なのです。", "about_vision_title": "ビジョン", @@ -43,6 +45,7 @@ "audio_preview_not_supported": "オーディオのプレビューには対応していません。", "auto": "自動", "back": "戻る", + "back_to_login": "ログインに戻る", "backfill_sync": "同期操作のバックフィル", "backfill_sync_description": "バックフィルが完了するまでライブラリは一時停止されます", "backups": "バックアップ", @@ -60,6 +63,7 @@ "changelog": "変更履歴", "changelog_page_description": "Spacedriveの魅力ある新機能をご確認ください。", "changelog_page_title": "変更履歴", + "check_your_inbox": "受信箱を確認してください", "checksum": "チェックサム", "clear_finished_jobs": "完了ジョブを削除", "click_to_hide": "非表示にするにはクリック", @@ -86,14 +90,12 @@ "config": "コンフィグ", "configure_location": "ロケーション設定を編集", "confirm": "Confirm", - "Connect": "接続する", "connect_cloud": "クラウドに接続する", "connect_cloud_description": "クラウドアカウントをSpacedriveに接続する。", "connect_device": "デバイスを接続する", "connect_device_description": "Spacedriveはすべてのデバイスで最適に機能します。", "connect_library_to_cloud": "ライブラリをSpacedrive Cloudに接続する", "connected": "接続中", - "Connecting": "接続中", "connecting_library_to_cloud": "ライブラリを Spacedrive Cloud に接続しています...", "contacts": "連絡先", "contacts_description": "Spacedriveで連絡先を管理。", @@ -267,8 +269,8 @@ "file_extension_description": "ファイル拡張子 (例: .mp4、.jpg、.txt)", "file_from": "File {{file}} from {{name}}", "file_indexing_rules": "ファイルのインデックス化ルール", - "file_picker_not_supported": "このプラットフォームではファイルピッカーはサポートされていません", "file_other": "ファイル", + "file_picker_not_supported": "このプラットフォームではファイルピッカーはサポートされていません", "filter": "フィルター", "filters": "フィルター", "flash": "閃光", @@ -287,8 +289,8 @@ "general_settings": "一般設定", 
"general_settings_description": "このクライアントに関する一般的な設定を行います。", "general_shortcut_description": "一般に使用されるショートカットキー。", - "generate_checksums": "チェックサムを作成", "generatePreviewMedia_label": "このロケーションのプレビューメディアを作成する", + "generate_checksums": "チェックサムを作成", "gitignore": "Git 無視", "glob_description": "グロブ (例: **/.git)", "go_back": "戻る", @@ -414,6 +416,7 @@ "log_out": "ログアウト", "logged_in_as": "{{email}} でログイン", "logging_in": "ログイン中...", + "login_link_sent": "一時的なログイン リンクを送信しました。", "logout": "ログアウト", "manage_library": "ライブラリの設定", "managed": "マネージド", @@ -657,10 +660,10 @@ "switch_to_next_tab": "次のタブへ", "switch_to_previous_tab": "前のタブへ", "sync": "同期", + "syncPreviewMedia_label": "このロケーションのプレビューメディアを他のデバイスと同期する", "sync_description": "Spacedriveの同期方法を管理します。", "sync_with_library": "ライブラリと同期する", "sync_with_library_description": "有効にすると、キーバインドがライブラリと同期されます。無効にすると、このクライアントにのみ適用されます。", - "syncPreviewMedia_label": "このロケーションのプレビューメディアを他のデバイスと同期する", "system": "システム", "tag": "タグ", "tag_other": "タグ", @@ -729,4 +732,4 @@ "zoom": "ズーム", "zoom_in": "拡大する", "zoom_out": "縮小する" -} \ No newline at end of file +} diff --git a/interface/locales/nl/common.json b/interface/locales/nl/common.json index 1662dc372..52801052b 100644 --- a/interface/locales/nl/common.json +++ b/interface/locales/nl/common.json @@ -1,4 +1,6 @@ { + "Connect": "Aansluiten", + "Connecting": "Verbinden", "about": "Over", "about_vision_text": "Veel van ons hebben meerdere cloud accounts, schijven waarvan geen back-ups worden gemaakt en gegevens die het risico lopen verloren te gaan. We zijn afhankelijk van clouddiensten als Google Photos en iCloud, maar zitten vast met een beperkte capaciteit en vrijwel geen interoperabiliteit tussen diensten en besturingssystemen. Fotoalbums mogen niet vastzitten in het ecosysteem van een apparaat, of worden gebruikt voor advertentiegegevens. Ze moeten OS-agnostisch, permanent en persoonlijk eigendom zijn. De gegevens die we creëren zijn onze erfenis, die ons nog lang zal overleven. 
Open source-technologie is de enige manier om ervoor te zorgen dat we de absolute controle behouden over de gegevens die ons leven bepalen, op onbeperkte schaal.", "about_vision_title": "Visie", @@ -43,6 +45,7 @@ "audio_preview_not_supported": "Audio voorvertoning wordt niet ondersteund.", "auto": "Auto", "back": "Terug", + "back_to_login": "Terug naar inloggen", "backfill_sync": "Synchronisatiebewerkingen voor opvulling", "backfill_sync_description": "De bibliotheek wordt gepauzeerd totdat het aanvullen is voltooid", "backups": "Backups", @@ -60,6 +63,7 @@ "changelog": "Wijzigingslogboek", "changelog_page_description": "Zie welke coole nieuwe functies we aan het maken zijn", "changelog_page_title": "Wijzigingslogboek", + "check_your_inbox": "Controleer uw inbox op", "checksum": "Controlegetal", "clear_finished_jobs": "Ruim voltooide taken op", "click_to_hide": "Klik om te verbergen", @@ -86,14 +90,12 @@ "config": "Config", "configure_location": "Locatie Configureren", "confirm": "Confirm", - "Connect": "Aansluiten", "connect_cloud": "Een cloud verbinden", "connect_cloud_description": "Verbind uw cloudaccounts met Spacedrive.", "connect_device": "Een apparaat aansluiten", "connect_device_description": "Spacedrive werkt het beste op al uw apparaten.", "connect_library_to_cloud": "Verbind de bibliotheek met Spacedrive Cloud", "connected": "Verbonden", - "Connecting": "Verbinden", "connecting_library_to_cloud": "Bibliotheek verbinden met Spacedrive Cloud...", "contacts": "Contacten", "contacts_description": "Beheer je contacten in Spacedrive.", @@ -289,8 +291,8 @@ "general_settings": "Algemene Instellingen", "general_settings_description": "Algemene instellingen gerelateerd aan deze client.", "general_shortcut_description": "Algemene sneltoetsen", - "generate_checksums": "Genereer Controlegetal", "generatePreviewMedia_label": "Genereer voorvertoning media voor deze Locatie", + "generate_checksums": "Genereer Controlegetal", "gitignore": "Git negeren", 
"glob_description": "Glob (bijvoorbeeld **/.git)", "go_back": "Ga Terug", @@ -421,6 +423,7 @@ "log_out": "Uitloggen", "logged_in_as": "Ingelogd als {{email}}", "logging_in": "Inloggen...", + "login_link_sent": "We hebben een tijdelijke inloglink verzonden.", "logout": "Uitloggen", "manage_library": "Beheer Bibliotheek", "managed": "Beheerd", @@ -664,10 +667,10 @@ "switch_to_next_tab": "Naar volgend tabblad schakelen", "switch_to_previous_tab": "Naar vorig tabblad schakelen", "sync": "Synchronisatie", + "syncPreviewMedia_label": "Synchroniseer voorvertoningsmedia van deze locatie met je apparaten", "sync_description": "Beheer hoe Spacedrive synchroniseert.", "sync_with_library": "Synchroniseer met Bibliotheek", "sync_with_library_description": "Indien ingeschakeld, worden je toetscombinaties gesynchroniseerd met de bibliotheek, anders zijn ze alleen van toepassing op deze client.", - "syncPreviewMedia_label": "Synchroniseer voorvertoningsmedia van deze locatie met je apparaten", "system": "Systeem", "tag": "Tag", "tag_one": "Tag", @@ -738,4 +741,4 @@ "zoom": "Zoom", "zoom_in": "Inzoomen", "zoom_out": "Uitzoomen" -} \ No newline at end of file +} diff --git a/interface/locales/ru/common.json b/interface/locales/ru/common.json index f15efdac3..301e0aa6f 100644 --- a/interface/locales/ru/common.json +++ b/interface/locales/ru/common.json @@ -53,6 +53,7 @@ "audio_preview_not_supported": "Предварительный просмотр аудио не поддерживается.", "auto": "Авто", "back": "Назад", + "back_to_login": "Вернуться к входу", "backfill_sync": "Операции полной синхронизации", "backfill_sync_description": "Работа библиотеки приостановлена ​​до завершения синхронизации", "backups": "Рез. 
копии", @@ -75,6 +76,7 @@ "changelog": "Что нового", "changelog_page_description": "Узнайте, какие новые возможности мы добавили", "changelog_page_title": "Список изменений", + "check_your_inbox": "Пожалуйста, проверьте свой почтовый ящик по адресу", "checksum": "Контрольная сумма", "clear_finished_jobs": "Очистить законченные задачи", "click_to_hide": "Щелкните, чтобы скрыть", @@ -484,6 +486,7 @@ "log_out": "Выйти из системы", "logged_in_as": "Вошли в систему как {{email}}", "logging_in": "Вход в систему...", + "login_link_sent": "Мы отправили временную ссылку для входа.", "logout": "Выйти", "manage_library": "Управление библиотекой", "managed": "Управляемый", diff --git a/interface/locales/tr/common.json b/interface/locales/tr/common.json index a331ad6d0..ebcc77151 100644 --- a/interface/locales/tr/common.json +++ b/interface/locales/tr/common.json @@ -1,4 +1,6 @@ { + "Connect": "Bağlamak", + "Connecting": "Bağlanıyor", "about": "Hakkında", "about_vision_text": "Birçoğumuzun birden fazla bulut hesabı, yedeklenmemiş sürücüleri ve kaybolma riski taşıyan verileri var. Google Fotoğraflar ve iCloud gibi bulut hizmetlerine bağımlıyız, ancak sınırlı kapasiteyle ve hizmetler ile işletim sistemleri arasında neredeyse sıfır geçiş yapabilirlikle kısıtlanmış durumdayız. Fotoğraf albümleri bir cihaz ekosisteminde sıkışıp kalmamalı veya reklam verileri için kullanılmamalıdır. OS bağımsız, kalıcı ve kişisel olarak sahip olunmalıdır. 
Oluşturduğumuz veriler, bizden uzun süre yaşayacak mirasımızdır - verilerimiz üzerinde mutlak kontrol sağlamak için açık kaynak teknolojisi tek yoludur, sınırsız ölçekte.", "about_vision_title": "Vizyon", @@ -43,6 +45,7 @@ "audio_preview_not_supported": "Ses önizlemesi desteklenmiyor.", "auto": "Oto", "back": "Geri", + "back_to_login": "Girişe geri dön", "backfill_sync": "Dolgu Senkronizasyon İşlemleri", "backfill_sync_description": "Dolgu tamamlanana kadar kitaplık duraklatıldı", "backups": "Yedeklemeler", @@ -60,6 +63,7 @@ "changelog": "Değişiklikler", "changelog_page_description": "Yaptığımız havalı yeni özellikleri görün", "changelog_page_title": "Değişiklikler", + "check_your_inbox": "Lütfen gelen kutunuzu şu adresten kontrol edin:", "checksum": "Kontrol Toplamı", "clear_finished_jobs": "Biten işleri temizle", "click_to_hide": "Gizlemek için tıklayın", @@ -86,14 +90,12 @@ "config": "Config", "configure_location": "Konumu Yapılandır", "confirm": "Confirm", - "Connect": "Bağlamak", "connect_cloud": "Bir bulut bağlayın", "connect_cloud_description": "Bulut hesaplarınızı Spacedrive'a bağlayın.", "connect_device": "Bir cihaz bağlayın", "connect_device_description": "Spacedrive tüm cihazlarınızda en iyi şekilde çalışır.", "connect_library_to_cloud": "Kitaplığı Spacedrive Cloud'a bağlayın", "connected": "Bağlı", - "Connecting": "Bağlanıyor", "connecting_library_to_cloud": "Kitaplık Spacedrive Cloud'a bağlanıyor...", "contacts": "Kişiler", "contacts_description": "Kişilerinizi Spacedrive'da yönetin.", @@ -289,8 +291,8 @@ "general_settings": "Genel Ayarlar", "general_settings_description": "Bu müşteriye ilişkin genel ayarlar.", "general_shortcut_description": "Genel kullanım kısayolları", - "generate_checksums": "Kontrol Toplamları Oluştur", "generatePreviewMedia_label": "Bu Konum için önizleme medyası oluştur", + "generate_checksums": "Kontrol Toplamları Oluştur", "gitignore": "Git Yoksay", "glob_description": "Glob (ör. 
**/.git)", "go_back": "Geri Dön", @@ -421,6 +423,7 @@ "log_out": "Çıkış Yap", "logged_in_as": "{{email}} olarak giriş yapıldı", "logging_in": "Giriş...", + "login_link_sent": "Geçici bir giriş bağlantısı gönderdik.", "logout": "Çıkış Yap", "manage_library": "Kütüphaneyi Yönet", "managed": "Yönetilen", @@ -664,10 +667,10 @@ "switch_to_next_tab": "Sonraki sekmeye geç", "switch_to_previous_tab": "Önceki sekmeye geç", "sync": "Senkronize Et", + "syncPreviewMedia_label": "Bu Konum için ön izleme medyasını cihazlarınızla senkronize et", "sync_description": "Spacedrive'ın nasıl senkronize edileceğini yönetin.", "sync_with_library": "Kütüphane ile Senkronize Et", "sync_with_library_description": "Etkinleştirilirse tuş bağlamalarınız kütüphane ile senkronize edilecek, aksi takdirde yalnızca bu istemciye uygulanacak.", - "syncPreviewMedia_label": "Bu Konum için ön izleme medyasını cihazlarınızla senkronize et", "system": "Sistem", "tag": "Etiket", "tag_one": "Etiket", @@ -738,4 +741,4 @@ "zoom": "Yakınlaştır", "zoom_in": "Yakınlaştır", "zoom_out": "Uzaklaştır" -} \ No newline at end of file +} diff --git a/interface/locales/zh-CN/common.json b/interface/locales/zh-CN/common.json index bd13e9bd8..e3b5128ce 100644 --- a/interface/locales/zh-CN/common.json +++ b/interface/locales/zh-CN/common.json @@ -1,733 +1,736 @@ { - "about": "关于", - "about_vision_text": "很多人都不止拥有一个云账户。没有备份,数据有丢失的风险。我们依赖像 Google 相册、iCloud 这样的云服务,但是它们容量低下,且互操作性几乎为零,几乎无法和操作系统集成。我们的照片不应该困在营业公司的广告当中,而因该由我们全权掌控。创造数据是我们的传统技能,这些数据存在于我们生活的方方面面。而开源技术是唯一能确保我们绝对控制这些日益增长的数据的方式。", - "about_vision_title": "项目远景", - "accept": "接受", - "accept_files": "Accept files", - "accessed": "已访问", - "account": "账户", - "actions": "操作", - "add": "添加", - "add_device": "添加设备", - "add_file_extension_rule": "将文件扩展名添加到当前规则", - "add_filter": "添加过滤器", - "add_library": "添加库", - "add_location": "添加位置", - "add_location_description": "通过将您喜爱的位置添加到个人库中,增强您的 Spacedrive 体验,以实现无缝高效的文件管理。", - "add_location_overview_description": "将本地路径、卷或网络位置连接到 
Spacedrive。", - "add_location_tooltip": "将路径添加为索引", - "add_locations": "添加位置", - "add_tag": "添加标签", - "added_location": "已添加位置 {{name}}", - "adding_location": "添加位置 {{name}}", - "advanced": "高级", - "advanced_settings": "高级设置", - "album": "相册", - "alias": "别名", - "all_jobs_have_been_cleared": "所有任务皆已完成。", - "alpha_release_description": "感谢试用 Spacedrive。现在 Spacedrive 处于 Alpha 发布阶段,展示了激动人心的新功能。作为初始版本,它可能包含一些错误。我们恳请您在我们的 Discord 频道上反馈遇到的任何问题,您宝贵的反馈将有助于提升用户体验。", - "alpha_release_title": "Alpha 版本", - "app_crashed": "应用程序崩溃了", - "app_crashed_description": "出现了一些错误...", - "appearance": "外观", - "appearance_description": "调整客户端的外观。", - "apply": "申请", - "archive": "存档", - "archive_coming_soon": "存档位置功能即将推出……", - "archive_info": "将库中的数据作为存档提取,有利于保留位置的目录结构。", - "are_you_sure": "您确定吗?", - "ascending": "升序", - "ask_spacedrive": "询问 Spacedrive", - "assign_tag": "分配标签", - "audio": "音频", - "audio_preview_not_supported": "不支持音频预览。", - "auto": "自动", - "back": "返回", - "backfill_sync": "回填同步操作", - "backfill_sync_description": "库暂停直至回填完成", - "backups": "备份", - "backups_description": "管理您的 Spacedrive 数据库备份。", - "bitrate": "比特率", - "blur_effects": "模糊效果", - "blur_effects_description": "某些组件将应用模糊效果。", - "book": "书籍", - "cancel": "取消", - "cancel_selection": "取消选择", - "canceled": "取消", - "celcius": "摄氏度", - "change": "更改", - "change_view_setting_description": "更改默认资源管理器视图", - "changelog": "更新日志", - "changelog_page_description": "看看我们在开发哪些酷炫的新功能", - "changelog_page_title": "更新日志", - "checksum": "校验和", - "clear_finished_jobs": "清除已完成的任务", - "click_to_hide": "点击隐藏", - "click_to_lock": "点击锁定", - "client": "客户端", - "close": "关闭", - "close_command_palette": "关闭命令面板", - "close_current_tab": "关闭当前标签页", - "cloud": "云", - "cloud_connect_description": "您想将您的图书馆连接到云端吗?", - "cloud_drives": "云盘", - "cloud_sync": "云同步", - "cloud_sync_description": "管理将您的库与 Spacedrive Cloud 同步的流程", - "clouds": "云服务", - "code": "Code", - "collection": "Collection", - "color": "颜色", - "color_profile": "颜色配置文件", - 
"color_space": "色彩空间", - "coming_soon": "即将推出", - "completed": "已完成", - "completed_with_errors": "已完成但有错误", - "compress": "压缩", - "config": "配置文件", - "configure_location": "配置文件位置", - "confirm": "确认", - "Connect": "连接", - "connect_cloud": "连接到云", - "connect_cloud_description": "将您的云帐户连接到 Spacedrive。", - "connect_device": "连接设备", - "connect_device_description": "Spacedrive 在您的所有设备上都能发挥最佳效果。", - "connect_library_to_cloud": "将库连接到 Spacedrive Cloud", - "connected": "已连接", - "Connecting": "正在连接", - "connecting_library_to_cloud": "将库连接到 Spacedrive Cloud...", - "contacts": "联系人", - "contacts_description": "在 Spacedrive 中管理您的联系人。", - "contains": "包含", - "content_id": "内容ID", - "continue": "继续", - "convert_to": "转换为", - "coordinates": "坐标", - "copied": "已复制", - "copy": "复制", - "copy_as_path": "复制路径", - "copy_object": "复制对象", - "copy_path_to_clipboard": "复制路径到剪贴板", - "copy_success": "项目已复制", - "create": "创建", - "create_file_error": "创建文件时出错", - "create_file_success": "创建了新文件:{{name}}", - "create_folder_error": "创建文件夹时出错", - "create_folder_success": "创建了新文件夹:{{name}}", - "create_library": "创建库", - "create_library_description": "数据库将安全地存储在您地设备上。我们不会移动您的文件,仅仅对其进行索引,同时存储与 Spacedrive 相关的数据。", - "create_location": "Create Location", - "create_new_library": "创建新库", - "create_new_library_description": "新创建地数据库将安全地存储在您地设备上。我们不会移动您的文件,仅仅对其进行索引,同时存储与 Spacedrive 相关的数据。", - "create_new_tag": "创建新标签", - "create_new_tag_description": "设置名称与颜色。", - "create_tag": "创建标签", - "created": "已创建", - "creating_library": "正在创建库…", - "creating_your_library": "正在为您创建库", - "current": "当前使用", - "current_directory": "当前目录", - "current_directory_with_descendants": "当前目录及其子目录", - "custom": "自定义", - "cut": "剪切", - "cut_object": "剪切对象", - "cut_success": "剪切项目", - "dark": "深色", - "data_folder": "数据文件夹", - "database": "数据库", - "date": "日期", - "date_accessed": "访问日期", - "date_created": "创建日期", - "date_indexed": "索引日期", - "date_modified": "修改日期", - "date_taken": "拍摄日期", - "date_time_format": "日期和时间格式", - 
"date_time_format_description": "选择 Spacedrive 中显示的日期格式", - "debug_mode": "调试模式", - "debug_mode_description": "启用本应用额外的调试功能。", - "default": "默认", - "default_settings": "默认设置", - "delete": "删除", - "delete_dialog_title": "删除 {{prefix}} {{type}}", - "delete_forever": "永久删除", - "delete_info": "此操作只删除预览媒体,并不会删除磁盘上实际的文件夹。", - "delete_library": "删除库", - "delete_library_description": "Spacedrive 库将会永久删除,但您的文件不会删除。", - "delete_location": "删除存储位置", - "delete_location_description": "删除存储位置时,Spacedrive 会从数据库中移除所有与之相关的文件,但是不会删除文件本身。", - "delete_object": "删除对象", - "delete_rule": "删除规则", - "delete_rule_confirmation": "您确定要删除这个规则吗?", - "delete_tag": "删除标签", - "delete_tag_description": "您确定要删除这个标签吗?此操作不能撤销,打过标签的文件将会丢失标签。", - "delete_warning": "警告:这将永久删除您的 {{type}},我们目前还没有回收站……", - "descending": "降序", - "description": "描述", - "deselect": "取消选择", - "details": "详情", - "device": "设备", - "devices": "设备", - "devices_coming_soon_tooltip": "即将推出!本 Alpha 版本不支持库同步,此功能很快就会准备就绪。", - "dialog": "对话框", - "dialog_shortcut_description": "执行操作", - "direction": "次序", - "directory_other": "文件夹", - "disabled": "已禁用", - "disconnected": "已断开连接", - "display_formats": "显示格式", - "display_name": "显示名称", - "distance": "距离", - "do_the_thing": "Do the thing", - "document": "文档", - "done": "完成", - "dont_have_any": "这里看起来什么也没有╰(*°▽°*)╯", - "dont_show_again": "不再显示", - "dotfile": "Dotfile", - "double_click_action": "双击操作", - "download": "下载", - "downloading_update": "正在下载更新", - "drag_to_resize": "拖动以调整大小", - "duplicate": "复制", - "duplicate_object": "复制对象", - "duplicate_success": "项目已复制", - "edit": "编辑", - "edit_library": "编辑库", - "edit_location": "编辑位置", - "empty_file": "空的文件", - "enable_networking": "启用网络", - "enable_networking_description": "允许您的节点与您周围的其他 Spacedrive 节点进行通信。", - "enable_networking_description_required": "数据库的同步和 Spacedrop 需要开启本功能!", - "enable_relay": "启用中继服务器", - "enable_relay_description": "启用中继服务器以允许您的设备通过公共互联网进行通信。", - "enable_sync": "启用同步", - "enable_sync_description": "为该库中的所有现有数据生成同步操作,并配置 
Spacedrive 以在将来发生事情时生成同步操作。", - "enabled": "启用", - "encrypt": "加密", - "encrypt_library": "加密数据库", - "encrypt_library_coming_soon": "资料库加密即将推出", - "encrypt_library_description": "为数据库启用加密,这只会加密Spacedrive数据库,不会加密文件本身。", - "encrypted": "已加密", - "ends_with": "以... 结束", - "ephemeral_notice_browse": "直接从您的设备浏览您的文件和文件夹。", - "ephemeral_notice_consider_indexing": "考虑索引您本地的位置,以获得更快和更高效的浏览。", - "equals": "是", - "erase": "擦除", - "erase_a_file": "擦除一个文件", - "erase_a_file_description": "配置您的擦除设置。", - "error": "错误", - "error_loading_original_file": "加载原始文件出错", - "error_message": "错误: {{error}}.", - "executable": "应用程序", - "expand": "展开", - "explorer": "资源管理器", - "explorer_settings": "资源管理器设置", - "explorer_shortcut_description": "导航、与文件系统交互", - "explorer_view": "资源管理器视图", - "export": "导出", - "export_library": "导出库", - "export_library_coming_soon": "导出库功能即将推出", - "export_library_description": "将这个库导出到一个文件。", - "extension": "扩大", - "extensions": "扩展", - "extensions_description": "安装扩展来扩展这个客户端的功能。", - "fahrenheit": "华氏度", - "failed": "失败的", - "failed_to_add_location": "添加位置失败", - "failed_to_cancel_job": "取消任务失败。", - "failed_to_clear_all_jobs": "清除所有任务失败。", - "failed_to_copy_file": "复制文件失败", - "failed_to_copy_file_path": "复制文件路径失败", - "failed_to_cut_file": "剪切文件失败", - "failed_to_delete_rule": "删除规则失败", - "failed_to_download_update": "无法下载更新", - "failed_to_duplicate_file": "复制文件失败", - "failed_to_generate_checksum": "生成校验和失败", - "failed_to_generate_labels": "生成标签失败", - "failed_to_generate_thumbnails": "生成缩略图失败", - "failed_to_load_tags": "加载标签失败", - "failed_to_open_file_body": "无法打开文件: {{error}}", - "failed_to_open_file_title": "无法打开文件", - "failed_to_open_file_with": "打开文件失败: {{data}}", - "failed_to_pause_job": "暂停任务失败。", - "failed_to_reindex_location": "重索引位置失败", - "failed_to_remove_file_from_recents": "从最近文档中删除文件失败", - "failed_to_remove_job": "删除任务失败。", - "failed_to_rename_file": "不能将 {{oldName}} 重命名为 {{newName}}", - "failed_to_rescan_location": "重新扫描位置失败", - "failed_to_resume_job": 
"恢复任务失败。", - "failed_to_update_location_settings": "更新位置设置失败", - "favorite": "收藏", - "favorites": "收藏夹", - "feedback": "反馈", - "feedback_is_required": "反馈是必填项", - "feedback_login_description": "登录使我们能够回复您的反馈", - "feedback_placeholder": "您的反馈...", - "feedback_toast_error_message": "提交反馈时出错,请重试。", - "file_already_exist_in_this_location": "文件已存在于此位置", - "file_directory_name": "文件/目录名称", - "file_extension_description": "文件扩展名(例如 .mp4、.jpg、.txt)", - "file_from": "来自 {{name}} 的文件 {{file}}", - "file_indexing_rules": "文件索引规则", - "file_other": "文件", - "file_picker_not_supported": "File picker不支持此设备", - "filter": "筛选", - "filters": "过滤器", - "flash": "闪光", - "folder": "文件夹", - "font": "字体", - "for_library": "For library {{name}}", - "forced": "被迫", - "forward": "前进", - "free_of": "可用", - "from": "从", - "full_disk_access": "完全磁盘访问", - "full_disk_access_description": "为了提供最佳体验,我们需要访问您的磁盘以索引您的文件。您的文件只有您自己可以访问。", - "full_reindex": "完全重新索引", - "full_reindex_info": "执行对此位置的完全重新扫描。", - "general": "通用", - "general_settings": "通用设置", - "general_settings_description": "与此客户端相关的一般设置。", - "general_shortcut_description": "通用快捷键", - "generate_checksums": "生成校验和", - "generatePreviewMedia_label": "为这个位置生成预览媒体", - "gitignore": "gitignore文件", - "glob_description": "全局(例如 **/.git)", - "go_back": "返回", - "go_to_labels": "转到标签", - "go_to_location": "前往地点", - "go_to_overview": "前往概览", - "go_to_recents": "转到最近的内容", - "go_to_settings": "前往设置", - "go_to_tag": "转到标签", - "got_it": "我知道了", - "grid_gap": "间隙", - "grid_view": "网格视图", - "grid_view_notice_description": "网格视图以缩略图形式显示文件和文件夹,以便直观、快速识别要寻找的文件。", - "hidden": "隐", - "hidden_label": "阻止位置及其内容出现在汇总分类、搜索和标签中,除非启用了“显示隐藏项目”。", - "hide_in_library_search": "在库搜索中隐藏", - "hide_in_library_search_description": "在搜索整个库时从结果中隐藏带有此标签的文件。", - "hide_in_sidebar": "在侧边栏中隐藏", - "hide_in_sidebar_description": "阻止此标签在应用的侧边栏中显示。", - "hide_location_from_view": "隐藏位置和内容的视图", - "hide_sidebar": "隐藏侧边栏", - "home": "我的文档", - "hosted_locations": "Hosted Locations", - 
"hosted_locations_description": "使用我们的云服务增强你的本地储存", - "icon_size": "图标大小", - "image": "图像", - "image_labeler_ai_model": "图像标签识别 AI 模型", - "image_labeler_ai_model_description": "用于识别图像中对象的模型。较大的模型更准确但速度较慢。", - "import": "导入", - "incoming_spacedrop": "Spacedrop正在接收文件", - "indexed": "已索引", - "indexed_new_files": "Indexed new files {{name}}", - "indexer_rule_reject_allow_label": "默认情况下,索引器规则作为拒绝列表,排除与其匹配的任何文件。启用此选项将其变成允许列表,仅索引符合其规定规则的位置的文件。", - "indexer_rules": "索引器规则", - "indexer_rules_error": "索引索引器规则时出错", - "indexer_rules_info": "索引器规则允许您使用通配符指定要忽略的路径。", - "indexer_rules_not_available": "没有索引器规则", - "ingester": "同步接收器", - "ingester_description": "此过程接收接收到的云操作并将它们发送到主同步接收器。", - "injester_description": "此过程从 P2P 连接和 Spacedrive Cloud 获取同步操作,并将其应用到库。", - "install": "安装", - "install_update": "安装更新", - "installed": "已安装", - "invalid_extension": "无效的扩展名", - "invalid_glob": "无效的全局变量", - "invalid_name": "名称无效", - "invalid_path": "路径无效", - "ipv4_ipv6_listeners_error": "创建 IPv4 和 IPv6 监听器出错。请检查防火墙设置!", - "ipv4_listeners_error": "创建 IPv4 监听器出错。请检查防火墙设置!", - "ipv6": "IPv6网络", - "ipv6_description": "允许使用 IPv6 网络进行点对点通信", - "ipv6_listeners_error": "创建 IPv6 监听器时出错。请检查防火墙设置!", - "is": "是", - "is_not": "不是", - "item": "item", - "item_other": "item", - "item_size": "图标大小", - "items": "项目", - "job_error_description": "作业已完成,但有错误。\n请参阅下面的错误日志以获取更多信息。\n如果您需要帮助,请联系支持人员并提供此错误。", - "job_has_been_canceled": "作业已取消。", - "job_has_been_paused": "作业已暂停。", - "job_has_been_removed": "作业已移除。", - "job_has_been_resumed": "作业已恢复。", - "join": "加入", - "join_discord": "加入 Discord", - "join_library": "加入一个资料库", - "join_library_description": "数据库将安全地存储在您地设备上。我们不会移动您的文件,仅仅对其进行索引,同时存储与 Spacedrive 相关的数据。", - "key": "键位", - "key_manager": "密钥管理器", - "key_manager_description": "创建加密密钥,挂载和卸载密钥以即时查看解密文件。", - "keybinds": "快捷键", - "keybinds_description": "查看、管理客户端快捷键", - "keys": "密钥", - "kilometers": "千米", - "kind": "种类", - "kind_other": "种类", - "label": "标签", - "labels": "标签", - "language": "语言", - 
"language_description": "更改 Spacedrive 界面的语言", - "learn_more": "Learn More", - "learn_more_about_telemetry": "了解更多有关遥测的信息", - "less": "less", - "libraries": "库", - "libraries_description": "数据库包含所有库的数据和文件的元数据。", - "library": "库", - "library_bytes": "库大小", - "library_bytes_description": "图书馆中所有位置的总大小。", - "library_db_size": "索引大小", - "library_db_size_description": "图书馆数据库的大小。", - "library_name": "库名称", - "library_overview": "库概览", - "library_settings": "库设置", - "library_settings_description": "与当前活动库相关的一般设置。", - "light": "浅色", - "link": "链接", - "list_view": "列表视图", - "list_view_notice_description": "通过列表视图轻松导航您的文件和文件夹。这种视图以简单、有组织的列表形式显示文件,让您能够快速定位和访问所需文件。", - "loading": "正在加载", - "local": "本地", - "local_locations": "本地位置", - "local_node": "本地节点", - "location": "地点", - "location_added_successfully": "位置添加成功。", - "location_connected_tooltip": "位置正在监视变化", - "location_deleted_successfully": "位置删除成功。", - "location_disconnected_tooltip": "位置未被监视以检查更改", - "location_display_name_info": "此位置的名称,这是将显示在侧边栏的名称。不会重命名磁盘上的实际文件夹。", - "location_empty_notice_message": "这个地方空空如也", - "location_is_already_linked": "位置已经链接", - "location_other": "文件位置", - "location_path_info": "此位置的路径,这是文件在磁盘上的存储位置。", - "location_type": "位置类型", - "location_type_managed": "Spacedrive 将为您排序文件。如果位置不为空,将创建一个“spacedrive”文件夹。", - "location_type_normal": "内容将按原样索引,新文件不会自动排序。", - "location_type_replica": "此位置是另一个位置的副本,其内容将自动同步。", - "locations": "存储位置", - "locations_description": "管理您的存储位置。", - "lock": "锁定", - "lock_sidebar": "锁定侧边栏", - "log_in": "登录", - "log_in_with_browser": "使用浏览器登录", - "log_out": "退出登录", - "logged_in_as": "已登录为 {{email}}", - "logging_in": "正在登录...", - "logout": "退出登录", - "manage_library": "管理库", - "managed": "已管理", - "manual_peers": "手动添加对等点", - "manual_peers_description": "通过输入 IP 地址和端口来手动添加对等点。\n当无法自动发现时,这非常有用。", - "media": "媒体", - "media_view": "媒体视图", - "media_view_context": "媒体视图上下文", - "media_view_notice_description": "轻松发现照片和视频,媒体视图将从当前位置开始显示结果,包括子目录。", - 
"meet_contributors_behind_spacedrive": "结识 Spacedrive 背后的贡献者", - "meet_title": "了解{{title}}", - "mesh": "Mesh", - "miles": "英里", - "mode": "模式", - "model": "模型", - "modified": "已修改", - "more": "更多", - "more_actions": "更多操作…", - "more_info": "更多信息", - "move_back_within_quick_preview": "在快速预览中后退", - "move_files": "移动文件", - "move_forward_within_quick_preview": "在快速预览中前进", - "move_to_trash": "移到回收站(废纸篓)", - "my_sick_location": "My sick location", - "name": "名称", - "navigate_back": "回退", - "navigate_backwards": "向后导航", - "navigate_files_downwards": "向下导航文件", - "navigate_files_leftwards": "向左导航文件", - "navigate_files_rightwards": "向右导航文件", - "navigate_files_upwards": "向上导航文件", - "navigate_forward": "前进", - "navigate_forwards": "向前导航", - "navigate_to_settings_page": "导航到设置页面", - "network": "网络", - "network_page_description": "您的局域网上的其他Spacedrive节点将显示在这里,以及您的默认操作系统网络挂载。", - "network_settings": "网络设置", - "network_settings_advanced": "高级网络概述", - "network_settings_advanced_description": "有关当前网络设置的高级信息。", - "network_settings_description": "与网络和连接相关的设置。", - "networking": "网络", - "networking_error": "网络启动错误!", - "networking_port": "网络端口", - "networking_port_description": "Spacedrive 点对点网络通信使用的端口。除非您有防火墙来限制,否则应保持此项禁用。不要在互联网上暴露自己!", - "new": "新的", - "new_folder": "新文件夹", - "new_library": "新库", - "new_location": "新位置", - "new_location_web_description": "由于您正在使用Spacedrive的浏览器版本,您将(目前)需要指定远程节点本地目录的绝对URL。", - "new_tab": "新标签", - "new_tag": "新标签", - "new_update_available": "新版本可用!", - "no_apps_available": "没有可用的应用程序", - "no_favorite_items": "没有最喜欢的物品", - "no_git_files": "没有 Git 文件", - "no_hidden_files": "没有隐藏文件", - "no_items_found": "找不到任何项目", - "no_jobs": "没有任务。", - "no_labels": "无标签", - "no_nodes_found": "找不到 Spacedrive 节点.", - "no_search_selected": "未选择搜索", - "no_system_files": "没有系统文件", - "no_tag_selected": "没有选中的标签", - "no_tags": "没有标签", - "no_tags_description": "您还没有创建任何标签", - "node_name": "节点名称", - "nodes": "节点", - "nodes_description": 
"管理连接到此库的节点。节点是在设备或服务器上运行的Spacedrive后端的实例。每个节点都携带数据库副本,并通过点对点连接实时同步。", - "none": "无", - "normal": "普通", - "not_you": "不是您?", - "note": "Note", - "nothing_selected": "未选择任何内容", - "number_of_passes": "通过次数", - "object": "目的", - "object_id": "对象ID", - "off": "离开", - "offline": "离线", - "on": "在", - "online": "在线", - "only_images": "仅图像", - "open": "打开", - "open_file": "打开文件", - "open_in_new_tab": "在新标签页中打开", - "open_logs": "查看日志", - "open_new_location_once_added": "添加新位置后立即打开", - "open_new_tab": "打开新标签页", - "open_object": "打开对象", - "open_object_from_quick_preview_in_native_file_manager": "在本机文件管理器中从快速预览中打开对象", - "open_settings": "打开设置", - "open_with": "打开方式", - "opening_trash": "打开回收站", - "or": "或", - "overview": "概览", - "p2p_visibility": "P2P 可见性", - "p2p_visibility_contacts_only": "仅限联系人", - "p2p_visibility_description": "配置谁可以看到您的 Spacedrive 安装。", - "p2p_visibility_disabled": "关闭", - "p2p_visibility_everyone": "所有人", - "package": "Package", - "page": "页面", - "page_shortcut_description": "应用程序中的不同页面", - "pair": "配对", - "pairing_with_node": "正在与{{node}}配对", - "paste": "粘贴", - "paste_object": "粘贴对象", - "paste_success": "已粘贴项目", - "path": "路径", - "path_copied_to_clipboard_description": "位置{{location}}的路径已复制到剪贴板。", - "path_copied_to_clipboard_title": "路径已复制到剪贴板", - "path_to_save_do_the_thing": "Path to save when clicking 'Do the thing':", - "paths": "路径", - "pause": "暂停", - "paused": "已暂停", - "peers": "个端点", - "people": "人们", - "pin": "Pin", - "please_select_emoji": "请选择一个表情", - "prefix_a": "a", - "preview_media_bytes": "预览媒体", - "preview_media_bytes_description": "所有预览媒体文件(例如缩略图)的总大小。", - "privacy": "隐私", - "privacy_description": "Spacedrive是为隐私而构建的,这就是为什么我们是开源的,以本地优先。因此,我们会非常明确地告诉您与我们分享了什么数据。", - "queued": "排队", - "quick_preview": "快速预览", - "quick_rescan_started": "正在快速重新扫描目录。", - "quick_view": "快速查看", - "quickpreview_thumbnail_error_message": "无法加载全分辨率图像", - "quickpreview_thumbnail_error_tip": "无法找到图像。因此显示缩略图。", - "random": "随机的", - "receiver": "接收者", - 
"receiver_description": "该进程接收并存储来自 Spacedrive Cloud 的操作。", - "recent_jobs": "最近的作业", - "recents": "最近使用", - "recents_notice_message": "打开文件时会创建最近的文件。", - "regen_labels": "重新生成标签", - "regen_thumbnails": "重新生成缩略图", - "regenerate_thumbs": "重新生成缩略图", - "reindex": "重新索引", - "reject": "拒绝", - "reject_files": "Reject files", - "relay_listeners_error": "创建中继侦听器时出错。请检查您的防火墙设置!", - "reload": "重新加载", - "remote_access": "启用远程访问", - "remote_access_description": "使其他节点能够直接连接到该节点。", - "remote_identity": "远程身份", - "remove": "移除", - "remove_from_recents": "从最近使用中移除", - "rename": "重命名", - "rename_object": "重命名对象", - "replica": "副本", - "rescan": "重新扫描", - "rescan_directory": "重新扫描目录", - "rescan_location": "重新扫描位置", - "reset": "重置", - "reset_and_quit": "重置并退出应用程序", - "reset_confirmation": "您确定要重置 Spacedrive 吗?您的数据库将被删除。", - "reset_to_continue": "我们检测到您可能使用旧版本的 Spacedrive 创建了您的资料库。请重置以继续使用应用程序!", - "reset_warning": "您会丢失现有的所有 Spacedrive数据!", - "resolution": "解决", - "resources": "资源", - "restore": "恢复", - "resume": "恢复", - "retry": "重试", - "reveal_in_native_file_manager": "在本机文件管理器中显示", - "revel_in_browser": "在{{browser}}中显示", - "rules": "规则", - "running": "运行中", - "save": "保存", - "save_changes": "保存更改", - "save_search": "保存搜索", - "save_spacedrop": "保存 Spacedrop", - "saved_searches": "保存的搜索", - "screenshot": "屏幕截图", - "search": "搜索", - "search_extensions": "搜索扩展", - "search_for_files_and_actions": "搜索文件和操作...", - "search_locations": "搜索地点", - "secure_delete": "安全删除", - "security": "安全", - "security_description": "确保您的客户端安全。", - "see_less": "更少", - "see_more": "更多", - "send": "发送", - "send_report": "发送报告", - "sender": "发件人", - "sender_description": "此过程将同步操作发送到 Spacedrive Cloud。", - "settings": "设置", - "setup": "设置", - "share": "分享", - "share_anonymous_usage": "分享匿名使用情况", - "share_anonymous_usage_description": "分享完全匿名的遥测数据,帮助开发者改进应用程序", - "share_bare_minimum": "分享最基本信息", - "share_bare_minimum_description": "只分享我是 Spacedrive 的活跃用户和一些技术细节", - "sharing": "共享", - "sharing_description": 
"管理有权访问您的库的人。", - "show_details": "显示详情", - "show_hidden_files": "显示隐藏文件", - "show_inspector": "显示检查员", - "show_object_size": "显示对象大小", - "show_path_bar": "显示路径栏", - "show_slider": "显示滑块", - "show_tags": "显示标签", - "size": "大小", - "size_b": "B", - "size_bs": "Bs", - "size_gb": "GB", - "size_gbs": "GBs", - "size_kb": "KB", - "size_kbs": "KBs", - "size_mb": "MB", - "size_mbs": "MB", - "size_tb": "TB", - "size_tbs": "TBs", - "skip_login": "跳过登录", - "software": "软件", - "sort_by": "排序依据", - "spacedrive_account": "Spacedrive 账户", - "spacedrive_cloud": "Spacedrive 云", - "spacedrive_cloud_description": "Spacedrive 始终优先重视本地资源,但我们未来会提供可选的自有云服务。目前,身份验证仅用于反馈功能。", - "spacedrop": "Spacedrop", - "spacedrop_a_file": "使用 Spacedrop 传输文件", - "spacedrop_already_progress": "Spacedrop 已在进行中", - "spacedrop_contacts_only": "仅限联系人", - "spacedrop_description": "与在您的网络上运行 Spacedrive 的设备即时共享。", - "spacedrop_disabled": "关闭", - "spacedrop_everyone": "所有人", - "spacedrop_rejected": "Spacedrop 被拒绝", - "square_thumbnails": "方形缩略图", - "star_on_github": "为我们点个Star吧", - "start": "开始", - "starting": "开始...", - "starts_with": "以...开始", - "stop": "停止", - "stopping": "正在停止...", - "success": "成功", - "support": "支持", - "switch_to_grid_view": "切换到网格视图", - "switch_to_list_view": "切换到列表视图", - "switch_to_media_view": "切换到媒体视图", - "switch_to_next_tab": "切换到下一个标签页", - "switch_to_previous_tab": "切换到上一个标签页", - "sync": "同步", - "sync_description": "管理 Spacedrive 的同步方式。", - "sync_with_library": "与资料库同步", - "sync_with_library_description": "如果启用,您的快捷方式将与资料库同步,否则它们只适用于此客户端。", - "syncPreviewMedia_label": "将此位置的预览媒体与您的设备同步", - "system": "系统", - "tag": "标签", - "tag_other": "标签", - "tags": "标签", - "tags_bulk_instructions": "点击标签并按下数字键为其编号,然后再选择文件并使用设定好的数字键为其快速分配标签", - "tags_description": "管理您的标签。", - "tags_notice_message": "没有项目分配给该标签。", - "task": "任务", - "task_other": "任务", - "telemetry_description": "启用以向开发者提供详细的使用情况和遥测数据来改善应用程序。禁用则将只发送基本数据:您的活动状态、应用版本、应用内核版本以及平台(例如移动端、web 端或桌面端)。", - "telemetry_title": "共享额外的遥测和使用数据", - 
"temperature": "温度", - "text": "文本", - "text_file": "文本文件", - "text_size": "文字大小", - "thank_you_for_your_feedback": "感谢您的反馈!", - "thumbnailer_cpu_usage": "缩略图生成器 CPU 使用", - "thumbnailer_cpu_usage_description": "限制缩略图生成器在后台处理时可以使用 CPU 的量。", - "to": "到", - "toggle_all": "切换全部", - "toggle_command_palette": "打开命令面板", - "toggle_hidden_files": "显示/隐藏文件", - "toggle_image_slider_within_quick_preview": "在快速预览中切换图像滑块", - "toggle_inspector": "切换检查器", - "toggle_job_manager": "切换任务管理器", - "toggle_metadata": "切换元数据", - "toggle_path_bar": "切换显示路径栏", - "toggle_quick_preview": "切换快速预览", - "toggle_sidebar": "打开/关闭侧边栏", - "tools": "工具", - "total_bytes_capacity": "总容量", - "total_bytes_capacity_description": "连接到资料库的所有节点的总容量。 在 Alpha 期间可能会显示不正确的值。", - "total_bytes_free": "可用空间", - "total_bytes_free_description": "连接到资料库的所有节点上的可用空间。", - "total_bytes_used": "总使用空间", - "total_bytes_used_description": "连接到资料库的所有节点上使用的总空间。", - "trash": "回收站", - "type": "类型", - "ui_animations": "用户界面动画", - "ui_animations_description": "打开和关闭时对话框和其他用户界面元素将产生动画效果。", - "unknown": "未知", - "unnamed_location": "未命名位置", - "update": "更新", - "update_downloaded": "更新已下载。重新启动 Spacedrive 以安装", - "updated_successfully": "成功更新,您当前使用的版本是 {{version}}", - "uploaded_file": "文件成功上传!", - "usage": "使用情况", - "usage_description": "您的库使用情况和硬件信息", - "vacuum": "清理", - "vacuum_library": "清理库", - "vacuum_library_description": "重新打包数据库以释放不必要的空间。", - "value": "值", - "value_required": "所需值", - "version": "版本 {{version}}", - "video": "视频", - "video_preview_not_supported": "不支持视频预览。", - "view_changes": "查看更改", - "want_to_do_this_later": "想稍后再做吗?", - "web_page_archive": "网页归档", - "website": "项目主页", - "widget": "小部件", - "with_descendants": "子目录", - "your_account": "您的账户", - "your_account_description": "Spacedrive 账号和信息。", - "your_local_network": "您的本地网络", - "your_privacy": "您的隐私", - "zoom": "缩放", - "zoom_in": "放大", - "zoom_out": "缩小" -} \ No newline at end of file + "Connect": "连接", + "Connecting": "正在连接", + "about": "关于", + 
"about_vision_text": "很多人都不止拥有一个云账户。没有备份,数据有丢失的风险。我们依赖像 Google 相册、iCloud 这样的云服务,但是它们容量低下,且互操作性几乎为零,几乎无法和操作系统集成。我们的照片不应该困在营业公司的广告当中,而因该由我们全权掌控。创造数据是我们的传统技能,这些数据存在于我们生活的方方面面。而开源技术是唯一能确保我们绝对控制这些日益增长的数据的方式。", + "about_vision_title": "项目远景", + "accept": "接受", + "accept_files": "Accept files", + "accessed": "已访问", + "account": "账户", + "actions": "操作", + "add": "添加", + "add_device": "添加设备", + "add_file_extension_rule": "将文件扩展名添加到当前规则", + "add_filter": "添加过滤器", + "add_library": "添加库", + "add_location": "添加位置", + "add_location_description": "通过将您喜爱的位置添加到个人库中,增强您的 Spacedrive 体验,以实现无缝高效的文件管理。", + "add_location_overview_description": "将本地路径、卷或网络位置连接到 Spacedrive。", + "add_location_tooltip": "将路径添加为索引", + "add_locations": "添加位置", + "add_tag": "添加标签", + "added_location": "已添加位置 {{name}}", + "adding_location": "添加位置 {{name}}", + "advanced": "高级", + "advanced_settings": "高级设置", + "album": "相册", + "alias": "别名", + "all_jobs_have_been_cleared": "所有任务皆已完成。", + "alpha_release_description": "感谢试用 Spacedrive。现在 Spacedrive 处于 Alpha 发布阶段,展示了激动人心的新功能。作为初始版本,它可能包含一些错误。我们恳请您在我们的 Discord 频道上反馈遇到的任何问题,您宝贵的反馈将有助于提升用户体验。", + "alpha_release_title": "Alpha 版本", + "app_crashed": "应用程序崩溃了", + "app_crashed_description": "出现了一些错误...", + "appearance": "外观", + "appearance_description": "调整客户端的外观。", + "apply": "申请", + "archive": "存档", + "archive_coming_soon": "存档位置功能即将推出……", + "archive_info": "将库中的数据作为存档提取,有利于保留位置的目录结构。", + "are_you_sure": "您确定吗?", + "ascending": "升序", + "ask_spacedrive": "询问 Spacedrive", + "assign_tag": "分配标签", + "audio": "音频", + "audio_preview_not_supported": "不支持音频预览。", + "auto": "自动", + "back": "返回", + "back_to_login": "返回登录", + "backfill_sync": "回填同步操作", + "backfill_sync_description": "库暂停直至回填完成", + "backups": "备份", + "backups_description": "管理您的 Spacedrive 数据库备份。", + "bitrate": "比特率", + "blur_effects": "模糊效果", + "blur_effects_description": "某些组件将应用模糊效果。", + "book": "书籍", + "cancel": "取消", + "cancel_selection": "取消选择", + "canceled": "取消", + "celcius": "摄氏度", + "change": "更改", + 
"change_view_setting_description": "更改默认资源管理器视图", + "changelog": "更新日志", + "changelog_page_description": "看看我们在开发哪些酷炫的新功能", + "changelog_page_title": "更新日志", + "check_your_inbox": "请检查您的收件箱:", + "checksum": "校验和", + "clear_finished_jobs": "清除已完成的任务", + "click_to_hide": "点击隐藏", + "click_to_lock": "点击锁定", + "client": "客户端", + "close": "关闭", + "close_command_palette": "关闭命令面板", + "close_current_tab": "关闭当前标签页", + "cloud": "云", + "cloud_connect_description": "您想将您的图书馆连接到云端吗?", + "cloud_drives": "云盘", + "cloud_sync": "云同步", + "cloud_sync_description": "管理将您的库与 Spacedrive Cloud 同步的流程", + "clouds": "云服务", + "code": "Code", + "collection": "Collection", + "color": "颜色", + "color_profile": "颜色配置文件", + "color_space": "色彩空间", + "coming_soon": "即将推出", + "completed": "已完成", + "completed_with_errors": "已完成但有错误", + "compress": "压缩", + "config": "配置文件", + "configure_location": "配置文件位置", + "confirm": "确认", + "connect_cloud": "连接到云", + "connect_cloud_description": "将您的云帐户连接到 Spacedrive。", + "connect_device": "连接设备", + "connect_device_description": "Spacedrive 在您的所有设备上都能发挥最佳效果。", + "connect_library_to_cloud": "将库连接到 Spacedrive Cloud", + "connected": "已连接", + "connecting_library_to_cloud": "将库连接到 Spacedrive Cloud...", + "contacts": "联系人", + "contacts_description": "在 Spacedrive 中管理您的联系人。", + "contains": "包含", + "content_id": "内容ID", + "continue": "继续", + "convert_to": "转换为", + "coordinates": "坐标", + "copied": "已复制", + "copy": "复制", + "copy_as_path": "复制路径", + "copy_object": "复制对象", + "copy_path_to_clipboard": "复制路径到剪贴板", + "copy_success": "项目已复制", + "create": "创建", + "create_file_error": "创建文件时出错", + "create_file_success": "创建了新文件:{{name}}", + "create_folder_error": "创建文件夹时出错", + "create_folder_success": "创建了新文件夹:{{name}}", + "create_library": "创建库", + "create_library_description": "数据库将安全地存储在您地设备上。我们不会移动您的文件,仅仅对其进行索引,同时存储与 Spacedrive 相关的数据。", + "create_location": "Create Location", + "create_new_library": "创建新库", + "create_new_library_description": 
"新创建地数据库将安全地存储在您地设备上。我们不会移动您的文件,仅仅对其进行索引,同时存储与 Spacedrive 相关的数据。", + "create_new_tag": "创建新标签", + "create_new_tag_description": "设置名称与颜色。", + "create_tag": "创建标签", + "created": "已创建", + "creating_library": "正在创建库…", + "creating_your_library": "正在为您创建库", + "current": "当前使用", + "current_directory": "当前目录", + "current_directory_with_descendants": "当前目录及其子目录", + "custom": "自定义", + "cut": "剪切", + "cut_object": "剪切对象", + "cut_success": "剪切项目", + "dark": "深色", + "data_folder": "数据文件夹", + "database": "数据库", + "date": "日期", + "date_accessed": "访问日期", + "date_created": "创建日期", + "date_indexed": "索引日期", + "date_modified": "修改日期", + "date_taken": "拍摄日期", + "date_time_format": "日期和时间格式", + "date_time_format_description": "选择 Spacedrive 中显示的日期格式", + "debug_mode": "调试模式", + "debug_mode_description": "启用本应用额外的调试功能。", + "default": "默认", + "default_settings": "默认设置", + "delete": "删除", + "delete_dialog_title": "删除 {{prefix}} {{type}}", + "delete_forever": "永久删除", + "delete_info": "此操作只删除预览媒体,并不会删除磁盘上实际的文件夹。", + "delete_library": "删除库", + "delete_library_description": "Spacedrive 库将会永久删除,但您的文件不会删除。", + "delete_location": "删除存储位置", + "delete_location_description": "删除存储位置时,Spacedrive 会从数据库中移除所有与之相关的文件,但是不会删除文件本身。", + "delete_object": "删除对象", + "delete_rule": "删除规则", + "delete_rule_confirmation": "您确定要删除这个规则吗?", + "delete_tag": "删除标签", + "delete_tag_description": "您确定要删除这个标签吗?此操作不能撤销,打过标签的文件将会丢失标签。", + "delete_warning": "警告:这将永久删除您的 {{type}},我们目前还没有回收站……", + "descending": "降序", + "description": "描述", + "deselect": "取消选择", + "details": "详情", + "device": "设备", + "devices": "设备", + "devices_coming_soon_tooltip": "即将推出!本 Alpha 版本不支持库同步,此功能很快就会准备就绪。", + "dialog": "对话框", + "dialog_shortcut_description": "执行操作", + "direction": "次序", + "directory_other": "文件夹", + "disabled": "已禁用", + "disconnected": "已断开连接", + "display_formats": "显示格式", + "display_name": "显示名称", + "distance": "距离", + "do_the_thing": "Do the thing", + "document": "文档", + "done": "完成", + "dont_have_any": "这里看起来什么也没有╰(*°▽°*)╯", + 
"dont_show_again": "不再显示", + "dotfile": "Dotfile", + "double_click_action": "双击操作", + "download": "下载", + "downloading_update": "正在下载更新", + "drag_to_resize": "拖动以调整大小", + "duplicate": "复制", + "duplicate_object": "复制对象", + "duplicate_success": "项目已复制", + "edit": "编辑", + "edit_library": "编辑库", + "edit_location": "编辑位置", + "empty_file": "空的文件", + "enable_networking": "启用网络", + "enable_networking_description": "允许您的节点与您周围的其他 Spacedrive 节点进行通信。", + "enable_networking_description_required": "数据库的同步和 Spacedrop 需要开启本功能!", + "enable_relay": "启用中继服务器", + "enable_relay_description": "启用中继服务器以允许您的设备通过公共互联网进行通信。", + "enable_sync": "启用同步", + "enable_sync_description": "为该库中的所有现有数据生成同步操作,并配置 Spacedrive 以在将来发生事情时生成同步操作。", + "enabled": "启用", + "encrypt": "加密", + "encrypt_library": "加密数据库", + "encrypt_library_coming_soon": "资料库加密即将推出", + "encrypt_library_description": "为数据库启用加密,这只会加密Spacedrive数据库,不会加密文件本身。", + "encrypted": "已加密", + "ends_with": "以... 结束", + "ephemeral_notice_browse": "直接从您的设备浏览您的文件和文件夹。", + "ephemeral_notice_consider_indexing": "考虑索引您本地的位置,以获得更快和更高效的浏览。", + "equals": "是", + "erase": "擦除", + "erase_a_file": "擦除一个文件", + "erase_a_file_description": "配置您的擦除设置。", + "error": "错误", + "error_loading_original_file": "加载原始文件出错", + "error_message": "错误: {{error}}.", + "executable": "应用程序", + "expand": "展开", + "explorer": "资源管理器", + "explorer_settings": "资源管理器设置", + "explorer_shortcut_description": "导航、与文件系统交互", + "explorer_view": "资源管理器视图", + "export": "导出", + "export_library": "导出库", + "export_library_coming_soon": "导出库功能即将推出", + "export_library_description": "将这个库导出到一个文件。", + "extension": "扩大", + "extensions": "扩展", + "extensions_description": "安装扩展来扩展这个客户端的功能。", + "fahrenheit": "华氏度", + "failed": "失败的", + "failed_to_add_location": "添加位置失败", + "failed_to_cancel_job": "取消任务失败。", + "failed_to_clear_all_jobs": "清除所有任务失败。", + "failed_to_copy_file": "复制文件失败", + "failed_to_copy_file_path": "复制文件路径失败", + "failed_to_cut_file": "剪切文件失败", + "failed_to_delete_rule": "删除规则失败", + 
"failed_to_download_update": "无法下载更新", + "failed_to_duplicate_file": "复制文件失败", + "failed_to_generate_checksum": "生成校验和失败", + "failed_to_generate_labels": "生成标签失败", + "failed_to_generate_thumbnails": "生成缩略图失败", + "failed_to_load_tags": "加载标签失败", + "failed_to_open_file_body": "无法打开文件: {{error}}", + "failed_to_open_file_title": "无法打开文件", + "failed_to_open_file_with": "打开文件失败: {{data}}", + "failed_to_pause_job": "暂停任务失败。", + "failed_to_reindex_location": "重索引位置失败", + "failed_to_remove_file_from_recents": "从最近文档中删除文件失败", + "failed_to_remove_job": "删除任务失败。", + "failed_to_rename_file": "不能将 {{oldName}} 重命名为 {{newName}}", + "failed_to_rescan_location": "重新扫描位置失败", + "failed_to_resume_job": "恢复任务失败。", + "failed_to_update_location_settings": "更新位置设置失败", + "favorite": "收藏", + "favorites": "收藏夹", + "feedback": "反馈", + "feedback_is_required": "反馈是必填项", + "feedback_login_description": "登录使我们能够回复您的反馈", + "feedback_placeholder": "您的反馈...", + "feedback_toast_error_message": "提交反馈时出错,请重试。", + "file_already_exist_in_this_location": "文件已存在于此位置", + "file_directory_name": "文件/目录名称", + "file_extension_description": "文件扩展名(例如 .mp4、.jpg、.txt)", + "file_from": "来自 {{name}} 的文件 {{file}}", + "file_indexing_rules": "文件索引规则", + "file_other": "文件", + "file_picker_not_supported": "File picker不支持此设备", + "filter": "筛选", + "filters": "过滤器", + "flash": "闪光", + "folder": "文件夹", + "font": "字体", + "for_library": "For library {{name}}", + "forced": "被迫", + "forward": "前进", + "free_of": "可用", + "from": "从", + "full_disk_access": "完全磁盘访问", + "full_disk_access_description": "为了提供最佳体验,我们需要访问您的磁盘以索引您的文件。您的文件只有您自己可以访问。", + "full_reindex": "完全重新索引", + "full_reindex_info": "执行对此位置的完全重新扫描。", + "general": "通用", + "general_settings": "通用设置", + "general_settings_description": "与此客户端相关的一般设置。", + "general_shortcut_description": "通用快捷键", + "generatePreviewMedia_label": "为这个位置生成预览媒体", + "generate_checksums": "生成校验和", + "gitignore": "gitignore文件", + "glob_description": "全局(例如 **/.git)", + "go_back": "返回", + 
"go_to_labels": "转到标签", + "go_to_location": "前往地点", + "go_to_overview": "前往概览", + "go_to_recents": "转到最近的内容", + "go_to_settings": "前往设置", + "go_to_tag": "转到标签", + "got_it": "我知道了", + "grid_gap": "间隙", + "grid_view": "网格视图", + "grid_view_notice_description": "网格视图以缩略图形式显示文件和文件夹,以便直观、快速识别要寻找的文件。", + "hidden": "隐", + "hidden_label": "阻止位置及其内容出现在汇总分类、搜索和标签中,除非启用了“显示隐藏项目”。", + "hide_in_library_search": "在库搜索中隐藏", + "hide_in_library_search_description": "在搜索整个库时从结果中隐藏带有此标签的文件。", + "hide_in_sidebar": "在侧边栏中隐藏", + "hide_in_sidebar_description": "阻止此标签在应用的侧边栏中显示。", + "hide_location_from_view": "隐藏位置和内容的视图", + "hide_sidebar": "隐藏侧边栏", + "home": "我的文档", + "hosted_locations": "Hosted Locations", + "hosted_locations_description": "使用我们的云服务增强你的本地储存", + "icon_size": "图标大小", + "image": "图像", + "image_labeler_ai_model": "图像标签识别 AI 模型", + "image_labeler_ai_model_description": "用于识别图像中对象的模型。较大的模型更准确但速度较慢。", + "import": "导入", + "incoming_spacedrop": "Spacedrop正在接收文件", + "indexed": "已索引", + "indexed_new_files": "Indexed new files {{name}}", + "indexer_rule_reject_allow_label": "默认情况下,索引器规则作为拒绝列表,排除与其匹配的任何文件。启用此选项将其变成允许列表,仅索引符合其规定规则的位置的文件。", + "indexer_rules": "索引器规则", + "indexer_rules_error": "索引索引器规则时出错", + "indexer_rules_info": "索引器规则允许您使用通配符指定要忽略的路径。", + "indexer_rules_not_available": "没有索引器规则", + "ingester": "同步接收器", + "ingester_description": "此过程接收接收到的云操作并将它们发送到主同步接收器。", + "injester_description": "此过程从 P2P 连接和 Spacedrive Cloud 获取同步操作,并将其应用到库。", + "install": "安装", + "install_update": "安装更新", + "installed": "已安装", + "invalid_extension": "无效的扩展名", + "invalid_glob": "无效的全局变量", + "invalid_name": "名称无效", + "invalid_path": "路径无效", + "ipv4_ipv6_listeners_error": "创建 IPv4 和 IPv6 监听器出错。请检查防火墙设置!", + "ipv4_listeners_error": "创建 IPv4 监听器出错。请检查防火墙设置!", + "ipv6": "IPv6网络", + "ipv6_description": "允许使用 IPv6 网络进行点对点通信", + "ipv6_listeners_error": "创建 IPv6 监听器时出错。请检查防火墙设置!", + "is": "是", + "is_not": "不是", + "item": "item", + "item_other": "item", + "item_size": "图标大小", + "items": "项目", + 
"job_error_description": "作业已完成,但有错误。\n请参阅下面的错误日志以获取更多信息。\n如果您需要帮助,请联系支持人员并提供此错误。", + "job_has_been_canceled": "作业已取消。", + "job_has_been_paused": "作业已暂停。", + "job_has_been_removed": "作业已移除。", + "job_has_been_resumed": "作业已恢复。", + "join": "加入", + "join_discord": "加入 Discord", + "join_library": "加入一个资料库", + "join_library_description": "数据库将安全地存储在您地设备上。我们不会移动您的文件,仅仅对其进行索引,同时存储与 Spacedrive 相关的数据。", + "key": "键位", + "key_manager": "密钥管理器", + "key_manager_description": "创建加密密钥,挂载和卸载密钥以即时查看解密文件。", + "keybinds": "快捷键", + "keybinds_description": "查看、管理客户端快捷键", + "keys": "密钥", + "kilometers": "千米", + "kind": "种类", + "kind_other": "种类", + "label": "标签", + "labels": "标签", + "language": "语言", + "language_description": "更改 Spacedrive 界面的语言", + "learn_more": "Learn More", + "learn_more_about_telemetry": "了解更多有关遥测的信息", + "less": "less", + "libraries": "库", + "libraries_description": "数据库包含所有库的数据和文件的元数据。", + "library": "库", + "library_bytes": "库大小", + "library_bytes_description": "图书馆中所有位置的总大小。", + "library_db_size": "索引大小", + "library_db_size_description": "图书馆数据库的大小。", + "library_name": "库名称", + "library_overview": "库概览", + "library_settings": "库设置", + "library_settings_description": "与当前活动库相关的一般设置。", + "light": "浅色", + "link": "链接", + "list_view": "列表视图", + "list_view_notice_description": "通过列表视图轻松导航您的文件和文件夹。这种视图以简单、有组织的列表形式显示文件,让您能够快速定位和访问所需文件。", + "loading": "正在加载", + "local": "本地", + "local_locations": "本地位置", + "local_node": "本地节点", + "location": "地点", + "location_added_successfully": "位置添加成功。", + "location_connected_tooltip": "位置正在监视变化", + "location_deleted_successfully": "位置删除成功。", + "location_disconnected_tooltip": "位置未被监视以检查更改", + "location_display_name_info": "此位置的名称,这是将显示在侧边栏的名称。不会重命名磁盘上的实际文件夹。", + "location_empty_notice_message": "这个地方空空如也", + "location_is_already_linked": "位置已经链接", + "location_other": "文件位置", + "location_path_info": "此位置的路径,这是文件在磁盘上的存储位置。", + "location_type": "位置类型", + "location_type_managed": "Spacedrive 将为您排序文件。如果位置不为空,将创建一个“spacedrive”文件夹。", + 
"location_type_normal": "内容将按原样索引,新文件不会自动排序。", + "location_type_replica": "此位置是另一个位置的副本,其内容将自动同步。", + "locations": "存储位置", + "locations_description": "管理您的存储位置。", + "lock": "锁定", + "lock_sidebar": "锁定侧边栏", + "log_in": "登录", + "log_in_with_browser": "使用浏览器登录", + "log_out": "退出登录", + "logged_in_as": "已登录为 {{email}}", + "logging_in": "正在登录...", + "login_link_sent": "我们已发送临时登录链接。", + "logout": "退出登录", + "manage_library": "管理库", + "managed": "已管理", + "manual_peers": "手动添加对等点", + "manual_peers_description": "通过输入 IP 地址和端口来手动添加对等点。\n当无法自动发现时,这非常有用。", + "media": "媒体", + "media_view": "媒体视图", + "media_view_context": "媒体视图上下文", + "media_view_notice_description": "轻松发现照片和视频,媒体视图将从当前位置开始显示结果,包括子目录。", + "meet_contributors_behind_spacedrive": "结识 Spacedrive 背后的贡献者", + "meet_title": "了解{{title}}", + "mesh": "Mesh", + "miles": "英里", + "mode": "模式", + "model": "模型", + "modified": "已修改", + "more": "更多", + "more_actions": "更多操作…", + "more_info": "更多信息", + "move_back_within_quick_preview": "在快速预览中后退", + "move_files": "移动文件", + "move_forward_within_quick_preview": "在快速预览中前进", + "move_to_trash": "移到回收站(废纸篓)", + "my_sick_location": "My sick location", + "name": "名称", + "navigate_back": "回退", + "navigate_backwards": "向后导航", + "navigate_files_downwards": "向下导航文件", + "navigate_files_leftwards": "向左导航文件", + "navigate_files_rightwards": "向右导航文件", + "navigate_files_upwards": "向上导航文件", + "navigate_forward": "前进", + "navigate_forwards": "向前导航", + "navigate_to_settings_page": "导航到设置页面", + "network": "网络", + "network_page_description": "您的局域网上的其他Spacedrive节点将显示在这里,以及您的默认操作系统网络挂载。", + "network_settings": "网络设置", + "network_settings_advanced": "高级网络概述", + "network_settings_advanced_description": "有关当前网络设置的高级信息。", + "network_settings_description": "与网络和连接相关的设置。", + "networking": "网络", + "networking_error": "网络启动错误!", + "networking_port": "网络端口", + "networking_port_description": "Spacedrive 点对点网络通信使用的端口。除非您有防火墙来限制,否则应保持此项禁用。不要在互联网上暴露自己!", + "new": "新的", + "new_folder": "新文件夹", + "new_library": "新库", + 
"new_location": "新位置", + "new_location_web_description": "由于您正在使用Spacedrive的浏览器版本,您将(目前)需要指定远程节点本地目录的绝对URL。", + "new_tab": "新标签", + "new_tag": "新标签", + "new_update_available": "新版本可用!", + "no_apps_available": "没有可用的应用程序", + "no_favorite_items": "没有最喜欢的物品", + "no_git_files": "没有 Git 文件", + "no_hidden_files": "没有隐藏文件", + "no_items_found": "找不到任何项目", + "no_jobs": "没有任务。", + "no_labels": "无标签", + "no_nodes_found": "找不到 Spacedrive 节点.", + "no_search_selected": "未选择搜索", + "no_system_files": "没有系统文件", + "no_tag_selected": "没有选中的标签", + "no_tags": "没有标签", + "no_tags_description": "您还没有创建任何标签", + "node_name": "节点名称", + "nodes": "节点", + "nodes_description": "管理连接到此库的节点。节点是在设备或服务器上运行的Spacedrive后端的实例。每个节点都携带数据库副本,并通过点对点连接实时同步。", + "none": "无", + "normal": "普通", + "not_you": "不是您?", + "note": "Note", + "nothing_selected": "未选择任何内容", + "number_of_passes": "通过次数", + "object": "目的", + "object_id": "对象ID", + "off": "离开", + "offline": "离线", + "on": "在", + "online": "在线", + "only_images": "仅图像", + "open": "打开", + "open_file": "打开文件", + "open_in_new_tab": "在新标签页中打开", + "open_logs": "查看日志", + "open_new_location_once_added": "添加新位置后立即打开", + "open_new_tab": "打开新标签页", + "open_object": "打开对象", + "open_object_from_quick_preview_in_native_file_manager": "在本机文件管理器中从快速预览中打开对象", + "open_settings": "打开设置", + "open_with": "打开方式", + "opening_trash": "打开回收站", + "or": "或", + "overview": "概览", + "p2p_visibility": "P2P 可见性", + "p2p_visibility_contacts_only": "仅限联系人", + "p2p_visibility_description": "配置谁可以看到您的 Spacedrive 安装。", + "p2p_visibility_disabled": "关闭", + "p2p_visibility_everyone": "所有人", + "package": "Package", + "page": "页面", + "page_shortcut_description": "应用程序中的不同页面", + "pair": "配对", + "pairing_with_node": "正在与{{node}}配对", + "paste": "粘贴", + "paste_object": "粘贴对象", + "paste_success": "已粘贴项目", + "path": "路径", + "path_copied_to_clipboard_description": "位置{{location}}的路径已复制到剪贴板。", + "path_copied_to_clipboard_title": "路径已复制到剪贴板", + "path_to_save_do_the_thing": "Path to save when clicking 'Do the 
thing':", + "paths": "路径", + "pause": "暂停", + "paused": "已暂停", + "peers": "个端点", + "people": "人们", + "pin": "Pin", + "please_select_emoji": "请选择一个表情", + "prefix_a": "a", + "preview_media_bytes": "预览媒体", + "preview_media_bytes_description": "所有预览媒体文件(例如缩略图)的总大小。", + "privacy": "隐私", + "privacy_description": "Spacedrive是为隐私而构建的,这就是为什么我们是开源的,以本地优先。因此,我们会非常明确地告诉您与我们分享了什么数据。", + "queued": "排队", + "quick_preview": "快速预览", + "quick_rescan_started": "正在快速重新扫描目录。", + "quick_view": "快速查看", + "quickpreview_thumbnail_error_message": "无法加载全分辨率图像", + "quickpreview_thumbnail_error_tip": "无法找到图像。因此显示缩略图。", + "random": "随机的", + "receiver": "接收者", + "receiver_description": "该进程接收并存储来自 Spacedrive Cloud 的操作。", + "recent_jobs": "最近的作业", + "recents": "最近使用", + "recents_notice_message": "打开文件时会创建最近的文件。", + "regen_labels": "重新生成标签", + "regen_thumbnails": "重新生成缩略图", + "regenerate_thumbs": "重新生成缩略图", + "reindex": "重新索引", + "reject": "拒绝", + "reject_files": "Reject files", + "relay_listeners_error": "创建中继侦听器时出错。请检查您的防火墙设置!", + "reload": "重新加载", + "remote_access": "启用远程访问", + "remote_access_description": "使其他节点能够直接连接到该节点。", + "remote_identity": "远程身份", + "remove": "移除", + "remove_from_recents": "从最近使用中移除", + "rename": "重命名", + "rename_object": "重命名对象", + "replica": "副本", + "rescan": "重新扫描", + "rescan_directory": "重新扫描目录", + "rescan_location": "重新扫描位置", + "reset": "重置", + "reset_and_quit": "重置并退出应用程序", + "reset_confirmation": "您确定要重置 Spacedrive 吗?您的数据库将被删除。", + "reset_to_continue": "我们检测到您可能使用旧版本的 Spacedrive 创建了您的资料库。请重置以继续使用应用程序!", + "reset_warning": "您会丢失现有的所有 Spacedrive数据!", + "resolution": "解决", + "resources": "资源", + "restore": "恢复", + "resume": "恢复", + "retry": "重试", + "reveal_in_native_file_manager": "在本机文件管理器中显示", + "revel_in_browser": "在{{browser}}中显示", + "rules": "规则", + "running": "运行中", + "save": "保存", + "save_changes": "保存更改", + "save_search": "保存搜索", + "save_spacedrop": "保存 Spacedrop", + "saved_searches": "保存的搜索", + "screenshot": "屏幕截图", + "search": "搜索", + "search_extensions": 
"搜索扩展", + "search_for_files_and_actions": "搜索文件和操作...", + "search_locations": "搜索地点", + "secure_delete": "安全删除", + "security": "安全", + "security_description": "确保您的客户端安全。", + "see_less": "更少", + "see_more": "更多", + "send": "发送", + "send_report": "发送报告", + "sender": "发件人", + "sender_description": "此过程将同步操作发送到 Spacedrive Cloud。", + "settings": "设置", + "setup": "设置", + "share": "分享", + "share_anonymous_usage": "分享匿名使用情况", + "share_anonymous_usage_description": "分享完全匿名的遥测数据,帮助开发者改进应用程序", + "share_bare_minimum": "分享最基本信息", + "share_bare_minimum_description": "只分享我是 Spacedrive 的活跃用户和一些技术细节", + "sharing": "共享", + "sharing_description": "管理有权访问您的库的人。", + "show_details": "显示详情", + "show_hidden_files": "显示隐藏文件", + "show_inspector": "显示检查员", + "show_object_size": "显示对象大小", + "show_path_bar": "显示路径栏", + "show_slider": "显示滑块", + "show_tags": "显示标签", + "size": "大小", + "size_b": "B", + "size_bs": "Bs", + "size_gb": "GB", + "size_gbs": "GBs", + "size_kb": "KB", + "size_kbs": "KBs", + "size_mb": "MB", + "size_mbs": "MB", + "size_tb": "TB", + "size_tbs": "TBs", + "skip_login": "跳过登录", + "software": "软件", + "sort_by": "排序依据", + "spacedrive_account": "Spacedrive 账户", + "spacedrive_cloud": "Spacedrive 云", + "spacedrive_cloud_description": "Spacedrive 始终优先重视本地资源,但我们未来会提供可选的自有云服务。目前,身份验证仅用于反馈功能。", + "spacedrop": "Spacedrop", + "spacedrop_a_file": "使用 Spacedrop 传输文件", + "spacedrop_already_progress": "Spacedrop 已在进行中", + "spacedrop_contacts_only": "仅限联系人", + "spacedrop_description": "与在您的网络上运行 Spacedrive 的设备即时共享。", + "spacedrop_disabled": "关闭", + "spacedrop_everyone": "所有人", + "spacedrop_rejected": "Spacedrop 被拒绝", + "square_thumbnails": "方形缩略图", + "star_on_github": "为我们点个Star吧", + "start": "开始", + "starting": "开始...", + "starts_with": "以...开始", + "stop": "停止", + "stopping": "正在停止...", + "success": "成功", + "support": "支持", + "switch_to_grid_view": "切换到网格视图", + "switch_to_list_view": "切换到列表视图", + "switch_to_media_view": "切换到媒体视图", + "switch_to_next_tab": "切换到下一个标签页", + 
"switch_to_previous_tab": "切换到上一个标签页", + "sync": "同步", + "syncPreviewMedia_label": "将此位置的预览媒体与您的设备同步", + "sync_description": "管理 Spacedrive 的同步方式。", + "sync_with_library": "与资料库同步", + "sync_with_library_description": "如果启用,您的快捷方式将与资料库同步,否则它们只适用于此客户端。", + "system": "系统", + "tag": "标签", + "tag_other": "标签", + "tags": "标签", + "tags_bulk_instructions": "点击标签并按下数字键为其编号,然后再选择文件并使用设定好的数字键为其快速分配标签", + "tags_description": "管理您的标签。", + "tags_notice_message": "没有项目分配给该标签。", + "task": "任务", + "task_other": "任务", + "telemetry_description": "启用以向开发者提供详细的使用情况和遥测数据来改善应用程序。禁用则将只发送基本数据:您的活动状态、应用版本、应用内核版本以及平台(例如移动端、web 端或桌面端)。", + "telemetry_title": "共享额外的遥测和使用数据", + "temperature": "温度", + "text": "文本", + "text_file": "文本文件", + "text_size": "文字大小", + "thank_you_for_your_feedback": "感谢您的反馈!", + "thumbnailer_cpu_usage": "缩略图生成器 CPU 使用", + "thumbnailer_cpu_usage_description": "限制缩略图生成器在后台处理时可以使用 CPU 的量。", + "to": "到", + "toggle_all": "切换全部", + "toggle_command_palette": "打开命令面板", + "toggle_hidden_files": "显示/隐藏文件", + "toggle_image_slider_within_quick_preview": "在快速预览中切换图像滑块", + "toggle_inspector": "切换检查器", + "toggle_job_manager": "切换任务管理器", + "toggle_metadata": "切换元数据", + "toggle_path_bar": "切换显示路径栏", + "toggle_quick_preview": "切换快速预览", + "toggle_sidebar": "打开/关闭侧边栏", + "tools": "工具", + "total_bytes_capacity": "总容量", + "total_bytes_capacity_description": "连接到资料库的所有节点的总容量。 在 Alpha 期间可能会显示不正确的值。", + "total_bytes_free": "可用空间", + "total_bytes_free_description": "连接到资料库的所有节点上的可用空间。", + "total_bytes_used": "总使用空间", + "total_bytes_used_description": "连接到资料库的所有节点上使用的总空间。", + "trash": "回收站", + "type": "类型", + "ui_animations": "用户界面动画", + "ui_animations_description": "打开和关闭时对话框和其他用户界面元素将产生动画效果。", + "unknown": "未知", + "unnamed_location": "未命名位置", + "update": "更新", + "update_downloaded": "更新已下载。重新启动 Spacedrive 以安装", + "updated_successfully": "成功更新,您当前使用的版本是 {{version}}", + "uploaded_file": "文件成功上传!", + "usage": "使用情况", + "usage_description": "您的库使用情况和硬件信息", + "vacuum": "清理", + "vacuum_library": 
"清理库", + "vacuum_library_description": "重新打包数据库以释放不必要的空间。", + "value": "值", + "value_required": "所需值", + "version": "版本 {{version}}", + "video": "视频", + "video_preview_not_supported": "不支持视频预览。", + "view_changes": "查看更改", + "want_to_do_this_later": "想稍后再做吗?", + "web_page_archive": "网页归档", + "website": "项目主页", + "widget": "小部件", + "with_descendants": "子目录", + "your_account": "您的账户", + "your_account_description": "Spacedrive 账号和信息。", + "your_local_network": "您的本地网络", + "your_privacy": "您的隐私", + "zoom": "缩放", + "zoom_in": "放大", + "zoom_out": "缩小" +} diff --git a/interface/locales/zh-TW/common.json b/interface/locales/zh-TW/common.json index 115180dca..6ceb53a33 100644 --- a/interface/locales/zh-TW/common.json +++ b/interface/locales/zh-TW/common.json @@ -1,4 +1,6 @@ { + "Connect": "連接", + "Connecting": "連接中", "about": "關於", "about_vision_text": "我們中的許多人都擁有數個雲帳戶,這些雲帳戶中的硬碟未備份且資料面臨丟失的風險。我們依賴諸如Google照片和iCloud之類的雲服務,但這些服務容量有限且幾乎不能在不同的服務及作業系統間進行互通。相簿不應僅限於某個裝置生態系統內,或被用於收集廣告數據。它們應該是與作業系統無關,永久且屬於個人所有的。我們創建的數據是我們的遺產,它們將比我們存活得更久——開源技術是確保我們對定義我們生活的數據擁有絕對控制權的唯一方式,並且無限規模地延伸。", "about_vision_title": "遠景", @@ -43,6 +45,7 @@ "audio_preview_not_supported": "不支援音頻預覽。", "auto": "汽車", "back": "返回", + "back_to_login": "返回登入", "backfill_sync": "回填同步操作", "backfill_sync_description": "庫暫停至回填完成", "backups": "備份", @@ -60,6 +63,7 @@ "changelog": "變更日誌", "changelog_page_description": "了解我們正在創建的酷炫新功能", "changelog_page_title": "變更日誌", + "check_your_inbox": "請檢查您的收件匣:", "checksum": "校驗和", "clear_finished_jobs": "清除已完成的工作", "click_to_hide": "點擊隱藏", @@ -86,14 +90,12 @@ "config": "Config", "configure_location": "配置位置", "confirm": "Confirm", - "Connect": "連接", "connect_cloud": "連接雲", "connect_cloud_description": "將您的雲端帳戶連接到 Spacedrive。", "connect_device": "連接裝置", "connect_device_description": "Spacedrive 在您的所有裝置上都能發揮最佳效果。", "connect_library_to_cloud": "將圖書館連接到 Spacedrive Cloud", "connected": "已連接", - "Connecting": "連接中", "connecting_library_to_cloud": "正在將圖書館連接到 Spacedrive Cloud...", "contacts": "聯繫人", 
"contacts_description": "在Spacedrive中管理您的聯繫人。", @@ -287,8 +289,8 @@ "general_settings": "通用設置", "general_settings_description": "與此客戶端相關的一般設置。", "general_shortcut_description": "一般使用快捷鍵", - "generate_checksums": "生成校驗和", "generatePreviewMedia_label": "為這個位置生成預覽媒體", + "generate_checksums": "生成校驗和", "gitignore": "git 忽略", "glob_description": "全域(例如 **/.git)", "go_back": "返回", @@ -414,6 +416,7 @@ "log_out": "登出", "logged_in_as": "已登入為{{email}}", "logging_in": "在登入...", + "login_link_sent": "我們已發送臨時登入連結。", "logout": "登出", "manage_library": "管理圖書館", "managed": "已管理", @@ -657,10 +660,10 @@ "switch_to_next_tab": "切換到下一個分頁", "switch_to_previous_tab": "切換到上一個分頁", "sync": "同步", + "syncPreviewMedia_label": "將此位置的預覽媒體與您的設備同步", "sync_description": "管理Spacedrive如何進行同步。", "sync_with_library": "與圖書館同步", "sync_with_library_description": "如果啟用,您的鍵綁定將與圖書館同步,否則它們僅適用於此客戶端。", - "syncPreviewMedia_label": "將此位置的預覽媒體與您的設備同步", "system": "系統", "tag": "標籤", "tag_other": "標籤", @@ -730,4 +733,4 @@ "zoom": "飛漲", "zoom_in": "放大", "zoom_out": "縮小" -} \ No newline at end of file +} From 264e3f761335fccb987f28015709e784801a794d Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Fri, 27 Sep 2024 23:20:10 -0400 Subject: [PATCH 149/218] wip --- apps/desktop/src/App.tsx | 11 +- apps/mobile/modules/sd-core/android/build.sh | 3 +- apps/mobile/modules/sd-core/ios/build-rust.sh | 38 +- .../components/modal/ImportLibraryModal.tsx | 44 +- .../src/screens/settings/info/Debug.tsx | 6 +- core/crates/cloud-services/src/lib.rs | 4 +- .../Layout/Sidebar/DebugPopover.tsx | 54 +-- interface/app/$libraryId/Layout/auth.tsx | 45 +- interface/app/$libraryId/debug/cloud.tsx | 413 +++++++++--------- .../settings/client/account/Profile.tsx | 38 +- .../client/account/handlers/windowHandler.ts | 47 +- interface/components/Login.tsx | 30 +- interface/hooks/useDeeplinkEventHandler.ts | 20 +- interface/util/index.tsx | 4 +- packages/client/src/stores/featureFlags.tsx | 2 +- 
packages/ui/tsconfig.json | 1 - 16 files changed, 388 insertions(+), 372 deletions(-) diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx index 184587be7..ed7f5e650 100644 --- a/apps/desktop/src/App.tsx +++ b/apps/desktop/src/App.tsx @@ -21,6 +21,7 @@ import '@sd/ui/style/style.scss'; import { fetch } from '@tauri-apps/plugin-http'; import SuperTokens from 'supertokens-web-js'; import EmailPassword from 'supertokens-web-js/recipe/emailpassword'; +import Passwordless from 'supertokens-web-js/recipe/passwordless'; import Session from 'supertokens-web-js/recipe/session'; import ThirdParty from 'supertokens-web-js/recipe/thirdparty'; // TODO: Bring this back once upstream is fixed up. @@ -54,7 +55,8 @@ SuperTokens.init({ recipeList: [ Session.init({ tokenTransferMethod: 'header' }), EmailPassword.init(), - ThirdParty.init() + ThirdParty.init(), + Passwordless.init() ] }); @@ -63,9 +65,10 @@ const startupError = (window as any).__SD_ERROR__ as string | undefined; export default function App() { useEffect(() => { // This tells Tauri to show the current window because it's finished loading - commands.appReady().then(() => { - if (import.meta.env.PROD) window.fetch = fetch; - }); + commands.appReady(); + // .then(() => { + // if (import.meta.env.PROD) window.fetch = fetch; + // }); }, []); useEffect(() => { diff --git a/apps/mobile/modules/sd-core/android/build.sh b/apps/mobile/modules/sd-core/android/build.sh index 85939124b..6034c8b42 100755 --- a/apps/mobile/modules/sd-core/android/build.sh +++ b/apps/mobile/modules/sd-core/android/build.sh @@ -58,7 +58,8 @@ if [ "${CI:-}" = "true" ]; then ;; esac else - ANDROID_BUILD_TARGET_LIST="arm64-v8a armeabi-v7a x86_64" + # ANDROID_BUILD_TARGET_LIST="arm64-v8a armeabi-v7a x86_64" + ANDROID_BUILD_TARGET_LIST="arm64-v8a" fi # Configure build targets CLI arg for `cargo ndk` diff --git a/apps/mobile/modules/sd-core/ios/build-rust.sh b/apps/mobile/modules/sd-core/ios/build-rust.sh index e76842270..a066bcf6c 100755 --- 
a/apps/mobile/modules/sd-core/ios/build-rust.sh +++ b/apps/mobile/modules/sd-core/ios/build-rust.sh @@ -48,10 +48,10 @@ mkdir -p "$TARGET_DIRECTORY" TARGET_DIRECTORY="$(CDPATH='' cd -- "$TARGET_DIRECTORY" && pwd -P)" TARGET_CONFIG=debug -if [ "${CONFIGURATION:-}" = "Release" ]; then - set -- --release - TARGET_CONFIG=release -fi +# if [ "${CONFIGURATION:-}" = "Release" ]; then +# set -- --release +# TARGET_CONFIG=release +# fi trap 'if [ -e "${CARGO_CONFIG}.bak" ]; then mv "${CARGO_CONFIG}.bak" "$CARGO_CONFIG"; fi' EXIT @@ -59,21 +59,21 @@ trap 'if [ -e "${CARGO_CONFIG}.bak" ]; then mv "${CARGO_CONFIG}.bak" "$CARGO_CON RUST_PATH="${CARGO_HOME:-"${HOME}/.cargo"}/bin:$(brew --prefix)/bin:$(env -i /bin/bash --noprofile --norc -c 'echo $PATH')" if [ "${PLATFORM_NAME:-}" = "iphonesimulator" ]; then case "$(uname -m)" in - "arm64" | "aarch64") # M series - sed -i.bak "s|FFMPEG_DIR = { force = true, value = \".*\" }|FFMPEG_DIR = { force = true, value = \"${DEPS}/aarch64-apple-ios-sim\" }|" "$CARGO_CONFIG" - env CARGO_FEATURE_STATIC=1 PATH="$RUST_PATH" cargo build -p sd-mobile-ios --target aarch64-apple-ios-sim "$@" - lipo -create -output "$TARGET_DIRECTORY"/libsd_mobile_iossim.a "${TARGET_DIRECTORY}/aarch64-apple-ios-sim/${TARGET_CONFIG}/libsd_mobile_ios.a" - symlink_libs "${DEPS}/aarch64-apple-ios-sim/lib" "$TARGET_DIRECTORY" - ;; - "x86_64") # Intel - sed -i.bak "s|FFMPEG_DIR = { force = true, value = \".*\" }|FFMPEG_DIR = { force = true, value = \"${DEPS}/x86_64-apple-ios\" }|" "$CARGO_CONFIG" - env CARGO_FEATURE_STATIC=1 PATH="$RUST_PATH" cargo build -p sd-mobile-ios --target x86_64-apple-ios "$@" - lipo -create -output "$TARGET_DIRECTORY"/libsd_mobile_iossim.a "${TARGET_DIRECTORY}/x86_64-apple-ios/${TARGET_CONFIG}/libsd_mobile_ios.a" - symlink_libs "${DEPS}/x86_64-apple-ios/lib" "$TARGET_DIRECTORY" - ;; - *) - err 'Unsupported architecture.' 
- ;; + "arm64" | "aarch64") # M series + sed -i.bak "s|FFMPEG_DIR = { force = true, value = \".*\" }|FFMPEG_DIR = { force = true, value = \"${DEPS}/aarch64-apple-ios-sim\" }|" "$CARGO_CONFIG" + env CARGO_FEATURE_STATIC=1 PATH="$RUST_PATH" cargo build -p sd-mobile-ios --target aarch64-apple-ios-sim "$@" + lipo -create -output "$TARGET_DIRECTORY"/libsd_mobile_iossim.a "${TARGET_DIRECTORY}/aarch64-apple-ios-sim/${TARGET_CONFIG}/libsd_mobile_ios.a" + symlink_libs "${DEPS}/aarch64-apple-ios-sim/lib" "$TARGET_DIRECTORY" + ;; + "x86_64") # Intel + sed -i.bak "s|FFMPEG_DIR = { force = true, value = \".*\" }|FFMPEG_DIR = { force = true, value = \"${DEPS}/x86_64-apple-ios\" }|" "$CARGO_CONFIG" + env CARGO_FEATURE_STATIC=1 PATH="$RUST_PATH" cargo build -p sd-mobile-ios --target x86_64-apple-ios "$@" + lipo -create -output "$TARGET_DIRECTORY"/libsd_mobile_iossim.a "${TARGET_DIRECTORY}/x86_64-apple-ios/${TARGET_CONFIG}/libsd_mobile_ios.a" + symlink_libs "${DEPS}/x86_64-apple-ios/lib" "$TARGET_DIRECTORY" + ;; + *) + err 'Unsupported architecture.' 
+ ;; esac else sed -i.bak "s|FFMPEG_DIR = { force = true, value = \".*\" }|FFMPEG_DIR = { force = true, value = \"${DEPS}/aarch64-apple-ios\" }|" "$CARGO_CONFIG" diff --git a/apps/mobile/src/components/modal/ImportLibraryModal.tsx b/apps/mobile/src/components/modal/ImportLibraryModal.tsx index 82de9fdd4..dddedcc53 100644 --- a/apps/mobile/src/components/modal/ImportLibraryModal.tsx +++ b/apps/mobile/src/components/modal/ImportLibraryModal.tsx @@ -2,13 +2,7 @@ import { BottomSheetFlatList } from '@gorhom/bottom-sheet'; import { NavigationProp, useNavigation } from '@react-navigation/native'; import { forwardRef } from 'react'; import { ActivityIndicator, Text, View } from 'react-native'; -import { - CloudLibrary, - useBridgeMutation, - useBridgeQuery, - useClientContext, - useRspcContext -} from '@sd/client'; +import { useBridgeMutation, useBridgeQuery, useClientContext, useRspcContext } from '@sd/client'; import { Modal, ModalRef } from '~/components/layout/Modal'; import { Button } from '~/components/primitive/Button'; import useForwardedRef from '~/hooks/useForwardedRef'; @@ -25,9 +19,9 @@ const ImportModalLibrary = forwardRef((_, ref) => { const { libraries } = useClientContext(); - const cloudLibraries = useBridgeQuery(['cloud.library.list']); + const cloudLibraries = useBridgeQuery(['cloud.libraries.list', true]); const cloudLibrariesData = cloudLibraries.data?.filter( - (cloudLibrary) => !libraries.data?.find((l) => l.uuid === cloudLibrary.uuid) + (cloudLibrary) => !libraries.data?.find((l) => l.uuid === cloudLibrary.pub_id) ); return ( @@ -81,38 +75,37 @@ const ImportModalLibrary = forwardRef((_, ref) => { }); interface Props { - data: CloudLibrary; + // data: CloudLibrary; modalRef: React.RefObject; navigation: NavigationProp; } -const CloudLibraryCard = ({ data, modalRef, navigation }: Props) => { +const CloudLibraryCard = ({ modalRef, navigation }: Props) => { const rspc = useRspcContext().queryClient; - const joinLibrary = 
useBridgeMutation(['cloud.library.join']); + // const joinLibrary = useBridgeMutation(['cloud.library.join']); return ( - {data.name} + {'BOB'} diff --git a/apps/mobile/src/screens/settings/info/Debug.tsx b/apps/mobile/src/screens/settings/info/Debug.tsx index 5dd423b9f..74ebb4749 100644 --- a/apps/mobile/src/screens/settings/info/Debug.tsx +++ b/apps/mobile/src/screens/settings/info/Debug.tsx @@ -38,7 +38,6 @@ const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => { const getGroup = useBridgeQuery([ 'cloud.syncGroups.get', { - access_token: accessToken.trim(), pub_id: '0192123b-5d01-7341-aa9d-4a08571052ee', with_library: true, with_devices: true, @@ -46,7 +45,7 @@ const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => { } ]); // console.log(getGroup.data); - const currentDevice = useBridgeQuery(['cloud.devices.get_current_device', accessToken.trim()]); + const currentDevice = useBridgeQuery(['cloud.devices.get_current_device']); // console.log('Current Device: ', currentDevice.data); const createSyncGroup = useLibraryMutation('cloud.syncGroups.create'); @@ -111,7 +110,7 @@ const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => { - -
- )} - - ); -} +// return ( +// +// +//
+// } +// > +// {cloudLibrary.data ? ( +//
+// +// {thisInstance && } +// +//
+// ) : ( +//
+// +// +//
+// +//

+// {t('cloud_connect_description')} +//

+//
+// +//
+//
+// )} +// +// ); +// } -// million-ignore -const Instances = ({ instances }: { instances: any[] }) => { - const { library } = useLibraryContext(); - const filteredInstances = instances.filter((instance) => instance.uuid !== library.instance_id); - return ( -
-
-

Instances

- {filteredInstances.length} -
-
- {filteredInstances.map((instance) => ( - -
- -

- {instance.metadata.name} -

-
-
- -

- Id:{' '} - {instance.id} -

-
- -

- UUID:{' '} - - {instance.uuid} - -

-
- -

- Public Key:{' '} - - {instance.identity} - -

-
-
-
- ))} -
-
- ); -}; +// // million-ignore +// const Instances = ({ instances }: { instances: any[] }) => { +// const { library } = useLibraryContext(); +// const filteredInstances = instances.filter((instance) => instance.uuid !== library.instance_id); +// return ( +//
+//
+//

Instances

+// {filteredInstances.length} +//
+//
+// {filteredInstances.map((instance) => ( +// +//
+// +//

+// {instance.metadata.name} +//

+//
+//
+// +//

+// Id:{' '} +// {instance.id} +//

+//
+// +//

+// UUID:{' '} +// +// {instance.uuid} +// +//

+//
+// +//

+// Public Key:{' '} +// +// {instance.identity} +// +//

+//
+//
+//
+// ))} +//
+//
+// ); +// }; -interface LibraryProps { - cloudLibrary: any; - thisInstance: any | undefined; -} +// interface LibraryProps { +// cloudLibrary: any; +// thisInstance: any | undefined; +// } -// million-ignore -const Library = ({ thisInstance, cloudLibrary }: LibraryProps) => { - const syncLibrary = useLibraryMutation(['cloud.library.sync']); - return ( -
-

Library

- -

- Name: {cloudLibrary.name} -

- -
-
- ); -}; +// // million-ignore +// const Library = ({ thisInstance, cloudLibrary }: LibraryProps) => { +// const syncLibrary = useLibraryMutation(['cloud.library.sync']); +// return ( +//
+//

Library

+// +//

+// Name: {cloudLibrary.name} +//

+// +//
+//
+// ); +// }; -interface ThisInstanceProps { - instance: any; -} +// interface ThisInstanceProps { +// instance: any; +// } -// million-ignore -const ThisInstance = ({ instance }: ThisInstanceProps) => { - return ( -
-

This Instance

- -
- -

- {instance.metadata.name} -

-
-
- -

- Id: {instance.id} -

-
- -

- UUID: {instance.uuid} -

-
- -

- Public Key:{' '} - {instance.identity} -

-
-
-
-
- ); -}; +// // million-ignore +// const ThisInstance = ({ instance }: ThisInstanceProps) => { +// return ( +//
+//

This Instance

+// +//
+// +//

+// {instance.metadata.name} +//

+//
+//
+// +//

+// Id: {instance.id} +//

+//
+// +//

+// UUID: {instance.uuid} +//

+//
+// +//

+// Public Key:{' '} +// {instance.identity} +//

+//
+//
+//
+//
+// ); +// }; diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index aa5d3ddc5..fccfbb067 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -12,6 +12,7 @@ import { import { Button, Card, tw } from '@sd/ui'; import StatCard from '~/app/$libraryId/overview/StatCard'; import { TruncatedText } from '~/components'; +import { getTokens } from '~/util'; import { hardwareModelToIcon } from '~/util/hardware'; type User = { @@ -32,34 +33,19 @@ const Profile = ({ }) => { const emailName = user.email?.split('@')[0]; const capitalizedEmailName = (emailName?.charAt(0).toUpperCase() ?? '') + emailName?.slice(1); - const refreshToken: string = - JSON.parse(window.localStorage.getItem('frontendCookies') ?? '[]') - .find((cookie: string) => cookie.startsWith('st-refresh-token')) - ?.split('=')[1] - .split(';')[0] || ''; - const accessToken: string = - JSON.parse(window.localStorage.getItem('frontendCookies') ?? 
'[]') - .find((cookie: string) => cookie.startsWith('st-access-token')) - ?.split('=')[1] - .split(';')[0] || ''; + const { accessToken, refreshToken } = getTokens(); + console.log(accessToken); const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); const cloudDeleteDevice = useBridgeMutation('cloud.devices.delete'); - const devices = useBridgeQuery(['cloud.devices.list', { access_token: accessToken.trim() }]); + const devices = useBridgeQuery(['cloud.devices.list']); const addLibraryToCloud = useLibraryMutation('cloud.libraries.create'); - const listLibraries = useBridgeQuery([ - 'cloud.libraries.list', - { access_token: accessToken.trim(), with_device: true } - ]); + const listLibraries = useBridgeQuery(['cloud.libraries.list', true]); const createSyncGroup = useLibraryMutation('cloud.syncGroups.create'); - const listSyncGroups = useBridgeQuery([ - 'cloud.syncGroups.list', - { access_token: accessToken.trim(), with_library: true } - ]); + const listSyncGroups = useBridgeQuery(['cloud.syncGroups.list', true]); const requestJoinSyncGroup = useBridgeMutation('cloud.syncGroups.request_join'); const getGroup = useBridgeQuery([ 'cloud.syncGroups.get', { - access_token: accessToken.trim(), pub_id: '0192123b-5d01-7341-aa9d-4a08571052ee', with_library: true, with_devices: true, @@ -67,7 +53,7 @@ const Profile = ({ } ]); console.log(getGroup.data); - const currentDevice = useBridgeQuery(['cloud.devices.get_current_device', accessToken.trim()]); + const currentDevice = useBridgeQuery(['cloud.devices.get_current_device']); console.log('Current Device: ', currentDevice.data); // Refetch every 10 seconds @@ -137,10 +123,7 @@ const Profile = ({ + - -
{/* MT is added to hide */} diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index b4aff9ee8..915ce6fcd 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -1,6 +1,8 @@ import clsx from 'clsx'; import { useEffect, useState } from 'react'; +import { signOut } from 'supertokens-web-js/recipe/passwordless'; import { useBridgeMutation } from '@sd/client'; +import { Button } from '@sd/ui'; import { Authentication } from '~/components'; import { useLocale } from '~/hooks'; import { AUTH_SERVER_URL, getTokens } from '~/util'; @@ -25,21 +27,15 @@ export const Component = () => { const user_data = await fetch(`${AUTH_SERVER_URL}/api/user`, { method: 'GET' }); + const data = await user_data.json(); - console.log('Data from user (auth API)', data); - return data; + + setUserInfo(data.id ? data : null); } - _().then((data) => { - // Check if data is the same as the user type - if (data.id) { - setUserInfo(data); - } else { - setUserInfo(null); - } - }); + _(); setReload(false); - // eslint-disable-next-line react-hooks/exhaustive-deps }, [reload]); + const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); const tokens = getTokens(); @@ -48,6 +44,24 @@ export const Component = () => { + {userInfo?.id && ( +
+ +
+ )} + + } />
Date: Tue, 1 Oct 2024 19:36:15 +0300 Subject: [PATCH 160/218] change to sd cloud --- .../app/$libraryId/settings/client/account/index.tsx | 3 +-- interface/components/Authentication.tsx | 9 +++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index 915ce6fcd..89299b24c 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -5,7 +5,7 @@ import { useBridgeMutation } from '@sd/client'; import { Button } from '@sd/ui'; import { Authentication } from '~/components'; import { useLocale } from '~/hooks'; -import { AUTH_SERVER_URL, getTokens } from '~/util'; +import { AUTH_SERVER_URL } from '~/util'; import { Heading } from '../../Layout'; import Profile from './Profile'; @@ -37,7 +37,6 @@ export const Component = () => { }, [reload]); const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); - const tokens = getTokens(); return ( <> diff --git a/interface/components/Authentication.tsx b/interface/components/Authentication.tsx index 89a4c6415..d58112008 100644 --- a/interface/components/Authentication.tsx +++ b/interface/components/Authentication.tsx @@ -124,8 +124,13 @@ export const Authentication = ({
-

- Spacedrive +

+ Spacedrive Cloud

{activeTab === 'Login' ? ( From 60cfafe77e840369b8051458c3253502052ed380 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 1 Oct 2024 14:46:27 -0300 Subject: [PATCH 161/218] Update schema and deps --- Cargo.lock | Bin 337574 -> 337518 bytes Cargo.toml | 2 +- core/crates/cloud-services/Cargo.toml | 13 ++++++------- .../src/key_manager/key_store.rs | 6 ++++-- .../cloud-services/src/key_manager/mod.rs | 6 ++++-- core/crates/cloud-services/src/lib.rs | 1 - core/crates/cloud-services/src/p2p/mod.rs | 2 +- core/src/api/cloud/devices.rs | 5 +++-- core/src/api/cloud/mod.rs | 4 ++-- packages/client/src/core.ts | 2 +- 10 files changed, 22 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6d1f8e4f13e472fb509ee06d0a9cd0735b394bf..4c55bf3b9297d54e75ad09aaee62272f026b0eff 100644 GIT binary patch delta 617 zcmYLGy=&A#5aoBjQ&9^cM9!c@VgwtJ-I@I)nzL6?EJW~{nf<6l^9RH_sl4KfElnZT zip8%Hg8{WMC3upalXs&r)duqaYb;1ST0gnyi&}u{_#n29voD7pGjqz!p(ZqL?{>Hv+td ziV8|(lo~-0Q-ubFq!|rDM(Z^ymysis=0gA6WBc*sm-1uWTwV4SgJ2BSX_*Ort<=0YgP>x?}50)n{n(N)smyJC$ z*}OBqwzAW7D?7b=3+2zuo-0FZ87b9hymP@J1sjsEzzG8wp{um10q1HGsG>=nK?$!Z zM&nq;Nwd0p=0MpTw2R%>LHq5tOc8Bnjg}YxkD(@>GdZQCBdWXtT|yDEAewx7J1EE2 fTi4xKZ-4Jy8;#p;H13bim#1^heE05k|IM|(c^<;F delta 686 zcmYL`JxG>O6vuhq=P9WWM8tRVgMw&K&iy#|14MNz33K?|LkvT6@*&HdwIBi&h0~yn2l65r$e=w%Z7f6Czfm0k z_s*5=-R9=;#s792J71QsG*e~lPt&ch9W=wWgWl8G^7d~tTAqpa@{+TG9Uf9yR6B`c z@`SO9_SGWR3sTAkjPk6Mg^Wp%bQvWkMs!E64x>lQR==G+j^6FJKktehG@BHO@ zOJDUsf~bUr`k$&DlB)kOY4CikUmiTEMY@f4?wBSv+TW+kt&R4~`t)WyoZjqcxy Date: Tue, 1 Oct 2024 15:18:28 -0300 Subject: [PATCH 162/218] Update schema --- Cargo.lock | Bin 337518 -> 337518 bytes Cargo.toml | 2 +- .../src/screens/settings/info/Debug.tsx | 26 +++++++++--------- packages/client/src/core.ts | 8 +++--- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4c55bf3b9297d54e75ad09aaee62272f026b0eff..4449a4514b8ff8bb1793b3fc6973a78757a601ce 100644 GIT binary patch delta 86 
zcmaENM&#WYk%kt=7N#xC8{3VHEK|&kEes7z43rVfG~?t%%j8rG19P)vgH)rGL?g3Q iOC#gNBm+w$i|HFvn8c?yOk?6|XX#+x&eFk>H4gyAEE?PZ delta 84 zcmaENM&#WYk%kt=7N#xC8`}+2Q;f~il1z=wlavw6WD}#*w6sJE6B9#=#Kcrf0|R4= gRAYnWG=s#%$qz4!Pp_ZG#NE!)!MvTNgC%Po0Q?;s@c;k- diff --git a/Cargo.toml b/Cargo.toml index 21c13170f..2092be9f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ rust-version = "1.80" [workspace.dependencies] # First party dependencies -sd-cloud-schema = { git = "https://github.com/spacedriveapp/cloud-services-schema", rev = "ed37fb537b" } +sd-cloud-schema = { git = "https://github.com/spacedriveapp/cloud-services-schema", rev = "29d6381040" } # Third party dependencies used by one or more of our crates async-channel = "2.3" diff --git a/apps/mobile/src/screens/settings/info/Debug.tsx b/apps/mobile/src/screens/settings/info/Debug.tsx index e5c074909..1c987bc63 100644 --- a/apps/mobile/src/screens/settings/info/Debug.tsx +++ b/apps/mobile/src/screens/settings/info/Debug.tsx @@ -1,8 +1,6 @@ -import { useQueryClient } from '@tanstack/react-query'; import React from 'react'; import { Text, View } from 'react-native'; import { - CloudSyncGroupWithLibraryAndDevices, useBridgeMutation, useBridgeQuery, useDebugState, @@ -122,17 +120,19 @@ const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => {
- } - > - {cloudLibrary.data ? ( -
- - {thisInstance && } - -
- ) : ( -
- - -
- -

- {t('cloud_connect_description')} -

-
- -
-
- )} - - ); -} +// return ( +// +// +//
+// } +// > +// {cloudLibrary.data ? ( +//
+// +// {thisInstance && } +// +//
+// ) : ( +//
+// +// +//
+// +//

+// {t('cloud_connect_description')} +//

+//
+// +//
+//
+// )} +// +// ); +// } // // million-ignore // const Instances = ({ instances }: { instances: any[] }) => { From 3f231c37a0c1157f3f2588e2d8960bb42d76eb6a Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Sat, 5 Oct 2024 14:12:32 -0400 Subject: [PATCH 166/218] Update index.tsx --- interface/app/$libraryId/settings/client/account/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/interface/app/$libraryId/settings/client/account/index.tsx b/interface/app/$libraryId/settings/client/account/index.tsx index 89299b24c..b00a93f5d 100644 --- a/interface/app/$libraryId/settings/client/account/index.tsx +++ b/interface/app/$libraryId/settings/client/account/index.tsx @@ -1,5 +1,5 @@ import clsx from 'clsx'; -import { useEffect, useState } from 'react'; +import React, { useEffect, useState } from 'react'; import { signOut } from 'supertokens-web-js/recipe/passwordless'; import { useBridgeMutation } from '@sd/client'; import { Button } from '@sd/ui'; From 13ba8d9e3083cdeff988593c9315863161224f4d Mon Sep 17 00:00:00 2001 From: myung03 Date: Mon, 7 Oct 2024 15:51:00 -0700 Subject: [PATCH 167/218] add device popup modal --- .../SidebarLayout/LibrariesDropdown.tsx | 1 + .../settings/node/libraries/JoinDialog.tsx | 19 ++-- interface/components/RequestAddDialog.tsx | 89 +++++++++++++++++++ interface/index.tsx | 13 ++- interface/locales/en/common.json | 3 + packages/ui/src/Dialog.tsx | 6 +- 6 files changed, 115 insertions(+), 16 deletions(-) create mode 100644 interface/components/RequestAddDialog.tsx diff --git a/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/LibrariesDropdown.tsx b/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/LibrariesDropdown.tsx index 034be0f2f..0511cf33b 100644 --- a/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/LibrariesDropdown.tsx +++ b/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/LibrariesDropdown.tsx @@ -3,6 +3,7 @@ import clsx from 'clsx'; import { 
useClientContext } from '@sd/client'; import { dialogManager, Dropdown, DropdownMenu } from '@sd/ui'; import JoinDialog from '~/app/$libraryId/settings/node/libraries/JoinDialog'; +import RequestAddDialog from '~/components/RequestAddDialog'; import { useLocale } from '~/hooks'; import CreateDialog from '../../../settings/node/libraries/CreateDialog'; diff --git a/interface/app/$libraryId/settings/node/libraries/JoinDialog.tsx b/interface/app/$libraryId/settings/node/libraries/JoinDialog.tsx index 759d890e7..e57f5340c 100644 --- a/interface/app/$libraryId/settings/node/libraries/JoinDialog.tsx +++ b/interface/app/$libraryId/settings/node/libraries/JoinDialog.tsx @@ -1,15 +1,7 @@ import { useQueryClient } from '@tanstack/react-query'; import { useNavigate } from 'react-router'; -import { - LibraryConfigWrapped, - useBridgeMutation, - useBridgeQuery, - useClientContext, - useLibraryContext, - usePlausibleEvent, - useZodForm -} from '@sd/client'; -import { Button, Dialog, Select, SelectOption, toast, useDialog, UseDialogProps, z } from '@sd/ui'; +import { LibraryConfigWrapped, useBridgeMutation, useBridgeQuery, useZodForm } from '@sd/client'; +import { Dialog, Loader, Select, SelectOption, toast, useDialog, UseDialogProps, z } from '@sd/ui'; import { useLocale } from '~/hooks'; import { usePlatform } from '~/util/Platform'; @@ -67,7 +59,12 @@ export default (props: UseDialogProps & { librariesCtx: LibraryConfigWrapped[] | ctaLabel={form.formState.isSubmitting ? t('joining') : t('join')} >
- {cloudLibraries.isLoading && {t('loading')}...} + {cloudLibraries.isLoading && ( +
+ + {t('loading')}... +
+ )} {cloudLibraries.data && ( { await signInClicked(data.email, data.password, reload, cloudBootstrap); })} @@ -207,6 +210,7 @@ const LoginForm = ({ reload, cloudBootstrap, setContinueWithEmail }: LoginProps) - - - - + + )) ||

No sync groups found.

} +
- if ( - currentDevice.data && - getGroup.data && - getGroup.data.kind === 'WithDevices' - ) { - console.log('Current Device: ', currentDevice.data); - console.log('Get Group: ', getGroup.data.data); - requestJoinSyncGroup.mutate({ - sync_group: getGroup.data.data, - asking_device: currentDevice.data - }); - } - }} - > - Request Join Sync Group - {/* List all devices from const devices */} {devices.data?.map((device) => ( - // - - // Date: Mon, 14 Oct 2024 15:16:26 -0700 Subject: [PATCH 186/218] create sync group button --- .../settings/client/account/Profile.tsx | 43 +++++++++++-------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index 95cf6f52e..c678a0708 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -84,23 +84,32 @@ const Profile = ({
{/* Debug Buttons */} -

DEBUG

- - +
+ + + +
{/* Automatically list sync groups and provide a join button */}
From c7e87f08629b7ce950e0bcc2766cbb6402a79fc7 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Mon, 14 Oct 2024 21:18:28 -0400 Subject: [PATCH 187/218] Sync profile page & Update mobile device names DO NOT TRY ANDROID. IT IS BROKEN AND PANICKING ON CORE WHEN COMPILED. --- apps/mobile/src/App.tsx | 28 ++++++- .../client/AccountSettings/AccountProfile.tsx | 79 ++++++++++++++++++- core/src/node/config.rs | 24 ++++++ .../settings/client/account/Profile.tsx | 1 - 4 files changed, 127 insertions(+), 5 deletions(-) diff --git a/apps/mobile/src/App.tsx b/apps/mobile/src/App.tsx index cf3455461..24acf14ad 100644 --- a/apps/mobile/src/App.tsx +++ b/apps/mobile/src/App.tsx @@ -25,6 +25,7 @@ import { LibraryContextProvider, P2PContextProvider, RspcProvider, + useBridgeMutation, useBridgeQuery, useBridgeSubscription, useClientContext, @@ -132,16 +133,37 @@ function AppContainer() { useInvalidateQuery(); const { id } = useSnapshot(currentLibraryStore); + const userResponse = useBridgeMutation('cloud.userResponse'); + useBridgeSubscription(['cloud.listenCloudServicesNotifications'], { onData: (d) => { console.log('Received cloud service notification', d); switch (d.kind) { case 'ReceivedJoinSyncGroupRequest': - // TODO: Show modal to accept or reject + // WARNING: This is a debug solution to accept the device into the sync group. 
THIS SHOULD NOT MAKE IT TO PRODUCTION + userResponse.mutate({ + kind: 'AcceptDeviceInSyncGroup', + data: { + ticket: d.data.ticket, + accepted: { + id: d.data.sync_group.library.pub_id, + name: d.data.sync_group.library.name, + description: null + } + } + }); + // TODO: Move the code above into the dialog below (@Rocky43007) + // dialogManager.create((dp) => ( + // + // )); break; default: - // TODO: Show notification/toast for other kinds - toast.info(`Cloud Service Notification -> ${d.kind}`); + toast.info(`Cloud Service Notification: ${d.kind}`); break; } } diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx index eeb164e9d..b13b532a9 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx @@ -1,13 +1,15 @@ import { useNavigation } from '@react-navigation/native'; import { Envelope } from 'phosphor-react-native'; +import { useEffect, useState } from 'react'; import { Text, View } from 'react-native'; +import { useBridgeMutation, useBridgeQuery, useLibraryMutation } from '@sd/client'; import Card from '~/components/layout/Card'; import ScreenContainer from '~/components/layout/ScreenContainer'; import { Button } from '~/components/primitive/Button'; import { tw } from '~/lib/tailwind'; import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; import { getUserStore, useUserStore } from '~/stores/userStore'; -import { AUTH_SERVER_URL } from '~/utils'; +import { AUTH_SERVER_URL, getTokens } from '~/utils'; const AccountProfile = () => { const userInfo = useUserStore().userInfo; @@ -16,6 +18,28 @@ const AccountProfile = () => { const capitalizedEmailName = (emailName?.charAt(0).toUpperCase() ?? 
'') + emailName?.slice(1); const navigator = useNavigation['navigation']>(); + const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); + const devices = useBridgeQuery(['cloud.devices.list']); + const addLibraryToCloud = useLibraryMutation('cloud.libraries.create'); + const listLibraries = useBridgeQuery(['cloud.libraries.list', true]); + const createSyncGroup = useLibraryMutation('cloud.syncGroups.create'); + const listSyncGroups = useBridgeQuery(['cloud.syncGroups.list']); + const requestJoinSyncGroup = useBridgeMutation('cloud.syncGroups.request_join'); + const currentDevice = useBridgeQuery(['cloud.devices.get_current_device']); + const [{ accessToken, refreshToken }, setTokens] = useState<{ + accessToken: string; + refreshToken: string; + }>({ + accessToken: '', + refreshToken: '' + }); + useEffect(() => { + (async () => { + const { accessToken, refreshToken } = await getTokens(); + setTokens({ accessToken, refreshToken }); + })(); + }, []); + async function signOut() { await fetch(`${AUTH_SERVER_URL}/api/auth/signout`, { method: 'POST' @@ -49,6 +73,59 @@ const AccountProfile = () => { + {/* Debug buttons */} + + + + + + + + Library Sync Groups + {listSyncGroups.data?.map((group) => ( + + {group.library.name} + + + )) || No sync groups found.} + ); diff --git a/core/src/node/config.rs b/core/src/node/config.rs index 6fa79879b..28ddb7a4a 100644 --- a/core/src/node/config.rs +++ b/core/src/node/config.rs @@ -197,7 +197,15 @@ impl ManagedVersion for NodeConfig { type MigrationError = NodeConfigError; fn from_latest_version() -> Option { + #[cfg(not(any(target_os = "ios", target_os = "android")))] let mut name = whoami::devicename(); + + #[cfg(target_os = "ios")] + let mut name = "iOS Device".to_string(); + + #[cfg(target_os = "android")] + let mut name = "Android Device".to_string(); + name.truncate(255); let os = DeviceOS::from_env(); @@ -328,12 +336,28 @@ impl NodeConfig { ); config.remove("name"); + + #[cfg(not(any(target_os = "ios", target_os = 
"android")))] config.insert( String::from("name"), serde_json::to_value(whoami::devicename()) .map_err(VersionManagerError::SerdeJson)?, ); + #[cfg(target_os = "ios")] + config.insert( + String::from("name"), + serde_json::to_value("iOS Device") + .map_err(VersionManagerError::SerdeJson)?, + ); + + #[cfg(target_os = "android")] + config.insert( + String::from("name"), + serde_json::to_value("Android Device") + .map_err(VersionManagerError::SerdeJson)?, + ); + config.insert( String::from("os"), serde_json::to_value(std::env::consts::OS) diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index c678a0708..3fc6bc584 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -28,7 +28,6 @@ const Profile = ({ const { accessToken, refreshToken } = getTokens(); const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); - const cloudDeleteDevice = useBridgeMutation('cloud.devices.delete'); const devices = useBridgeQuery(['cloud.devices.list']); const addLibraryToCloud = useLibraryMutation('cloud.libraries.create'); const listLibraries = useBridgeQuery(['cloud.libraries.list', true]); From 9e4dbbacf4a693bb1f6294d235d96f385c41d0e4 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Mon, 14 Oct 2024 21:25:27 -0400 Subject: [PATCH 188/218] Update pnpm-lock.yaml --- pnpm-lock.yaml | Bin 1067283 -> 1068720 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 40a8923c1d5beb5a10e7f038c641eaec35591e4b..9599a17ad9ad7530ea8f6545509b1c8fd4c2145b 100644 GIT binary patch delta 457 zcmbO{$#KJ6#|SqXfi$UKTuhMFh4JoiNW-X zUwM?LI|}hHoL(iwFA9@gBgC%(lD#IxzjXQ|A$~cagkc50+;n+iem{^zzA%6D^lD*# zNth0(dD{;N^Its9Xt;gSZT_ds{3v0hXE;6aJ&*KsfksZ2?eFgJTQE+K731faeq0FR p6x)0JS|GDC@A0dF=!y6E^+EI@pg6OMf#!6e+3MTH?(_f61OSIpp3?vT delta 177 
zcmdlm*KzVB#|hRldh)x{{LQ7g57dAHN}DUEyyu^MU>iqs;ga^kC5%AK z1jNih%mT!$K+Fcj?Ar^MaJ*n?ms`aF#GF9P1;pGy%(Go?6>lWl^sl#gq^C<9 Date: Mon, 14 Oct 2024 19:28:31 -0700 Subject: [PATCH 189/218] debug sync indicators --- .../settings/client/account/Profile.tsx | 39 ++++++++++++++++++- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/interface/app/$libraryId/settings/client/account/Profile.tsx b/interface/app/$libraryId/settings/client/account/Profile.tsx index 3fc6bc584..250f3b930 100644 --- a/interface/app/$libraryId/settings/client/account/Profile.tsx +++ b/interface/app/$libraryId/settings/client/account/Profile.tsx @@ -1,6 +1,14 @@ import { Envelope } from '@phosphor-icons/react'; -import { Dispatch, SetStateAction, useEffect } from 'react'; -import { useBridgeMutation, useBridgeQuery, useLibraryMutation } from '@sd/client'; +import clsx from 'clsx'; +import { Dispatch, SetStateAction, useEffect, useState } from 'react'; +import { + SyncStatus, + useBridgeMutation, + useBridgeQuery, + useBridgeSubscription, + useLibraryMutation, + useLibrarySubscription +} from '@sd/client'; import { Button, Card, tw } from '@sd/ui'; import StatCard from '~/app/$libraryId/overview/StatCard'; import { TruncatedText } from '~/components'; @@ -30,6 +38,13 @@ const Profile = ({ const cloudBootstrap = useBridgeMutation('cloud.bootstrap'); const devices = useBridgeQuery(['cloud.devices.list']); const addLibraryToCloud = useLibraryMutation('cloud.libraries.create'); + const [syncStatus, setSyncStatus] = useState(null); + useLibrarySubscription(['sync.active'], { + onData: (data) => { + console.log('sync activity', data); + setSyncStatus(data); + } + }); const listLibraries = useBridgeQuery(['cloud.libraries.list', true]); const createSyncGroup = useLibraryMutation('cloud.syncGroups.create'); const listSyncGroups = useBridgeQuery(['cloud.syncGroups.list']); @@ -72,6 +87,26 @@ const Profile = ({
+ {/* Sync activity */} +
+

Sync Activity

+
+ {Object.keys(syncStatus ?? {}).map((status, index) => ( + +
+

{status}

+ + ))} +
+
+ {/* Automatically list libraries */}

Cloud Libraries

From 576729334ace811cd5ff490ccf600d617236ba08 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Mon, 14 Oct 2024 23:39:06 -0300 Subject: [PATCH 190/218] Many tweaks to improve sync responsiveness and observability --- Cargo.lock | Bin 345091 -> 345122 bytes core/crates/cloud-services/src/sync/ingest.rs | 3 +- core/crates/cloud-services/src/sync/mod.rs | 19 +--- .../crates/cloud-services/src/sync/receive.rs | 90 ++++++++---------- core/crates/cloud-services/src/sync/send.rs | 7 +- core/crates/heavy-lifting/src/indexer/mod.rs | 6 +- core/crates/sync/Cargo.toml | 1 + core/crates/sync/src/ingest_utils.rs | 9 +- core/crates/sync/src/manager.rs | 49 ++++++---- crates/utils/Cargo.toml | 2 + crates/utils/src/lib.rs | 21 ++++ 11 files changed, 114 insertions(+), 93 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5e237f4cddbeb85b8c6b825dc70988e70dbeb9fe..674aab99ecb23a43ded9898271510f0734fa0183 100644 GIT binary patch delta 61 zcmV-D0K)%+h!vuU6@Y{RgaWh!)2Wv(n*$4%tsnyox8JD)j1rfRgaiwh1gQfTmx8lyaTt=yaX|a+(j5! 
delta 65 zcmV-H0KWgCh!ul~6@Y{RgaWh!)2WvRnFA!3mAV5GmbwE7w;ZYiS`wG;xdRxNnY;rr Xm%gI|2A8m?0~m+#yaTuKyaX|aO5_@t diff --git a/core/crates/cloud-services/src/sync/ingest.rs b/core/crates/cloud-services/src/sync/ingest.rs index 90714e144..065ceb964 100644 --- a/core/crates/cloud-services/src/sync/ingest.rs +++ b/core/crates/cloud-services/src/sync/ingest.rs @@ -4,6 +4,7 @@ use sd_core_sync::{from_cloud_crdt_ops, CompressedCRDTOperationsPerModelPerDevic use sd_actors::{Actor, Stopper}; use sd_prisma::prisma::{cloud_crdt_operation, SortOrder}; +use sd_utils::timestamp_to_datetime; use std::{ future::IntoFuture, @@ -19,7 +20,7 @@ use futures_concurrency::future::Race; use tokio::{sync::Notify, time::sleep}; use tracing::{debug, error}; -use super::{timestamp_to_datetime, ReceiveAndIngestNotifiers, SyncActors, ONE_MINUTE}; +use super::{ReceiveAndIngestNotifiers, SyncActors, ONE_MINUTE}; const BATCH_SIZE: i64 = 1000; diff --git a/core/crates/cloud-services/src/sync/mod.rs b/core/crates/cloud-services/src/sync/mod.rs index 0d5ebf45c..b694befb4 100644 --- a/core/crates/cloud-services/src/sync/mod.rs +++ b/core/crates/cloud-services/src/sync/mod.rs @@ -1,7 +1,6 @@ use crate::{CloudServices, Error}; -use futures_concurrency::future::TryJoin; -use sd_core_sync::{SyncManager, NTP64}; +use sd_core_sync::SyncManager; use sd_actors::{ActorsCollection, IntoActor}; use sd_cloud_schema::sync::groups; @@ -11,10 +10,10 @@ use std::{ fmt, path::Path, sync::{atomic::AtomicBool, Arc}, - time::{Duration, SystemTime, UNIX_EPOCH}, + time::Duration, }; -use chrono::{DateTime, Utc}; +use futures_concurrency::future::TryJoin; use tokio::sync::Notify; mod ingest; @@ -135,15 +134,3 @@ pub async fn declare_actors( Ok(Arc::clone(&actors_state.receiver_and_ingester_notifiers)) } - -fn datetime_to_timestamp(latest_time: DateTime) -> NTP64 { - NTP64::from( - SystemTime::from(latest_time) - .duration_since(UNIX_EPOCH) - .expect("hardcoded earlier time, nothing is earlier than UNIX_EPOCH"), - ) -} - -fn 
timestamp_to_datetime(timestamp: NTP64) -> DateTime { - DateTime::from(timestamp.to_system_time()) -} diff --git a/core/crates/cloud-services/src/sync/receive.rs b/core/crates/cloud-services/src/sync/receive.rs index 27b4b5ae8..c8e411107 100644 --- a/core/crates/cloud-services/src/sync/receive.rs +++ b/core/crates/cloud-services/src/sync/receive.rs @@ -22,7 +22,6 @@ use sd_prisma::prisma::PrismaClient; use std::{ collections::{hash_map::Entry, HashMap}, future::IntoFuture, - num::NonZero, path::Path, pin::Pin, sync::{ @@ -34,7 +33,7 @@ use std::{ use chrono::{DateTime, Utc}; use futures::{FutureExt, StreamExt, TryStreamExt}; -use futures_concurrency::future::{Join, Race, TryJoin}; +use futures_concurrency::future::{Race, TryJoin}; use quic_rpc::transport::quinn::QuinnConnection; use reqwest::Response; use reqwest_middleware::ClientWithMiddleware; @@ -42,12 +41,11 @@ use serde::{Deserialize, Serialize}; use tokio::{ fs, io::{self, AsyncRead, AsyncReadExt, ReadBuf}, - spawn, - sync::{Notify, Semaphore}, + sync::Notify, time::sleep, }; use tokio_util::io::StreamReader; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, warn}; use uuid::Uuid; use super::{ReceiveAndIngestNotifiers, SyncActors, ONE_MINUTE}; @@ -62,7 +60,6 @@ pub struct Receiver { device_pub_id: devices::PubId, cloud_services: Arc, cloud_client: Client>, - semaphore: Arc, key_manager: Arc, sync: SyncManager, notifiers: Arc, @@ -137,11 +134,6 @@ impl Receiver { device_pub_id: devices::PubId(Uuid::from(&sync.device_pub_id)), cloud_services, cloud_client, - semaphore: Arc::new(Semaphore::new( - std::thread::available_parallelism() - .map(NonZero::get) - .unwrap_or(1), - )), key_manager, sync, notifiers, @@ -179,9 +171,10 @@ impl Receiver { } self.handle_new_messages(new_messages).await?; - self.notifiers.notify_ingester(); } + debug!("Finished sync messages receiver actor iteration"); + self.keeper.save().await } @@ -189,35 +182,36 @@ impl Receiver { &mut self, new_messages: 
Vec, ) -> Result<(), Error> { - let handles = new_messages - .into_iter() - .map(|message| { - let sync_group_pub_id = self.sync_group_pub_id; - let semaphore = Arc::clone(&self.semaphore); - let key_manager = Arc::clone(&self.key_manager); - let sync = self.sync.clone(); - let http_client = self.cloud_services.http_client().clone(); + debug!( + new_messages_collections_count = new_messages.len(), + start_time = ?new_messages.first().map(|c| c.start_time), + end_time = ?new_messages.first().map(|c| c.end_time), + "Handling new sync messages collections", + ); - async move { - spawn(handle_single_message( - sync_group_pub_id, - message, - semaphore, - key_manager, - sync, - http_client, - )) - .await - } - }) - .collect::>(); + for message in new_messages.into_iter().filter(|message| { + if message.original_device_pub_id == self.device_pub_id { + warn!("Received sync message from the current device, need to check backend, this is a bug!"); + false + } else { + true + } + }) { + debug!( + new_messages_count = message.operations_count, + start_time = ?message.start_time, + end_time = ?message.end_time, + "Handling new sync messages", + ); - for res in handles.join().await { - let Ok(res) = res else { - return Err(Error::SyncMessagesDownloadAndDecryptTaskPanicked); - }; - - let (device_pub_id, timestamp) = res?; + let (device_pub_id, timestamp) = handle_single_message( + self.sync_group_pub_id, + message, + &self.key_manager, + &self.sync, + self.cloud_services.http_client(), + ) + .await?; match self.keeper.timestamps.entry(device_pub_id) { Entry::Occupied(mut entry) => { @@ -225,10 +219,16 @@ impl Receiver { *entry.get_mut() = timestamp; } } + Entry::Vacant(entry) => { entry.insert(timestamp); } } + + // To ingest after each sync message collection is received, we MUST download and + // store the messages SEQUENTIALLY, otherwise we might ingest messages out of order + // due to parallel downloads + self.notifiers.notify_ingester(); } Ok(()) @@ -249,21 +249,15 @@ 
async fn handle_single_message( signed_download_link, .. }: MessagesCollection, - semaphore: Arc, - key_manager: Arc, - sync: SyncManager, - http_client: ClientWithMiddleware, + key_manager: &KeyManager, + sync: &SyncManager, + http_client: &ClientWithMiddleware, ) -> Result<(devices::PubId, DateTime), Error> { // FIXME(@fogodev): If we don't have the key hash, we need to fetch it from another device in the group if possible let Some(secret_key) = key_manager.get_key(sync_group_pub_id, &key_hash).await else { return Err(Error::MissingKeyHash); }; - let _permit = semaphore - .acquire() - .await - .expect("sync messages receiver semaphore never closes"); - let response = http_client .get(signed_download_link) .send() diff --git a/core/crates/cloud-services/src/sync/send.rs b/core/crates/cloud-services/src/sync/send.rs index 060188fec..2e36b8118 100644 --- a/core/crates/cloud-services/src/sync/send.rs +++ b/core/crates/cloud-services/src/sync/send.rs @@ -14,6 +14,7 @@ use sd_crypto::{ primitives::EncryptedBlock, CryptoRng, SeedableRng, }; +use sd_utils::{datetime_to_timestamp, timestamp_to_datetime}; use std::{ future::IntoFuture, @@ -40,11 +41,13 @@ use tokio::{ use tracing::{debug, error}; use uuid::Uuid; -use super::{datetime_to_timestamp, timestamp_to_datetime, SyncActors, ONE_MINUTE}; +use super::{SyncActors, ONE_MINUTE}; const TEN_SECONDS: Duration = Duration::from_secs(10); const THIRTY_SECONDS: Duration = Duration::from_secs(30); +const MESSAGES_COLLECTION_SIZE: u32 = 100_000; + enum RaceNotifiedOrStopped { Notified, Stopped, @@ -173,7 +176,7 @@ impl Sender { let mut crdt_ops_stream = pin!(self.sync.stream_device_ops( &self.sync.device_pub_id, - 1000, + MESSAGES_COLLECTION_SIZE, current_latest_timestamp )); diff --git a/core/crates/heavy-lifting/src/indexer/mod.rs b/core/crates/heavy-lifting/src/indexer/mod.rs index 028ec8d2d..8ec8d59e8 100644 --- a/core/crates/heavy-lifting/src/indexer/mod.rs +++ b/core/crates/heavy-lifting/src/indexer/mod.rs @@ -322,7 +322,7 
@@ pub async fn reverse_update_directories_sizes( ) .await?; - let to_sync_and_update = ancestors + let (sync_ops, update_queries) = ancestors .into_values() .filter_map(|materialized_path| { if let Some((pub_id, size)) = @@ -350,7 +350,9 @@ pub async fn reverse_update_directories_sizes( }) .unzip::<_, _, Vec<_>, Vec<_>>(); - sync.write_ops(db, to_sync_and_update).await?; + if !sync_ops.is_empty() && !update_queries.is_empty() { + sync.write_ops(db, (sync_ops, update_queries)).await?; + } Ok(()) } diff --git a/core/crates/sync/Cargo.toml b/core/crates/sync/Cargo.toml index 5e87856af..229a58ead 100644 --- a/core/crates/sync/Cargo.toml +++ b/core/crates/sync/Cargo.toml @@ -19,6 +19,7 @@ sd-utils = { path = "../../../crates/utils" } # Workspace dependencies async-channel = { workspace = true } async-stream = { workspace = true } +chrono = { workspace = true } futures = { workspace = true } futures-concurrency = { workspace = true } prisma-client-rust = { workspace = true, features = ["rspc"] } diff --git a/core/crates/sync/src/ingest_utils.rs b/core/crates/sync/src/ingest_utils.rs index d8e6629df..3cc4c8a68 100644 --- a/core/crates/sync/src/ingest_utils.rs +++ b/core/crates/sync/src/ingest_utils.rs @@ -171,7 +171,8 @@ async fn handle_crdt_updates( } db._transaction() - .with_timeout(30 * 1000) + .with_timeout(30 * 10000) + .with_max_wait(30 * 10000) .run(|db| async move { // fake operation to batch them all at once ModelSyncData::from_op(CRDTOperation { @@ -251,7 +252,8 @@ async fn handle_crdt_create_and_updates( } db._transaction() - .with_timeout(30 * 1000) + .with_timeout(30 * 10000) + .with_max_wait(30 * 10000) .run(|db| async move { // fake a create with a bunch of data rather than individual insert ModelSyncData::from_op(CRDTOperation { @@ -309,7 +311,8 @@ async fn handle_crdt_deletion( }; db._transaction() - .with_timeout(30 * 1000) + .with_timeout(30 * 10000) + .with_max_wait(30 * 10000) .run(|db| async move { ModelSyncData::from_op(op.clone()) 
.ok_or(Error::InvalidModelId(model))? diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index ce8e6c39c..382261b9d 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -8,6 +8,7 @@ use sd_sync::{ CRDTOperation, CompressedCRDTOperationsPerModel, CompressedCRDTOperationsPerModelPerDevice, ModelId, OperationFactory, }; +use sd_utils::timestamp_to_datetime; use std::{ collections::BTreeMap, @@ -23,7 +24,7 @@ use async_stream::stream; use futures::Stream; use futures_concurrency::future::TryJoin; use tokio::sync::{broadcast, Mutex, Notify, RwLock}; -use tracing::warn; +use tracing::{debug, warn}; use uhlc::{HLCBuilder, HLC}; use uuid::Uuid; @@ -319,30 +320,36 @@ impl Manager { .exec() .await { - Ok(ops) => { - if ops.is_empty() { - break; + Ok(ops) if ops.is_empty() => break, + + Ok(ops) => match ops + .into_iter() + .map(from_crdt_ops) + .collect::, _>>() + { + Ok(ops) => { + debug!( + start_datetime = ?ops + .first() + .map(|op| timestamp_to_datetime(op.timestamp)), + end_datetime = ?ops + .last() + .map(|op| timestamp_to_datetime(op.timestamp)), + count = ops.len(), + "Streaming crdt ops", + ); + + if let Some(last_op) = ops.last() { + current_initial_timestamp = last_op.timestamp; + } + + yield Ok(ops); } - match ops.into_iter().map(from_crdt_ops).collect::, _>>() { - Ok(ops) => { - if let Some(last_op) = ops.last() { - current_initial_timestamp = last_op.timestamp; - } - - yield Ok(ops); - }, - Err(e) => { - yield Err(e); - break; - }, - } + Err(e) => return yield Err(e), } - Err(e) => { - yield Err(e.into()); - break; - } + Err(e) => return yield Err(e.into()) } } } diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index c9685fa14..45891b344 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -12,8 +12,10 @@ rust-version.workspace = true sd-prisma = { path = "../prisma" } # Workspace dependencies +chrono = { workspace = true } prisma-client-rust = { workspace = true } rspc 
= { workspace = true, features = ["unstable"] } thiserror = { workspace = true } tracing = { workspace = true } +uhlc = { workspace = true } uuid = { workspace = true } diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index da92b7b79..09f45d609 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -27,6 +27,10 @@ #![forbid(deprecated_in_future)] #![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)] +use std::time::{SystemTime, UNIX_EPOCH}; + +use chrono::{DateTime, Utc}; +use uhlc::NTP64; use uuid::Uuid; pub mod db; @@ -104,6 +108,23 @@ macro_rules! msgpack { }} } +/// Helper function to convert a [`chrono::DateTime`] to a [`uhlc::NTP64`] +#[allow(clippy::missing_panics_doc)] // Doesn't actually panic +#[must_use] +pub fn datetime_to_timestamp(latest_time: DateTime) -> NTP64 { + NTP64::from( + SystemTime::from(latest_time) + .duration_since(UNIX_EPOCH) + .expect("hardcoded earlier time, nothing is earlier than UNIX_EPOCH"), + ) +} + +/// Helper function to convert a [`uhlc::NTP64`] to a [`chrono::DateTime`] +#[must_use] +pub fn timestamp_to_datetime(timestamp: NTP64) -> DateTime { + DateTime::from(timestamp.to_system_time()) +} + // Only used for testing purposes. Do not use in production code. 
use std::any::type_name; From b7ce861acee700ebe340186a6b429cefe0e6a845 Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Mon, 14 Oct 2024 22:46:13 -0400 Subject: [PATCH 191/218] Add sync status to mobile --- .../client/AccountSettings/AccountProfile.tsx | 51 +++++++++++++++++-- 1 file changed, 48 insertions(+), 3 deletions(-) diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx index b13b532a9..3734e63a8 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/AccountProfile.tsx @@ -2,11 +2,17 @@ import { useNavigation } from '@react-navigation/native'; import { Envelope } from 'phosphor-react-native'; import { useEffect, useState } from 'react'; import { Text, View } from 'react-native'; -import { useBridgeMutation, useBridgeQuery, useLibraryMutation } from '@sd/client'; +import { + SyncStatus, + useBridgeMutation, + useBridgeQuery, + useLibraryMutation, + useLibrarySubscription +} from '@sd/client'; import Card from '~/components/layout/Card'; import ScreenContainer from '~/components/layout/ScreenContainer'; import { Button } from '~/components/primitive/Button'; -import { tw } from '~/lib/tailwind'; +import { tw, twStyle } from '~/lib/tailwind'; import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; import { getUserStore, useUserStore } from '~/stores/userStore'; import { AUTH_SERVER_URL, getTokens } from '~/utils'; @@ -39,6 +45,13 @@ const AccountProfile = () => { setTokens({ accessToken, refreshToken }); })(); }, []); + const [syncStatus, setSyncStatus] = useState(null); + useLibrarySubscription(['sync.active'], { + onData: (data) => { + console.log('sync activity', data); + setSyncStatus(data); + } + }); async function signOut() { await fetch(`${AUTH_SERVER_URL}/api/auth/signout`, { @@ 
-73,6 +86,36 @@ const AccountProfile = () => { + {/* Sync activity */} + + Sync Activity + + {Object.keys(syncStatus ?? {}).map((status, index) => ( + + + {status} + + ))} + + + + {/* Automatically list libraries */} + + Cloud Libraries + {listLibraries.data?.map((library) => ( + + {library.name} + + )) || No libraries found.} + + {/* Debug buttons */}

- {t('logged_in_as', "TODO")} + {t('logged_in_as', 'TODO')}
); } diff --git a/interface/hooks/useDeeplinkEventHandler.ts b/interface/hooks/useDeeplinkEventHandler.ts index 14f4f0a0b..11556ec66 100644 --- a/interface/hooks/useDeeplinkEventHandler.ts +++ b/interface/hooks/useDeeplinkEventHandler.ts @@ -17,7 +17,9 @@ export const useDeeplinkEventHandler = () => { const searchParamsObj = new URLSearchParams(searchParams); const searchParamsString = searchParamsObj.toString(); console.log('Navigating to', { - path, searchParamsString, hash + path, + searchParamsString, + hash }); navigate({ pathname: path, search: searchParamsString, hash }); diff --git a/packages/ui/tsconfig.json b/packages/ui/tsconfig.json index 0c6521e2c..0ba317fbf 100644 --- a/packages/ui/tsconfig.json +++ b/packages/ui/tsconfig.json @@ -3,5 +3,5 @@ "compilerOptions": { "rootDir": "src", "declarationDir": "dist" - }, + } } From 97e1d0a122277d0f04a975faa7c6e3d6138d44bb Mon Sep 17 00:00:00 2001 From: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com> Date: Mon, 14 Oct 2024 22:52:18 -0400 Subject: [PATCH 193/218] Fix lint error --- .../settings/node/libraries/DeleteDeviceDialog.tsx | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/interface/app/$libraryId/settings/node/libraries/DeleteDeviceDialog.tsx b/interface/app/$libraryId/settings/node/libraries/DeleteDeviceDialog.tsx index db19933de..60f06670f 100644 --- a/interface/app/$libraryId/settings/node/libraries/DeleteDeviceDialog.tsx +++ b/interface/app/$libraryId/settings/node/libraries/DeleteDeviceDialog.tsx @@ -27,8 +27,7 @@ export default function DeleteLibraryDialog(props: Props) { const accessToken = useAccessToken(); const { data: node } = useBridgeQuery(['nodeState']); const deleteDevice = useBridgeMutation('cloud.devices.delete'); - const deviceAmount = useBridgeQuery(['cloud.devices.list', { access_token: accessToken }]).data - ?.length; + const deviceAmount = useBridgeQuery(['cloud.devices.list']).data?.length; const form = useZodForm(); @@ -54,12 +53,10 @@ 
export default function DeleteLibraryDialog(props: Props) { return; } - await deleteDevice.mutateAsync({ - access_token: accessToken, - pub_id: props.pubId - }); + await deleteDevice.mutateAsync(props.pubId); queryClient.invalidateQueries(['library.list']); + // eslint-disable-next-line @typescript-eslint/no-unused-expressions platform.refreshMenuBar && platform.refreshMenuBar(); navigate('/'); } catch (e) { From c67eaf1ae9f2404e105b72ce8f52d2b2d1699370 Mon Sep 17 00:00:00 2001 From: Jamie Pine Date: Mon, 14 Oct 2024 22:11:49 -0700 Subject: [PATCH 194/218] fun indicators for debug --- .../Layout/Sidebar/SidebarLayout/Footer.tsx | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/Footer.tsx b/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/Footer.tsx index 2d7dc4835..ba5f967d3 100644 --- a/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/Footer.tsx +++ b/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/Footer.tsx @@ -9,7 +9,7 @@ import { useDebugState, useLibrarySubscription } from '@sd/client'; -import { Button, ButtonLink, Tooltip } from '@sd/ui'; +import { Button, ButtonLink, Loader, Tooltip } from '@sd/ui'; import { useKeysMatcher, useLocale, useShortcut } from '~/hooks'; import { usePlatform } from '~/util/Platform'; @@ -80,5 +80,12 @@ function SyncStatusIndicator() { onData: setStatus }); - return null; + return ( +
+ {status?.cloud_ingest && } + {status?.cloud_send && } + {status?.cloud_receive && } + {status?.ingest && } +
+ ); } From 663f833925c1d8a68a05cd29a303d77e409a2922 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 15 Oct 2024 03:01:27 -0300 Subject: [PATCH 195/218] Tweaking backfill --- core/crates/sync/src/backfill.rs | 195 ++++++++++++++++--------------- core/src/library/library.rs | 4 +- 2 files changed, 101 insertions(+), 98 deletions(-) diff --git a/core/crates/sync/src/backfill.rs b/core/crates/sync/src/backfill.rs index 9081c9904..9895e8b8c 100644 --- a/core/crates/sync/src/backfill.rs +++ b/core/crates/sync/src/backfill.rs @@ -44,20 +44,21 @@ pub async fn backfill_operations(sync: &SyncManager) -> Result<(), Error> { .exec() .await?; + backfill_device(&db, sync, local_device).await?; + ( - backfill_device(&db, sync, local_device), backfill_storage_statistics(&db, sync, local_device_id), paginate_tags(&db, sync), paginate_locations(&db, sync, local_device_id), paginate_objects(&db, sync, local_device_id), - paginate_exif_datas(&db, sync, local_device_id), - paginate_file_paths(&db, sync, local_device_id), paginate_labels(&db, sync), ) .try_join() .await?; ( + paginate_exif_datas(&db, sync, local_device_id), + paginate_file_paths(&db, sync, local_device_id), paginate_tags_on_objects(&db, sync, local_device_id), paginate_labels_on_objects(&db, sync, local_device_id), ) @@ -203,13 +204,11 @@ where #[instrument(skip(db, sync), err)] async fn paginate_tags(db: &PrismaClient, sync: &SyncManager) -> Result<(), Error> { - use tag::{color, date_created, date_modified, id, name}; - paginate( |cursor| { db.tag() - .find_many(vec![id::gt(cursor)]) - .order_by(id::order(SortOrder::Asc)) + .find_many(vec![tag::id::gt(cursor)]) + .order_by(tag::id::order(SortOrder::Asc)) .exec() }, |tag| tag.id, @@ -221,10 +220,10 @@ async fn paginate_tags(db: &PrismaClient, sync: &SyncManager) -> Result<(), Erro chain_optional_iter( [], [ - option_sync_entry!(t.name, name), - option_sync_entry!(t.color, color), - option_sync_entry!(t.date_created, date_created), - 
option_sync_entry!(t.date_modified, date_modified), + option_sync_entry!(t.name, tag::name), + option_sync_entry!(t.color, tag::color), + option_sync_entry!(t.date_created, tag::date_created), + option_sync_entry!(t.date_modified, tag::date_modified), ], ), ) @@ -243,18 +242,16 @@ async fn paginate_locations( sync: &SyncManager, device_id: device::id::Type, ) -> Result<(), Error> { - use location::{ - available_capacity, date_created, device_id, generate_preview_media, hidden, id, include, - instance, is_archived, name, path, size_in_bytes, sync_preview_media, total_capacity, - }; - paginate( |cursor| { db.location() - .find_many(vec![id::gt(cursor), device_id::equals(Some(device_id))]) - .order_by(id::order(SortOrder::Asc)) + .find_many(vec![ + location::id::gt(cursor), + location::device_id::equals(Some(device_id)), + ]) + .order_by(location::id::order(SortOrder::Asc)) .take(1000) - .include(include!({ + .include(location::include!({ instance: select { id pub_id @@ -273,24 +270,30 @@ async fn paginate_locations( chain_optional_iter( [], [ - option_sync_entry!(l.name, name), - option_sync_entry!(l.path, path), - option_sync_entry!(l.total_capacity, total_capacity), - option_sync_entry!(l.available_capacity, available_capacity), - option_sync_entry!(l.size_in_bytes, size_in_bytes), - option_sync_entry!(l.is_archived, is_archived), + option_sync_entry!(l.name, location::name), + option_sync_entry!(l.path, location::path), + option_sync_entry!(l.total_capacity, location::total_capacity), + option_sync_entry!( + l.available_capacity, + location::available_capacity + ), + option_sync_entry!(l.size_in_bytes, location::size_in_bytes), + option_sync_entry!(l.is_archived, location::is_archived), option_sync_entry!( l.generate_preview_media, - generate_preview_media + location::generate_preview_media ), - option_sync_entry!(l.sync_preview_media, sync_preview_media), - option_sync_entry!(l.hidden, hidden), - option_sync_entry!(l.date_created, date_created), + 
option_sync_entry!( + l.sync_preview_media, + location::sync_preview_media + ), + option_sync_entry!(l.hidden, location::hidden), + option_sync_entry!(l.date_created, location::date_created), option_sync_entry!( l.instance.map(|i| { prisma_sync::instance::SyncId { pub_id: i.pub_id } }), - instance + location::instance ), option_sync_entry!( l.device.map(|device| { @@ -298,7 +301,7 @@ async fn paginate_locations( pub_id: device.pub_id, } }), - device + location::device ), ], ), @@ -318,18 +321,16 @@ async fn paginate_objects( sync: &SyncManager, device_id: device::id::Type, ) -> Result<(), Error> { - use object::{ - date_accessed, date_created, device_id, favorite, hidden, id, important, include, kind, - note, - }; - paginate( |cursor| { db.object() - .find_many(vec![id::gt(cursor), device_id::equals(Some(device_id))]) - .order_by(id::order(SortOrder::Asc)) + .find_many(vec![ + object::id::gt(cursor), + object::device_id::equals(Some(device_id)), + ]) + .order_by(object::id::order(SortOrder::Asc)) .take(1000) - .include(include!({ + .include(object::include!({ device: select { pub_id } })) .exec() @@ -344,20 +345,20 @@ async fn paginate_objects( chain_optional_iter( [], [ - option_sync_entry!(o.kind, kind), - option_sync_entry!(o.hidden, hidden), - option_sync_entry!(o.favorite, favorite), - option_sync_entry!(o.important, important), - option_sync_entry!(o.note, note), - option_sync_entry!(o.date_created, date_created), - option_sync_entry!(o.date_accessed, date_accessed), + option_sync_entry!(o.kind, object::kind), + option_sync_entry!(o.hidden, object::hidden), + option_sync_entry!(o.favorite, object::favorite), + option_sync_entry!(o.important, object::important), + option_sync_entry!(o.note, object::note), + option_sync_entry!(o.date_created, object::date_created), + option_sync_entry!(o.date_accessed, object::date_accessed), option_sync_entry!( o.device.map(|device| { prisma_sync::device::SyncId { pub_id: device.pub_id, } }), - device + object::device ), ], ), 
@@ -443,18 +444,15 @@ async fn paginate_file_paths( sync: &SyncManager, device_id: device::id::Type, ) -> Result<(), Error> { - use file_path::{ - cas_id, date_created, date_indexed, date_modified, device_id, extension, hidden, id, - include, inode, integrity_checksum, is_dir, location, materialized_path, name, object, - size_in_bytes_bytes, - }; - paginate( |cursor| { db.file_path() - .find_many(vec![id::gt(cursor), device_id::equals(Some(device_id))]) - .order_by(id::order(SortOrder::Asc)) - .include(include!({ + .find_many(vec![ + file_path::id::gt(cursor), + file_path::device_id::equals(Some(device_id)), + ]) + .order_by(file_path::id::order(SortOrder::Asc)) + .include(file_path::include!({ location: select { pub_id } object: select { pub_id } device: select { pub_id } @@ -471,37 +469,46 @@ async fn paginate_file_paths( chain_optional_iter( [], [ - option_sync_entry!(fp.is_dir, is_dir), - option_sync_entry!(fp.cas_id, cas_id), - option_sync_entry!(fp.integrity_checksum, integrity_checksum), + option_sync_entry!(fp.is_dir, file_path::is_dir), + option_sync_entry!(fp.cas_id, file_path::cas_id), + option_sync_entry!( + fp.integrity_checksum, + file_path::integrity_checksum + ), option_sync_entry!( fp.location.map(|l| { prisma_sync::location::SyncId { pub_id: l.pub_id } }), - location + file_path::location ), option_sync_entry!( fp.object.map(|o| { prisma_sync::object::SyncId { pub_id: o.pub_id } }), - object + file_path::object ), - option_sync_entry!(fp.materialized_path, materialized_path), - option_sync_entry!(fp.name, name), - option_sync_entry!(fp.extension, extension), - option_sync_entry!(fp.hidden, hidden), - option_sync_entry!(fp.size_in_bytes_bytes, size_in_bytes_bytes), - option_sync_entry!(fp.inode, inode), - option_sync_entry!(fp.date_created, date_created), - option_sync_entry!(fp.date_modified, date_modified), - option_sync_entry!(fp.date_indexed, date_indexed), + option_sync_entry!( + fp.materialized_path, + file_path::materialized_path + ), + 
option_sync_entry!(fp.name, file_path::name), + option_sync_entry!(fp.extension, file_path::extension), + option_sync_entry!(fp.hidden, file_path::hidden), + option_sync_entry!( + fp.size_in_bytes_bytes, + file_path::size_in_bytes_bytes + ), + option_sync_entry!(fp.inode, file_path::inode), + option_sync_entry!(fp.date_created, file_path::date_created), + option_sync_entry!(fp.date_modified, file_path::date_modified), + option_sync_entry!(fp.date_indexed, file_path::date_indexed), option_sync_entry!( fp.device.map(|device| { prisma_sync::device::SyncId { pub_id: device.pub_id, } }), - device + file_path::device ), ], ), @@ -521,19 +528,17 @@ async fn paginate_tags_on_objects( sync: &SyncManager, device_id: device::id::Type, ) -> Result<(), Error> { - use tag_on_object::{date_created, device_id, include, object_id, tag_id}; - paginate_relation( |group_id, item_id| { db.tag_on_object() .find_many(vec![ - tag_id::gt(group_id), - object_id::gt(item_id), - device_id::equals(Some(device_id)), + tag_on_object::tag_id::gt(group_id), + tag_on_object::object_id::gt(item_id), + tag_on_object::device_id::equals(Some(device_id)), ]) - .order_by(tag_id::order(SortOrder::Asc)) - .order_by(object_id::order(SortOrder::Asc)) - .include(include!({ + .order_by(tag_on_object::tag_id::order(SortOrder::Asc)) + .order_by(tag_on_object::object_id::order(SortOrder::Asc)) + .include(tag_on_object::include!({ tag: select { pub_id } object: select { pub_id } device: select { pub_id } @@ -557,14 +562,14 @@ async fn paginate_tags_on_objects( chain_optional_iter( [], [ - option_sync_entry!(t_o.date_created, date_created), + option_sync_entry!(t_o.date_created, tag_on_object::date_created), option_sync_entry!( t_o.device.map(|device| { prisma_sync::device::SyncId { pub_id: device.pub_id, } }), - device + tag_on_object::device ), ], ), @@ -580,13 +585,11 @@ async fn paginate_tags_on_objects( #[instrument(skip(db, sync), err)] async fn paginate_labels(db: &PrismaClient, sync: &SyncManager) -> 
Result<(), Error> { - use label::{date_created, date_modified, id}; - paginate( |cursor| { db.label() - .find_many(vec![id::gt(cursor)]) - .order_by(id::order(SortOrder::Asc)) + .find_many(vec![label::id::gt(cursor)]) + .order_by(label::id::order(SortOrder::Asc)) .exec() }, |label| label.id, @@ -599,8 +602,8 @@ async fn paginate_labels(db: &PrismaClient, sync: &SyncManager) -> Result<(), Er chain_optional_iter( [], [ - option_sync_entry!(l.date_created, date_created), - option_sync_entry!(l.date_modified, date_modified), + option_sync_entry!(l.date_created, label::date_created), + option_sync_entry!(l.date_modified, label::date_modified), ], ), ) @@ -619,19 +622,17 @@ async fn paginate_labels_on_objects( sync: &SyncManager, device_id: device::id::Type, ) -> Result<(), Error> { - use label_on_object::{date_created, device_id, include, label_id, object_id}; - paginate_relation( |group_id, item_id| { db.label_on_object() .find_many(vec![ - label_id::gt(group_id), - object_id::gt(item_id), - device_id::equals(Some(device_id)), + label_on_object::label_id::gt(group_id), + label_on_object::object_id::gt(item_id), + label_on_object::device_id::equals(Some(device_id)), ]) - .order_by(label_id::order(SortOrder::Asc)) - .order_by(object_id::order(SortOrder::Asc)) - .include(include!({ + .order_by(label_on_object::label_id::order(SortOrder::Asc)) + .order_by(label_on_object::object_id::order(SortOrder::Asc)) + .include(label_on_object::include!({ object: select { pub_id } label: select { name } device: select { pub_id } @@ -653,14 +654,14 @@ async fn paginate_labels_on_objects( }, }, chain_optional_iter( - [sync_entry!(l_o.date_created, date_created)], + [sync_entry!(l_o.date_created, label_on_object::date_created)], [option_sync_entry!( l_o.device.map(|device| { prisma_sync::device::SyncId { pub_id: device.pub_id, } }), - device + label_on_object::device )], ), ) diff --git a/core/src/library/library.rs b/core/src/library/library.rs index 34e4313ca..f4e284f8a 100644 --- 
a/core/src/library/library.rs +++ b/core/src/library/library.rs @@ -22,7 +22,7 @@ use std::{ use futures_concurrency::future::Join; use tokio::{fs, io, sync::broadcast, sync::RwLock}; -use tracing::warn; +use tracing::{debug, warn}; use uuid::Uuid; use super::{LibraryConfig, LibraryManagerError}; @@ -129,6 +129,8 @@ impl Library { .join() .await; + debug!(library_id = %self.id, "Started cloud sync actors"); + Ok(()) } From 92c77f6da7c2d0fad38e867dd975679a37b58f50 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 15 Oct 2024 03:08:44 -0300 Subject: [PATCH 196/218] Can't backfill instance data as it isn't synced anyway --- core/crates/sync/src/backfill.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/core/crates/sync/src/backfill.rs b/core/crates/sync/src/backfill.rs index 9895e8b8c..ea1eaefbc 100644 --- a/core/crates/sync/src/backfill.rs +++ b/core/crates/sync/src/backfill.rs @@ -252,10 +252,6 @@ async fn paginate_locations( .order_by(location::id::order(SortOrder::Asc)) .take(1000) .include(location::include!({ - instance: select { - id - pub_id - } device: select { pub_id } })) .exec() @@ -289,12 +285,6 @@ async fn paginate_locations( ), option_sync_entry!(l.hidden, location::hidden), option_sync_entry!(l.date_created, location::date_created), - option_sync_entry!( - l.instance.map(|i| { - prisma_sync::instance::SyncId { pub_id: i.pub_id } - }), - location::instance - ), option_sync_entry!( l.device.map(|device| { prisma_sync::device::SyncId { From ddd14acd4156c0a595d97e9539f1f94ec152b794 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 15 Oct 2024 03:23:50 -0300 Subject: [PATCH 197/218] More backfill tweaks --- core/crates/sync/src/backfill.rs | 49 ++++++++++++++++---------------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/core/crates/sync/src/backfill.rs b/core/crates/sync/src/backfill.rs index ea1eaefbc..970de0c0d 100644 --- a/core/crates/sync/src/backfill.rs +++ b/core/crates/sync/src/backfill.rs @@ -107,12 
+107,10 @@ async fn backfill_storage_statistics( sync: &SyncManager, device_id: device::id::Type, ) -> Result<(), Error> { - use storage_statistics::{available_capacity, device, device_id, include, total_capacity}; - let Some(stats) = db .storage_statistics() - .find_first(vec![device_id::equals(Some(device_id))]) - .include(include!({device: select { pub_id }})) + .find_first(vec![storage_statistics::device_id::equals(Some(device_id))]) + .include(storage_statistics::include!({device: select { pub_id }})) .exec() .await? else { @@ -127,8 +125,11 @@ async fn backfill_storage_statistics( }, chain_optional_iter( [ - sync_entry!(stats.total_capacity, total_capacity), - sync_entry!(stats.available_capacity, available_capacity), + sync_entry!(stats.total_capacity, storage_statistics::total_capacity), + sync_entry!( + stats.available_capacity, + storage_statistics::available_capacity + ), ], [option_sync_entry!( stats.device.map(|device| { @@ -136,7 +137,7 @@ async fn backfill_storage_statistics( pub_id: device.pub_id, } }), - device + storage_statistics::device )], ), ))?]) @@ -368,18 +369,16 @@ async fn paginate_exif_datas( sync: &SyncManager, device_id: device::id::Type, ) -> Result<(), Error> { - use exif_data::{ - artist, camera_data, copyright, description, device_id, epoch_time, exif_version, id, - include, media_date, media_location, resolution, - }; - paginate( |cursor| { db.exif_data() - .find_many(vec![id::gt(cursor), device_id::equals(Some(device_id))]) - .order_by(id::order(SortOrder::Asc)) + .find_many(vec![ + exif_data::id::gt(cursor), + exif_data::device_id::equals(Some(device_id)), + ]) + .order_by(exif_data::id::order(SortOrder::Asc)) .take(1000) - .include(include!({ + .include(exif_data::include!({ object: select { pub_id } device: select { pub_id } })) @@ -399,22 +398,22 @@ async fn paginate_exif_datas( chain_optional_iter( [], [ - option_sync_entry!(ed.resolution, resolution), - option_sync_entry!(ed.media_date, media_date), - 
option_sync_entry!(ed.media_location, media_location), - option_sync_entry!(ed.camera_data, camera_data), - option_sync_entry!(ed.artist, artist), - option_sync_entry!(ed.description, description), - option_sync_entry!(ed.copyright, copyright), - option_sync_entry!(ed.exif_version, exif_version), - option_sync_entry!(ed.epoch_time, epoch_time), + option_sync_entry!(ed.resolution, exif_data::resolution), + option_sync_entry!(ed.media_date, exif_data::media_date), + option_sync_entry!(ed.media_location, exif_data::media_location), + option_sync_entry!(ed.camera_data, exif_data::camera_data), + option_sync_entry!(ed.artist, exif_data::artist), + option_sync_entry!(ed.description, exif_data::description), + option_sync_entry!(ed.copyright, exif_data::copyright), + option_sync_entry!(ed.exif_version, exif_data::exif_version), + option_sync_entry!(ed.epoch_time, exif_data::epoch_time), option_sync_entry!( ed.device.map(|device| { prisma_sync::device::SyncId { pub_id: device.pub_id, } }), - device + exif_data::device ), ], ), From 14d826c76b9894084ca7b996fc13085dcac95f3f Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 15 Oct 2024 03:59:07 -0300 Subject: [PATCH 198/218] Some minor tweaks --- core/crates/cloud-services/src/client.rs | 8 +++++++- core/crates/heavy-lifting/src/indexer/mod.rs | 4 ++++ core/src/api/cloud/mod.rs | 19 ++++++++++++++++++- packages/client/src/core.ts | 1 + 4 files changed, 30 insertions(+), 2 deletions(-) diff --git a/core/crates/cloud-services/src/client.rs b/core/crates/cloud-services/src/client.rs index 1c860eba5..71d4da975 100644 --- a/core/crates/cloud-services/src/client.rs +++ b/core/crates/cloud-services/src/client.rs @@ -2,7 +2,11 @@ use crate::p2p::{NotifyUser, UserResponse}; use sd_cloud_schema::{Client, Service, ServicesALPN}; -use std::{net::SocketAddr, sync::Arc, time::Duration}; +use std::{ + net::SocketAddr, + sync::{atomic::AtomicBool, Arc}, + time::Duration, +}; use futures::Stream; use iroh_net::relay::RelayUrl; @@ -49,6 
+53,7 @@ pub struct CloudServices { notify_user_rx: flume::Receiver, user_response_tx: flume::Sender, pub(crate) user_response_rx: flume::Receiver, + pub has_bootstrapped: Arc, } impl CloudServices { @@ -128,6 +133,7 @@ impl CloudServices { notify_user_rx, user_response_tx, user_response_rx, + has_bootstrapped: Arc::default(), }) } diff --git a/core/crates/heavy-lifting/src/indexer/mod.rs b/core/crates/heavy-lifting/src/indexer/mod.rs index 8ec8d59e8..1ad78902b 100644 --- a/core/crates/heavy-lifting/src/indexer/mod.rs +++ b/core/crates/heavy-lifting/src/indexer/mod.rs @@ -232,6 +232,10 @@ async fn remove_non_existing_file_paths( }) .unzip(); + if sync_params.is_empty() { + return Ok(0); + } + sync.write_ops( db, ( diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index bc9bd00a1..a47ced682 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -15,7 +15,7 @@ use sd_cloud_schema::{ use sd_crypto::{CryptoRng, SeedableRng}; use sd_utils::error::report_error; -use std::pin::pin; +use std::{pin::pin, sync::atomic::Ordering}; use async_stream::stream; use futures::{FutureExt, StreamExt}; @@ -50,6 +50,13 @@ pub(crate) fn mount() -> AlphaRouter { |node, (access_token, refresh_token): (auth::AccessToken, auth::RefreshToken)| async move { use sd_cloud_schema::devices; + if node.cloud_services.has_bootstrapped.load(Ordering::Acquire) { + return Err(rspc::Error::new( + rspc::ErrorCode::Conflict, + String::from("Already bootstrapped"), + )); + } + node.cloud_services .token_refresher .init(access_token, refresh_token) @@ -203,6 +210,10 @@ pub(crate) fn mount() -> AlphaRouter { .try_join() .await?; + node.cloud_services + .has_bootstrapped + .store(true, Ordering::Release); + Ok(()) }, ) @@ -228,6 +239,12 @@ pub(crate) fn mount() -> AlphaRouter { Ok(()) }), ) + .procedure( + "hasBootstrapped", + R.query(|node, _: ()| async move { + Ok(node.cloud_services.has_bootstrapped.load(Ordering::Relaxed)) + }), + ) } fn handle_comm_error( diff --git 
a/packages/client/src/core.ts b/packages/client/src/core.ts index cb1306899..a3f74b34d 100644 --- a/packages/client/src/core.ts +++ b/packages/client/src/core.ts @@ -8,6 +8,7 @@ export type Procedures = { { key: "cloud.devices.get", input: CloudDevicePubId, result: CloudDevice } | { key: "cloud.devices.get_current_device", input: never, result: CloudDevice } | { key: "cloud.devices.list", input: never, result: CloudDevice[] } | + { key: "cloud.hasBootstrapped", input: never, result: boolean } | { key: "cloud.libraries.get", input: CloudGetLibraryArgs, result: CloudLibrary } | { key: "cloud.libraries.list", input: boolean, result: CloudLibrary[] } | { key: "cloud.locations.list", input: CloudListLocationsArgs, result: CloudLocation[] } | From 8c41d64cc3fe2dca6f79300ae90a2d7be21e4d9a Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Wed, 16 Oct 2024 13:36:09 -0300 Subject: [PATCH 199/218] Fix tag assign rspc route --- core/src/api/tags.rs | 124 ++++++++++++------------------------------- 1 file changed, 33 insertions(+), 91 deletions(-) diff --git a/core/src/api/tags.rs b/core/src/api/tags.rs index 451ac624e..0d71b848c 100644 --- a/core/src/api/tags.rs +++ b/core/src/api/tags.rs @@ -5,7 +5,6 @@ use sd_prisma::{ prisma_sync, }; use sd_sync::{option_sync_db_entry, sync_entry, OperationFactory}; -use sd_utils::{msgpack, uuid_to_bytes}; use std::collections::BTreeMap; @@ -14,7 +13,6 @@ use itertools::{Either, Itertools}; use rspc::{alpha::AlphaRouter, ErrorCode}; use serde::{Deserialize, Serialize}; use specta::Type; -use uuid::Uuid; use super::{utils::library, Ctx, R}; @@ -185,17 +183,6 @@ pub(crate) fn mount() -> AlphaRouter { }) .await?; - macro_rules! 
sync_id { - ($pub_id:expr) => { - prisma_sync::tag_on_object::SyncId { - tag: prisma_sync::tag::SyncId { - pub_id: tag.pub_id.clone(), - }, - object: prisma_sync::object::SyncId { pub_id: $pub_id }, - } - }; - } - if args.unassign { let query = db.tag_on_object().delete_many(vec![ tag_on_object::tag_id::equals(args.tag_id), @@ -220,63 +207,20 @@ pub(crate) fn mount() -> AlphaRouter { .into_iter() .filter_map(|fp| fp.object.map(|o| o.pub_id)), ) - .map(|pub_id| sync.relation_delete(sync_id!(pub_id))) + .map(|pub_id| { + sync.relation_delete(prisma_sync::tag_on_object::SyncId { + tag: prisma_sync::tag::SyncId { + pub_id: tag.pub_id.clone(), + }, + object: prisma_sync::object::SyncId { pub_id }, + }) + }) .collect::>(); if !ops.is_empty() { sync.write_ops(db, (ops, query)).await?; } } else { - let mut ops = vec![]; - - let db_params: (Vec<_>, Vec<_>) = file_paths - .iter() - .filter(|fp| fp.is_dir.unwrap_or_default() && fp.object.is_none()) - .map(|fp| { - let id = uuid_to_bytes(&Uuid::now_v7()); - let device_pub_id = sync.device_pub_id.to_db(); - - ops.push(sync.shared_create( - prisma_sync::object::SyncId { pub_id: id.clone() }, - [sync_entry!( - prisma_sync::device::SyncId { - pub_id: device_pub_id.clone(), - }, - object::device - )], - )); - - ops.push(sync.shared_update( - prisma_sync::file_path::SyncId { - pub_id: fp.pub_id.clone(), - }, - file_path::object::NAME, - msgpack!(id), - )); - - ( - db.object().create( - id.clone(), - vec![object::device::connect(device::pub_id::equals( - device_pub_id, - ))], - ), - db.file_path().update( - file_path::id::equals(fp.id), - vec![file_path::object::connect(object::pub_id::equals( - id, - ))], - ), - ) - }) - .unzip(); - - if ops.is_empty() { - return Ok(()); - } - - let (new_objects, _) = sync.write_ops(db, (ops, db_params)).await?; - let (sync_ops, db_creates) = objects .into_iter() .map(|o| (o.id, o.pub_id)) @@ -285,22 +229,23 @@ pub(crate) fn mount() -> AlphaRouter { .into_iter() .filter_map(|fp| fp.object.map(|o| 
(o.id, o.pub_id))), ) - .chain(new_objects.into_iter().map(|o| (o.id, o.pub_id))) - .fold( - (vec![], vec![]), - |(mut sync_ops, mut db_creates), (id, pub_id)| { - let device_pub_id = sync.device_pub_id.to_db(); - sync_ops.push(sync.relation_create( - sync_id!(pub_id), + .map(|(id, pub_id)| { + ( + sync.relation_create( + prisma_sync::tag_on_object::SyncId { + tag: prisma_sync::tag::SyncId { + pub_id: tag.pub_id.clone(), + }, + object: prisma_sync::object::SyncId { pub_id }, + }, [sync_entry!( prisma_sync::device::SyncId { - pub_id: device_pub_id.clone(), + pub_id: sync.device_pub_id.to_db(), }, tag_on_object::device )], - )); - - db_creates.push(tag_on_object::CreateUnchecked { + ), + tag_on_object::CreateUnchecked { tag_id: args.tag_id, object_id: id, _params: vec![ @@ -309,24 +254,21 @@ pub(crate) fn mount() -> AlphaRouter { )), tag_on_object::device_id::set(Some(device_id)), ], - }); + }, + ) + }) + .unzip::<_, _, Vec<_>, Vec<_>>(); - (sync_ops, db_creates) - }, - ); - - if sync_ops.is_empty() && db_creates.is_empty() { - return Ok(()); + if !sync_ops.is_empty() && !db_creates.is_empty() { + sync.write_ops( + db, + ( + sync_ops, + db.tag_on_object().create_many(db_creates).skip_duplicates(), + ), + ) + .await?; } - - sync.write_ops( - db, - ( - sync_ops, - db.tag_on_object().create_many(db_creates).skip_duplicates(), - ), - ) - .await?; } invalidate_query!(library, "tags.getForObject"); From 12fdfb8b782add6060feadb9058b79bc96e9ea2c Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Sat, 19 Oct 2024 02:48:27 -0300 Subject: [PATCH 200/218] Update sync messages push and pull Also fix scalability issues on sync design --- Cargo.lock | Bin 345122 -> 345145 bytes Cargo.toml | 2 +- .../crates/cloud-services/src/sync/receive.rs | 185 +------- core/crates/cloud-services/src/sync/send.rs | 419 ++--------------- .../src/isolated_file_path_data.rs | 5 +- .../heavy-lifting/src/file_identifier/job.rs | 32 +- .../src/file_identifier/tasks/identifier.rs | 14 +- 
.../src/file_identifier/tasks/mod.rs | 12 +- core/crates/heavy-lifting/src/indexer/job.rs | 30 +- core/crates/heavy-lifting/src/indexer/mod.rs | 91 ++-- .../heavy-lifting/src/indexer/shallow.rs | 2 +- .../heavy-lifting/src/indexer/tasks/saver.rs | 17 +- .../src/indexer/tasks/updater.rs | 26 +- .../heavy-lifting/src/job_system/report.rs | 2 + .../heavy-lifting/src/media_processor/job.rs | 30 +- core/crates/prisma-helpers/src/lib.rs | 14 + core/crates/sync/src/ingest_utils.rs | 154 +++---- core/crates/sync/src/lib.rs | 7 +- core/crates/sync/tests/lib.rs | 234 ---------- core/crates/sync/tests/mock_instance.rs | 143 ------ core/src/api/files.rs | 83 ++-- core/src/api/labels.rs | 31 +- core/src/api/search/saved.rs | 51 +-- core/src/api/tags.rs | 118 +++-- core/src/library/manager/mod.rs | 1 + core/src/location/manager/watcher/android.rs | 13 +- core/src/location/manager/watcher/ios.rs | 10 +- core/src/location/manager/watcher/linux.rs | 13 +- core/src/location/manager/watcher/macos.rs | 10 +- core/src/location/manager/watcher/mod.rs | 16 +- core/src/location/manager/watcher/utils.rs | 427 ++++++++---------- core/src/location/manager/watcher/windows.rs | 10 +- core/src/location/mod.rs | 295 +++++------- core/src/object/fs/old_copy.rs | 4 +- core/src/object/tag/mod.rs | 4 +- .../object/validation/old_validator_job.rs | 19 +- core/src/old_job/manager.rs | 1 + core/src/old_job/report.rs | 1 + core/src/volume/mod.rs | 95 ++-- crates/crypto/src/cloud/decrypt.rs | 2 +- crates/crypto/src/cloud/secret_key.rs | 2 +- crates/crypto/src/primitives.rs | 4 +- crates/sync-generator/src/model.rs | 41 +- crates/sync-generator/src/sync_data.rs | 139 ++++-- crates/sync/src/crdt.rs | 10 +- crates/sync/src/factory.rs | 57 ++- crates/utils/Cargo.toml | 2 + 47 files changed, 1118 insertions(+), 1760 deletions(-) delete mode 100644 core/crates/sync/tests/lib.rs delete mode 100644 core/crates/sync/tests/mock_instance.rs diff --git a/Cargo.lock b/Cargo.lock index 
674aab99ecb23a43ded9898271510f0734fa0183..bbff9ba73223261d4949c704e553fa9abbeae1a0 100644 GIT binary patch delta 105 zcmZ3~A-c0ew4sHug=q`(saXce=E;T@CdnqLhRO(Lnn{Y8S(2fFiGiu5d6JQtiIJ&6 zVp_7fsj2z&fO{+w(|;~z;@&Pho4H1D`uZKrV$%&)G4o9KUct=WzJ3q$_Vs&MirWDN C03%)i delta 90 zcmdnlA-bqTw4sHug=q`(sab|;NhXGg#>pnBNy-SOWumcZijk?2iMf%biKUsPNvfq~ mVzQ-yQHo{aWJe*1>Aw~;ac!5K&0M3{zF`mZ_6>VjI@ Result<(devices::PubId, DateTime), Error> { // FIXME(@fogodev): If we don't have the key hash, we need to fetch it from another device in the group if possible let Some(secret_key) = key_manager.get_key(sync_group_pub_id, &key_hash).await else { return Err(Error::MissingKeyHash); }; - let response = http_client - .get(signed_download_link) - .send() - .await - .map_err(Error::DownloadSyncMessages)? - .error_for_status() - .map_err(Error::ErrorResponseDownloadSyncMessages)?; + debug!( + size = encrypted_messages.len(), + "Received encrypted sync messages collection" + ); - let crdt_ops = if let Some(size) = response.content_length() { - debug!(size, "Received encrypted sync messages collection"); - extract_messages_known_size(response, size, secret_key, original_device_pub_id).await - } else { - debug!("Received encrypted sync messages collection of unknown size"); - extract_messages_unknown_size(response, secret_key, original_device_pub_id).await - }?; + let crdt_ops = decrypt_messages(encrypted_messages, secret_key, original_device_pub_id).await?; assert_eq!( crdt_ops.len(), @@ -285,44 +264,28 @@ async fn handle_single_message( Ok((original_device_pub_id, end_time)) } -#[instrument(skip(response, size, secret_key), err)] -async fn extract_messages_known_size( - response: Response, - size: u64, +#[instrument(skip(encrypted_messages, secret_key), fields(messages_size = %encrypted_messages.len()), err)] +async fn decrypt_messages( + encrypted_messages: Vec, secret_key: SecretKey, devices::PubId(device_pub_id): devices::PubId, ) -> Result, Error> { - let plain_text = if size <= 
EncryptedBlock::CIPHER_TEXT_SIZE as u64 { - OneShotDecryption::decrypt( - &secret_key, - response - .bytes() - .await - .map_err(Error::ErrorResponseDownloadReadBytesSyncMessages)? - .as_ref() - .into(), - ) - .map_err(Error::Decrypt)? + let plain_text = if encrypted_messages.len() <= EncryptedBlock::CIPHER_TEXT_SIZE { + OneShotDecryption::decrypt(&secret_key, encrypted_messages.as_slice().into()) + .map_err(Error::Decrypt)? } else { - let mut reader = StreamReader::new(response.bytes_stream().map_err(|e| { - error!(?e, "Failed to read sync messages bytes stream"); - io::Error::new(io::ErrorKind::Other, e) - })); + let (nonce, cipher_text) = encrypted_messages.split_at(size_of::()); - let mut nonce = StreamNonce::default(); + let mut plain_text = Vec::with_capacity(cipher_text.len()); - reader - .read_exact(&mut nonce) - .await - .map_err(Error::ReadNonceStreamDecryption)?; - - // TODO: Reimplement using async streaming with serde if it ever gets implemented - - let mut plain_text = vec![]; - - StreamDecryption::decrypt(&secret_key, &nonce, reader, &mut plain_text) - .await - .map_err(Error::Decrypt)?; + StreamDecryption::decrypt( + &secret_key, + nonce.try_into().expect("we split the correct amount"), + cipher_text, + &mut plain_text, + ) + .await + .map_err(Error::Decrypt)?; plain_text }; @@ -332,34 +295,6 @@ async fn extract_messages_known_size( .map_err(Error::DeserializationFailureToPullSyncMessages) } -#[instrument(skip_all, err)] -async fn extract_messages_unknown_size( - response: Response, - secret_key: SecretKey, - devices::PubId(device_pub_id): devices::PubId, -) -> Result, Error> { - let plain_text = match UnknownDownloadKind::new(response).await? { - UnknownDownloadKind::OneShot(buffer) => { - OneShotDecryption::decrypt(&secret_key, buffer.as_slice().into()) - .map_err(Error::Decrypt)? 
- } - - UnknownDownloadKind::Stream((nonce, reader)) => { - let mut plain_text = vec![]; - - StreamDecryption::decrypt(&secret_key, &nonce, reader, &mut plain_text) - .await - .map_err(Error::Decrypt)?; - - plain_text - } - }; - - rmp_serde::from_slice::(&plain_text) - .map(|compressed_ops| compressed_ops.into_ops(device_pub_id)) - .map_err(Error::DeserializationFailureToPullSyncMessages) -} - #[instrument(skip_all, err)] pub async fn write_cloud_ops_to_db( ops: Vec, @@ -411,73 +346,3 @@ impl LastTimestampKeeper { .map_err(Error::FailedToWriteLastTimestampKeeper) } } - -struct UnknownDownloadSizeStreamer { - stream_reader: Box, - buffer: Vec, - was_read: usize, -} - -enum UnknownDownloadKind { - OneShot(Vec), - Stream((StreamNonce, UnknownDownloadSizeStreamer)), -} - -impl UnknownDownloadKind { - async fn new(response: Response) -> Result { - let mut buffer = Vec::with_capacity(EncryptedBlock::CIPHER_TEXT_SIZE * 2); - - let mut stream = response.bytes_stream(); - - while let Some(res) = stream.next().await { - buffer.extend(res.map_err(Error::ErrorResponseDownloadReadBytesSyncMessages)?); - if buffer.len() > EncryptedBlock::CIPHER_TEXT_SIZE { - break; - } - } - - if buffer.len() < size_of::() { - return Err(Error::IncompleteDownloadBytesSyncMessages); - } - - if buffer.len() <= EncryptedBlock::CIPHER_TEXT_SIZE { - Ok(Self::OneShot(buffer)) - } else { - let nonce_size = size_of::(); - - Ok(Self::Stream(( - StreamNonce::try_from(&buffer[..nonce_size]).expect("passing the right nonce size"), - UnknownDownloadSizeStreamer { - stream_reader: Box::new(StreamReader::new(stream.map_err(|e| { - error!(?e, "Failed to read sync messages bytes stream"); - io::Error::new(io::ErrorKind::Other, e) - }))), - buffer, - was_read: nonce_size, - }, - ))) - } - } -} - -impl AsyncRead for UnknownDownloadSizeStreamer { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - if buf.remaining() == 0 { - return Poll::Ready(Ok(())); - } - 
- if self.was_read == self.buffer.len() { - Pin::new(&mut self.stream_reader).poll_read(cx, buf) - } else { - let len = std::cmp::min(self.buffer.len() - self.was_read, buf.remaining()); - buf.put_slice(&self.buffer[self.was_read..(self.was_read + len)]); - self.was_read += len; - - Poll::Ready(Ok(())) - } - } -} diff --git a/core/crates/cloud-services/src/sync/send.rs b/core/crates/cloud-services/src/sync/send.rs index 2e36b8118..4fd3842da 100644 --- a/core/crates/cloud-services/src/sync/send.rs +++ b/core/crates/cloud-services/src/sync/send.rs @@ -6,7 +6,7 @@ use sd_actors::{Actor, Stopper}; use sd_cloud_schema::{ devices, error::{ClientSideError, NotFoundError}, - sync::{self, groups, messages}, + sync::{groups, messages}, Client, Service, }; use sd_crypto::{ @@ -18,8 +18,7 @@ use sd_utils::{datetime_to_timestamp, timestamp_to_datetime}; use std::{ future::IntoFuture, - num::NonZero, - pin::{pin, Pin}, + pin::pin, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -27,24 +26,20 @@ use std::{ time::{Duration, UNIX_EPOCH}, }; -use async_stream::try_stream; use chrono::{DateTime, Utc}; -use futures::{FutureExt, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use futures_concurrency::future::{Race, TryJoin}; -use quic_rpc::{client::UpdateSink, pattern::bidi_streaming, transport::quinn::QuinnConnection}; -use reqwest_middleware::reqwest::{header, Body}; +use quic_rpc::transport::quinn::QuinnConnection; use tokio::{ - spawn, - sync::{broadcast, oneshot, Notify, Semaphore}, + sync::{broadcast, Notify}, time::sleep, }; -use tracing::{debug, error}; +use tracing::error; use uuid::Uuid; use super::{SyncActors, ONE_MINUTE}; const TEN_SECONDS: Duration = Duration::from_secs(10); -const THIRTY_SECONDS: Duration = Duration::from_secs(30); const MESSAGES_COLLECTION_SIZE: u32 = 100_000; @@ -60,18 +55,6 @@ enum LoopStatus { type LatestTimestamp = NTP64; -type PushResponsesStream = Pin< - Box< - dyn Stream< - Item = Result< - 
Result, - bidi_streaming::ItemError>, - >, - > + Send - + Sync, - >, ->; - #[derive(Debug)] pub struct Sender { sync_group_pub_id: groups::PubId, @@ -205,17 +188,7 @@ impl Sender { let messages_bytes = rmp_serde::to_vec_named(&compressed_ops) .map_err(Error::SerializationFailureToPushSyncMessages)?; - let plain_text_size = messages_bytes.len(); - let expected_blob_size = if plain_text_size <= EncryptedBlock::PLAIN_TEXT_SIZE { - OneShotEncryption::cipher_text_size(&secret_key, plain_text_size) - } else { - StreamEncryption::cipher_text_size(&secret_key, plain_text_size) - } as u64; - - debug!(?expected_blob_size, ?key_hash, "Preparing sync message"); - - let (mut push_updates, mut push_responses) = self - .cloud_client + self.cloud_client .sync() .messages() .push(messages::push::Request { @@ -228,54 +201,15 @@ impl Sender { device_pub_id: current_device_pub_id, key_hash: key_hash.clone(), operations_count, - start_time, - end_time, - expected_blob_size, + time_range: (start_time, end_time), + encrypted_messages: encrypt_messages( + &secret_key, + &mut self.rng, + messages_bytes, + ) + .await?, }) - .await?; - - let Some(response) = push_responses.next().await else { - return Err(Error::EmptyResponse("push initial response")); - }; - - let messages::push::Response(response_kind) = response??; - - match response_kind { - messages::push::ResponseKind::SinglePresignedUrl(url) => { - upload_to_single_url( - url, - secret_key.clone(), - self.cloud_services.http_client(), - messages_bytes, - &mut self.rng, - ) - .await?; - } - messages::push::ResponseKind::ManyPresignedUrls(urls) => { - upload_to_many_urls( - urls, - secret_key.clone(), - self.cloud_services.http_client().clone(), - messages_bytes, - &mut self.rng, - &mut push_updates, - &mut push_responses, - ) - .await?; - } - messages::push::ResponseKind::Pong => { - return Err(Error::UnexpectedResponse( - "Pong on first messages push request", - )) - } - messages::push::ResponseKind::End => { - return 
Err(Error::UnexpectedResponse( - "End on first messages push request", - )) - } - } - - finalize_protocol(&mut push_updates, &mut push_responses).await?; + .await??; status = LoopStatus::SentMessages; } @@ -303,8 +237,7 @@ impl Sender { .get_access_token() .await?, group_pub_id: self.sync_group_pub_id, - current_device_pub_id, - kind: messages::get_latest_time::Kind::ForCurrentDevice, + kind: messages::get_latest_time::Kind::ForCurrentDevice(current_device_pub_id), }) .await? { @@ -328,320 +261,44 @@ impl Sender { } } -async fn finalize_protocol( - push_updates: &mut UpdateSink< - Service, - QuinnConnection, - messages::push::RequestUpdate, - sync::Service, - >, - push_responses: &mut PushResponsesStream, -) -> Result<(), Error> { - push_updates - .send(messages::push::RequestUpdate( - messages::push::UpdateKind::End, - )) - .await - .map_err(Error::EndUpdatePushSyncMessages)?; - - let Some(response) = push_responses.next().await else { - return Err(Error::EmptyResponse("push initial response")); - }; - - let messages::push::Response(response_kind) = response??; - - match response_kind { - messages::push::ResponseKind::SinglePresignedUrl(_) - | messages::push::ResponseKind::ManyPresignedUrls(_) => { - return Err(Error::UnexpectedResponse( - "Urls responses on final messages push response", - )) - } - messages::push::ResponseKind::Pong => { - return Err(Error::UnexpectedResponse( - "Pong on final message push response", - )) - } - messages::push::ResponseKind::End => { - /* - Everything is awesome! 
- */ - } - } - - Ok(()) -} - -async fn upload_to_many_urls( - urls: Vec, - secret_key: SecretKey, - http_client: reqwest_middleware::ClientWithMiddleware, - messages_bytes: Vec, +async fn encrypt_messages( + secret_key: &SecretKey, rng: &mut CryptoRng, - push_updates: &mut UpdateSink< - Service, - QuinnConnection, - messages::push::RequestUpdate, - sync::Service, - >, - push_responses: &mut PushResponsesStream, -) -> Result<(), Error> { - let stop_ping_pong = Arc::new(AtomicBool::new(false)); - let (out_tx, mut out_rx) = oneshot::channel(); - let rng = CryptoRng::from_seed(rng.generate_fixed()); - - let handle = spawn(handle_multipart_upload( - urls, - secret_key, - http_client, - messages_bytes, - rng, - Arc::clone(&stop_ping_pong), - out_tx, - )); - - loop { - if stop_ping_pong.load(Ordering::Acquire) { - break; - } - - if let Err(e) = push_updates - .send(messages::push::RequestUpdate( - messages::push::UpdateKind::Ping, - )) - .await - { - error!(?e, "Failed to send push ping update"); - sleep(TEN_SECONDS).await; - continue; - } - - let Some(response) = push_responses.next().await else { - error!("Empty response from push ping response"); - continue; - }; - - match response { - Ok(Ok(messages::push::Response( - messages::push::ResponseKind::SinglePresignedUrl(_) - | messages::push::ResponseKind::ManyPresignedUrls(_), - ))) => { - unreachable!("can't receive url if we didn't send an initial request") - } - - Ok(Ok(messages::push::Response(messages::push::ResponseKind::Pong))) => { - /* - Everything is awesome! 
- */ - } - Ok(Ok(messages::push::Response(messages::push::ResponseKind::End))) => { - unreachable!("Can't receive an End if we didn't send an End first"); - } - - Ok(Err(e)) => { - error!(?e, "Error from push ping response"); - sleep(TEN_SECONDS).await; - continue; - } - - Err(e) => { - error!(?e, "Error from push ping response"); - sleep(TEN_SECONDS).await; - continue; - } - } - - if stop_ping_pong.load(Ordering::Acquire) { - break; - } - - sleep(THIRTY_SECONDS).await; - } - - let Ok(out) = out_rx.try_recv() else { - // SAFETY: This try_recv error can only happen if the upload task panicked - // so we're good to unwrap the error. - let e = handle.await.expect_err("upload task panicked"); - error!(?e, "Critical error while uploading sync messages"); - return Err(Error::CriticalErrorWhileUploadingSyncMessages); - }; - - out -} - -async fn handle_multipart_upload( - urls: Vec, - secret_key: SecretKey, - http_client: reqwest_middleware::ClientWithMiddleware, messages_bytes: Vec, - rng: CryptoRng, - stop_ping_pong: Arc, - out_tx: oneshot::Sender>, -) { - async fn inner( - urls: Vec, - secret_key: SecretKey, - http_client: reqwest_middleware::ClientWithMiddleware, - messages_bytes: Vec, - mut rng: CryptoRng, - ) -> Result<(), Error> { - let urls_count = urls.len(); - let message_size = messages_bytes.len(); - let blocks_per_url = message_size / urls_count / EncryptedBlock::PLAIN_TEXT_SIZE; - let cipher_text_size = StreamEncryption::cipher_text_size(&secret_key, message_size); - - let parallel_upload_semaphore = Arc::new(Semaphore::new( - std::thread::available_parallelism() - .map(NonZero::get) - .unwrap_or(1), +) -> Result, Error> { + if messages_bytes.len() <= EncryptedBlock::PLAIN_TEXT_SIZE { + let mut nonce_and_cipher_text = Vec::with_capacity(OneShotEncryption::cipher_text_size( + secret_key, + messages_bytes.len(), )); - // If we're uploading to many URLs, it implies that the message size is bigger than a single - // encryption block, so we always use stream 
encryption. - - let mut buffers = vec![Vec::with_capacity(cipher_text_size / urls_count); urls_count]; - let (nonce, cipher_stream) = - StreamEncryption::encrypt(&secret_key, messages_bytes.as_slice(), &mut rng); - - buffers[0].extend_from_slice(&nonce); - - let mut cipher_stream = pin!(cipher_stream); - - let mut handles = Vec::with_capacity(urls_count); - - for (idx, (mut buffer, url)) in buffers.into_iter().zip(urls).enumerate() { - for _ in 0..blocks_per_url { - if let Some(cipher_res) = cipher_stream.next().await { - buffer.extend(cipher_res.map_err(Error::Encrypt)?); - } else { - return Err(Error::UnexpectedEndOfStream); - } - } - - handles.push(spawn(upload_part( - idx, - url, - http_client.clone(), - buffer, - Arc::clone(¶llel_upload_semaphore), - ))); - } - - assert!( - cipher_stream.next().await.is_none(), - "Unexpected ciphered bytes still on stream" - ); - - handles.try_join().await.map_err(|e| { - error!(?e, "Error while uploading sync messages"); - Error::CriticalErrorWhileUploadingSyncMessages - })?; - - Ok(()) - } - - let res = inner(urls, secret_key, http_client, messages_bytes, rng).await; - stop_ping_pong.store(true, Ordering::Release); - out_tx - .send(res) - .expect("upload output channel never closes"); -} - -async fn upload_part( - idx: usize, - url: reqwest::Url, - http_client: reqwest_middleware::ClientWithMiddleware, - buffer: Vec, - parallel_upload_semaphore: Arc, -) -> Result<(), Error> { - let _permit = parallel_upload_semaphore - .acquire() - .await - .expect("Semaphore never closes"); - - let response = http_client - .put(url) - .header(header::CONTENT_LENGTH, buffer.len()) - .body(buffer) - .send() - .await - .map_err(Error::UploadSyncMessages)? 
- .error_for_status() - .map_err(Error::ErrorResponseUploadSyncMessages)?; - - debug!(?response, idx, "Uploaded sync messages part"); - - Ok(()) -} - -async fn upload_to_single_url( - url: reqwest::Url, - secret_key: SecretKey, - http_client: &reqwest_middleware::ClientWithMiddleware, - messages_bytes: Vec, - rng: &mut CryptoRng, -) -> Result<(), Error> { - let (cipher_text_size, body) = if messages_bytes.len() <= EncryptedBlock::PLAIN_TEXT_SIZE { let EncryptedBlock { nonce, cipher_text } = - OneShotEncryption::encrypt(&secret_key, messages_bytes.as_slice(), rng) + OneShotEncryption::encrypt(secret_key, messages_bytes.as_slice(), rng) .map_err(Error::Encrypt)?; - let cipher_text_size = nonce.len() + cipher_text.len(); + nonce_and_cipher_text.extend_from_slice(nonce.as_slice()); + nonce_and_cipher_text.extend(&cipher_text); - let mut body_bytes = Vec::with_capacity(cipher_text_size); - body_bytes.extend_from_slice(nonce.as_slice()); - body_bytes.extend(&cipher_text); - - (cipher_text_size, Body::from(body_bytes)) + Ok(nonce_and_cipher_text) } else { let mut rng = CryptoRng::from_seed(rng.generate_fixed()); - let cipher_text_size = - StreamEncryption::cipher_text_size(&secret_key, messages_bytes.len()); + let mut nonce_and_cipher_text = Vec::with_capacity(StreamEncryption::cipher_text_size( + secret_key, + messages_bytes.len(), + )); - let body_bytes = stream_encryption(secret_key, messages_bytes, &mut rng) - .try_fold( - Vec::with_capacity(cipher_text_size), - |mut body_bytes, ciphered_chunk| async move { - body_bytes.extend(ciphered_chunk); - Ok(body_bytes) - }, - ) - .await?; - - (cipher_text_size, Body::from(body_bytes)) - }; - - http_client - .put(url) - .header(header::CONTENT_LENGTH, cipher_text_size) - .body(body) - .send() - .await - .map_err(Error::UploadSyncMessages)? 
- .error_for_status() - .map_err(Error::ErrorResponseUploadSyncMessages)?; - - Ok(()) -} - -fn stream_encryption( - secret_key: SecretKey, - messages_bytes: Vec, - rng: &mut CryptoRng, -) -> impl TryStream, Error = Error> + Send + 'static { - let mut rng = CryptoRng::from_seed(rng.generate_fixed()); - - try_stream! { let (nonce, cipher_stream) = - StreamEncryption::encrypt(&secret_key, messages_bytes.as_slice(), &mut rng); + StreamEncryption::encrypt(secret_key, messages_bytes.as_slice(), &mut rng); + + nonce_and_cipher_text.extend_from_slice(nonce.as_slice()); let mut cipher_stream = pin!(cipher_stream); - yield nonce.to_vec(); - - while let Some(res) = cipher_stream.next().await { - yield res.map_err(Error::Encrypt)?; + while let Some(ciphered_chunk) = cipher_stream.try_next().await.map_err(Error::Encrypt)? { + nonce_and_cipher_text.extend(ciphered_chunk); } + + Ok(nonce_and_cipher_text) } } diff --git a/core/crates/file-path-helper/src/isolated_file_path_data.rs b/core/crates/file-path-helper/src/isolated_file_path_data.rs index 3e89cce0f..fe83bbee9 100644 --- a/core/crates/file-path-helper/src/isolated_file_path_data.rs +++ b/core/crates/file-path-helper/src/isolated_file_path_data.rs @@ -2,7 +2,7 @@ use sd_core_prisma_helpers::{ file_path_for_file_identifier, file_path_for_media_processor, file_path_for_object_validator, file_path_to_full_path, file_path_to_handle_custom_uri, file_path_to_handle_p2p_serve_file, file_path_to_isolate, file_path_to_isolate_with_id, file_path_to_isolate_with_pub_id, - file_path_walker, file_path_with_object, + file_path_walker, file_path_watcher_remove, file_path_with_object, }; use sd_prisma::prisma::{file_path, location}; @@ -506,7 +506,8 @@ impl_from_db!( file_path_to_isolate_with_pub_id, file_path_walker, file_path_to_isolate_with_id, - file_path_with_object + file_path_with_object, + file_path_watcher_remove ); impl_from_db_without_location_id!( diff --git a/core/crates/heavy-lifting/src/file_identifier/job.rs 
b/core/crates/heavy-lifting/src/file_identifier/job.rs index a75bbb2ca..249ea57f2 100644 --- a/core/crates/heavy-lifting/src/file_identifier/job.rs +++ b/core/crates/heavy-lifting/src/file_identifier/job.rs @@ -14,7 +14,11 @@ use crate::{ use sd_core_file_path_helper::IsolatedFilePathData; use sd_core_prisma_helpers::{file_path_for_file_identifier, CasId}; -use sd_prisma::prisma::{device, file_path, location, SortOrder}; +use sd_prisma::{ + prisma::{device, file_path, location, SortOrder}, + prisma_sync, +}; +use sd_sync::{sync_db_not_null_entry, OperationFactory}; use sd_task_system::{ AnyTaskOutput, IntoTask, SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId, TaskOutput, TaskStatus, @@ -267,15 +271,25 @@ impl Job for FileIdentifier { .. } = self; - ctx.db() - .location() - .update( - location::id::equals(location.id), - vec![location::scan_state::set( - LocationScanState::FilesIdentified as i32, - )], + let (sync_param, db_param) = sync_db_not_null_entry!( + LocationScanState::FilesIdentified as i32, + location::scan_state + ); + + ctx.sync() + .write_op( + ctx.db(), + ctx.sync().shared_update( + prisma_sync::location::SyncId { + pub_id: location.pub_id.clone(), + }, + [sync_param], + ), + ctx.db() + .location() + .update(location::id::equals(location.id), vec![db_param]) + .select(location::select!({ id })), ) - .exec() .await .map_err(file_identifier::Error::from)?; diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs b/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs index 22d244cac..125a72713 100644 --- a/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs +++ b/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs @@ -12,11 +12,11 @@ use sd_prisma::{ prisma::{device, file_path, location, PrismaClient}, prisma_sync, }; -use sd_sync::OperationFactory; +use sd_sync::{sync_db_entry, OperationFactory}; use sd_task_system::{ ExecStatus, Interrupter, InterruptionKind, IntoAnyTaskOutput, 
SerializableTask, Task, TaskId, }; -use sd_utils::{error::FileIOError, msgpack}; +use sd_utils::error::FileIOError; use std::{ collections::HashMap, convert::identity, future::IntoFuture, mem, path::PathBuf, pin::pin, @@ -403,19 +403,17 @@ async fn assign_cas_id_to_file_paths( let (ops, queries) = identified_files .iter() .map(|(pub_id, IdentifiedFile { cas_id, .. })| { + let (sync_param, db_param) = sync_db_entry!(cas_id, file_path::cas_id); + ( sync.shared_update( prisma_sync::file_path::SyncId { pub_id: pub_id.to_db(), }, - file_path::cas_id::NAME, - msgpack!(cas_id), + [sync_param], ), db.file_path() - .update( - file_path::pub_id::equals(pub_id.to_db()), - vec![file_path::cas_id::set(cas_id.into())], - ) + .update(file_path::pub_id::equals(pub_id.to_db()), vec![db_param]) // We don't need any data here, just the id avoids receiving the entire object // as we can't pass an empty select macro call .select(file_path::select!({ id })), diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs b/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs index 13e4d7d9f..59f75d0a9 100644 --- a/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs +++ b/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs @@ -9,7 +9,7 @@ use sd_prisma::{ prisma_sync, }; use sd_sync::{option_sync_db_entry, sync_db_entry, sync_entry, CRDTOperation, OperationFactory}; -use sd_utils::{chain_optional_iter, msgpack}; +use sd_utils::chain_optional_iter; use std::collections::{HashMap, HashSet}; @@ -47,10 +47,12 @@ fn connect_file_path_to_object<'db>( prisma_sync::file_path::SyncId { pub_id: file_path_pub_id.to_db(), }, - file_path::object::NAME, - msgpack!(prisma_sync::object::SyncId { - pub_id: object_pub_id.to_db(), - }), + [sync_entry!( + prisma_sync::object::SyncId { + pub_id: object_pub_id.to_db(), + }, + file_path::object + )], ), db.file_path() .update( diff --git a/core/crates/heavy-lifting/src/indexer/job.rs b/core/crates/heavy-lifting/src/indexer/job.rs 
index d910fec81..cf19fbb90 100644 --- a/core/crates/heavy-lifting/src/indexer/job.rs +++ b/core/crates/heavy-lifting/src/indexer/job.rs @@ -16,7 +16,11 @@ use sd_core_file_path_helper::IsolatedFilePathData; use sd_core_indexer_rules::{IndexerRule, IndexerRuler}; use sd_core_prisma_helpers::location_with_indexer_rules; -use sd_prisma::prisma::{device, location}; +use sd_prisma::{ + prisma::{device, location}, + prisma_sync, +}; +use sd_sync::{sync_db_not_null_entry, OperationFactory}; use sd_task_system::{ AnyTaskOutput, IntoTask, SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId, TaskOutput, TaskStatus, @@ -269,7 +273,7 @@ impl Job for Indexer { .await?; } - update_location_size(location.id, ctx.db(), &ctx).await?; + update_location_size(location.id, location.pub_id.clone(), &ctx).await?; metadata.mean_db_write_time += start_size_update_time.elapsed(); } @@ -287,13 +291,23 @@ impl Job for Indexer { "all tasks must be completed here" ); - ctx.db() - .location() - .update( - location::id::equals(location.id), - vec![location::scan_state::set(LocationScanState::Indexed as i32)], + let (sync_param, db_param) = + sync_db_not_null_entry!(LocationScanState::Indexed as i32, location::scan_state); + + ctx.sync() + .write_op( + ctx.db(), + ctx.sync().shared_update( + prisma_sync::location::SyncId { + pub_id: location.pub_id.clone(), + }, + [sync_param], + ), + ctx.db() + .location() + .update(location::id::equals(location.id), vec![db_param]) + .select(location::select!({ id })), ) - .exec() .await .map_err(indexer::Error::from)?; diff --git a/core/crates/heavy-lifting/src/indexer/mod.rs b/core/crates/heavy-lifting/src/indexer/mod.rs index 1ad78902b..6880e6d91 100644 --- a/core/crates/heavy-lifting/src/indexer/mod.rs +++ b/core/crates/heavy-lifting/src/indexer/mod.rs @@ -10,11 +10,11 @@ use sd_prisma::{ prisma::{file_path, indexer_rule, location, PrismaClient, SortOrder}, prisma_sync, }; -use sd_sync::OperationFactory; +use sd_sync::{sync_db_entry, 
OperationFactory}; use sd_utils::{ db::{size_in_bytes_from_db, size_in_bytes_to_db, MissingFieldError}, error::{FileIOError, NonUtf8PathError}, - from_bytes_to_uuid, msgpack, + from_bytes_to_uuid, }; use std::{ @@ -146,22 +146,20 @@ async fn update_directory_sizes( .map(|file_path| { let size_bytes = iso_paths_and_sizes .get(&IsolatedFilePathData::try_from(&file_path)?) - .map(|size| size.to_be_bytes().to_vec()) + .map(|size| size_in_bytes_to_db(*size)) .expect("must be here"); + let (sync_param, db_param) = sync_db_entry!(size_bytes, file_path::size_in_bytes_bytes); + Ok(( sync.shared_update( prisma_sync::file_path::SyncId { pub_id: file_path.pub_id.clone(), }, - file_path::size_in_bytes_bytes::NAME, - msgpack!(size_bytes), + [sync_param], ), db.file_path() - .update( - file_path::pub_id::equals(file_path.pub_id), - vec![file_path::size_in_bytes_bytes::set(Some(size_bytes))], - ) + .update(file_path::pub_id::equals(file_path.pub_id), vec![db_param]) .select(file_path::select!({ id })), )) }) @@ -178,35 +176,45 @@ async fn update_directory_sizes( async fn update_location_size( location_id: location::id::Type, - db: &PrismaClient, + location_pub_id: location::pub_id::Type, ctx: &impl OuterContext, ) -> Result<(), Error> { - let total_size = db - .file_path() - .find_many(vec![ - file_path::location_id::equals(Some(location_id)), - file_path::materialized_path::equals(Some("/".to_string())), - ]) - .select(file_path::select!({ size_in_bytes_bytes })) - .exec() - .await? 
- .into_iter() - .filter_map(|file_path| { - file_path - .size_in_bytes_bytes - .map(|size_in_bytes_bytes| size_in_bytes_from_db(&size_in_bytes_bytes)) - }) - .sum::(); + let db = ctx.db(); + let sync = ctx.sync(); - db.location() - .update( - location::id::equals(location_id), - vec![location::size_in_bytes::set(Some( - total_size.to_be_bytes().to_vec(), - ))], - ) - .exec() - .await?; + let total_size = size_in_bytes_to_db( + db.file_path() + .find_many(vec![ + file_path::location_id::equals(Some(location_id)), + file_path::materialized_path::equals(Some("/".to_string())), + ]) + .select(file_path::select!({ size_in_bytes_bytes })) + .exec() + .await? + .into_iter() + .filter_map(|file_path| { + file_path + .size_in_bytes_bytes + .map(|size_in_bytes_bytes| size_in_bytes_from_db(&size_in_bytes_bytes)) + }) + .sum::(), + ); + + let (sync_param, db_param) = sync_db_entry!(total_size, location::size_in_bytes); + + sync.write_op( + db, + sync.shared_update( + prisma_sync::location::SyncId { + pub_id: location_pub_id, + }, + [sync_param], + ), + db.location() + .update(location::id::equals(location_id), vec![db_param]) + .select(location::select!({ id })), + ) + .await?; ctx.invalidate_query("locations.list"); ctx.invalidate_query("locations.get"); @@ -334,18 +342,19 @@ pub async fn reverse_update_directories_sizes( { let size_bytes = size_in_bytes_to_db(size); + let (sync_param, db_param) = + sync_db_entry!(size_bytes, file_path::size_in_bytes_bytes); + Some(( sync.shared_update( prisma_sync::file_path::SyncId { pub_id: pub_id.clone(), }, - file_path::size_in_bytes_bytes::NAME, - msgpack!(size_bytes), - ), - db.file_path().update( - file_path::pub_id::equals(pub_id), - vec![file_path::size_in_bytes_bytes::set(Some(size_bytes))], + [sync_param], ), + db.file_path() + .update(file_path::pub_id::equals(pub_id), vec![db_param]) + .select(file_path::select!({ id })), )) } else { warn!("Got a missing ancestor for a file_path in the database, ignoring..."); diff --git 
a/core/crates/heavy-lifting/src/indexer/shallow.rs b/core/crates/heavy-lifting/src/indexer/shallow.rs index 90a22eead..1bc55b556 100644 --- a/core/crates/heavy-lifting/src/indexer/shallow.rs +++ b/core/crates/heavy-lifting/src/indexer/shallow.rs @@ -136,7 +136,7 @@ pub async fn shallow( .await?; } - update_location_size(location.id, db, ctx).await?; + update_location_size(location.id, location.pub_id, ctx).await?; } if indexed_count > 0 || removed_count > 0 { diff --git a/core/crates/heavy-lifting/src/indexer/tasks/saver.rs b/core/crates/heavy-lifting/src/indexer/tasks/saver.rs index 9fbe24554..c5d0951d0 100644 --- a/core/crates/heavy-lifting/src/indexer/tasks/saver.rs +++ b/core/crates/heavy-lifting/src/indexer/tasks/saver.rs @@ -9,10 +9,7 @@ use sd_prisma::{ }; use sd_sync::{sync_db_entry, sync_entry, OperationFactory}; use sd_task_system::{ExecStatus, Interrupter, IntoAnyTaskOutput, SerializableTask, Task, TaskId}; -use sd_utils::{ - db::{inode_to_db, size_in_bytes_to_db}, - msgpack, -}; +use sd_utils::db::{inode_to_db, size_in_bytes_to_db}; use std::{sync::Arc, time::Duration}; @@ -121,13 +118,13 @@ impl Task for Saver { new file_paths and they were not identified yet" ); - let (sync_params, db_params): (Vec<_>, Vec<_>) = [ + let (sync_params, db_params) = [ ( - ( - location::NAME, - msgpack!(prisma_sync::location::SyncId { + sync_entry!( + prisma_sync::location::SyncId { pub_id: location_pub_id.clone() - }), + }, + location ), location_id::set(Some(*location_id)), ), @@ -152,7 +149,7 @@ impl Task for Saver { ), ] .into_iter() - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); ( sync.shared_create( diff --git a/core/crates/heavy-lifting/src/indexer/tasks/updater.rs b/core/crates/heavy-lifting/src/indexer/tasks/updater.rs index 91eb72899..80cf3d6f4 100644 --- a/core/crates/heavy-lifting/src/indexer/tasks/updater.rs +++ b/core/crates/heavy-lifting/src/indexer/tasks/updater.rs @@ -93,7 +93,7 @@ impl Task for Updater { check_interruption!(interrupter); - let 
(sync_stuff, paths_to_update) = walked_entries + let (crdt_ops, paths_to_update) = walked_entries .drain(..) .map( |WalkedEntry { @@ -138,18 +138,12 @@ impl Task for Updater { .unzip::<_, _, Vec<_>, Vec<_>>(); ( - sync_params - .into_iter() - .map(|(field, value)| { - sync.shared_update( - prisma_sync::file_path::SyncId { - pub_id: pub_id.to_db(), - }, - field, - value, - ) - }) - .collect::>(), + sync.shared_update( + prisma_sync::file_path::SyncId { + pub_id: pub_id.to_db(), + }, + sync_params, + ), db.file_path() .update(file_path::pub_id::equals(pub_id.into()), db_params) // selecting id to avoid fetching whole object from database @@ -159,9 +153,7 @@ impl Task for Updater { ) .unzip::<_, _, Vec<_>, Vec<_>>(); - let ops = sync_stuff.into_iter().flatten().collect::>(); - - if ops.is_empty() && paths_to_update.is_empty() { + if crdt_ops.is_empty() && paths_to_update.is_empty() { return Ok(ExecStatus::Done( Output { updated_count: 0, @@ -172,7 +164,7 @@ impl Task for Updater { } let updated = sync - .write_ops(db, (ops, paths_to_update)) + .write_ops(db, (crdt_ops, paths_to_update)) .await .map_err(indexer::Error::from)?; diff --git a/core/crates/heavy-lifting/src/job_system/report.rs b/core/crates/heavy-lifting/src/job_system/report.rs index 3d536e7dd..b747b8195 100644 --- a/core/crates/heavy-lifting/src/job_system/report.rs +++ b/core/crates/heavy-lifting/src/job_system/report.rs @@ -290,6 +290,7 @@ impl Report { .map(|id| job::parent::connect(job::id::equals(id.as_bytes().to_vec())))], ), ) + .select(job::select!({ id })) .exec() .await .map_err(ReportError::Create)?; @@ -318,6 +319,7 @@ impl Report { job::date_completed::set(self.completed_at.map(Into::into)), ], ) + .select(job::select!({ id })) .exec() .await .map_err(ReportError::Update)?; diff --git a/core/crates/heavy-lifting/src/media_processor/job.rs b/core/crates/heavy-lifting/src/media_processor/job.rs index cadeb5f03..fb622e162 100644 --- a/core/crates/heavy-lifting/src/media_processor/job.rs +++ 
b/core/crates/heavy-lifting/src/media_processor/job.rs @@ -14,7 +14,11 @@ use sd_core_file_path_helper::IsolatedFilePathData; use sd_core_prisma_helpers::file_path_for_media_processor; use sd_file_ext::extensions::Extension; -use sd_prisma::prisma::{location, PrismaClient}; +use sd_prisma::{ + prisma::{location, PrismaClient}, + prisma_sync, +}; +use sd_sync::{sync_db_not_null_entry, OperationFactory}; use sd_task_system::{ AnyTaskOutput, IntoTask, SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId, TaskOutput, TaskStatus, TaskSystemError, @@ -214,15 +218,23 @@ impl Job for MediaProcessor { .. } = self; - ctx.db() - .location() - .update( - location::id::equals(location.id), - vec![location::scan_state::set( - LocationScanState::Completed as i32, - )], + let (sync_param, db_param) = + sync_db_not_null_entry!(LocationScanState::Completed as i32, location::scan_state); + + ctx.sync() + .write_op( + ctx.db(), + ctx.sync().shared_update( + prisma_sync::location::SyncId { + pub_id: location.pub_id.clone(), + }, + [sync_param], + ), + ctx.db() + .location() + .update(location::id::equals(location.id), vec![db_param]) + .select(location::select!({ id })), ) - .exec() .await .map_err(media_processor::Error::from)?; diff --git a/core/crates/prisma-helpers/src/lib.rs b/core/crates/prisma-helpers/src/lib.rs index 48a400e65..ee8f11bb9 100644 --- a/core/crates/prisma-helpers/src/lib.rs +++ b/core/crates/prisma-helpers/src/lib.rs @@ -74,6 +74,20 @@ file_path::select!(file_path_for_media_processor { pub_id } }); +file_path::select!(file_path_watcher_remove { + id + pub_id + location_id + materialized_path + is_dir + name + extension + object: select { + id + pub_id + } + +}); file_path::select!(file_path_to_isolate { location_id materialized_path diff --git a/core/crates/sync/src/ingest_utils.rs b/core/crates/sync/src/ingest_utils.rs index 3cc4c8a68..6c77a96b7 100644 --- a/core/crates/sync/src/ingest_utils.rs +++ b/core/crates/sync/src/ingest_utils.rs @@ -1,7 +1,7 @@ use 
sd_core_prisma_helpers::DevicePubId; use sd_prisma::{ - prisma::{crdt_operation, PrismaClient, SortOrder}, + prisma::{crdt_operation, PrismaClient}, prisma_sync::ModelSyncData, }; use sd_sync::{ @@ -17,6 +17,8 @@ use uuid::Uuid; use super::{db_operation::write_crdt_op_to_db, Error, TimestampPerDevice}; +crdt_operation::select!(crdt_operation_id { id }); + // where the magic happens #[instrument(skip(clock, ops), fields(operations_count = %ops.len()), err)] pub async fn process_crdt_operations( @@ -24,7 +26,7 @@ pub async fn process_crdt_operations( timestamp_per_device: &TimestampPerDevice, db: &PrismaClient, device_pub_id: DevicePubId, - model: ModelId, + model_id: ModelId, record_id: RecordId, mut ops: Vec, ) -> Result<(), Error> { @@ -50,7 +52,7 @@ pub async fn process_crdt_operations( .find(|op| matches!(op.data, CRDTOperationData::Delete)) { trace!("Deleting operation"); - handle_crdt_deletion(db, &device_pub_id, model, record_id, delete_op).await?; + handle_crdt_deletion(db, &device_pub_id, model_id, record_id, delete_op).await?; } // Create + > 0 Update - overwrites the create's data with the updates else if let Some(timestamp) = ops @@ -61,23 +63,22 @@ pub async fn process_crdt_operations( trace!("Create + Updates operations"); // conflict resolution - let delete = db + let delete_count = db .crdt_operation() - .find_first(vec![ - crdt_operation::model::equals(i32::from(model)), + .count(vec![ + crdt_operation::model::equals(i32::from(model_id)), crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?), crdt_operation::kind::equals(OperationKind::Delete.to_string()), ]) - .order_by(crdt_operation::timestamp::order(SortOrder::Desc)) .exec() .await?; - if delete.is_some() { + if delete_count > 0 { debug!("Found a previous delete operation with the same SyncId, will ignore these operations"); return Ok(()); } - handle_crdt_create_and_updates(db, &device_pub_id, model, record_id, ops, timestamp) + handle_crdt_create_and_updates(db, &device_pub_id, 
model_id, record_id, ops, timestamp) .await?; } // > 0 Update - batches updates with a fake Create op @@ -87,51 +88,57 @@ pub async fn process_crdt_operations( let mut data = BTreeMap::new(); for op in ops.into_iter().rev() { - let CRDTOperationData::Update { field, value } = op.data else { + let CRDTOperationData::Update(fields_and_values) = op.data else { unreachable!("Create + Delete should be filtered out!"); }; - data.insert(field, (value, op.timestamp)); + for (field, value) in fields_and_values { + data.insert(field, (value, op.timestamp)); + } } // conflict resolution - let (create, updates) = db + let (create, newer_updates_count) = db ._batch(( - db.crdt_operation() - .find_first(vec![ - crdt_operation::model::equals(i32::from(model)), - crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?), - crdt_operation::kind::equals(OperationKind::Create.to_string()), - ]) - .order_by(crdt_operation::timestamp::order(SortOrder::Desc)), + db.crdt_operation().count(vec![ + crdt_operation::model::equals(i32::from(model_id)), + crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?), + crdt_operation::kind::equals(OperationKind::Create.to_string()), + ]), data.iter() .map(|(k, (_, timestamp))| { - Ok(db - .crdt_operation() - .find_first(vec![ - crdt_operation::timestamp::gt({ - #[allow(clippy::cast_possible_wrap)] - // SAFETY: we had to store using i64 due to SQLite limitations - { - timestamp.as_u64() as i64 - } - }), - crdt_operation::model::equals(i32::from(model)), - crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?), - crdt_operation::kind::equals(OperationKind::Update(k).to_string()), - ]) - .order_by(crdt_operation::timestamp::order(SortOrder::Desc))) + Ok(db.crdt_operation().count(vec![ + crdt_operation::timestamp::gt({ + #[allow(clippy::cast_possible_wrap)] + // SAFETY: we had to store using i64 due to SQLite limitations + { + timestamp.as_u64() as i64 + } + }), + crdt_operation::model::equals(i32::from(model_id)), + 
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?), + crdt_operation::kind::contains(format!(":{k}:")), + ])) }) .collect::, Error>>()?, )) .await?; - if create.is_none() { + if create == 0 { warn!("Failed to find a previous create operation with the same SyncId"); return Ok(()); } - handle_crdt_updates(db, &device_pub_id, model, record_id, data, updates).await?; + let keys = data.keys().cloned().collect::>(); + + // remove entries if we possess locally more recent updates for this field + for (update, key) in newer_updates_count.into_iter().zip(keys) { + if update > 0 { + data.remove(&key); + } + } + + handle_crdt_updates(db, &device_pub_id, model_id, record_id, data).await?; } // read the timestamp for the operation's device, or insert one if it doesn't exist @@ -157,24 +164,15 @@ async fn handle_crdt_updates( device_pub_id: &DevicePubId, model_id: ModelId, record_id: rmpv::Value, - mut data: BTreeMap, - updates: Vec>, + data: BTreeMap, ) -> Result<(), Error> { - let keys = data.keys().cloned().collect::>(); let device_pub_id = sd_sync::DevicePubId::from(device_pub_id); - // does the same thing as processing ops one-by-one and returning early if a newer op was found - for (update, key) in updates.into_iter().zip(keys) { - if update.is_some() { - data.remove(&key); - } - } - db._transaction() .with_timeout(30 * 10000) .with_max_wait(30 * 10000) .run(|db| async move { - // fake operation to batch them all at once + // fake operation to batch them all at once, inserting the latest data on appropriate table ModelSyncData::from_op(CRDTOperation { device_pub_id, model_id, @@ -185,35 +183,32 @@ async fn handle_crdt_updates( .map(|(k, (data, _))| (k.clone(), data.clone())) .collect(), ), - }) - .ok_or(Error::InvalidModelId(model_id))? + })? 
.exec(&db) .await?; - // need to only apply ops that haven't been filtered out - data.into_iter() - .map(|(field, (value, timestamp))| { - let record_id = record_id.clone(); - let db = &db; - - async move { - write_crdt_op_to_db( - &CRDTOperation { - device_pub_id, - model_id, - record_id, - timestamp, - data: CRDTOperationData::Update { field, value }, - }, - db, - ) - .await + let (fields_and_values, latest_timestamp) = data.into_iter().fold( + (BTreeMap::new(), NTP64::default()), + |(mut fields_and_values, mut latest_time_stamp), (field, (value, timestamp))| { + fields_and_values.insert(field, value); + if timestamp > latest_time_stamp { + latest_time_stamp = timestamp; } - }) - .collect::>() - .try_join() - .await - .map(|_| ()) + (fields_and_values, latest_time_stamp) + }, + ); + + write_crdt_op_to_db( + &CRDTOperation { + device_pub_id, + model_id, + record_id, + timestamp: latest_timestamp, + data: CRDTOperationData::Update(fields_and_values), + }, + &db, + ) + .await }) .await } @@ -244,8 +239,11 @@ async fn handle_crdt_create_and_updates( break; } - CRDTOperationData::Update { field, value } => { - data.insert(field.clone(), value.clone()); + CRDTOperationData::Update(fields_and_values) => { + for (field, value) in fields_and_values { + data.insert(field.clone(), value.clone()); + } + applied_ops.push(op); } } @@ -262,8 +260,7 @@ async fn handle_crdt_create_and_updates( record_id: record_id.clone(), timestamp, data: CRDTOperationData::Create(data), - }) - .ok_or(Error::InvalidModelId(model_id))? + })? .exec(&db) .await?; @@ -314,10 +311,7 @@ async fn handle_crdt_deletion( .with_timeout(30 * 10000) .with_max_wait(30 * 10000) .run(|db| async move { - ModelSyncData::from_op(op.clone()) - .ok_or(Error::InvalidModelId(model))? 
- .exec(&db) - .await?; + ModelSyncData::from_op(op.clone())?.exec(&db).await?; write_crdt_op_to_db(&op, &db).await }) diff --git a/core/crates/sync/src/lib.rs b/core/crates/sync/src/lib.rs index 903fb812b..56822509c 100644 --- a/core/crates/sync/src/lib.rs +++ b/core/crates/sync/src/lib.rs @@ -27,7 +27,10 @@ #![forbid(deprecated_in_future)] #![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)] -use sd_prisma::prisma::{cloud_crdt_operation, crdt_operation}; +use sd_prisma::{ + prisma::{cloud_crdt_operation, crdt_operation}, + prisma_sync, +}; use sd_utils::uuid_to_bytes; use std::{collections::HashMap, sync::Arc}; @@ -66,6 +69,8 @@ pub enum Error { Deserialization(#[from] rmp_serde::decode::Error), #[error("database error: {0}")] Database(#[from] prisma_client_rust::QueryError), + #[error("PrismaSync error: {0}")] + PrismaSync(#[from] prisma_sync::Error), #[error("invalid model id: {0}")] InvalidModelId(ModelId), #[error("tried to write an empty operations list")] diff --git a/core/crates/sync/tests/lib.rs b/core/crates/sync/tests/lib.rs deleted file mode 100644 index 5c9dbf584..000000000 --- a/core/crates/sync/tests/lib.rs +++ /dev/null @@ -1,234 +0,0 @@ -// mod mock_instance; - -// use sd_core_sync::*; - -// use sd_prisma::{prisma::location, prisma_sync}; -// use sd_sync::*; -// use sd_utils::{msgpack, uuid_to_bytes}; - -// use mock_instance::Device; -// use tracing::info; -// use tracing_test::traced_test; -// use uuid::Uuid; - -// const MOCK_LOCATION_NAME: &str = "Location 0"; -// const MOCK_LOCATION_PATH: &str = "/User/Anon/Documents"; - -// async fn write_test_location(instance: &Device) -> location::Data { -// let location_pub_id = Uuid::new_v4(); - -// let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [ -// sync_db_entry!(MOCK_LOCATION_NAME, location::name), -// sync_db_entry!(MOCK_LOCATION_PATH, location::path), -// ] -// .into_iter() -// .unzip(); - -// let location = instance -// .sync -// .write_op( -// &instance.db, -// 
instance.sync.shared_create( -// prisma_sync::location::SyncId { -// pub_id: uuid_to_bytes(&location_pub_id), -// }, -// sync_ops, -// ), -// instance -// .db -// .location() -// .create(uuid_to_bytes(&location_pub_id), db_ops), -// ) -// .await -// .expect("failed to create mock location"); - -// instance -// .sync -// .write_ops(&instance.db, { -// let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [ -// sync_db_entry!(1024, location::total_capacity), -// sync_db_entry!(512, location::available_capacity), -// ] -// .into_iter() -// .unzip(); - -// ( -// sync_ops -// .into_iter() -// .map(|(k, v)| { -// instance.sync.shared_update( -// prisma_sync::location::SyncId { -// pub_id: uuid_to_bytes(&location_pub_id), -// }, -// k, -// v, -// ) -// }) -// .collect::>(), -// instance -// .db -// .location() -// .update(location::id::equals(location.id), db_ops), -// ) -// }) -// .await -// .expect("failed to create mock location"); - -// location -// } - -// #[tokio::test] -// #[traced_test] -// async fn writes_operations_and_rows_together() -> Result<(), Box> { -// let instance = Device::new(Uuid::new_v4()).await; - -// write_test_location(&instance).await; - -// let operations = instance -// .db -// .crdt_operation() -// .find_many(vec![]) -// .exec() -// .await?; - -// // 1 create, 2 update -// assert_eq!(operations.len(), 3); -// assert_eq!(operations[0].model, prisma_sync::location::MODEL_ID as i32); - -// let out = instance.sync.get_ops(100, vec![]).await?; - -// assert_eq!(out.len(), 3); - -// let locations = instance.db.location().find_many(vec![]).exec().await?; - -// assert_eq!(locations.len(), 1); -// let location = locations.first().unwrap(); -// assert_eq!(location.name.as_deref(), Some(MOCK_LOCATION_NAME)); -// assert_eq!(location.path.as_deref(), Some(MOCK_LOCATION_PATH)); - -// Ok(()) -// } - -// #[tokio::test] -// #[traced_test] -// async fn operations_send_and_ingest() -> Result<(), Box> { -// let instance1 = Device::new(Uuid::new_v4()).await; -// let instance2 
= Device::new(Uuid::new_v4()).await; - -// let mut instance2_sync_rx = instance2.sync_rx.resubscribe(); - -// info!("Created instances!"); - -// Device::pair(&instance1, &instance2).await; - -// info!("Paired instances!"); - -// write_test_location(&instance1).await; - -// info!("Created mock location!"); - -// assert!(matches!( -// instance2_sync_rx.recv().await?, -// SyncEvent::Ingested -// )); - -// let out = instance2.sync.get_ops(100, vec![]).await?; - -// assert_locations_equality( -// &instance1.db.location().find_many(vec![]).exec().await?[0], -// &instance2.db.location().find_many(vec![]).exec().await?[0], -// ); - -// assert_eq!(out.len(), 3); - -// instance1.teardown().await; -// instance2.teardown().await; - -// Ok(()) -// } - -// #[tokio::test] -// async fn no_update_after_delete() -> Result<(), Box> { -// let instance1 = Device::new(Uuid::new_v4()).await; -// let instance2 = Device::new(Uuid::new_v4()).await; - -// let mut instance2_sync_rx = instance2.sync_rx.resubscribe(); - -// Device::pair(&instance1, &instance2).await; - -// let location = write_test_location(&instance1).await; - -// assert!(matches!( -// instance2_sync_rx.recv().await?, -// SyncEvent::Ingested -// )); - -// instance2 -// .sync -// .write_op( -// &instance2.db, -// instance2.sync.shared_delete(prisma_sync::location::SyncId { -// pub_id: location.pub_id.clone(), -// }), -// instance2.db.location().delete_many(vec![]), -// ) -// .await?; - -// assert!(matches!( -// instance1.sync_rx.resubscribe().recv().await?, -// SyncEvent::Ingested -// )); - -// instance1 -// .sync -// .write_op( -// &instance1.db, -// instance1.sync.shared_update( -// prisma_sync::location::SyncId { -// pub_id: location.pub_id.clone(), -// }, -// "name", -// msgpack!("New Location"), -// ), -// instance1.db.location().find_many(vec![]), -// ) -// .await?; - -// // one spare update operation that actually gets ignored by instance 2 -// assert_eq!(instance1.db.crdt_operation().count(vec![]).exec().await?, 5); -// 
assert_eq!(instance2.db.crdt_operation().count(vec![]).exec().await?, 4); - -// assert_eq!(instance1.db.location().count(vec![]).exec().await?, 0); -// // the whole point of the test - the update (which is ingested as an upsert) should be ignored -// assert_eq!(instance2.db.location().count(vec![]).exec().await?, 0); - -// instance1.teardown().await; -// instance2.teardown().await; - -// Ok(()) -// } - -// fn assert_locations_equality(l1: &location::Data, l2: &location::Data) { -// assert_eq!(l1.pub_id, l2.pub_id, "pub id"); -// assert_eq!(l1.name, l2.name, "name"); -// assert_eq!(l1.path, l2.path, "path"); -// assert_eq!(l1.total_capacity, l2.total_capacity, "total capacity"); -// assert_eq!( -// l1.available_capacity, l2.available_capacity, -// "available capacity" -// ); -// assert_eq!(l1.size_in_bytes, l2.size_in_bytes, "size in bytes"); -// assert_eq!(l1.is_archived, l2.is_archived, "is archived"); -// assert_eq!( -// l1.generate_preview_media, l2.generate_preview_media, -// "generate preview media" -// ); -// assert_eq!( -// l1.sync_preview_media, l2.sync_preview_media, -// "sync preview media" -// ); -// assert_eq!(l1.hidden, l2.hidden, "hidden"); -// assert_eq!(l1.date_created, l2.date_created, "date created"); -// assert_eq!(l1.scan_state, l2.scan_state, "scan state"); -// assert_eq!(l1.instance_id, l2.instance_id, "instance id"); -// } diff --git a/core/crates/sync/tests/mock_instance.rs b/core/crates/sync/tests/mock_instance.rs deleted file mode 100644 index 9dd5f1aff..000000000 --- a/core/crates/sync/tests/mock_instance.rs +++ /dev/null @@ -1,143 +0,0 @@ -// use sd_core_sync::*; - -// use sd_prisma::prisma; -// use sd_sync::CompressedCRDTOperationsPerModelPerDevice; - -// use std::sync::{atomic::AtomicBool, Arc}; - -// use tokio::{fs, spawn, sync::broadcast}; -// use tracing::{info, instrument, warn, Instrument}; -// use uuid::Uuid; - -// fn db_path(id: Uuid) -> String { -// format!("/tmp/test-{id}.db") -// } - -// #[derive(Clone)] -// pub struct Device 
{ -// pub pub_id: DevicePubId, -// pub db: Arc, -// pub sync: Arc, -// pub sync_rx: Arc>, -// } - -// impl Device { -// pub async fn new(id: Uuid) -> Arc { -// let url = format!("file:{}", db_path(id)); -// let device_pub_id = DevicePubId::from(id); - -// let db = Arc::new( -// prisma::PrismaClient::_builder() -// .with_url(url.to_string()) -// .build() -// .await -// .unwrap(), -// ); - -// db._db_push().await.unwrap(); - -// db.device() -// .create(device_pub_id.to_db(), vec![]) -// .exec() -// .await -// .unwrap(); - -// // let (sync, sync_rx) = sd_core_sync::SyncManager::new( -// // Arc::clone(&db), -// // &device_pub_id, -// // Arc::new(AtomicBool::new(true)), -// // Default::default(), -// // ) -// // .await -// // .expect("failed to create sync manager"); - -// // Arc::new(Self { -// // pub_id: device_pub_id, -// // db, -// // sync: Arc::new(sync), -// // sync_rx: Arc::new(sync_rx), -// // }) -// } - -// pub async fn teardown(&self) { -// fs::remove_file(db_path(Uuid::from(&self.pub_id))) -// .await -// .unwrap(); -// } - -// pub async fn pair(instance1: &Arc, instance2: &Arc) { -// #[instrument(skip(left, right))] -// async fn half(left: &Arc, right: &Arc, context: &'static str) { -// left.db -// .device() -// .create(right.pub_id.to_db(), vec![]) -// .exec() -// .await -// .unwrap(); - -// spawn({ -// let mut sync_rx_left = left.sync_rx.resubscribe(); -// let right = Arc::clone(right); - -// async move { -// while let Ok(msg) = sync_rx_left.recv().await { -// info!(?msg, "sync_rx_left received message"); -// if matches!(msg, SyncEvent::Created) { -// right -// .sync -// .ingest -// .event_tx -// .send(ingest::Event::Notification) -// .await -// .unwrap(); -// info!("sent notification to instance 2"); -// } -// } -// } -// .in_current_span() -// }); - -// spawn({ -// let left = Arc::clone(left); -// let right = Arc::clone(right); - -// async move { -// while let Ok(msg) = right.sync.ingest.req_rx.recv().await { -// info!(?msg, "right instance received 
request"); -// match msg { -// ingest::Request::Messages { timestamps, tx } => { -// let messages = left.sync.get_ops(100, timestamps).await.unwrap(); - -// let ingest = &right.sync.ingest; - -// ingest -// .event_tx -// .send(ingest::Event::Messages(ingest::MessagesEvent { -// messages: CompressedCRDTOperationsPerModelPerDevice::new( -// messages, -// ), -// has_more: false, -// device_pub_id: left.pub_id.clone(), -// wait_tx: None, -// })) -// .await -// .unwrap(); - -// if tx.send(()).is_err() { -// warn!("failed to send ack to instance 1"); -// } -// } -// ingest::Request::FinishedIngesting => { -// right.sync.tx.send(SyncEvent::Ingested).unwrap(); -// } -// } -// } -// } -// .in_current_span() -// }); -// } - -// half(instance1, instance2, "instance1 -> instance2").await; -// half(instance2, instance1, "instance2 -> instance1").await; -// } -// } diff --git a/core/src/api/files.rs b/core/src/api/files.rs index 8e0f29992..155fd2884 100644 --- a/core/src/api/files.rs +++ b/core/src/api/files.rs @@ -28,8 +28,8 @@ use sd_prisma::{ prisma::{file_path, location, object}, prisma_sync, }; -use sd_sync::OperationFactory; -use sd_utils::{db::maybe_missing, error::FileIOError, msgpack}; +use sd_sync::{sync_db_entry, sync_db_nullable_entry, sync_entry, OperationFactory}; +use sd_utils::{db::maybe_missing, error::FileIOError}; use std::{ ffi::OsString, @@ -195,19 +195,19 @@ pub(crate) fn mount() -> AlphaRouter { ) })?; + let (sync_param, db_param) = sync_db_nullable_entry!(args.note, object::note); + sync.write_op( db, sync.shared_update( prisma_sync::object::SyncId { pub_id: object.pub_id, }, - object::note::NAME, - msgpack!(&args.note), - ), - db.object().update( - object::id::equals(args.id), - vec![object::note::set(args.note)], + [sync_param], ), + db.object() + .update(object::id::equals(args.id), vec![db_param]) + .select(object::select!({ id })), ) .await?; @@ -241,19 +241,19 @@ pub(crate) fn mount() -> AlphaRouter { ) })?; + let (sync_param, db_param) = 
sync_db_entry!(args.favorite, object::favorite); + sync.write_op( db, sync.shared_update( prisma_sync::object::SyncId { pub_id: object.pub_id, }, - object::favorite::NAME, - msgpack!(&args.favorite), - ), - db.object().update( - object::id::equals(args.id), - vec![object::favorite::set(Some(args.favorite))], + [sync_param], ), + db.object() + .update(object::id::equals(args.id), vec![db_param]) + .select(object::select!({ id })), ) .await?; @@ -346,19 +346,20 @@ pub(crate) fn mount() -> AlphaRouter { let date_accessed = Utc::now().into(); - let (ops, object_ids): (Vec<_>, Vec<_>) = objects + let (ops, object_ids) = objects .into_iter() - .map(|d| { + .map(|object| { ( sync.shared_update( - prisma_sync::object::SyncId { pub_id: d.pub_id }, - object::date_accessed::NAME, - msgpack!(date_accessed), + prisma_sync::object::SyncId { + pub_id: object.pub_id, + }, + [sync_entry!(date_accessed, object::date_accessed)], ), - d.id, + object.id, ) }) - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); if !ops.is_empty() && !object_ids.is_empty() { sync.write_ops( @@ -392,19 +393,20 @@ pub(crate) fn mount() -> AlphaRouter { .exec() .await?; - let (ops, object_ids): (Vec<_>, Vec<_>) = objects + let (ops, object_ids) = objects .into_iter() - .map(|d| { + .map(|object| { ( sync.shared_update( - prisma_sync::object::SyncId { pub_id: d.pub_id }, - object::date_accessed::NAME, - msgpack!(nil), + prisma_sync::object::SyncId { + pub_id: object.pub_id, + }, + [sync_entry!(nil, object::date_accessed)], ), - d.id, + object.id, ) }) - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); if !ops.is_empty() && !object_ids.is_empty() { sync.write_ops( @@ -487,11 +489,32 @@ pub(crate) fn mount() -> AlphaRouter { path = %full_path.display(), "File not found in the file system, will remove from database;", ); - library + + let file_path_pub_id = library .db .file_path() - .delete(file_path::id::equals(args.file_path_ids[0])) + .find_unique(file_path::id::equals(args.file_path_ids[0])) + 
.select(file_path::select!({ pub_id })) .exec() + .await? + .ok_or(LocationError::FilePath(FilePathError::IdNotFound( + args.file_path_ids[0], + )))? + .pub_id; + + library + .sync + .write_op( + &library.db, + library.sync.shared_delete( + prisma_sync::file_path::SyncId { + pub_id: file_path_pub_id, + }, + ), + library.db.file_path().delete(file_path::id::equals( + args.file_path_ids[0], + )), + ) .await .map_err(LocationError::from)?; diff --git a/core/src/api/labels.rs b/core/src/api/labels.rs index 9aaaf30e3..eed08d8d3 100644 --- a/core/src/api/labels.rs +++ b/core/src/api/labels.rs @@ -116,7 +116,7 @@ pub(crate) fn mount() -> AlphaRouter { .procedure( "delete", R.with2(library()) - .mutation(|(_, library), label_id: i32| async move { + .mutation(|(_, library), label_id: label::id::Type| async move { let Library { db, sync, .. } = library.as_ref(); let label = db @@ -131,6 +131,35 @@ pub(crate) fn mount() -> AlphaRouter { ) })?; + let delete_ops = db + .label_on_object() + .find_many(vec![label_on_object::label_id::equals(label_id)]) + .select(label_on_object::select!({ object: select { pub_id } })) + .exec() + .await? 
+ .into_iter() + .map(|label_on_object| { + sync.relation_delete(prisma_sync::label_on_object::SyncId { + label: prisma_sync::label::SyncId { + name: label.name.clone(), + }, + object: prisma_sync::object::SyncId { + pub_id: label_on_object.object.pub_id, + }, + }) + }) + .collect::>(); + + sync.write_ops( + db, + ( + delete_ops, + db.label_on_object() + .delete_many(vec![label_on_object::label_id::equals(label_id)]), + ), + ) + .await?; + sync.write_op( db, sync.shared_delete(prisma_sync::label::SyncId { name: label.name }), diff --git a/core/src/api/search/saved.rs b/core/src/api/search/saved.rs index 37dec602e..957474c49 100644 --- a/core/src/api/search/saved.rs +++ b/core/src/api/search/saved.rs @@ -69,7 +69,7 @@ pub(crate) fn mount() -> AlphaRouter { let pub_id = Uuid::now_v7().as_bytes().to_vec(); let date_created: DateTime = Utc::now().into(); - let (sync_params, db_params): (Vec<_>, Vec<_>) = chain_optional_iter( + let (sync_params, db_params) = chain_optional_iter( [ sync_db_entry!(date_created, saved_search::date_created), sync_db_entry!(args.name, saved_search::name), @@ -96,7 +96,7 @@ pub(crate) fn mount() -> AlphaRouter { ], ) .into_iter() - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); sync.write_op( db, @@ -106,7 +106,9 @@ pub(crate) fn mount() -> AlphaRouter { }, sync_params, ), - db.saved_search().create(pub_id, db_params), + db.saved_search() + .create(pub_id, db_params) + .select(saved_search::select!({ id })), ) .await?; @@ -162,7 +164,7 @@ pub(crate) fn mount() -> AlphaRouter { rspc::Error::new(rspc::ErrorCode::NotFound, "search not found".into()) })?; - let (ops, db_params): (Vec<_>, Vec<_>) = chain_optional_iter( + let (sync_params, db_params) = chain_optional_iter( [sync_db_entry!(updated_at, saved_search::date_modified)], [ option_sync_db_entry!(args.name.flatten(), saved_search::name), @@ -173,34 +175,23 @@ pub(crate) fn mount() -> AlphaRouter { ], ) .into_iter() - .map(|((k, v), p)| { - ( - sync.shared_update( - 
prisma_sync::saved_search::SyncId { - pub_id: search.pub_id.clone(), - }, - k, - v, - ), - p, - ) - }) - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); - if !ops.is_empty() && !db_params.is_empty() { - sync.write_ops( - db, - ( - ops, - db.saved_search() - .update_unchecked(saved_search::id::equals(id), db_params), - ), - ) - .await?; + sync.write_op( + db, + sync.shared_update( + prisma_sync::saved_search::SyncId { + pub_id: search.pub_id.clone(), + }, + sync_params, + ), + db.saved_search() + .update_unchecked(saved_search::id::equals(id), db_params), + ) + .await?; - invalidate_query!(library, "search.saved.list"); - invalidate_query!(library, "search.saved.get"); - } + invalidate_query!(library, "search.saved.list"); + invalidate_query!(library, "search.saved.get"); Ok(()) } diff --git a/core/src/api/tags.rs b/core/src/api/tags.rs index 0d71b848c..0035ea592 100644 --- a/core/src/api/tags.rs +++ b/core/src/api/tags.rs @@ -4,7 +4,7 @@ use sd_prisma::{ prisma::{device, file_path, object, tag, tag_on_object}, prisma_sync, }; -use sd_sync::{option_sync_db_entry, sync_entry, OperationFactory}; +use sd_sync::{option_sync_db_entry, sync_db_entry, sync_entry, OperationFactory}; use std::collections::BTreeMap; @@ -286,13 +286,17 @@ pub(crate) fn mount() -> AlphaRouter { pub color: Option, } - R.with2(library()) - .mutation(|(_, library), args: TagUpdateArgs| async move { + R.with2(library()).mutation( + |(_, library), TagUpdateArgs { id, name, color }: TagUpdateArgs| async move { + if name.is_none() && color.is_none() { + return Ok(()); + } + let Library { sync, db, .. } = library.as_ref(); let tag = db .tag() - .find_unique(tag::id::equals(args.id)) + .find_unique(tag::id::equals(id)) .select(tag::select!({ pub_id })) .exec() .await? 
@@ -301,68 +305,88 @@ pub(crate) fn mount() -> AlphaRouter { "Error finding tag in db".into(), ))?; - db.tag() - .update( - tag::id::equals(args.id), - vec![tag::date_modified::set(Some(Utc::now().into()))], - ) - .exec() - .await?; - - let (sync_params, db_params): (Vec<_>, Vec<_>) = [ - option_sync_db_entry!(args.name, tag::name), - option_sync_db_entry!(args.color, tag::color), + let (sync_params, db_params) = [ + option_sync_db_entry!(name, tag::name), + option_sync_db_entry!(color, tag::color), + Some(sync_db_entry!(Utc::now(), tag::date_modified)), ] .into_iter() .flatten() - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); - if sync_params.is_empty() && db_params.is_empty() { - return Ok(()); - } - - sync.write_ops( + sync.write_op( db, - ( - sync_params - .into_iter() - .map(|(k, v)| { - sync.shared_update( - prisma_sync::tag::SyncId { - pub_id: tag.pub_id.clone(), - }, - k, - v, - ) - }) - .collect(), - db.tag().update(tag::id::equals(args.id), db_params), + sync.shared_update( + prisma_sync::tag::SyncId { + pub_id: tag.pub_id.clone(), + }, + sync_params, ), + db.tag() + .update(tag::id::equals(id), db_params) + .select(tag::select!({ id })), ) .await?; invalidate_query!(library, "tags.list"); Ok(()) - }) + }, + ) }) .procedure( "delete", R.with2(library()) - .mutation(|(_, library), tag_id: i32| async move { - library - .db - .tag_on_object() - .delete_many(vec![tag_on_object::tag_id::equals(tag_id)]) - .exec() - .await?; + .mutation(|(_, library), tag_id: tag::id::Type| async move { + let Library { sync, db, .. } = &*library; - library - .db + let tag_pub_id = db .tag() - .delete(tag::id::equals(tag_id)) + .find_unique(tag::id::equals(tag_id)) + .select(tag::select!({ pub_id })) .exec() - .await?; + .await? + .ok_or(rspc::Error::new( + rspc::ErrorCode::NotFound, + "Tag not found".to_string(), + ))? 
+ .pub_id; + + let delete_ops = db + .tag_on_object() + .find_many(vec![tag_on_object::tag_id::equals(tag_id)]) + .select(tag_on_object::select!({ object: select { pub_id } })) + .exec() + .await? + .into_iter() + .map(|tag_on_object| { + sync.relation_delete(prisma_sync::tag_on_object::SyncId { + tag: prisma_sync::tag::SyncId { + pub_id: tag_pub_id.clone(), + }, + object: prisma_sync::object::SyncId { + pub_id: tag_on_object.object.pub_id, + }, + }) + }) + .collect::>(); + + sync.write_ops( + db, + ( + delete_ops, + db.tag_on_object() + .delete_many(vec![tag_on_object::tag_id::equals(tag_id)]), + ), + ) + .await?; + + sync.write_op( + db, + sync.shared_delete(prisma_sync::tag::SyncId { pub_id: tag_pub_id }), + db.tag().delete(tag::id::equals(tag_id)), + ) + .await?; invalidate_query!(library, "tags.list"); diff --git a/core/src/library/manager/mod.rs b/core/src/library/manager/mod.rs index 7649576e6..06aa0b8c9 100644 --- a/core/src/library/manager/mod.rs +++ b/core/src/library/manager/mod.rs @@ -537,6 +537,7 @@ impl Libraries { )), ], ) + .select(instance::select!({ id })) .exec() .await?; } diff --git a/core/src/location/manager/watcher/android.rs b/core/src/location/manager/watcher/android.rs index 01bd8a2a1..723f2e076 100644 --- a/core/src/location/manager/watcher/android.rs +++ b/core/src/location/manager/watcher/android.rs @@ -27,6 +27,7 @@ use super::{ #[derive(Debug)] pub(super) struct EventHandler { location_id: location::id::Type, + location_pub_id: location::pub_id::Type, library: Arc, node: Arc, last_events_eviction_check: Instant, @@ -40,9 +41,18 @@ pub(super) struct EventHandler { } impl super::EventHandler for EventHandler { - fn new(location_id: location::id::Type, library: Arc, node: Arc) -> Self { + fn new( + location_id: location::id::Type, + location_pub_id: location::pub_id::Type, + library: Arc, + node: Arc, + ) -> Self + where + Self: Sized, + { Self { location_id, + location_pub_id, library, node, last_events_eviction_check: Instant::now(), 
@@ -182,6 +192,7 @@ impl super::EventHandler for EventHandler { &mut self.to_recalculate_size, &mut self.path_and_instant_buffer, self.location_id, + self.location_pub_id.clone(), &self.library, ) .await diff --git a/core/src/location/manager/watcher/ios.rs b/core/src/location/manager/watcher/ios.rs index 3a9c91500..25f0a49fd 100644 --- a/core/src/location/manager/watcher/ios.rs +++ b/core/src/location/manager/watcher/ios.rs @@ -33,6 +33,7 @@ use super::{ #[derive(Debug)] pub(super) struct EventHandler { location_id: location::id::Type, + location_pub_id: location::pub_id::Type, library: Arc, node: Arc, last_events_eviction_check: Instant, @@ -48,12 +49,18 @@ pub(super) struct EventHandler { } impl super::EventHandler for EventHandler { - fn new(location_id: location::id::Type, library: Arc, node: Arc) -> Self + fn new( + location_id: location::id::Type, + location_pub_id: location::pub_id::Type, + library: Arc, + node: Arc, + ) -> Self where Self: Sized, { Self { location_id, + location_pub_id, library, node, last_events_eviction_check: Instant::now(), @@ -183,6 +190,7 @@ impl super::EventHandler for EventHandler { &mut self.to_recalculate_size, &mut self.path_and_instant_buffer, self.location_id, + self.location_pub_id.clone(), &self.library, ) .await diff --git a/core/src/location/manager/watcher/linux.rs b/core/src/location/manager/watcher/linux.rs index 0ec459a3c..34d37ed15 100644 --- a/core/src/location/manager/watcher/linux.rs +++ b/core/src/location/manager/watcher/linux.rs @@ -32,6 +32,7 @@ use super::{ #[derive(Debug)] pub(super) struct EventHandler { location_id: location::id::Type, + location_pub_id: location::pub_id::Type, library: Arc, node: Arc, last_events_eviction_check: Instant, @@ -45,9 +46,18 @@ pub(super) struct EventHandler { } impl super::EventHandler for EventHandler { - fn new(location_id: location::id::Type, library: Arc, node: Arc) -> Self { + fn new( + location_id: location::id::Type, + location_pub_id: location::pub_id::Type, + library: 
Arc, + node: Arc, + ) -> Self + where + Self: Sized, + { Self { location_id, + location_pub_id, library, node, last_events_eviction_check: Instant::now(), @@ -187,6 +197,7 @@ impl super::EventHandler for EventHandler { &mut self.to_recalculate_size, &mut self.path_and_instant_buffer, self.location_id, + self.location_pub_id.clone(), &self.library, ) .await diff --git a/core/src/location/manager/watcher/macos.rs b/core/src/location/manager/watcher/macos.rs index 11486cd20..4d3b1ffec 100644 --- a/core/src/location/manager/watcher/macos.rs +++ b/core/src/location/manager/watcher/macos.rs @@ -42,6 +42,7 @@ use super::{ #[derive(Debug)] pub(super) struct EventHandler { location_id: location::id::Type, + location_pub_id: location::pub_id::Type, library: Arc, node: Arc, last_events_eviction_check: Instant, @@ -57,12 +58,18 @@ pub(super) struct EventHandler { } impl super::EventHandler for EventHandler { - fn new(location_id: location::id::Type, library: Arc, node: Arc) -> Self + fn new( + location_id: location::id::Type, + location_pub_id: location::pub_id::Type, + library: Arc, + node: Arc, + ) -> Self where Self: Sized, { Self { location_id, + location_pub_id, library, node, last_events_eviction_check: Instant::now(), @@ -206,6 +213,7 @@ impl super::EventHandler for EventHandler { &mut self.to_recalculate_size, &mut self.path_and_instant_buffer, self.location_id, + self.location_pub_id.clone(), &self.library, ) .await diff --git a/core/src/location/manager/watcher/mod.rs b/core/src/location/manager/watcher/mod.rs index 81b70ef87..d63709740 100644 --- a/core/src/location/manager/watcher/mod.rs +++ b/core/src/location/manager/watcher/mod.rs @@ -4,7 +4,7 @@ use sd_core_indexer_rules::{IndexerRule, IndexerRuler}; use sd_core_prisma_helpers::{location_ids_and_path, location_with_indexer_rules}; use sd_prisma::prisma::{location, PrismaClient}; -use sd_utils::db::maybe_missing; +use sd_utils::{db::maybe_missing, uuid_to_bytes}; use std::{ collections::HashSet, @@ -76,7 +76,12 
@@ const THIRTY_SECONDS: Duration = Duration::from_secs(30); const HUNDRED_MILLIS: Duration = Duration::from_millis(100); trait EventHandler: 'static { - fn new(location_id: location::id::Type, library: Arc, node: Arc) -> Self + fn new( + location_id: location::id::Type, + location_pub_id: location::pub_id::Type, + library: Arc, + node: Arc, + ) -> Self where Self: Sized; @@ -200,7 +205,12 @@ impl LocationWatcher { Stop, } - let mut event_handler = Handler::new(location_id, Arc::clone(&library), Arc::clone(&node)); + let mut event_handler = Handler::new( + location_id, + uuid_to_bytes(&location_pub_id), + Arc::clone(&library), + Arc::clone(&node), + ); let mut last_event_at = Instant::now(); diff --git a/core/src/location/manager/watcher/utils.rs b/core/src/location/manager/watcher/utils.rs index 0adaf9f8c..88b065810 100644 --- a/core/src/location/manager/watcher/utils.rs +++ b/core/src/location/manager/watcher/utils.rs @@ -27,7 +27,9 @@ use sd_core_indexer_rules::{ seed::{GitIgnoreRules, GITIGNORE}, IndexerRuler, RulerDecision, }; -use sd_core_prisma_helpers::{file_path_with_object, object_ids, CasId, ObjectPubId}; +use sd_core_prisma_helpers::{ + file_path_watcher_remove, file_path_with_object, object_ids, CasId, ObjectPubId, +}; use sd_file_ext::{ extensions::{AudioExtension, ImageExtension, VideoExtension}, @@ -37,11 +39,11 @@ use sd_prisma::{ prisma::{device, file_path, location, object}, prisma_sync, }; -use sd_sync::{sync_entry, OperationFactory}; +use sd_sync::{option_sync_db_entry, sync_db_entry, sync_entry, OperationFactory}; use sd_utils::{ - db::{inode_from_db, inode_to_db, maybe_missing}, + chain_optional_iter, + db::{inode_from_db, inode_to_db, maybe_missing, size_in_bytes_to_db}, error::FileIOError, - msgpack, }; #[cfg(target_family = "unix")] @@ -354,32 +356,32 @@ async fn inner_create_file( let device_pub_id = sync.device_pub_id.to_db(); + let (sync_params, db_params) = [ + sync_db_entry!(date_created, object::date_created), + 
sync_db_entry!(int_kind, object::kind), + ( + sync_entry!( + prisma_sync::device::SyncId { + pub_id: device_pub_id.clone() + }, + object::device + ), + object::device::connect(device::pub_id::equals(device_pub_id)), + ), + ] + .into_iter() + .unzip::<_, _, Vec<_>, Vec<_>>(); + sync.write_op( db, sync.shared_create( prisma_sync::object::SyncId { pub_id: pub_id.to_db(), }, - [ - sync_entry!(date_created, object::date_created), - sync_entry!(int_kind, object::kind), - sync_entry!( - prisma_sync::device::SyncId { - pub_id: device_pub_id.clone() - }, - object::device - ), - ], + sync_params, ), db.object() - .create( - pub_id.into(), - vec![ - object::date_created::set(Some(date_created)), - object::kind::set(Some(int_kind)), - object::device::connect(device::pub_id::equals(device_pub_id)), - ], - ) + .create(pub_id.into(), db_params) .select(object_ids::select()), ) .await? @@ -391,17 +393,21 @@ async fn inner_create_file( prisma_sync::location::SyncId { pub_id: created_file.pub_id.clone(), }, - file_path::object::NAME, - msgpack!(prisma_sync::object::SyncId { - pub_id: object_pub_id.clone() - }), - ), - db.file_path().update( - file_path::pub_id::equals(created_file.pub_id.clone()), - vec![file_path::object::connect(object::pub_id::equals( - object_pub_id.clone(), - ))], + [sync_entry!( + prisma_sync::object::SyncId { + pub_id: object_pub_id.clone() + }, + file_path::object + )], ), + db.file_path() + .update( + file_path::pub_id::equals(created_file.pub_id.clone()), + vec![file_path::object::connect(object::pub_id::equals( + object_pub_id.clone(), + ))], + ) + .select(file_path::select!({ id })), ) .await?; @@ -590,34 +596,22 @@ async fn inner_update_file( let is_hidden = path_is_hidden(full_path, &fs_metadata); if file_path.cas_id.as_deref() != cas_id.as_ref().map(CasId::as_str) { - let (sync_params, db_params): (Vec<_>, Vec<_>) = { - use file_path::*; - + let (sync_params, db_params) = chain_optional_iter( [ - ( - (cas_id::NAME, msgpack!(file_path.cas_id)), - 
Some(cas_id::set(file_path.cas_id.clone())), + sync_db_entry!( + size_in_bytes_to_db(fs_metadata.len()), + file_path::size_in_bytes_bytes ), - ( - ( - size_in_bytes_bytes::NAME, - msgpack!(fs_metadata.len().to_be_bytes().to_vec()), - ), - Some(size_in_bytes_bytes::set(Some( - fs_metadata.len().to_be_bytes().to_vec(), - ))), + sync_db_entry!( + DateTime::::from(fs_metadata.modified_or_now()), + file_path::date_modified ), - { - let date = DateTime::::from(fs_metadata.modified_or_now()).into(); - - ( - (date_modified::NAME, msgpack!(date)), - Some(date_modified::set(Some(date))), - ) - }, - { - // TODO: Should this be a skip rather than a null-set? - let checksum = if file_path.integrity_checksum.is_some() { + ], + [ + option_sync_db_entry!(file_path.cas_id.clone(), file_path::cas_id), + option_sync_db_entry!( + if file_path.integrity_checksum.is_some() { + // TODO: Should this be a skip rather than a null-set? // If a checksum was already computed, we need to recompute it Some( file_checksum(full_path) @@ -626,68 +620,39 @@ async fn inner_update_file( ) } else { None - }; - - ( - (integrity_checksum::NAME, msgpack!(checksum)), - Some(integrity_checksum::set(checksum)), - ) - }, - { - if current_inode != inode { - ( - (inode::NAME, msgpack!(inode)), - Some(inode::set(Some(inode_to_db(inode)))), - ) - } else { - ((inode::NAME, msgpack!(nil)), None) - } - }, - { - if is_hidden != file_path.hidden.unwrap_or_default() { - ( - (hidden::NAME, msgpack!(inode)), - Some(hidden::set(Some(is_hidden))), - ) - } else { - ((hidden::NAME, msgpack!(nil)), None) - } - }, - ] - .into_iter() - .filter_map(|(sync_param, maybe_db_param)| { - maybe_db_param.map(|db_param| (sync_param, db_param)) - }) - .unzip() - }; - - let ops = sync_params - .into_iter() - .map(|(field, value)| { - sync.shared_update( - prisma_sync::file_path::SyncId { - pub_id: file_path.pub_id.clone(), }, - field, - value, - ) - }) - .collect::>(); - - if !ops.is_empty() && !db_params.is_empty() { - // file content 
changed - sync.write_ops( - db, - ( - ops, - db.file_path().update( - file_path::pub_id::equals(file_path.pub_id.clone()), - db_params, - ), + file_path::integrity_checksum ), - ) - .await?; - } + option_sync_db_entry!( + (current_inode != inode).then(|| inode_to_db(inode)), + file_path::inode + ), + option_sync_db_entry!( + (is_hidden != file_path.hidden.unwrap_or_default()).then_some(is_hidden), + file_path::hidden + ), + ], + ) + .into_iter() + .unzip::<_, _, Vec<_>, Vec<_>>(); + + // file content changed + sync.write_op( + db, + sync.shared_update( + prisma_sync::file_path::SyncId { + pub_id: file_path.pub_id.clone(), + }, + sync_params, + ), + db.file_path() + .update( + file_path::pub_id::equals(file_path.pub_id.clone()), + db_params, + ) + .select(file_path::select!({ id })), + ) + .await?; if let Some(ref object) = file_path.object { let int_kind = kind as i32; @@ -699,19 +664,18 @@ async fn inner_update_file( .await? == 1 { if object.kind.map(|k| k != int_kind).unwrap_or_default() { + let (sync_param, db_param) = sync_db_entry!(int_kind, object::kind); sync.write_op( db, sync.shared_update( prisma_sync::object::SyncId { pub_id: object.pub_id.clone(), }, - object::kind::NAME, - msgpack!(int_kind), - ), - db.object().update( - object::id::equals(object.id), - vec![object::kind::set(Some(int_kind))], + [sync_param], ), + db.object() + .update(object::id::equals(object.id), vec![db_param]) + .select(object::select!({ id })), ) .await?; } @@ -722,31 +686,31 @@ async fn inner_update_file( let device_pub_id = sync.device_pub_id.to_db(); + let (sync_params, db_params) = [ + sync_db_entry!(date_created, object::date_created), + sync_db_entry!(int_kind, object::kind), + ( + sync_entry!( + prisma_sync::device::SyncId { + pub_id: device_pub_id.clone() + }, + object::device + ), + object::device::connect(device::pub_id::equals(device_pub_id)), + ), + ] + .into_iter() + .unzip::<_, _, Vec<_>, Vec<_>>(); + sync.write_op( db, sync.shared_create( 
prisma_sync::object::SyncId { pub_id: pub_id.to_db(), }, - [ - sync_entry!(date_created, object::date_created), - sync_entry!(int_kind, object::kind), - sync_entry!( - prisma_sync::device::SyncId { - pub_id: device_pub_id.clone() - }, - object::device - ), - ], - ), - db.object().create( - pub_id.to_db(), - vec![ - object::date_created::set(Some(date_created)), - object::kind::set(Some(int_kind)), - object::device::connect(device::pub_id::equals(device_pub_id)), - ], + sync_params, ), + db.object().create(pub_id.to_db(), db_params), ) .await?; @@ -756,17 +720,21 @@ async fn inner_update_file( prisma_sync::location::SyncId { pub_id: file_path.pub_id.clone(), }, - file_path::object::NAME, - msgpack!(prisma_sync::object::SyncId { - pub_id: pub_id.to_db() - }), - ), - db.file_path().update( - file_path::pub_id::equals(file_path.pub_id.clone()), - vec![file_path::object::connect(object::pub_id::equals( - pub_id.into(), - ))], + [sync_entry!( + prisma_sync::object::SyncId { + pub_id: pub_id.to_db() + }, + file_path::object + )], ), + db.file_path() + .update( + file_path::pub_id::equals(file_path.pub_id.clone()), + vec![file_path::object::connect(object::pub_id::equals( + pub_id.into(), + ))], + ) + .select(file_path::select!({ id })), ) .await?; } @@ -874,21 +842,22 @@ async fn inner_update_file( invalidate_query!(library, "search.paths"); invalidate_query!(library, "search.objects"); } else if is_hidden != file_path.hidden.unwrap_or_default() { - sync.write_ops( + let (sync_param, db_param) = sync_db_entry!(is_hidden, file_path::hidden); + + sync.write_op( db, - ( - vec![sync.shared_update( - prisma_sync::file_path::SyncId { - pub_id: file_path.pub_id.clone(), - }, - file_path::hidden::NAME, - msgpack!(is_hidden), - )], - db.file_path().update( - file_path::pub_id::equals(file_path.pub_id.clone()), - vec![file_path::hidden::set(Some(is_hidden))], - ), + sync.shared_update( + prisma_sync::file_path::SyncId { + pub_id: file_path.pub_id.clone(), + }, + [sync_param], ), + 
db.file_path() + .update( + file_path::pub_id::equals(file_path.pub_id.clone()), + vec![db_param], + ) + .select(file_path::select!({ id })), ) .await?; @@ -972,7 +941,7 @@ pub(super) async fn rename( .await?; let total_paths_count = paths.len(); - let (sync_params, db_params): (Vec<_>, Vec<_>) = paths + let (sync_params, db_params) = paths .into_iter() .filter_map(|path| path.materialized_path.map(|mp| (path.id, path.pub_id, mp))) .map(|(id, pub_id, mp)| { @@ -981,19 +950,20 @@ pub(super) async fn rename( &format!("{}/{}/", new_parts.materialized_path, new_parts.name), ); + let (sync_param, db_param) = + sync_db_entry!(new_path, file_path::materialized_path); + ( sync.shared_update( sd_prisma::prisma_sync::file_path::SyncId { pub_id }, - file_path::materialized_path::NAME, - msgpack!(&new_path), - ), - db.file_path().update( - file_path::id::equals(id), - vec![file_path::materialized_path::set(Some(new_path))], + [sync_param], ), + db.file_path() + .update(file_path::id::equals(id), vec![db_param]) + .select(file_path::select!({ id })), ) }) - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); if !sync_params.is_empty() && !db_params.is_empty() { sync.write_ops(db, (sync_params, db_params)).await?; @@ -1002,65 +972,38 @@ pub(super) async fn rename( trace!(%total_paths_count, "Updated file_paths;"); } - let is_hidden = path_is_hidden(new_path, &new_path_metadata); - - let date_modified = DateTime::::from(new_path_metadata.modified_or_now()).into(); - - let (sync_params, db_params): (Vec<_>, Vec<_>) = [ - ( - ( - file_path::materialized_path::NAME, - msgpack!(new_path_materialized_str), - ), - file_path::materialized_path::set(Some(new_path_materialized_str)), + let (sync_params, db_params) = [ + sync_db_entry!(new_path_materialized_str, file_path::materialized_path), + sync_db_entry!(new_parts.name.to_string(), file_path::name), + sync_db_entry!(new_parts.extension.to_string(), file_path::extension), + sync_db_entry!( + 
DateTime::::from(new_path_metadata.modified_or_now()), + file_path::date_modified ), - ( - (file_path::name::NAME, msgpack!(new_parts.name)), - file_path::name::set(Some(new_parts.name.to_string())), - ), - ( - (file_path::extension::NAME, msgpack!(new_parts.extension)), - file_path::extension::set(Some(new_parts.extension.to_string())), - ), - ( - (file_path::date_modified::NAME, msgpack!(&date_modified)), - file_path::date_modified::set(Some(date_modified)), - ), - ( - (file_path::hidden::NAME, msgpack!(is_hidden)), - file_path::hidden::set(Some(is_hidden)), + sync_db_entry!( + path_is_hidden(new_path, &new_path_metadata), + file_path::hidden ), ] .into_iter() - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); - let ops = sync_params - .into_iter() - .map(|(k, v)| { - sync.shared_update( - prisma_sync::file_path::SyncId { - pub_id: file_path.pub_id.clone(), - }, - k, - v, - ) - }) - .collect::>(); + sync.write_op( + db, + sync.shared_update( + prisma_sync::file_path::SyncId { + pub_id: file_path.pub_id.clone(), + }, + sync_params, + ), + db.file_path() + .update(file_path::pub_id::equals(file_path.pub_id), db_params) + .select(file_path::select!({ id })), + ) + .await?; - if !ops.is_empty() && !db_params.is_empty() { - sync.write_ops( - db, - ( - ops, - db.file_path() - .update(file_path::pub_id::equals(file_path.pub_id), db_params), - ), - ) - .await?; - - invalidate_query!(library, "search.paths"); - invalidate_query!(library, "search.objects"); - } + invalidate_query!(library, "search.paths"); + invalidate_query!(library, "search.objects"); } Ok(()) @@ -1084,19 +1027,20 @@ pub(super) async fn remove( &location_path, full_path, )?) + .select(file_path_watcher_remove::select()) .exec() .await? 
else { return Ok(()); }; - remove_by_file_path(location_id, full_path, &file_path, library).await + remove_by_file_path(location_id, full_path, file_path, library).await } async fn remove_by_file_path( location_id: location::id::Type, path: impl AsRef + Send, - file_path: &file_path::Data, + file_path: file_path_watcher_remove::Data, library: &Library, ) -> Result<(), LocationManagerError> { // check file still exists on disk @@ -1120,28 +1064,42 @@ async fn remove_by_file_path( delete_directory( library, location_id, - Some(&IsolatedFilePathData::try_from(file_path)?), + Some(&IsolatedFilePathData::try_from(&file_path)?), ) .await?; } else { sync.write_op( db, sync.shared_delete(prisma_sync::file_path::SyncId { - pub_id: file_path.pub_id.clone(), + pub_id: file_path.pub_id, }), db.file_path().delete(file_path::id::equals(file_path.id)), ) .await?; - if let Some(object_id) = file_path.object_id { - db.object() - .delete_many(vec![ - object::id::equals(object_id), + if let Some(object) = file_path.object { + // If this object doesn't have any other file paths, delete it + if db + .object() + .count(vec![ + object::id::equals(object.id), // https://www.prisma.io/docs/reference/api-reference/prisma-client-reference#none object::file_paths::none(vec![]), ]) .exec() + .await? 
== 1 + { + sync.write_op( + db, + sync.shared_delete(prisma_sync::object::SyncId { + pub_id: object.pub_id, + }), + db.object() + .delete(object::id::equals(object.id)) + .select(object::select!({ id })), + ) .await?; + } } } } @@ -1210,6 +1168,7 @@ pub(super) async fn recalculate_directories_size( candidates: &mut HashMap, buffer: &mut Vec<(PathBuf, Instant)>, location_id: location::id::Type, + location_pub_id: location::pub_id::Type, library: &Library, ) -> Result<(), LocationManagerError> { let mut location_path_cache = None; @@ -1268,7 +1227,7 @@ pub(super) async fn recalculate_directories_size( } if should_update_location_size { - update_location_size(location_id, library).await?; + update_location_size(location_id, location_pub_id, library).await?; } if should_invalidate { diff --git a/core/src/location/manager/watcher/windows.rs b/core/src/location/manager/watcher/windows.rs index a9b24c54c..bd85693e8 100644 --- a/core/src/location/manager/watcher/windows.rs +++ b/core/src/location/manager/watcher/windows.rs @@ -39,6 +39,7 @@ use super::{ #[derive(Debug)] pub(super) struct EventHandler { location_id: location::id::Type, + location_pub_id: location::pub_id::Type, library: Arc, node: Arc, last_events_eviction_check: Instant, @@ -54,12 +55,18 @@ pub(super) struct EventHandler { } impl super::EventHandler for EventHandler { - fn new(location_id: location::id::Type, library: Arc, node: Arc) -> Self + fn new( + location_id: location::id::Type, + location_pub_id: location::pub_id::Type, + library: Arc, + node: Arc, + ) -> Self where Self: Sized, { Self { location_id, + location_pub_id, library, node, last_events_eviction_check: Instant::now(), @@ -277,6 +284,7 @@ impl super::EventHandler for EventHandler { &mut self.to_recalculate_size, &mut self.path_and_instant_buffer, self.location_id, + self.location_pub_id.clone(), &self.library, ) .await diff --git a/core/src/location/mod.rs b/core/src/location/mod.rs index a1fd20073..a4a998995 100644 --- 
a/core/src/location/mod.rs +++ b/core/src/location/mod.rs @@ -18,9 +18,9 @@ use sd_prisma::{ }; use sd_sync::*; use sd_utils::{ - db::{maybe_missing, size_in_bytes_to_db, MissingFieldError}, + db::{maybe_missing, size_in_bytes_from_db, size_in_bytes_to_db}, error::{FileIOError, NonUtf8PathError}, - msgpack, uuid_to_bytes, + uuid_to_bytes, }; use std::{ @@ -304,63 +304,36 @@ impl LocationUpdateArgs { let name = self.name.clone(); - let (sync_params, db_params): (Vec<_>, Vec<_>) = [ - self.name - .filter(|name| location.name.as_ref() != Some(name)) - .map(|v| { - ( - (location::name::NAME, msgpack!(v)), - location::name::set(Some(v)), - ) - }), - self.generate_preview_media.map(|v| { - ( - (location::generate_preview_media::NAME, msgpack!(v)), - location::generate_preview_media::set(Some(v)), - ) - }), - self.sync_preview_media.map(|v| { - ( - (location::sync_preview_media::NAME, msgpack!(v)), - location::sync_preview_media::set(Some(v)), - ) - }), - self.hidden.map(|v| { - ( - (location::hidden::NAME, msgpack!(v)), - location::hidden::set(Some(v)), - ) - }), - self.path.clone().map(|v| { - ( - (location::path::NAME, msgpack!(v)), - location::path::set(Some(v)), - ) - }), + let (sync_params, db_params) = [ + option_sync_db_entry!( + self.name + .filter(|name| location.name.as_ref() != Some(name)), + location::name + ), + option_sync_db_entry!( + self.generate_preview_media, + location::generate_preview_media + ), + option_sync_db_entry!(self.sync_preview_media, location::sync_preview_media), + option_sync_db_entry!(self.hidden, location::hidden), + option_sync_db_entry!(self.path.clone(), location::path), ] .into_iter() .flatten() - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); if !sync_params.is_empty() { - sync.write_ops( + sync.write_op( db, - ( - sync_params - .into_iter() - .map(|p| { - sync.shared_update( - prisma_sync::location::SyncId { - pub_id: location.pub_id.clone(), - }, - p.0, - p.1, - ) - }) - .collect(), - db.location() - 
.update(location::id::equals(self.id), db_params), + sync.shared_update( + prisma_sync::location::SyncId { + pub_id: location.pub_id.clone(), + }, + sync_params, ), + db.location() + .update(location::id::equals(self.id), db_params) + .select(location::select!({ id })), ) .await?; @@ -651,33 +624,25 @@ pub async fn relink_location( .map(str::to_string) .ok_or_else(|| NonUtf8PathError(location_path.into()))?; - sync.write_op( - db, - sync.shared_update( - prisma_sync::location::SyncId { - pub_id: pub_id.clone(), - }, - location::path::NAME, - msgpack!(path), - ), - db.location().update( - location::pub_id::equals(pub_id.clone()), - vec![location::path::set(Some(path))], - ), - ) - .await?; + let (sync_param, db_param) = sync_db_entry!(path, location::path); - let location_id = db - .location() - .find_unique(location::pub_id::equals(pub_id)) - .select(location::select!({ id })) - .exec() + let location_id = sync + .write_op( + db, + sync.shared_update( + prisma_sync::location::SyncId { + pub_id: pub_id.clone(), + }, + [sync_param], + ), + db.location() + .update(location::pub_id::equals(pub_id.clone()), vec![db_param]) + .select(location::select!({ id })), + ) .await? - .ok_or_else(|| { - LocationError::MissingField(MissingFieldError::new("missing id of location")) - })?; + .id; - Ok(location_id.id) + Ok(location_id) } #[derive(Debug)] @@ -1002,45 +967,44 @@ async fn check_nested_location( #[instrument(skip_all, err)] pub async fn update_location_size( location_id: location::id::Type, + location_pub_id: location::pub_id::Type, library: &Library, -) -> Result<(), QueryError> { - let Library { db, .. } = library; +) -> Result<(), sd_core_sync::Error> { + let Library { db, sync, .. } = library; - let total_size = db - .file_path() - .find_many(vec![ - file_path::location_id::equals(Some(location_id)), - file_path::materialized_path::equals(Some("/".to_string())), - ]) - .select(file_path::select!({ size_in_bytes_bytes })) - .exec() - .await? 
- .into_iter() - .filter_map(|file_path| { - file_path.size_in_bytes_bytes.map(|size_in_bytes_bytes| { - u64::from_be_bytes([ - size_in_bytes_bytes[0], - size_in_bytes_bytes[1], - size_in_bytes_bytes[2], - size_in_bytes_bytes[3], - size_in_bytes_bytes[4], - size_in_bytes_bytes[5], - size_in_bytes_bytes[6], - size_in_bytes_bytes[7], - ]) + let total_size = size_in_bytes_to_db( + db.file_path() + .find_many(vec![ + file_path::location_id::equals(Some(location_id)), + file_path::materialized_path::equals(Some("/".to_string())), + ]) + .select(file_path::select!({ size_in_bytes_bytes })) + .exec() + .await? + .into_iter() + .filter_map(|file_path| { + file_path + .size_in_bytes_bytes + .map(|size_in_bytes_bytes| size_in_bytes_from_db(&size_in_bytes_bytes)) }) - }) - .sum::(); + .sum::(), + ); - db.location() - .update( - location::id::equals(location_id), - vec![location::size_in_bytes::set(Some( - total_size.to_be_bytes().to_vec(), - ))], - ) - .exec() - .await?; + let (sync_param, db_param) = sync_db_entry!(total_size, location::size_in_bytes); + + sync.write_op( + db, + sync.shared_update( + prisma_sync::location::SyncId { + pub_id: location_pub_id, + }, + [sync_param], + ), + db.location() + .update(location::id::equals(location_id), vec![db_param]) + .select(location::select!({ id })), + ) + .await?; invalidate_query!(library, "locations.list"); invalidate_query!(library, "locations.get"); @@ -1100,69 +1064,60 @@ pub async fn create_file_path( location_id, ))?; - let (sync_params, db_params): (Vec<_>, Vec<_>) = { - use file_path::{ - cas_id, date_created, date_indexed, date_modified, device, extension, hidden, inode, - is_dir, location, materialized_path, name, size_in_bytes_bytes, - }; + let device_pub_id = sync.device_pub_id.to_db(); - let device_pub_id = sync.device_pub_id.to_db(); - - [ - ( - sync_entry!( - prisma_sync::location::SyncId { - pub_id: location.pub_id - }, - location - ), - location::connect(prisma::location::id::equals(location.id)), + let 
(sync_params, db_params) = [ + ( + sync_entry!( + prisma_sync::location::SyncId { + pub_id: location.pub_id + }, + file_path::location ), - ( - sync_entry!(cas_id, cas_id), - cas_id::set(cas_id.map(Into::into)), + file_path::location::connect(prisma::location::id::equals(location.id)), + ), + ( + sync_entry!(cas_id, file_path::cas_id), + file_path::cas_id::set(cas_id.map(Into::into)), + ), + sync_db_entry!(materialized_path, file_path::materialized_path), + sync_db_entry!(name, file_path::name), + sync_db_entry!(extension, file_path::extension), + sync_db_entry!( + size_in_bytes_to_db(metadata.size_in_bytes), + file_path::size_in_bytes_bytes + ), + sync_db_entry!(inode_to_db(metadata.inode), file_path::inode), + sync_db_entry!(is_dir, file_path::is_dir), + sync_db_entry!(metadata.created_at, file_path::date_created), + sync_db_entry!(metadata.modified_at, file_path::date_modified), + sync_db_entry!(indexed_at, file_path::date_indexed), + sync_db_entry!(metadata.hidden, file_path::hidden), + ( + sync_entry!( + prisma_sync::device::SyncId { + pub_id: device_pub_id.clone() + }, + file_path::device ), - sync_db_entry!(materialized_path, materialized_path), - sync_db_entry!(name, name), - sync_db_entry!(extension, extension), - sync_db_entry!( - size_in_bytes_to_db(metadata.size_in_bytes), - size_in_bytes_bytes - ), - sync_db_entry!(inode_to_db(metadata.inode), inode), - sync_db_entry!(is_dir, is_dir), - sync_db_entry!(metadata.created_at, date_created), - sync_db_entry!(metadata.modified_at, date_modified), - sync_db_entry!(indexed_at, date_indexed), - sync_db_entry!(metadata.hidden, hidden), - ( - sync_entry!( - prisma_sync::device::SyncId { - pub_id: device_pub_id.clone() - }, - device - ), - device::connect(prisma::device::pub_id::equals(device_pub_id)), - ), - ] - .into_iter() - .unzip() - }; + file_path::device::connect(prisma::device::pub_id::equals(device_pub_id)), + ), + ] + .into_iter() + .unzip::<_, _, Vec<_>, Vec<_>>(); let pub_id = 
sd_utils::uuid_to_bytes(&Uuid::now_v7()); - let created_path = sync - .write_op( - db, - sync.shared_create( - prisma_sync::file_path::SyncId { - pub_id: pub_id.clone(), - }, - sync_params, - ), - db.file_path().create(pub_id, db_params), - ) - .await?; - - Ok(created_path) + sync.write_op( + db, + sync.shared_create( + prisma_sync::file_path::SyncId { + pub_id: pub_id.clone(), + }, + sync_params, + ), + db.file_path().create(pub_id, db_params), + ) + .await + .map_err(Into::into) } diff --git a/core/src/object/fs/old_copy.rs b/core/src/object/fs/old_copy.rs index 8b760b920..2d7b0fb70 100644 --- a/core/src/object/fs/old_copy.rs +++ b/core/src/object/fs/old_copy.rs @@ -323,8 +323,8 @@ impl StatefulJob for OldFileCopierJobInit { .await?; dirs.extend(more_dirs); - let (dir_source_file_data, dir_target_full_path): (Vec<_>, Vec<_>) = - dirs.into_iter().unzip(); + let (dir_source_file_data, dir_target_full_path) = + dirs.into_iter().unzip::<_, _, Vec<_>, Vec<_>>(); let step_files = dir_source_file_data .into_iter() diff --git a/core/src/object/tag/mod.rs b/core/src/object/tag/mod.rs index 98238462b..34b609a83 100644 --- a/core/src/object/tag/mod.rs +++ b/core/src/object/tag/mod.rs @@ -23,14 +23,14 @@ impl TagCreateArgs { ) -> Result { let pub_id = Uuid::now_v7().as_bytes().to_vec(); - let (sync_params, db_params): (Vec<_>, Vec<_>) = [ + let (sync_params, db_params) = [ sync_db_entry!(self.name, tag::name), sync_db_entry!(self.color, tag::color), sync_db_entry!(false, tag::is_hidden), sync_db_entry!(Utc::now(), tag::date_created), ] .into_iter() - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); sync.write_op( db, diff --git a/core/src/object/validation/old_validator_job.rs b/core/src/object/validation/old_validator_job.rs index d90fc56cb..7ddd42938 100644 --- a/core/src/object/validation/old_validator_job.rs +++ b/core/src/object/validation/old_validator_job.rs @@ -15,8 +15,8 @@ use sd_prisma::{ prisma::{file_path, location}, prisma_sync, }; -use sd_sync::OperationFactory; 
-use sd_utils::{db::maybe_missing, error::FileIOError, msgpack}; +use sd_sync::{sync_db_entry, OperationFactory}; +use sd_utils::{db::maybe_missing, error::FileIOError}; use std::{ hash::{Hash, Hasher}, @@ -157,19 +157,22 @@ impl StatefulJob for OldObjectValidatorJobInit { .await .map_err(|e| ValidatorError::FileIO(FileIOError::from((full_path, e))))?; + let (sync_param, db_param) = sync_db_entry!(checksum, file_path::integrity_checksum); + sync.write_op( db, sync.shared_update( prisma_sync::file_path::SyncId { pub_id: file_path.pub_id.clone(), }, - file_path::integrity_checksum::NAME, - msgpack!(&checksum), - ), - db.file_path().update( - file_path::pub_id::equals(file_path.pub_id.clone()), - vec![file_path::integrity_checksum::set(Some(checksum))], + [sync_param], ), + db.file_path() + .update( + file_path::pub_id::equals(file_path.pub_id.clone()), + vec![db_param], + ) + .select(file_path::select!({ id })), ) .await?; } diff --git a/core/src/old_job/manager.rs b/core/src/old_job/manager.rs index f47164759..c9e5cc892 100644 --- a/core/src/old_job/manager.rs +++ b/core/src/old_job/manager.rs @@ -320,6 +320,7 @@ impl OldJobs { job::id::equals(job.id.as_bytes().to_vec()), vec![job::status::set(Some(JobStatus::Canceled as i32))], ) + .select(job::select!({ id })) .exec() .await?; } diff --git a/core/src/old_job/report.rs b/core/src/old_job/report.rs index ed40df23d..af7333267 100644 --- a/core/src/old_job/report.rs +++ b/core/src/old_job/report.rs @@ -395,6 +395,7 @@ impl OldJobReport { job::date_completed::set(self.completed_at.map(Into::into)), ], ) + .select(job::select!({ id })) .exec() .await?; Ok(()) diff --git a/core/src/volume/mod.rs b/core/src/volume/mod.rs index 50d9d1b15..9519d639a 100644 --- a/core/src/volume/mod.rs +++ b/core/src/volume/mod.rs @@ -7,7 +7,7 @@ use sd_prisma::{ prisma::{device, storage_statistics, PrismaClient}, prisma_sync, }; -use sd_sync::{sync_entry, OperationFactory}; +use sd_sync::{sync_db_not_null_entry, sync_entry, 
OperationFactory}; use sd_utils::uuid_to_bytes; use std::{ @@ -531,67 +531,66 @@ async fn update_storage_statistics( .map(|s| s.pub_id); if let Some(storage_statistics_pub_id) = storage_statistics_pub_id { - sync.write_ops( - db, - ( - [ - sync_entry!(total_capacity, storage_statistics::total_capacity), - sync_entry!(available_capacity, storage_statistics::available_capacity), - ] - .into_iter() - .map(|(field, value)| { - sync.shared_update( - prisma_sync::storage_statistics::SyncId { - pub_id: storage_statistics_pub_id.clone(), - }, - field, - value, - ) - }) - .collect(), - db.storage_statistics() - .update( - storage_statistics::pub_id::equals(storage_statistics_pub_id), - vec![ - storage_statistics::total_capacity::set(total_capacity as i64), - storage_statistics::available_capacity::set(available_capacity as i64), - ], - ) - // We don't need any data here, just the id avoids receiving the entire object - // as we can't pass an empty select macro call - .select(storage_statistics::select!({ id })), + let (sync_params, db_params) = [ + sync_db_not_null_entry!(total_capacity as i64, storage_statistics::total_capacity), + sync_db_not_null_entry!( + available_capacity as i64, + storage_statistics::available_capacity ), + ] + .into_iter() + .unzip::<_, _, Vec<_>, Vec<_>>(); + + sync.write_op( + db, + sync.shared_update( + prisma_sync::storage_statistics::SyncId { + pub_id: storage_statistics_pub_id.clone(), + }, + sync_params, + ), + db.storage_statistics() + .update( + storage_statistics::pub_id::equals(storage_statistics_pub_id), + db_params, + ) + // We don't need any data here, just the id avoids receiving the entire object + // as we can't pass an empty select macro call + .select(storage_statistics::select!({ id })), ) .await?; } else { let new_storage_statistics_id = uuid_to_bytes(&Uuid::now_v7()); + let (sync_params, db_params) = [ + sync_db_not_null_entry!(total_capacity as i64, storage_statistics::total_capacity), + sync_db_not_null_entry!( + 
available_capacity as i64, + storage_statistics::available_capacity + ), + ( + sync_entry!( + prisma_sync::device::SyncId { + pub_id: device_pub_id.clone() + }, + storage_statistics::device + ), + storage_statistics::device::connect(device::pub_id::equals(device_pub_id)), + ), + ] + .into_iter() + .unzip::<_, _, Vec<_>, Vec<_>>(); + sync.write_op( db, sync.shared_create( prisma_sync::storage_statistics::SyncId { pub_id: new_storage_statistics_id.clone(), }, - [ - sync_entry!(total_capacity, storage_statistics::total_capacity), - sync_entry!(available_capacity, storage_statistics::available_capacity), - sync_entry!( - prisma_sync::device::SyncId { - pub_id: device_pub_id.clone() - }, - storage_statistics::device - ), - ], + sync_params, ), db.storage_statistics() - .create( - new_storage_statistics_id, - vec![ - storage_statistics::total_capacity::set(total_capacity as i64), - storage_statistics::available_capacity::set(available_capacity as i64), - storage_statistics::device::connect(device::pub_id::equals(device_pub_id)), - ], - ) + .create(new_storage_statistics_id, db_params) // We don't need any data here, just the id avoids receiving the entire object // as we can't pass an empty select macro call .select(storage_statistics::select!({ id })), diff --git a/crates/crypto/src/cloud/decrypt.rs b/crates/crypto/src/cloud/decrypt.rs index c45a99110..94913f64b 100644 --- a/crates/crypto/src/cloud/decrypt.rs +++ b/crates/crypto/src/cloud/decrypt.rs @@ -31,7 +31,7 @@ impl OneShotDecryption for SecretKey { EncryptedBlockRef { nonce, cipher_text }: EncryptedBlockRef<'_>, ) -> Result, Error> { XChaCha20Poly1305::new(&self.0) - .decrypt(&nonce, cipher_text) + .decrypt(nonce, cipher_text) .map_err(|aead::Error| Error::Decrypt) } diff --git a/crates/crypto/src/cloud/secret_key.rs b/crates/crypto/src/cloud/secret_key.rs index c1df94f9f..2477684ad 100644 --- a/crates/crypto/src/cloud/secret_key.rs +++ b/crates/crypto/src/cloud/secret_key.rs @@ -191,7 +191,7 @@ mod tests { let 
EncryptedBlock { nonce, cipher_text } = key.encrypt(message, &mut rng).unwrap(); let mut bytes = Vec::with_capacity(nonce.len() + cipher_text.len()); - bytes.extend_from_slice(&nonce); + bytes.extend_from_slice(nonce.as_slice()); bytes.extend(cipher_text); assert_eq!( diff --git a/crates/crypto/src/primitives.rs b/crates/crypto/src/primitives.rs index a37981a01..1d8335fc1 100644 --- a/crates/crypto/src/primitives.rs +++ b/crates/crypto/src/primitives.rs @@ -16,7 +16,7 @@ pub struct EncryptedBlock { } pub struct EncryptedBlockRef<'e> { - pub nonce: OneShotNonce, + pub nonce: &'e OneShotNonce, pub cipher_text: &'e [u8], } @@ -25,7 +25,7 @@ impl<'e> From<&'e [u8]> for EncryptedBlockRef<'e> { let (nonce, cipher_text) = cipher_text.split_at(size_of::()); Self { - nonce: OneShotNonce::try_from(nonce).expect("we split the correct amount"), + nonce: nonce.try_into().expect("we split the correct amount"), cipher_text, } } diff --git a/crates/sync-generator/src/model.rs b/crates/sync-generator/src/model.rs index 767c1d820..e171634b8 100644 --- a/crates/sync-generator/src/model.rs +++ b/crates/sync-generator/src/model.rs @@ -46,7 +46,7 @@ pub fn module((model, sync_type): ModelWithSyncType<'_>) -> Module { RefinedFieldWalker::Scalar(scalar_field) => { (!scalar_field.is_in_required_relation()).then(|| { quote! { - #model_name_snake::#field_name_snake::set(::rmpv::ext::from_value(val).unwrap()), + #model_name_snake::#field_name_snake::set(::rmpv::ext::from_value(val)?), } }) } @@ -59,11 +59,19 @@ pub fn module((model, sync_type): ModelWithSyncType<'_>) -> Module { |i| { if i.count() == 1 { Some(quote! {{ - let val: std::collections::HashMap = ::rmpv::ext::from_value(val).unwrap(); - let val = val.into_iter().next().unwrap(); + + let (field, value) = ::rmpv + ::ext + ::from_value::>(val)? 
+ .into_iter() + .next() + .ok_or(Error::MissingRelationData { + field: field.to_string(), + model: #relation_model_name_snake::NAME.to_string() + })?; #model_name_snake::#field_name_snake::connect( - #relation_model_name_snake::UniqueWhereParam::deserialize(&val.0, val.1).unwrap() + #relation_model_name_snake::UniqueWhereParam::deserialize(&field, value)? ) }}) } else { @@ -81,10 +89,13 @@ pub fn module((model, sync_type): ModelWithSyncType<'_>) -> Module { } else { quote! { impl #model_name_snake::SetParam { - pub fn deserialize(field: &str, val: ::rmpv::Value) -> Option { - Some(match field { + pub fn deserialize(field: &str, val: ::rmpv::Value) -> Result { + Ok(match field { #(#field_matches)* - _ => return None + _ => return Err(Error::FieldNotFound { + field: field.to_string(), + model: #model_name_snake::NAME.to_string(), + }), }) } } @@ -97,9 +108,12 @@ pub fn module((model, sync_type): ModelWithSyncType<'_>) -> Module { Module::new( model.name(), quote! { - use super::prisma::*; + use super::Error; + use prisma_client_rust::scalar_types::*; + use super::prisma::*; + #sync_id #set_param_impl @@ -172,7 +186,7 @@ fn process_unique_params(model: Walker<'_, ModelId>, model_name_snake: &Ident) - Some(quote!(#model_name_snake::#field_name_snake::NAME => #model_name_snake::#field_name_snake::equals( - ::rmpv::ext::from_value(val).unwrap() + ::rmpv::ext::from_value(val)? ), )) } @@ -185,10 +199,13 @@ fn process_unique_params(model: Walker<'_, ModelId>, model_name_snake: &Ident) - } else { quote! 
{ impl #model_name_snake::UniqueWhereParam { - pub fn deserialize(field: &str, val: ::rmpv::Value) -> Option { - Some(match field { + pub fn deserialize(field: &str, val: ::rmpv::Value) -> Result { + Ok(match field { #(#field_matches)* - _ => return None + _ => return Err(Error::FieldNotFound { + field: field.to_string(), + model: #model_name_snake::NAME.to_string(), + }) }) } } diff --git a/crates/sync-generator/src/sync_data.rs b/crates/sync-generator/src/sync_data.rs index 9e9fdd937..e8ee713e6 100644 --- a/crates/sync-generator/src/sync_data.rs +++ b/crates/sync-generator/src/sync_data.rs @@ -7,7 +7,7 @@ use prisma_models::walkers::{FieldWalker, ScalarFieldWalker}; use crate::{ModelSyncType, ModelWithSyncType}; pub fn enumerate(models: &[ModelWithSyncType<'_>]) -> TokenStream { - let (variants, matches): (Vec<_>, Vec<_>) = models + let (variants, matches) = models .iter() .filter_map(|(model, sync_type)| { let model_name_snake = snake_ident(model.name()); @@ -26,12 +26,12 @@ pub fn enumerate(models: &[ModelWithSyncType<'_>]) -> TokenStream { quote!(#model_name_pascal(#model_name_snake::SyncId, sd_sync::CRDTOperationData)), quote! { #model_name_snake::MODEL_ID => - Self::#model_name_pascal(rmpv::ext::from_value(op.record_id).ok()?, op.data) + Self::#model_name_pascal(rmpv::ext::from_value(op.record_id)?, op.data) }, ) }) }) - .unzip(); + .unzip::<_, _, Vec<_>, Vec<_>>(); let exec_matches = models.iter().filter_map(|(model, sync_type)| { let model_name_pascal = pascal_ident(model.name()); @@ -54,20 +54,22 @@ pub fn enumerate(models: &[ModelWithSyncType<'_>]) -> TokenStream { }) }); + let error_enum = declare_error_enum(); + quote! 
{ pub enum ModelSyncData { #(#variants),* } impl ModelSyncData { - pub fn from_op(op: sd_sync::CRDTOperation) -> Option { - Some(match op.model_id { + pub fn from_op(op: sd_sync::CRDTOperation) -> Result { + Ok(match op.model_id { #(#matches),*, - _ => return None + _ => return Err(Error::InvalidModelId(op.model_id)), }) } - pub async fn exec(self, db: &prisma::PrismaClient) -> prisma_client_rust::Result<()> { + pub async fn exec(self, db: &prisma::PrismaClient) -> Result<(), Error> { match self { #(#exec_matches),* } @@ -75,6 +77,69 @@ pub fn enumerate(models: &[ModelWithSyncType<'_>]) -> TokenStream { Ok(()) } } + + #error_enum + } +} + +fn declare_error_enum() -> TokenStream { + quote! { + #[derive(Debug)] + pub enum Error { + Rmpv(rmpv::ext::Error), + RmpSerialize(rmp_serde::encode::Error), + Prisma(prisma_client_rust::QueryError), + InvalidModelId(sd_sync::ModelId), + FieldNotFound { field: String, model: String }, + MissingRelationData { field: String, model: String }, + RelatedEntryNotFound { field: String, model: String }, + } + + impl From for Error { + fn from(e: rmpv::ext::Error) -> Self { + Self::Rmpv(e) + } + } + + impl From for Error { + fn from(e: rmp_serde::encode::Error) -> Self { + Self::RmpSerialize(e) + } + } + + impl From for Error { + fn from(e: prisma_client_rust::QueryError) -> Self { + Self::Prisma(e) + } + } + + impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Rmpv(e) => write!(f, "Failed to serialize or deserialize rmpv data: {e}"), + Self::RmpSerialize(e) => write!(f, "Failed to serialize rmp data: {e}"), + Self::Prisma(e) => write!(f, "Prisma error: {e}"), + Self::InvalidModelId(id) => write!(f, "Invalid model id: {id}"), + Self::FieldNotFound { field, model } => { + write!(f, "Field '{field}' not found in model '{model}'") + } + Self::MissingRelationData { field, model } => { + write!( + f, + "Field '{field}' missing relation data in model '{model}'" + ) + 
} + Self::RelatedEntryNotFound { field, model } => { + write!( + f, + "Related entry for field '{field}' not found in table '{model}'" + ) + } + } + } + } + + impl std::error::Error for Error {} } } @@ -103,6 +168,7 @@ fn handle_crdt_ops_relation( .and_then(|(_m, sync)| sync.as_ref()) .map(|sync| snake_ident(sync.sync_id()[0].name())) .expect("missing sync id field name for relation"); + let item_model_name_snake = snake_ident(item.related_model().name()); let item_field_name_snake = snake_ident(item.name()); @@ -155,11 +221,15 @@ fn handle_crdt_ops_relation( vec![], ) .exec() - .await - .ok(); + .await?; }, - sd_sync::CRDTOperationData::Update { field, value } => { - let data = vec![prisma::#model_name_snake::SetParam::deserialize(&field, value).unwrap()]; + + sd_sync::CRDTOperationData::Update(data) => { + let data = data.into_iter() + .map(|(field, value)| { + prisma::#model_name_snake::SetParam::deserialize(&field, value) + }) + .collect::, _>>()?; db.#model_name_snake() .upsert( @@ -171,15 +241,14 @@ fn handle_crdt_ops_relation( data, ) .exec() - .await - .ok(); + .await?; }, + sd_sync::CRDTOperationData::Delete => { db.#model_name_snake() .delete(id) .exec() - .await - .ok(); + .await?; }, } } @@ -198,8 +267,10 @@ fn handle_crdt_ops_shared( .expect("missing fields") .next() .expect("empty fields"); + let id_name_snake = snake_ident(scalar_field.name()); let field_name_snake = snake_ident(rel.name()); + let opposite_model_name_snake = snake_ident( rel.opposite_relation_field() .expect("missing opposite relation field") @@ -211,12 +282,16 @@ fn handle_crdt_ops_shared( id.#field_name_snake.pub_id.clone() )); + let pub_id_field = format!("{field_name_snake}::pub_id"); + let rel_fetch = quote! { let rel = db.#opposite_model_name_snake() .find_unique(#relation_equals_condition) .exec() - .await? 
- .unwrap(); + .await?.ok_or_else(|| Error::RelatedEntryNotFound { + field: #pub_id_field.to_string(), + model: prisma::#opposite_model_name_snake::NAME.to_string(), + })?; }; ( @@ -226,6 +301,7 @@ fn handle_crdt_ops_shared( relation_equals_condition, ) } + RefinedFieldWalker::Scalar(s) => { let field_name_snake = snake_ident(s.name()); let thing = quote!(id.#field_name_snake.clone()); @@ -238,24 +314,12 @@ fn handle_crdt_ops_shared( #get_id match data { - sd_sync::CRDTOperationData::Create(data) => { - let data: Vec<_> = data.into_iter().map(|(field, value)| { - prisma::#model_name_snake::SetParam::deserialize(&field, value).unwrap() - }).collect(); - - db.#model_name_snake() - .upsert( - prisma::#model_name_snake::#id_name_snake::equals(#equals_value), - prisma::#model_name_snake::create(#create_id, data.clone()), - data - ) - .exec() - .await?; - }, - sd_sync::CRDTOperationData::Update { field, value } => { - let data = vec![ - prisma::#model_name_snake::SetParam::deserialize(&field, value).unwrap() - ]; + sd_sync::CRDTOperationData::Create(data) | sd_sync::CRDTOperationData::Update(data) => { + let data = data.into_iter() + .map(|(field, value)| { + prisma::#model_name_snake::SetParam::deserialize(&field, value) + }) + .collect::, _>>()?; db.#model_name_snake() .upsert( @@ -266,6 +330,7 @@ fn handle_crdt_ops_shared( .exec() .await?; }, + sd_sync::CRDTOperationData::Delete => { db.#model_name_snake() .delete(prisma::#model_name_snake::#id_name_snake::equals(#equals_value)) @@ -275,8 +340,8 @@ fn handle_crdt_ops_shared( db.crdt_operation() .delete_many(vec![ prisma::crdt_operation::model::equals(#model_id as i32), - prisma::crdt_operation::record_id::equals(rmp_serde::to_vec(&id).unwrap()), - prisma::crdt_operation::kind::equals(sd_sync::OperationKind::Create.to_string()) + prisma::crdt_operation::record_id::equals(rmp_serde::to_vec(&id)?), + prisma::crdt_operation::kind::equals(sd_sync::OperationKind::Create.to_string()), ]) .exec() .await?; diff --git 
a/crates/sync/src/crdt.rs b/crates/sync/src/crdt.rs index 13eda3ffa..3cbdf23d2 100644 --- a/crates/sync/src/crdt.rs +++ b/crates/sync/src/crdt.rs @@ -7,7 +7,7 @@ use uhlc::NTP64; pub enum OperationKind<'a> { Create, - Update(&'a str), + Update(Vec<&'a str>), Delete, } @@ -15,7 +15,7 @@ impl fmt::Display for OperationKind<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { OperationKind::Create => write!(f, "c"), - OperationKind::Update(field) => write!(f, "u:{field}"), + OperationKind::Update(fields) => write!(f, "u:{}:", fields.join(":")), OperationKind::Delete => write!(f, "d"), } } @@ -26,7 +26,7 @@ pub enum CRDTOperationData { #[serde(rename = "c")] Create(BTreeMap), #[serde(rename = "u")] - Update { field: String, value: rmpv::Value }, + Update(BTreeMap), #[serde(rename = "d")] Delete, } @@ -41,7 +41,9 @@ impl CRDTOperationData { pub fn as_kind(&self) -> OperationKind<'_> { match self { Self::Create(_) => OperationKind::Create, - Self::Update { field, .. } => OperationKind::Update(field), + Self::Update(fields_and_values) => { + OperationKind::Update(fields_and_values.keys().map(String::as_str).collect()) + } Self::Delete => OperationKind::Delete, } } diff --git a/crates/sync/src/factory.rs b/crates/sync/src/factory.rs index 9fed9a52f..7c73f8b5f 100644 --- a/crates/sync/src/factory.rs +++ b/crates/sync/src/factory.rs @@ -46,15 +46,16 @@ pub trait OperationFactory { fn shared_update( &self, id: impl SyncId, - field: impl Into, - value: rmpv::Value, + values: impl IntoIterator + 'static, ) -> CRDTOperation { self.new_op( &id, - CRDTOperationData::Update { - field: field.into(), - value, - }, + CRDTOperationData::Update( + values + .into_iter() + .map(|(name, value)| (name.to_string(), value)) + .collect(), + ), ) } @@ -77,20 +78,23 @@ pub trait OperationFactory { ), ) } + fn relation_update( &self, id: impl RelationSyncId, - field: impl Into, - value: rmpv::Value, + values: impl IntoIterator + 'static, ) -> CRDTOperation { self.new_op( 
&id, - CRDTOperationData::Update { - field: field.into(), - value, - }, + CRDTOperationData::Update( + values + .into_iter() + .map(|(name, value)| (name.to_string(), value)) + .collect(), + ), ) } + fn relation_delete( &self, id: impl RelationSyncId, @@ -101,9 +105,14 @@ pub trait OperationFactory { #[macro_export] macro_rules! sync_entry { + (nil, $($prisma_column_module:tt)+) => { + ($($prisma_column_module)+::NAME, ::sd_utils::msgpack!(nil)) + }; + ($value:expr, $($prisma_column_module:tt)+) => { ($($prisma_column_module)+::NAME, ::sd_utils::msgpack!($value)) - } + }; + } #[macro_export] @@ -124,6 +133,28 @@ macro_rules! sync_db_entry { }} } +#[macro_export] +macro_rules! sync_db_nullable_entry { + ($value:expr, $($prisma_column_module:tt)+) => {{ + let value = $value.into(); + ( + $crate::sync_entry!(&value, $($prisma_column_module)+), + $($prisma_column_module)+::set(value) + ) + }} +} + +#[macro_export] +macro_rules! sync_db_not_null_entry { + ($value:expr, $($prisma_column_module:tt)+) => {{ + let value = $value.into(); + ( + $crate::sync_entry!(&value, $($prisma_column_module)+), + $($prisma_column_module)+::set(value) + ) + }} +} + #[macro_export] macro_rules! 
option_sync_db_entry { ($value:expr, $($prisma_column_module:tt)+) => { diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index 45891b344..2960a21c4 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -14,6 +14,8 @@ sd-prisma = { path = "../prisma" } # Workspace dependencies chrono = { workspace = true } prisma-client-rust = { workspace = true } +rmp-serde = { workspace = true } +rmpv = { workspace = true } rspc = { workspace = true, features = ["unstable"] } thiserror = { workspace = true } tracing = { workspace = true } From 3e763295f2adad4b5fa851a9d1bfa5c1c98335bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADtor=20Vasconcellos?= Date: Sat, 19 Oct 2024 08:06:11 -0300 Subject: [PATCH 201/218] Remove obsolete settings cloud page - Fixed some typings - Update vite --- apps/desktop/package.json | 4 +- apps/mobile/package.json | 3 +- .../components/modal/ImportLibraryModal.tsx | 4 +- .../src/navigation/tabs/SettingsStack.tsx | 12 - .../library/CloudSettings/CloudSettings.tsx | 130 --------- .../library/CloudSettings/Instance.tsx | 64 ----- .../library/CloudSettings/Library.tsx | 66 ----- .../settings/library/CloudSettings/Login.tsx | 45 --- .../library/CloudSettings/ThisInstance.tsx | 76 ----- .../screens/settings/library/SyncSettings.tsx | 158 ----------- apps/storybook/package.json | 2 +- apps/web/package.json | 4 +- .../SidebarLayout/LibrariesDropdown.tsx | 22 +- interface/app/$libraryId/debug/actors.tsx | 69 ----- interface/app/$libraryId/debug/cloud.tsx | 262 ------------------ interface/app/$libraryId/debug/index.ts | 6 - interface/app/$libraryId/index.tsx | 5 +- .../$libraryId/settings/client/general.tsx | 4 +- .../app/$libraryId/settings/library/index.tsx | 1 - .../app/$libraryId/settings/library/sync.tsx | 228 --------------- .../settings/node/libraries/JoinDialog.tsx | 99 ------- .../settings/node/libraries/ListItem.tsx | 43 +-- .../settings/node/libraries/index.tsx | 18 +- interface/app/onboarding/index.tsx | 2 - 
interface/app/onboarding/join-library.tsx | 84 ------ interface/app/onboarding/new-library.tsx | 15 - interface/components/Authentication.tsx | 4 +- interface/components/Login.tsx | 2 +- interface/package.json | 3 +- package.json | 4 +- packages/config/package.json | 5 +- .../@react-navigation__drawer@6.6.15.patch | 54 +--- pnpm-lock.yaml | Bin 1193120 -> 1188670 bytes 33 files changed, 33 insertions(+), 1465 deletions(-) delete mode 100644 apps/mobile/src/screens/settings/library/CloudSettings/CloudSettings.tsx delete mode 100644 apps/mobile/src/screens/settings/library/CloudSettings/Instance.tsx delete mode 100644 apps/mobile/src/screens/settings/library/CloudSettings/Library.tsx delete mode 100644 apps/mobile/src/screens/settings/library/CloudSettings/Login.tsx delete mode 100644 apps/mobile/src/screens/settings/library/CloudSettings/ThisInstance.tsx delete mode 100644 apps/mobile/src/screens/settings/library/SyncSettings.tsx delete mode 100644 interface/app/$libraryId/debug/actors.tsx delete mode 100644 interface/app/$libraryId/debug/cloud.tsx delete mode 100644 interface/app/$libraryId/debug/index.ts delete mode 100644 interface/app/$libraryId/settings/library/sync.tsx delete mode 100644 interface/app/$libraryId/settings/node/libraries/JoinDialog.tsx delete mode 100644 interface/app/onboarding/join-library.tsx diff --git a/apps/desktop/package.json b/apps/desktop/package.json index 08058a5de..5fd357b0e 100644 --- a/apps/desktop/package.json +++ b/apps/desktop/package.json @@ -41,7 +41,7 @@ "@types/react-dom": "^18.2.22", "sass": "^1.72.0", "typescript": "^5.6.2", - "vite": "^5.2.0", - "vite-tsconfig-paths": "^4.3.2" + "vite": "^5.4.9", + "vite-tsconfig-paths": "^5.0.1" } } diff --git a/apps/mobile/package.json b/apps/mobile/package.json index 31c28d3f3..741a20ee0 100644 --- a/apps/mobile/package.json +++ b/apps/mobile/package.json @@ -22,11 +22,10 @@ "@gorhom/bottom-sheet": "^4.6.1", "@hookform/resolvers": "^3.1.0", "@oscartbeaumont-sd/rspc-client": 
"github:spacedriveapp/rspc#path:packages/client&bc882f4724", - "@oscartbeaumont-sd/rspc-react": "github:spacedriveapp/rspc#path:packages/react&bc882f4724", "@react-native-async-storage/async-storage": "~1.23.1", "@react-native-masked-view/masked-view": "^0.3.1", "@react-navigation/bottom-tabs": "^6.5.19", - "@react-navigation/drawer": "^6.6.14", + "@react-navigation/drawer": "^6.6.15", "@react-navigation/native": "^6.1.16", "@react-navigation/native-stack": "^6.9.25", "@sd/assets": "workspace:*", diff --git a/apps/mobile/src/components/modal/ImportLibraryModal.tsx b/apps/mobile/src/components/modal/ImportLibraryModal.tsx index dddedcc53..fac41a218 100644 --- a/apps/mobile/src/components/modal/ImportLibraryModal.tsx +++ b/apps/mobile/src/components/modal/ImportLibraryModal.tsx @@ -57,11 +57,11 @@ const ImportModalLibrary = forwardRef((_, ref) => { description="No cloud libraries available to join" /> } - keyExtractor={(item) => item.uuid} + keyExtractor={(item) => item.pub_id} showsVerticalScrollIndicator={false} renderItem={({ item }) => ( diff --git a/apps/mobile/src/navigation/tabs/SettingsStack.tsx b/apps/mobile/src/navigation/tabs/SettingsStack.tsx index 07cdc3688..c07f1111d 100644 --- a/apps/mobile/src/navigation/tabs/SettingsStack.tsx +++ b/apps/mobile/src/navigation/tabs/SettingsStack.tsx @@ -14,12 +14,10 @@ import PrivacySettingsScreen from '~/screens/settings/client/PrivacySettings'; import AboutScreen from '~/screens/settings/info/About'; import DebugScreen from '~/screens/settings/info/Debug'; import SupportScreen from '~/screens/settings/info/Support'; -import CloudSettings from '~/screens/settings/library/CloudSettings/CloudSettings'; import EditLocationSettingsScreen from '~/screens/settings/library/EditLocationSettings'; import LibraryGeneralSettingsScreen from '~/screens/settings/library/LibraryGeneralSettings'; import LocationSettingsScreen from '~/screens/settings/library/LocationSettings'; import NodesSettingsScreen from 
'~/screens/settings/library/NodesSettings'; -import SyncSettingsScreen from '~/screens/settings/library/SyncSettings'; import TagsSettingsScreen from '~/screens/settings/library/TagsSettings'; import SettingsScreen from '~/screens/settings/Settings'; @@ -106,16 +104,6 @@ export default function SettingsStack() { component={TagsSettingsScreen} options={{ header: () =>
}} /> -
}} - /> -
}} - /> {/* { - return ( - - - - ); -}; - -const AuthSensitiveChild = () => { - const authState = useAuthStateSnapshot(); - if (authState.status === 'loggedIn') return ; - if (authState.status === 'notLoggedIn' || authState.status === 'loggingIn') return ; - - return null; -}; - -const Authenticated = () => { - const { library } = useLibraryContext(); - const cloudLibrary = useLibraryQuery(['cloud.library.get'], { retry: false }); - const createLibrary = useLibraryMutation(['cloud.library.create']); - - const cloudInstances = useMemo( - () => - cloudLibrary.data?.instances.filter( - (instance) => instance.uuid !== library.instance_id - ), - [cloudLibrary.data, library.instance_id] - ); - - if (cloudLibrary.isLoading) { - return ( - - - - ); - } - - return ( - - {cloudLibrary.data ? ( - - - - - - - - {cloudInstances?.length} - - - Instances - - - - - } - contentContainerStyle={twStyle( - cloudInstances?.length === 0 && 'flex-row' - )} - showsHorizontalScrollIndicator={false} - ItemSeparatorComponent={() => } - renderItem={({ item }) => } - keyExtractor={(item) => item.id} - numColumns={1} - /> - - - - ) : ( - - - - - Uploading your library to the cloud will allow you to access your - library from other devices using your account & importing. 
- - - - - )} - - ); -}; - -export default CloudSettings; diff --git a/apps/mobile/src/screens/settings/library/CloudSettings/Instance.tsx b/apps/mobile/src/screens/settings/library/CloudSettings/Instance.tsx deleted file mode 100644 index dbac4a60a..000000000 --- a/apps/mobile/src/screens/settings/library/CloudSettings/Instance.tsx +++ /dev/null @@ -1,64 +0,0 @@ -import { Text, View } from 'react-native'; -import { CloudInstance, HardwareModel } from '@sd/client'; -import { Icon } from '~/components/icons/Icon'; -import { hardwareModelToIcon } from '~/components/overview/Devices'; -import { tw } from '~/lib/tailwind'; - -import { InfoBox } from './CloudSettings'; - -interface Props { - data: CloudInstance; -} - -const Instance = ({ data }: Props) => { - return ( - - - - - - - {data.metadata.name} - - - - Id: - - {data.id} - - - - - - - - UUID: - - {data.uuid} - - - - - - - - Public key: - - {data.identity} - - - - - - ); -}; - -export default Instance; diff --git a/apps/mobile/src/screens/settings/library/CloudSettings/Library.tsx b/apps/mobile/src/screens/settings/library/CloudSettings/Library.tsx deleted file mode 100644 index 7e385bd31..000000000 --- a/apps/mobile/src/screens/settings/library/CloudSettings/Library.tsx +++ /dev/null @@ -1,66 +0,0 @@ -import { CheckCircle, XCircle } from 'phosphor-react-native'; -import { useMemo } from 'react'; -import { Text, View } from 'react-native'; -import { CloudLibrary, useLibraryContext, useLibraryMutation } from '@sd/client'; -import Card from '~/components/layout/Card'; -import { Button } from '~/components/primitive/Button'; -import { Divider } from '~/components/primitive/Divider'; -import { SettingsTitle } from '~/components/settings/SettingsContainer'; -import { tw } from '~/lib/tailwind'; -import { logout, useAuthStateSnapshot } from '~/stores/auth'; - -import { InfoBox } from './CloudSettings'; - -interface LibraryProps { - cloudLibrary?: CloudLibrary; -} - -const Library = ({ cloudLibrary }: LibraryProps) => { - 
const authState = useAuthStateSnapshot(); - const { library } = useLibraryContext(); - const syncLibrary = useLibraryMutation(['cloud.library.sync']); - const thisInstance = useMemo( - () => cloudLibrary?.instances.find((instance) => instance.uuid === library.instance_id), - [cloudLibrary, library.instance_id] - ); - - return ( - - - Library - {authState.status === 'loggedIn' && ( - - )} - - - Name - - {cloudLibrary?.name} - - - - ); -}; - -export default Library; diff --git a/apps/mobile/src/screens/settings/library/CloudSettings/Login.tsx b/apps/mobile/src/screens/settings/library/CloudSettings/Login.tsx deleted file mode 100644 index 88738c329..000000000 --- a/apps/mobile/src/screens/settings/library/CloudSettings/Login.tsx +++ /dev/null @@ -1,45 +0,0 @@ -import { Text, View } from 'react-native'; -import { Icon } from '~/components/icons/Icon'; -import Card from '~/components/layout/Card'; -import { Button } from '~/components/primitive/Button'; -import { tw } from '~/lib/tailwind'; -import { cancel, login, useAuthStateSnapshot } from '~/stores/auth'; - -const Login = () => { - const authState = useAuthStateSnapshot(); - const buttonText = { - notLoggedIn: 'Login', - loggingIn: 'Cancel' - }; - return ( - - - - - - Cloud Sync will upload your library to the cloud so you can access your - library from other devices by importing it from the cloud. 
- - - {(authState.status === 'notLoggedIn' || authState.status === 'loggingIn') && ( - - )} - - - ); -}; - -export default Login; diff --git a/apps/mobile/src/screens/settings/library/CloudSettings/ThisInstance.tsx b/apps/mobile/src/screens/settings/library/CloudSettings/ThisInstance.tsx deleted file mode 100644 index 041d6591c..000000000 --- a/apps/mobile/src/screens/settings/library/CloudSettings/ThisInstance.tsx +++ /dev/null @@ -1,76 +0,0 @@ -import { useMemo } from 'react'; -import { Text, View } from 'react-native'; -import { CloudLibrary, HardwareModel, useLibraryContext } from '@sd/client'; -import { Icon } from '~/components/icons/Icon'; -import Card from '~/components/layout/Card'; -import { hardwareModelToIcon } from '~/components/overview/Devices'; -import { Divider } from '~/components/primitive/Divider'; -import { tw } from '~/lib/tailwind'; - -import { InfoBox } from './CloudSettings'; - -interface ThisInstanceProps { - cloudLibrary?: CloudLibrary; -} - -const ThisInstance = ({ cloudLibrary }: ThisInstanceProps) => { - const { library } = useLibraryContext(); - const thisInstance = useMemo( - () => cloudLibrary?.instances.find((instance) => instance.uuid === library.instance_id), - [cloudLibrary, library.instance_id] - ); - - if (!thisInstance) return null; - - return ( - - - This Instance - - - - - - {thisInstance.metadata.name} - - - - - - Id: - {thisInstance.id} - - - - - - - UUID: - - {thisInstance.uuid} - - - - - - - - Publc Key: - - {thisInstance.identity} - - - - - - ); -}; - -export default ThisInstance; diff --git a/apps/mobile/src/screens/settings/library/SyncSettings.tsx b/apps/mobile/src/screens/settings/library/SyncSettings.tsx deleted file mode 100644 index d6cb460de..000000000 --- a/apps/mobile/src/screens/settings/library/SyncSettings.tsx +++ /dev/null @@ -1,158 +0,0 @@ -import { inferSubscriptionResult } from '@oscartbeaumont-sd/rspc-client'; -import { useIsFocused } from '@react-navigation/native'; -import { MotiView } from 'moti'; 
-import { Circle } from 'phosphor-react-native'; -import React, { useEffect, useRef, useState } from 'react'; -import { Text, View } from 'react-native'; -import { - Procedures, - useLibraryMutation, - useLibraryQuery, - useLibrarySubscription -} from '@sd/client'; -import { Icon } from '~/components/icons/Icon'; -import Card from '~/components/layout/Card'; -import { ModalRef } from '~/components/layout/Modal'; -import ScreenContainer from '~/components/layout/ScreenContainer'; -import CloudModal from '~/components/modal/cloud/CloudModal'; -import { Button } from '~/components/primitive/Button'; -import { tw } from '~/lib/tailwind'; -import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack'; - -const SyncSettingsScreen = ({ navigation }: SettingsStackScreenProps<'SyncSettings'>) => { - const syncEnabled = useLibraryQuery(['sync.enabled']); - const [data, setData] = useState>({}); - const modalRef = useRef(null); - - const [startBackfill, setStart] = useState(false); - const pageFocused = useIsFocused(); - const [showCloudModal, setShowCloudModal] = useState(false); - - useLibrarySubscription(['library.actors'], { onData: setData }); - - useEffect(() => { - if (startBackfill === true) { - navigation.navigate('BackfillWaitingStack', { - screen: 'BackfillWaiting' - }); - setTimeout(() => setShowCloudModal(true), 1000); - } - }, [startBackfill, navigation]); - - useEffect(() => { - if (pageFocused && showCloudModal) modalRef.current?.present(); - return () => { - if (showCloudModal) setShowCloudModal(false); - }; - }, [pageFocused, showCloudModal]); - - return ( - - {syncEnabled.data === false ? ( - - - - - - With Sync, you can share your library with other devices using P2P - technology. - - - Additionally, allowing you to enable Cloud services to upload your - library to the cloud, making it accessible on any of your devices. - - - - - - ) : ( - - {Object.keys(data).map((key) => { - return ( - - - - {key} - - {data[key] ? 
: } - - ); - })} - - )} - - - ); -}; - -export default SyncSettingsScreen; - -function OnlineIndicator({ online }: { online: boolean }) { - const size = 6; - return ( - - {online ? ( - - - - - ) : ( - - )} - - ); -} - -function StartButton({ name }: { name: string }) { - const startActor = useLibraryMutation(['library.startActor']); - return ( - - ); -} - -function StopButton({ name }: { name: string }) { - const stopActor = useLibraryMutation(['library.stopActor']); - return ( - - ); -} diff --git a/apps/storybook/package.json b/apps/storybook/package.json index 0f5786227..5dfe9f15f 100644 --- a/apps/storybook/package.json +++ b/apps/storybook/package.json @@ -30,6 +30,6 @@ "storybook": "^8.0.1", "tailwindcss": "^3.4.10", "typescript": "^5.6.2", - "vite": "^5.2.0" + "vite": "^5.4.9" } } diff --git a/apps/web/package.json b/apps/web/package.json index b487de1d8..8fa032697 100644 --- a/apps/web/package.json +++ b/apps/web/package.json @@ -41,7 +41,7 @@ "rollup-plugin-visualizer": "^5.12.0", "start-server-and-test": "^2.0.3", "typescript": "^5.6.2", - "vite": "^5.2.0", - "vite-tsconfig-paths": "^4.3.2" + "vite": "^5.4.9", + "vite-tsconfig-paths": "^5.0.1" } } diff --git a/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/LibrariesDropdown.tsx b/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/LibrariesDropdown.tsx index 0511cf33b..44515a41c 100644 --- a/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/LibrariesDropdown.tsx +++ b/interface/app/$libraryId/Layout/Sidebar/SidebarLayout/LibrariesDropdown.tsx @@ -1,9 +1,7 @@ -import { CloudArrowDown, Gear, Lock, Plus } from '@phosphor-icons/react'; +import { Gear, Plus } from '@phosphor-icons/react'; import clsx from 'clsx'; import { useClientContext } from '@sd/client'; import { dialogManager, Dropdown, DropdownMenu } from '@sd/ui'; -import JoinDialog from '~/app/$libraryId/settings/node/libraries/JoinDialog'; -import RequestAddDialog from '~/components/RequestAddDialog'; import { useLocale } from '~/hooks'; 
import CreateDialog from '../../../settings/node/libraries/CreateDialog'; @@ -64,17 +62,6 @@ export default () => { onClick={() => dialogManager.create((dp) => )} className="font-medium" /> - - dialogManager.create((dp) => ( - - )) - } - className="font-medium" - /> { to="settings/library/general" className="font-medium" /> - {/* alert('TODO: Not implemented yet!')} - className="font-medium" - /> */} ); }; diff --git a/interface/app/$libraryId/debug/actors.tsx b/interface/app/$libraryId/debug/actors.tsx deleted file mode 100644 index a744583a6..000000000 --- a/interface/app/$libraryId/debug/actors.tsx +++ /dev/null @@ -1,69 +0,0 @@ -import { inferSubscriptionResult } from '@oscartbeaumont-sd/rspc-client'; -import { useMemo, useState } from 'react'; -import { Procedures, useLibraryMutation, useLibrarySubscription } from '@sd/client'; -import { Button } from '@sd/ui'; -import { useRouteTitle } from '~/hooks/useRouteTitle'; - -// @million-ignore -export const Component = () => { - useRouteTitle('Actors'); - - const [data, setData] = useState>({}); - - useLibrarySubscription(['library.actors'], { onData: setData }); - - const sortedData = useMemo(() => { - const sorted = Object.entries(data).sort(([a], [b]) => a.localeCompare(b)); - return sorted; - }, [data]); - - return ( -
- - - - - - {sortedData.map(([name, running]) => ( - - - - - - ))} -
NameRunning
{name} - {running ? 'Running' : 'Not Running'} - - {running ? : } -
-
- ); -}; - -function StartButton({ name }: { name: string }) { - const startActor = useLibraryMutation(['library.startActor']); - - return ( - - ); -} - -function StopButton({ name }: { name: string }) { - const stopActor = useLibraryMutation(['library.stopActor']); - - return ( - - ); -} diff --git a/interface/app/$libraryId/debug/cloud.tsx b/interface/app/$libraryId/debug/cloud.tsx deleted file mode 100644 index dfc60cbea..000000000 --- a/interface/app/$libraryId/debug/cloud.tsx +++ /dev/null @@ -1,262 +0,0 @@ -import { CheckCircle, XCircle } from '@phosphor-icons/react'; -import { Suspense, useMemo } from 'react'; -import { - auth, - HardwareModel, - useBridgeQuery, - useLibraryContext, - useLibraryMutation, - useLibraryQuery -} from '@sd/client'; -import { Button, Card, Loader, tw } from '@sd/ui'; -import { Icon } from '~/components'; -import { AuthRequiredOverlay } from '~/components/AuthRequiredOverlay'; -import { LoginButton } from '~/components/LoginButton'; -import { useLocale, useRouteTitle } from '~/hooks'; -import { hardwareModelToIcon } from '~/util/hardware'; - -const DataBox = tw.div`max-w-[300px] rounded-md border border-app-line/50 bg-app-lightBox/20 p-2`; -const Count = tw.div`min-w-[20px] flex h-[20px] px-1 items-center justify-center rounded-full border border-app-button/40 text-[9px]`; - -export const Component = () => { - useRouteTitle('Cloud'); - - // const authState = auth.useStateSnapshot(); - - // const authSensitiveChild = () => { - // if (authState.status === 'loggedIn') return ; - // if (authState.status === 'notLoggedIn' || authState.status === 'loggingIn') - // return ( - //
- // - //
- // - //

- // To access cloud related features, please login - //

- //
- // - //
- //
- // ); - - // return null; - // }; - - // return
{Authenticated()}
; - return
; -}; - -// million-ignore -// function Authenticated() { -// const { library } = useLibraryContext(); -// const cloudLibrary: any = useLibraryQuery(['cloud.library.get'], { -// suspense: true, -// retry: false -// }); -// const getCloudDevice = useBridgeQuery(['cloud.devices.get'], { -// suspense: true, -// retry: false -// }); -// const cloudDevicesList = useBridgeQuery(['cloud.devices.list'], { -// suspense: true, -// retry: false -// }); -// console.log('[DEBUG] fetch cloud device:', getCloudDevice.data); -// console.log('[DEBUG] cloudDevicesList', cloudDevicesList.data); -// const createLibrary = useLibraryMutation(['cloud.library.create']); -// const { t } = useLocale(); - -// const thisInstance = useMemo(() => { -// if (!cloudLibrary.data) return undefined; -// return cloudLibrary.data.instances.find( -// (instance: any) => instance.uuid === library.instance_id -// ); -// }, [cloudLibrary.data, library.instance_id]); - -// return ( -// -// -//
-// } -// > -// {cloudLibrary.data ? ( -//
-// -// {thisInstance && } -// -//
-// ) : ( -//
-// -// -//
-// -//

-// {t('cloud_connect_description')} -//

-//
-// -//
-//
-// )} -// -// ); -// } - -// // million-ignore -// const Instances = ({ instances }: { instances: any[] }) => { -// const { library } = useLibraryContext(); -// const filteredInstances = instances.filter((instance) => instance.uuid !== library.instance_id); -// return ( -//
-//
-//

Instances

-// {filteredInstances.length} -//
-//
-// {filteredInstances.map((instance) => ( -// -//
-// -//

-// {instance.metadata.name} -//

-//
-//
-// -//

-// Id:{' '} -// {instance.id} -//

-//
-// -//

-// UUID:{' '} -// -// {instance.uuid} -// -//

-//
-// -//

-// Public Key:{' '} -// -// {instance.identity} -// -//

-//
-//
-//
-// ))} -//
-//
-// ); -// }; - -// interface LibraryProps { -// cloudLibrary: any; -// thisInstance: any | undefined; -// } - -// // million-ignore -// const Library = ({ thisInstance, cloudLibrary }: LibraryProps) => { -// const syncLibrary = useLibraryMutation(['cloud.library.sync']); -// return ( -//
-//

Library

-// -//

-// Name: {cloudLibrary.name} -//

-// -//
-//
-// ); -// }; - -// interface ThisInstanceProps { -// instance: any; -// } - -// // million-ignore -// const ThisInstance = ({ instance }: ThisInstanceProps) => { -// return ( -//
-//

This Instance

-// -//
-// -//

-// {instance.metadata.name} -//

-//
-//
-// -//

-// Id: {instance.id} -//

-//
-// -//

-// UUID: {instance.uuid} -//

-//
-// -//

-// Public Key:{' '} -// {instance.identity} -//

-//
-//
-//
-//
-// ); -// }; diff --git a/interface/app/$libraryId/debug/index.ts b/interface/app/$libraryId/debug/index.ts deleted file mode 100644 index 4cf60b56c..000000000 --- a/interface/app/$libraryId/debug/index.ts +++ /dev/null @@ -1,6 +0,0 @@ -import { RouteObject } from 'react-router'; - -export const debugRoutes = [ - { path: 'cloud', lazy: () => import('./cloud') }, - { path: 'actors', lazy: () => import('./actors') } -] satisfies RouteObject[]; diff --git a/interface/app/$libraryId/index.tsx b/interface/app/$libraryId/index.tsx index 6be3b0dd3..10a95e97a 100644 --- a/interface/app/$libraryId/index.tsx +++ b/interface/app/$libraryId/index.tsx @@ -3,15 +3,13 @@ import { type RouteObject } from 'react-router-dom'; import { guessOperatingSystem } from '~/hooks'; import { Platform } from '~/util/Platform'; -import { debugRoutes } from './debug'; import settingsRoutes from './settings'; // Routes that should be contained within the standard Page layout const pageRoutes: RouteObject = { lazy: () => import('./PageLayout'), children: [ - { path: 'overview', lazy: () => import('./overview') }, - { path: 'debug', children: debugRoutes } + { path: 'overview', lazy: () => import('./overview') } ] }; @@ -38,7 +36,6 @@ function loadTopBarRoutes() { ...explorerRoutes, pageRoutes, { path: 'settings', lazy: () => import('./settings/Layout'), children: settingsRoutes }, - { path: 'debug', children: debugRoutes } ]; } else return [...explorerRoutes, pageRoutes]; } diff --git a/interface/app/$libraryId/settings/client/general.tsx b/interface/app/$libraryId/settings/client/general.tsx index 4b42f6753..bba74c7f6 100644 --- a/interface/app/$libraryId/settings/client/general.tsx +++ b/interface/app/$libraryId/settings/client/general.tsx @@ -50,7 +50,7 @@ export const Component = () => { schema: z .object({ name: z.string().min(1).max(250).optional(), - image_labeler_version: z.string().optional(), + // image_labeler_version: z.string().optional(), background_processing_percentage: z.coerce 
.number({ invalid_type_error: 'Must use numbers from 0 to 100' @@ -63,7 +63,7 @@ export const Component = () => { reValidateMode: 'onChange', defaultValues: { name: node.data?.name, - image_labeler_version: node.data?.image_labeler_version ?? undefined + // image_labeler_version: node.data?.image_labeler_version ?? undefined // background_processing_percentage: // node.data?.preferences.thumbnailer.background_processing_percentage || 50 } diff --git a/interface/app/$libraryId/settings/library/index.tsx b/interface/app/$libraryId/settings/library/index.tsx index fd05bbd68..9be623d80 100644 --- a/interface/app/$libraryId/settings/library/index.tsx +++ b/interface/app/$libraryId/settings/library/index.tsx @@ -8,7 +8,6 @@ export default [ // { path: 'keys', lazy: () => import('./keys') }, { path: 'security', lazy: () => import('./security') }, { path: 'sharing', lazy: () => import('./sharing') }, - { path: 'sync', lazy: () => import('./sync') }, { path: 'general', lazy: () => import('./general') }, { path: 'tags', lazy: () => import('./tags') }, // { path: 'saved-searches', lazy: () => import('./saved-searches') }, diff --git a/interface/app/$libraryId/settings/library/sync.tsx b/interface/app/$libraryId/settings/library/sync.tsx deleted file mode 100644 index fe6124525..000000000 --- a/interface/app/$libraryId/settings/library/sync.tsx +++ /dev/null @@ -1,228 +0,0 @@ -import { inferSubscriptionResult } from '@oscartbeaumont-sd/rspc-client'; -import clsx from 'clsx'; -import { useEffect, useState } from 'react'; -import { - Procedures, - useFeatureFlag, - useLibraryMutation, - useLibraryQuery, - useLibrarySubscription, - useZodForm -} from '@sd/client'; -import { Button, Dialog, dialogManager, useDialog, UseDialogProps, z } from '@sd/ui'; -import { useLocale } from '~/hooks'; - -import { Heading } from '../Layout'; -import Setting from '../Setting'; - -const ACTORS = { - Ingest: 'Sync Ingest', - CloudSend: 'Cloud Sync Sender', - CloudReceive: 'Cloud Sync Receiver', - 
CloudIngest: 'Cloud Sync Ingest' -}; - -export const Component = () => { - const { t } = useLocale(); - - const syncEnabled = useLibraryQuery(['sync.enabled']); - - const backfillSync = useLibraryMutation(['sync.backfill'], { - onSuccess: async () => { - await syncEnabled.refetch(); - } - }); - - const [data, setData] = useState>({}); - - useLibrarySubscription(['library.actors'], { onData: setData }); - - const cloudSync = useFeatureFlag('cloudSync'); - - return ( - <> - - {syncEnabled.data === false ? ( - -
- -
-
- ) : ( - <> - - {t('ingester')} - - - } - description={t('injester_description')} - > -
- {data[ACTORS.Ingest] ? ( - - ) : ( - - )} -
-
- - {cloudSync && } - - )} - - ); -}; - -function SyncBackfillDialog(props: UseDialogProps & { onEnabled: () => void }) { - const form = useZodForm({ schema: z.object({}) }); - const dialog = useDialog(props); - const { t } = useLocale(); - - const enableSync = useLibraryMutation(['sync.backfill'], {}); - - // dialog is in charge of enabling sync - useEffect(() => { - form.handleSubmit( - async () => { - await enableSync.mutateAsync(null).then(() => (dialog.state.open = false)); - await props.onEnabled(); - }, - () => {} - )(); - // eslint-disable-next-line react-hooks/exhaustive-deps - }, []); - - return ( - - ); -} - -function CloudSync({ data }: { data: inferSubscriptionResult }) { - const { t } = useLocale(); - return ( - <> -
-

{t('cloud_sync')}

-

{t('cloud_sync_description')}

-
- - {t('sender')} - - } - description={t('sender_description')} - > -
- {data[ACTORS.CloudSend] ? ( - - ) : ( - - )} -
-
- - {t('receiver')} - - - } - description={t('receiver_description')} - > -
- {data[ACTORS.CloudReceive] ? ( - - ) : ( - - )} -
-
- - {t('ingester')} - - - } - description={t('ingester_description')} - > -
- {data[ACTORS.CloudIngest] ? ( - - ) : ( - - )} -
-
- - ); -} - -function StartButton({ name }: { name: string }) { - const startActor = useLibraryMutation(['library.startActor']); - const { t } = useLocale(); - - return ( - - ); -} - -function StopButton({ name }: { name: string }) { - const stopActor = useLibraryMutation(['library.stopActor']); - const { t } = useLocale(); - - return ( - - ); -} - -function OnlineIndicator({ online }: { online: boolean }) { - return ( -
- ); -} diff --git a/interface/app/$libraryId/settings/node/libraries/JoinDialog.tsx b/interface/app/$libraryId/settings/node/libraries/JoinDialog.tsx deleted file mode 100644 index e57f5340c..000000000 --- a/interface/app/$libraryId/settings/node/libraries/JoinDialog.tsx +++ /dev/null @@ -1,99 +0,0 @@ -import { useQueryClient } from '@tanstack/react-query'; -import { useNavigate } from 'react-router'; -import { LibraryConfigWrapped, useBridgeMutation, useBridgeQuery, useZodForm } from '@sd/client'; -import { Dialog, Loader, Select, SelectOption, toast, useDialog, UseDialogProps, z } from '@sd/ui'; -import { useLocale } from '~/hooks'; -import { usePlatform } from '~/util/Platform'; - -const schema = z.object({ - libraryId: z.string().refine((value) => value !== 'select_library', { - message: 'Please select a library' - }) -}); - -export default (props: UseDialogProps & { librariesCtx: LibraryConfigWrapped[] | undefined }) => { - const cloudLibraries = useBridgeQuery(['cloud.library.list']); - const joinLibrary = useBridgeMutation(['cloud.library.join']); - - const { t } = useLocale(); - const navigate = useNavigate(); - const platform = usePlatform(); - const queryClient = useQueryClient(); - - const form = useZodForm({ schema, defaultValues: { libraryId: 'select_library' } }); - - // const queryClient = useQueryClient(); - // const submitPlausibleEvent = usePlausibleEvent(); - // const platform = usePlatform(); - - const onSubmit = form.handleSubmit(async (data) => { - try { - const library = await joinLibrary.mutateAsync(data.libraryId); - - queryClient.setQueryData(['library.list'], (libraries: any) => { - // The invalidation system beat us to it - if ((libraries || []).find((l: any) => l.uuid === library.uuid)) return libraries; - - return [...(libraries || []), library]; - }); - - if (platform.refreshMenuBar) platform.refreshMenuBar(); - - navigate(`/${library.uuid}`, { replace: true }); - } catch (e: any) { - console.error(e); - toast.error(e); - } - }); - - 
return ( - -
- {cloudLibraries.isLoading && ( -
- - {t('loading')}... -
- )} - {cloudLibraries.data && ( - - )} -
-
- ); -}; diff --git a/interface/app/$libraryId/settings/node/libraries/ListItem.tsx b/interface/app/$libraryId/settings/node/libraries/ListItem.tsx index 78bc8beec..0d1bc8550 100644 --- a/interface/app/$libraryId/settings/node/libraries/ListItem.tsx +++ b/interface/app/$libraryId/settings/node/libraries/ListItem.tsx @@ -1,13 +1,12 @@ import { CaretRight, Pencil, Trash } from '@phosphor-icons/react'; import { AnimatePresence, motion } from 'framer-motion'; -import { Key, useState } from 'react'; -import { LibraryConfigWrapped, useBridgeQuery } from '@sd/client'; +import { useState } from 'react'; +import { LibraryConfigWrapped } from '@sd/client'; import { Button, ButtonLink, Card, dialogManager, Tooltip } from '@sd/ui'; import { Icon } from '~/components'; import { useAccessToken, useLocale } from '~/hooks'; import DeleteDialog from './DeleteDialog'; -import DeviceItem from './DeviceItem'; interface Props { library: LibraryConfigWrapped; @@ -19,7 +18,6 @@ export default (props: Props) => { const [isExpanded, setIsExpanded] = useState(false); const accessToken = useAccessToken(); - const cloudDevicesList = useBridgeQuery(['cloud.devices.list', { access_token: accessToken }]); const toggleExpansion = () => { setIsExpanded((prev) => !prev); }; @@ -86,43 +84,6 @@ export default (props: Props) => { className="relative mt-2 flex origin-top flex-col gap-1 pl-8" >
- - {cloudDevicesList.data?.map( - ( - device: { - pub_id: Key | null | undefined; - name: string; - os: string; - storage_size: bigint; - used_storage: bigint; - created_at: string; - device_model: string; - }, - index: number - ) => ( -
- -
-
- -
-
-
- ) - )} )} diff --git a/interface/app/$libraryId/settings/node/libraries/index.tsx b/interface/app/$libraryId/settings/node/libraries/index.tsx index dc8ff541b..db7d6f5d8 100644 --- a/interface/app/$libraryId/settings/node/libraries/index.tsx +++ b/interface/app/$libraryId/settings/node/libraries/index.tsx @@ -1,18 +1,15 @@ -import { useBridgeQuery, useClientContext, useFeatureFlag, useLibraryContext } from '@sd/client'; +import { useBridgeQuery, useClientContext, useLibraryContext } from '@sd/client'; import { Button, dialogManager } from '@sd/ui'; import { useLocale } from '~/hooks'; import { Heading } from '../../Layout'; import CreateDialog from './CreateDialog'; -import JoinDialog from './JoinDialog'; import ListItem from './ListItem'; export const Component = () => { const librariesQuery = useBridgeQuery(['library.list']); const libraries = librariesQuery.data; - const cloudEnabled = useFeatureFlag('cloudSync'); - const { library } = useLibraryContext(); const { libraries: librariesCtx } = useClientContext(); const librariesCtxData = librariesCtx.data; @@ -35,19 +32,6 @@ export const Component = () => { > {t('add_library')} - {cloudEnabled && ( - - )}
} /> diff --git a/interface/app/onboarding/index.tsx b/interface/app/onboarding/index.tsx index f57a4e379..22ba946c3 100644 --- a/interface/app/onboarding/index.tsx +++ b/interface/app/onboarding/index.tsx @@ -4,7 +4,6 @@ import { onboardingStore } from '@sd/client'; import { useOnboardingContext } from './context'; import CreatingLibrary from './creating-library'; import { FullDisk } from './full-disk'; -import { JoinLibrary } from './join-library'; import Locations from './locations'; import NewLibrary from './new-library'; import PreRelease from './prerelease'; @@ -38,7 +37,6 @@ export default [ // path: 'login' // }, { Component: NewLibrary, path: 'new-library' }, - { Component: JoinLibrary, path: 'join-library' }, { Component: FullDisk, path: 'full-disk' }, { Component: Locations, path: 'locations' }, { Component: Privacy, path: 'privacy' }, diff --git a/interface/app/onboarding/join-library.tsx b/interface/app/onboarding/join-library.tsx deleted file mode 100644 index d4bbed977..000000000 --- a/interface/app/onboarding/join-library.tsx +++ /dev/null @@ -1,84 +0,0 @@ -import { useQueryClient } from '@tanstack/react-query'; -import { useNavigate } from 'react-router'; -import { - resetOnboardingStore, - useBridgeMutation, - useBridgeQuery, - useLibraryMutation -} from '@sd/client'; -import { Button } from '@sd/ui'; -import { Icon } from '~/components'; -import { AuthRequiredOverlay } from '~/components/AuthRequiredOverlay'; -import { useLocale, useRouteTitle } from '~/hooks'; -import { usePlatform } from '~/util/Platform'; - -import { OnboardingContainer, OnboardingDescription, OnboardingTitle } from './components'; - -export function JoinLibrary() { - const { t } = useLocale(); - - useRouteTitle('Join Library'); - - return ( - - - {t('join_library')} - {t('join_library_description')} - -
- Cloud Libraries -
    - - -
-
-
- ); -} - -function CloudLibraries() { - const { t } = useLocale(); - - const cloudLibraries = useBridgeQuery(['cloud.library.list']); - const joinLibrary = useBridgeMutation(['cloud.library.join']); - - const navigate = useNavigate(); - const queryClient = useQueryClient(); - const platform = usePlatform(); - - if (cloudLibraries.isLoading) return {t('loading')}...; - - return ( - <> - {cloudLibraries.data?.map((cloudLibrary) => ( -
  • - {cloudLibrary.name} - -
  • - ))} - - ); -} diff --git a/interface/app/onboarding/new-library.tsx b/interface/app/onboarding/new-library.tsx index 6242a2d1e..36d6ea453 100644 --- a/interface/app/onboarding/new-library.tsx +++ b/interface/app/onboarding/new-library.tsx @@ -1,6 +1,5 @@ import { useState } from 'react'; import { useNavigate } from 'react-router'; -import { useFeatureFlag } from '@sd/client'; import { Button, Form, InputField } from '@sd/ui'; import { Icon } from '~/components'; import { useLocale, useOperatingSystem } from '~/hooks'; @@ -21,8 +20,6 @@ export default function OnboardingNewLibrary() { // TODO }; - const cloudFeatureFlag = useFeatureFlag('cloudSync'); - return ( */}
    - {cloudFeatureFlag && ( - <> - {t('or')} - - - )} )} diff --git a/interface/components/Authentication.tsx b/interface/components/Authentication.tsx index 4d530bb32..6c7667b80 100644 --- a/interface/components/Authentication.tsx +++ b/interface/components/Authentication.tsx @@ -1,4 +1,4 @@ -import { AlphaRSPCError } from '@oscartbeaumont-sd/rspc-client/v2'; +import { AlphaRSPCError } from '@oscartbeaumont-sd/rspc-client/src/v2'; import { GoogleLogo, Icon } from '@phosphor-icons/react'; import { Apple, Github } from '@sd/assets/svgs/brands'; import { UseMutationResult } from '@tanstack/react-query'; @@ -7,7 +7,7 @@ import clsx from 'clsx'; import { motion } from 'framer-motion'; import { Dispatch, SetStateAction, useState } from 'react'; import { getAuthorisationURLWithQueryParamsAndSetState } from 'supertokens-web-js/recipe/thirdparty'; -import { Card, Divider, toast } from '@sd/ui'; +import { Card, toast } from '@sd/ui'; import { Icon as Logo } from '~/components'; import { useIsDark } from '~/hooks'; diff --git a/interface/components/Login.tsx b/interface/components/Login.tsx index 463029c57..cd3c0b3f9 100644 --- a/interface/components/Login.tsx +++ b/interface/components/Login.tsx @@ -1,4 +1,4 @@ -import { AlphaRSPCError } from '@oscartbeaumont-sd/rspc-client/v2'; +import { AlphaRSPCError } from '@oscartbeaumont-sd/rspc-client/src/v2'; import { ArrowLeft } from '@phosphor-icons/react'; import { UseMutationResult } from '@tanstack/react-query'; import clsx from 'clsx'; diff --git a/interface/package.json b/interface/package.json index dda41dbc4..40bb3d7dc 100644 --- a/interface/package.json +++ b/interface/package.json @@ -13,6 +13,7 @@ "@dnd-kit/utilities": "^3.2.2", "@headlessui/react": "^1.7.17", "@icons-pack/react-simple-icons": "^9.1.0", + "@oscartbeaumont-sd/rspc-client": "github:spacedriveapp/rspc#path:packages/client", "@phosphor-icons/react": "^2.0.13", "@radix-ui/react-dialog": "^1.0.5", "@radix-ui/react-dropdown-menu": "^2.0.6", @@ -82,7 +83,7 @@ 
"tailwindcss": "^3.4.10", "type-fest": "^4.13.0", "typescript": "^5.6.2", - "vite": "^5.2.0", + "vite": "^5.4.9", "vite-plugin-svgr": "^3.3.0" } } diff --git a/package.json b/package.json index 80924fac7..ea61bc8d9 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "turbo": "^1.12.5", "turbo-ignore": "^1.12.5", "typescript": "^5.6.2", - "vite": "^5.2.0" + "vite": "^5.4.9" }, "engines": { "pnpm": ">=9.0.0", @@ -73,5 +73,5 @@ "eslintConfig": { "root": true }, - "packageManager": "pnpm@9.9.0" + "packageManager": "pnpm@9.12.2" } diff --git a/packages/config/package.json b/packages/config/package.json index 90ee1a5fc..fab5fce96 100644 --- a/packages/config/package.json +++ b/packages/config/package.json @@ -11,6 +11,7 @@ "lint": "eslint . --cache" }, "devDependencies": { + "@babel/preset-typescript": "^7.24.0", "@typescript-eslint/eslint-plugin": "^8.8.0", "@typescript-eslint/parser": "^8.8.0", "@vitejs/plugin-react-swc": "^3.6.0", @@ -26,8 +27,8 @@ "eslint-utils": "^3.0.0", "regexpp": "^3.2.0", "vite-plugin-html": "^3.2.2", - "vite-plugin-i18next-loader": "^2.0.12", - "vite-plugin-inspect": "^0.8.3", + "vite-plugin-i18next-loader": "^2.0.14", + "vite-plugin-inspect": "^0.8.7", "vite-plugin-solid": "^2.10.2", "vite-plugin-svgr": "^3.3.0" }, diff --git a/patches/@react-navigation__drawer@6.6.15.patch b/patches/@react-navigation__drawer@6.6.15.patch index fa4c115a9..067d678a9 100644 --- a/patches/@react-navigation__drawer@6.6.15.patch +++ b/patches/@react-navigation__drawer@6.6.15.patch @@ -1,59 +1,21 @@ diff --git a/src/views/modern/Drawer.tsx b/src/views/modern/Drawer.tsx -index 9909e9698e51379de6469eb2053a1432636d0c7d..220fa07f6784c5da13e6949e9c4893e015a5d1f8 100644 +index 9909e96..a7dd9b7 100644 --- a/src/views/modern/Drawer.tsx +++ b/src/views/modern/Drawer.tsx -@@ -1,26 +1,27 @@ +@@ -1,5 +1,6 @@ import * as React from 'react'; import { -- I18nManager, -- InteractionManager, -- Keyboard, -- Platform, -- StatusBar, -- StyleSheet, -- View, -+ Dimensions, -+ 
I18nManager, -+ InteractionManager, -+ Keyboard, -+ Platform, -+ StatusBar, -+ StyleSheet, -+ View, - } from 'react-native'; - import { -- PanGestureHandler, -- PanGestureHandlerGestureEvent, -- State as GestureState, -+ PanGestureHandler, -+ PanGestureHandlerGestureEvent, -+ State as GestureState, - } from 'react-native-gesture-handler'; - import Animated, { -- interpolate, -- runOnJS, -- useAnimatedGestureHandler, -- useAnimatedStyle, -- useDerivedValue, -- useSharedValue, -- withSpring, -+ interpolate, -+ runOnJS, -+ useAnimatedGestureHandler, -+ useAnimatedStyle, -+ useDerivedValue, -+ useSharedValue, -+ withSpring, - } from 'react-native-reanimated'; - - import type { DrawerProps } from '../../types'; ++ Dimensions, + I18nManager, + InteractionManager, + Keyboard, @@ -72,7 +73,8 @@ export default function Drawer({ const percentage = Number(width.replace(/%$/, '')); if (Number.isFinite(percentage)) { - return dimensions.width * (percentage / 100); -+ const dimensionsWidth = Dimensions.get("screen").width -+ return dimensionsWidth * (percentage / 100); ++ const dimensionsWidth = Dimensions.get("screen").width ++ return dimensionsWidth * (percentage / 100); } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 634052ff92d012d1ee218a57aa513966d6bae2b7..9b68549c9bfefcdd68ee98abc3d09c383ee70309 100644 GIT binary patch delta 7047 zcmbt3YgAk3k>?_W;JaMuO6K8!F|v&vKsJ)l3mn@Z9s&VE;voU5V-tbnDis={EIl94B#`Y@0Nv4&6S|$Hw08O1$io zX7`-xhtBZ6su~I~`&7h!c*Qvg=FLHjPHE)+no0vcW;8%Bhw) z6!uEH(^)kjlgTyJYNtx$bSTt`jzBJP>+G4awsot-3t^o|RFdfVMwnGMwVJ*5jVqZ6 z*~=LqNTYdRNTah8Md`VTJ2{0g^(`a@Z!cdoyYSeVj0BaxmQVu0741k|9pWW+Kb4)x zKe-_h{`MNKtfHzyGiy58EQ6DEX{EgqmAZ1I+N-qbt7Lw;#1b!RZ<@j*jl! 
z311N+)oj~;klf7qR)u12pd6klJYp*eAXomg&N0w-5q2m&N(vxSOl)~z z?7IXr_T4nj38*VM|c2)b=*D zbV=j(m~^nN+tC&_d$meqLwLjk9ls$;m;=MtR_g+WL5&`SWyCGaul}4KyPv3rv#+I>ndL)crn;7pUK+A{I$W(o zdULg0uUGeYZE=aC&EBpssKbMmu~Ac%t1}i>G)FsnJ(jRT+p36o>SZCftuZz>ZW#6S z*1PPD-R>5jQsdKtaFDy1`TIWZhigz^#2pR>{LVlWet8d1(miI522EbW(9poR%wUR} z-1?rdyfvh>IeS_>4OVrRVZ_|&a{E0F>rkXK))4aPRMJSb%RbcPiuW2^5s7*v6o>`g z)=IC=9aHEGfi5_DYgz%!lyXSsy`$Wp6Chg0-C`bV_QvGBN?E5-GH9%dw^a4GDhDgW z)*g#zIPMydj}HyY^*cFY2 zBHN`>XT;|YL@R>fA?YIRwn!`#3WlTN!C+V%jJlloKNQ6YG*ST_)7<-QjY8TEZRtC^N_}%Rt~tk0;XuEN7mEc5tT>=I=!Ag3tjp{&A!@IRR*xA|CuJU!ZyL>@KoyR;}O-V_nrP<|=1jIM6Lu z_es@3YnxXy>~y;ARkp6?sHbDJqrSndv3F`({ez8$xVFAU=ds1wo0>r`CpN(JTvi6W z^L*B3NbA7CDedhI#uNsNU9QlojMdW0c87Y*KJE#dN23~RTwm>~3Uvp}25(r^6>+uM z2V|xev(YkKX_53=ybh;X8Sq4gwNZa-m8QGd8Z%bfgH3Su-_wg=0n#=yFP_eNFO5*a z-GAhknd8HwBa*gA)U0VAH96adZ651*TVtIiWY(G04Y8ru9U zBSf`AunPovFyGJTLAw~;#T0Eq7kDr!N4LOTdZd8czlXNM%r^AM)K=60^EM=c+Il2~ zht`uOm@Y+`FyTdoiOD~vL%bZV0ht`dZ`^t1%XbyY@vfP@s0C&i%omF)fj8x7bVc60 z7OnjPc?{lpi2&2z5t&lNR86kKiey5864A>n#c-Dj@!^~Ton?=OmX`%ESBcV@IVC!s zw#o`cKOlKv-7n0C&v&2*oKd4SQ_rDRsNF@@L%#;){Ucf($g0t2{|Fm{$dmkBpj(g* zYAs|toZo@6{}FMLRUK^Ffu8>&5(aBlPJ9uD)j7F&GwXvj%#U}Wzoo-I-I9s>r-eCi zRfo<4e&R7f9r`S7+1bBaerjdrTTnJ{*|s<~K0p-1??gWo!OZK}2Gcd96#CZ_JVxw9 zucZO~a~v>1H>!krEs3K|M$oX=h0Vbt7>t;}p85&8jb$l<1rc=%toNc6=1 zi*g94{a2)ielfKUuHTDYFq2D);Y21ShtM6U5X1p=|1xYV)czHfgE)kCupSS<#5>5! 
z;N^FS6b*e0Z`rk*EJ|&z9mXos1Q~!U!$=FJd6W-x8Mw)rC?)d18bQ9)wjb2v^CiW} zqUgS5Sb0hu)^#Ja#Bf9qn zR$>hV;;B##Kq!k^3sfAvnWCMzjzbkMOO_=jchTS*$0os6#ld(T^)Iud;gLI5A@r%E znf9R`)-Eb$*FMySO_KE8o#-Ic?#24eeUG0HXaZj^Ow$jh1K5eCyHUo}1iEvXf`ULL z*3^A>O4Ajx{V9^^I}s233PK394_P5}4?49hhX*I_!ACG%Li-^}cjP{7suf{)dH*Ud zd#z+HT-%T1YlUP97G6a+eLosqB?B)DVc`LEn0faBWKK_EChABLocTKT?~3%|orDMk zhgc^s8iEH)FnSf;^&|L5R}QCc&EkPE47V=JX2Q$yl)(FGWU4lhYO83{3 z8$ogmg;R2umU*nwalo-AguLT7Fj#^uGLT)viFW=kNC;=@hz!u}CUZc%2Z#Gjh`|^} zgq}rrvxp)%H+6$4lQH%3)RJ#8i4ZPq!aej?VM(|~>BKT1A=oi8wDLcRSD7v`l+D7sg zSa=lWIy+wmQpynyaqg`GdeUk}q?UBZ6K@H3dvc~p`N zH!)PWv4rz)p{EzuzAaQH^Vr+y6HZEl|GA8C4|Qqjfb$suDNJ#ovUnp2Y z5{yTQY$z=yA6&I;_T!AQ#48I9uOa7Id+!001()=qLUIkfFTlmvlb2F+u|r7KFn&Hc z1pUR>oC{ji!I<*MukzUHj9}~~J_VEAt6)w$i{n< zup9W{0{a~H5ZgjAc&w1kg#m#eKh+IZy@;Aq4ZzFC$!rLn#!kIZghN2Q0oR^+A6*Rd zURn&MQhX)6fQ77dk}G?Q7+gAPdhj;fHD<#5*+e#z_YnCD^(yK!)%Ydt(?_v=GA@w0 zC|4sce)FtfiWBk3z@LDOHzpD=>YNck#Z3)~Ne0g*uc_GS(@uEd5VeE-Q{sT8^nk}rB-9+GN@2#rNr$#x z&f3&cJc*V5%@In+4xjD^JaJ0W4YO0HUF^WA7Ct^k)v#lxBw`~JJ2)A*EfHoiR}xf- z=%1Q8_3N`q_~wTcg86$fS9XHL;0eY|6lf>E9O#Rb4CW7Wa^TdTs0|=kpteHjRs1yf9F@le z-=LZ|t`e2;5!IDk;%R3V#3_*i_PvBtsPREgCS3Z6+OmXP!;0L(_&=sZiy}YG5|uFT z{FXZBUlnvmto#~~_V9ZWlV`RtclPn`S-H~Xa3&4ERz9A8s4u+gBK#cJecBCSfp1VbSt6;=)sQ zoNKP%%jZJkUcT}s1cMQON+&ch{7rtbaAg>^T`aGVSIC%>gM7Lyl>>3wrxGuzvYGzp z_|KJREzRT9ASZ-H(ElDkpSgIMe^9|nh%pCf!7=h?VV&y)O&NH!_IONK%#Vkh+t?Y# zb}=4esF{D;C^*aIuV_$<6@Y!Ef|Kd0?czH?dYroncHc(lFnh`b^{Z|?gY(!k!c}%V z&??v|T9(X%%nP`v&iS#`*LMp93}=tvm&z4#xNl6bmMKUGvUJOP7H#&Gw*@^n!+!g= z;PsnfzrG~sy$QyQyd(JgM%Hdy;Nc8<1K`>xgf}y2Tv<-?vzfnM6F4|uicTlk_u4$> zI)|nSwvbf8gt*no1NZSnx5<*{x#7IqqRz12 z9kBVfi#6;WxXE^}^F(n%lQ5lZ1sP9N08eC!a`0FOLX@Z)Ih+HrAvT~DU=s+|rb>E2 zmZ%Vqn$Et4``qpIg9fQF%kK-+ZglWfOI-f`@>(z2PqO8m2&eQfWTGGjttvzY=+%e{Q zHnm~MN2DWic%0KGZ|$oHTeIEWt^a5ZFk}cUX+3-IAe}o52R5|6^vn=EX5`>=cR%<5qeXjk zpe0z%Xg&UUXRGxELF?1y0R}Om8j-@e_xNUXrWJEL9(rBW>U#T1U)rDI$=m`bo@VrNv&ncVREY{Ic|o-t6~M3iu(j|Oj%@^U 
ztZR5q^=to+;gLIV?kDbhoJQXFc-Qs^p=9jZ;gbgg4tq7EDe8@)NohP#cBLCmjc069 zI*}AxVGA82d#j=85o+H6i03`NXRuf`e4gsSdZ zC1gq^GCFnF6qv8iUE5U%GE)_PbMj;e#MMr6`e@I@3TTUf(jztDrjzq;u> z7tey$#ILqWY;CVQR{TN=$xL?j!QKam3Tb9dpCTR-Zcos`^z*kf$OHf+=-;B z9!y3;UX#}|;gWk~HJ4i=@ul5XyDy&~Ck!&1(B!T={gZB2Qmbgj^ksw9;t9A6#@bX< zo;4V%`i3ziYHAAVRMlNbIGrXKgWJtsX5}(wga1L+>lv65a{_SUIRlR8{s+S}BtIJdRSst`GVHT5&F08uOV7L@;cT z=#?3FwQBc;^im-_>qGj|Ui!~EO0-h3vowj?StjmlmRzZcdeUAm8x7&mL_M1}C#4Q; zDenx(L%yQSNND6WQO)PmR1D<_aV|ENi&yflLe!QyTh7M7Ai_}wlVEj>Xl+mtT~ZL2*ZwqC7{ly^J--(8SubviQc`(F12_2TqvZS~{c{T*YkGAeRoxnkbG|Z`kV#`EXEdNys}kB! zOr_QZoQ0aOQ6+RLr@?J^6H`fzGCW=_nhf@^c*-QqN=k`D*&MEm6Lpk+#UIkQ!W%yN z3K$8}f80qrnW7q1HM5nhNbc}@6$NcAl}Uz)X_cfnnQqkNvT=_ohzxaltHGSlD6G_K zZU3F7u^KVyR7`suxRUchnvBnj;$6A7Bl^@$2XU_+xM(6MPQ2$Xc{S zEbdIPF%_+vqyb&jFIARHT2)pX%eb4KlD8b6@`}SEuR5j3NaB+5e-)r`Jowg~vRA*02TU>RcyAUFbxkN%6 zx7l^ISl%&NlNhxAY*b|vX>5jAV!~k^ui8CAXVn}LD}-b6W+O-#BL!Qg=%{(DZgDY2 zd9PEvd+BU=E{7*64`h-=My(vvJF5x5Ph3_CLvF7q zRu!kc)kfScvzDDi%vEj1fuHUjhSLxAFvx#Pbk}Yt&%f1u{T(zFoO`HyyQdKvizVxPwYd_`Dhd|DlXFe0Dmi~qly$oar`X_f*G+a!u`bJK#$*|p+8foyRPynxIHz_w z3|V=xTGBdF^`zKc(KtO4zrK+O!rR8K4M1#WN;%RiF-#!HhDzyZWK;|{A7}3H2E-zt zq#26Y!}iFeQ|l-E1&!H1tqY|c_CikKB?6X)$5fVv>ve^%VV@5A#A6=2Juqph8KrT* zR241?la_iWWi)zn)=EYfixXlv_bPJ}yt{(F;uBl9Fv+c3dLKUo#TaY5M;eI6&CzTs z@5}|v+45M%Zj4ziK8HgiZ&=b!mDOmmxa*?WSjiCbrS+kvMK0DQGme7IW+{rJjgner zkc^2v(U{}5;GHoz7) zbF0@Du8AjQ?y4d+Wv-=VwT8|ZvDOoHMLy#<_|nsPi!vaUxC%;PEoDPDjR{pXtSFC< zxe3DOo=Qa|qU_kT*;{j}OH&i|;^del6c?eoU|k6>KgzlSzI|)=R*!qkH*KGYI7I1s zRFO!fD@u!dQl=w>E`6~Wm8&$>Y&fTlxomZrxgyO@xzz>zSUQ{#=BkCN#4*;WNh;2y zLalcio#LQIT#Jv1V56V00cMZWHBwv79t1(2jOr>9s~u@r>D}&^Z0hDYa=l=~lYJlDbw=wWh*Z<#=9X zGurG5slJ{x`D4|z+7T}%9ip6kLTA!WC#~sNU7eG8rwYnUrDCg33spg%8D5<0S_f;t z#ySS4zs7nS-Z;rBkT=b-lpdOrB$?PKpJcDayuEO!7hen9t1v0-QDAHsp2U0L*u&@| zxpxb86i%GP2Ep+Tb`9`0VN76u725_kU4e06W)Q>SnH^|V*pIDj&A-ir@i4}N869Q< zVkfrh?+QK()9+yY|4?Q(97SrL+=~5*4l|#@hG35tW42Dca|Qg*4oVjb-%qK*VB0Qa z$UpDEp1pjFOy7lFffzQn1s&Onl}N`<>^&MhD8bS&x*6Fpe=lbZ9NL8pCJ5XNgalgy 
z2Ueo+kPj@W{Uw-$&H_U+F0cno8a#wlzCLKm@ha6E$TfKdl_Jte0f z_9swS&dYEnJl4zJu|#sm-@sl0`*RozPUMh)2L{;d;kz+xH57h?-3X^%#N-`=DFlfS zchQz4{C;VLZV)--bU_)}aUzAzc$ppOvVIrFg&S6|dtg|Jujv?r=x$pQ@F9|cC5wcx zJ4^YDz_vsFbtL6j7Nv$`KPnBBF4~TOStZT|o&(o{;LDsrIP*W~Ae4LHnk@FqWh6v$ z8ObFbb4w%|%CswhXW{k1`5bb>+2*1@k&bJYtdfDC7hNJoZ=rR=`2v=w1gxcM^Zipsj)5S#b1=coOq>f_(|m z8$$?z8zDZ-R!1&s`@Nln9gaG7`{i`K@J$Jg2Z1Rp2-|YyKcow%Q)s2V5^oQyqv?ln})&sZO9#$bS%Pn z9ga(gH*QC{c>ZHp6VPfJMyIizRN1VD`|rROEGPERc7W{@m$_FkO4|nbor^jax68#N z7>g>MTRLVeZnu8eX5N3JmTCCpWsY8iE%)&Zi_M2adE||YpIr3DWw4KU17X*I@6a;h zHSq4zmQbOTeehFDr(D=xs+t9#jyh4G{&aYeZgKk&y7_ysm0-RH6-?W^i`#)q9w!lO z6RqRpStsgpyn9j5QFAcEya4Ng!js7F&+NoUmj!N-?mJ7_CWKQa+5kCtA9g<~^Od*& z&f94i`RZq}Z}L0)!*(8<{P#KRpLi5gKg@iALr|@uAI^RseS434c-=r8KuP!fchIR8 zNrX#XaDi?nPGgZ@`YtADC#wK%O43%4e|`@8;=;b*;PnFW0@h8QegXS0dPn!}hdG1f z`IoSL-IU<9)YoM-eEeq^0Y`t@`DmG7_7&9OW`2P(ZHa_LBwuB9M4akFO?>bh*l%EQ zwVL`Co`&;BP!V4o#W7U-G$xt-4R#zQI;F+73ER~!r7B39y1>xG;x9Y zN8nEC&N2cg2;5E1ZmyIjQB02U;t;ezj&8ONJL`cc4RrO#E0N{9>4ufL+NP1Iq>}FXl(dR z0yWsByV#M3uubq7<$}ea^}`}-+jZ@4=OX{VApSqyhPN+SO~hJmUrmnqsL$O!7@s_R z1wPW%b~6y$kuz@HhK!)x)!ve5d*ecawb93UYzDb=7ycD5IBrDe7u<@BL}j*6SLpc0 zP8+@U7;ill$1ULaYu_sPp@Y}9Z3~=t^ENN#Hcq0sCYnLduEK_4{UO{9!n^U8Vg8f& zb!7e!zFG;hSMxTI-}nwbfTKd=2*@{14DR) z0QL(0)FlLo5brR6=;rMt-LK(Obh!H`_z1lD6PyLJO>Q5_`5mt8rX&x;fw%ALH?xQZ?|T=za}6$oCz8RSjB$E{q-+9C4wi}-7t#ZkBgMzj1rIB_Sx z2Szx3GZdUeGxz4oVH$}LPaTE7^!D$D<2v3| zM|Q9|WZ_KT7lz;!HLf}GMGg!0{iRO_)-Q7YncR=FpJ0%`r?ZJoAWHEzf)U5okio6& z>gK;QvTvPHZ6JG0>~r!iu@pkLvW4&!2Ym=cGwe;&3WwntHe)%i4bdz?&~S8+qTWu; zD|?oM+Y?NFN0P`#N7+BaW-k#eLS~mk`XG2S+prw6UX3jR){{f4IngDw%@a~|mg#D+{#r*tB zY%2&p&t647eUfe5w3s6{nEW~0xwsQkk00-q9I@WC3{nj%G$l8V}kS7cKr;ISv%)`T01!bEjLq79!%2VRME3|$Z%9Q51C#o-+Ba8|n&Iepuy4mU);@Hx(@9VGJwj#Ua6=YM$ z4e{HRPQHH)_Zrk6pW|$YJN4X^{@0eSE??y#YArU!q>Wx!v5z_l-JpY5#P zVWXde-~JI-*RgQ&dG3Wx)T!DHZUCAWxErYkhXZ#S{RaQa1@4y4_Tg>lA?3{rTs=AR zHkVERf3W~!d4y`ytH@XW$mP+fhN^-`e~Ye*`!#qEseg~lp|xELXEW$li;S4r!X-wC 
zZHsd($p`_@mXH(se#m3f6(bT*Y~UGJNztf_`s{l^9n^zeCLSJU~ZQL+yM?^-^J7@L0^%&6(i*72)5Aad;r{#q_Lg#1UybDQ~J z*O8AU`1^ZlVtD-fd;t(eem~ssarD#ijxY2L!G^!^nII^kf2a@gm{(pUkd2@osVVW- zb~@u@MLv`27i|dYgMF9NAP5s{C}hhRH}m4TYBV&JOvIK!H$l!z?^({giKCG1%2vyn zWmLmdr^A<14a1#fe!F8K3x=uDeeZJgT0mXxLv}CrIo3xAd$`EdUDImd;CHEhjI$#O8h;36V1TT z>uHmwgjX)|)o{|p*#Xa7;143tzxV_85L~#x-ve(Q;$Woaef~juyVJe!Xa0NCf_mqL zPrc6{2O_{(MNVGi|BCiE-mg2B2`Zf)G<@H4{WrA5pk^2V$CuC*#4*L|zK_@6-!s~N z6mQFYqpSZ~xaOC<-o8>klc^RKVtg*xCxlrQrx%WEIDPQYk5M{*lHM;~V5Q#E3HS%R R=yxZ*|620NuKqg({|7A$(Qg0% From 97950140997feb220bd37273db09c207587ac932 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADtor=20Vasconcellos?= Date: Sat, 19 Oct 2024 08:20:18 -0300 Subject: [PATCH 202/218] autoformat --- interface/app/$libraryId/index.tsx | 6 ++---- interface/app/$libraryId/settings/client/general.tsx | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/interface/app/$libraryId/index.tsx b/interface/app/$libraryId/index.tsx index 10a95e97a..6ff9f4b3e 100644 --- a/interface/app/$libraryId/index.tsx +++ b/interface/app/$libraryId/index.tsx @@ -8,9 +8,7 @@ import settingsRoutes from './settings'; // Routes that should be contained within the standard Page layout const pageRoutes: RouteObject = { lazy: () => import('./PageLayout'), - children: [ - { path: 'overview', lazy: () => import('./overview') } - ] + children: [{ path: 'overview', lazy: () => import('./overview') }] }; // Routes that render the explorer and don't need padding and stuff @@ -35,7 +33,7 @@ function loadTopBarRoutes() { return [ ...explorerRoutes, pageRoutes, - { path: 'settings', lazy: () => import('./settings/Layout'), children: settingsRoutes }, + { path: 'settings', lazy: () => import('./settings/Layout'), children: settingsRoutes } ]; } else return [...explorerRoutes, pageRoutes]; } diff --git a/interface/app/$libraryId/settings/client/general.tsx b/interface/app/$libraryId/settings/client/general.tsx index bba74c7f6..2196b0e27 100644 --- 
a/interface/app/$libraryId/settings/client/general.tsx +++ b/interface/app/$libraryId/settings/client/general.tsx @@ -62,7 +62,7 @@ export const Component = () => { .strict(), reValidateMode: 'onChange', defaultValues: { - name: node.data?.name, + name: node.data?.name // image_labeler_version: node.data?.image_labeler_version ?? undefined // background_processing_percentage: // node.data?.preferences.thumbnailer.background_processing_percentage || 50 From b29d165fed3a3151bea7615b50db2eb945ceb875 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADtor=20Vasconcellos?= Date: Sat, 19 Oct 2024 08:35:53 -0300 Subject: [PATCH 203/218] autoformat --- interface/app/$libraryId/Explorer/FilePath/Original.tsx | 2 +- .../$libraryId/settings/library/locations/AddLocationButton.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/interface/app/$libraryId/Explorer/FilePath/Original.tsx b/interface/app/$libraryId/Explorer/FilePath/Original.tsx index 451b13d35..b1b14ef75 100644 --- a/interface/app/$libraryId/Explorer/FilePath/Original.tsx +++ b/interface/app/$libraryId/Explorer/FilePath/Original.tsx @@ -83,7 +83,7 @@ const TEXT_RENDERER: OriginalRenderer = (props) => ( onLoad={props.onLoad} onError={props.onError} className={clsx( - 'textviewer-scroll size-full overflow-y-auto whitespace-pre-wrap break-words px-4 font-mono', + 'textviewer-scroll font-mono size-full overflow-y-auto whitespace-pre-wrap break-words px-4', !props.mediaControls ? 
'overflow-hidden' : 'overflow-auto', props.className, props.frame && [props.frameClassName, '!bg-none p-2'] diff --git a/interface/app/$libraryId/settings/library/locations/AddLocationButton.tsx b/interface/app/$libraryId/settings/library/locations/AddLocationButton.tsx index 23b70e901..e9cc0fd75 100644 --- a/interface/app/$libraryId/settings/library/locations/AddLocationButton.tsx +++ b/interface/app/$libraryId/settings/library/locations/AddLocationButton.tsx @@ -71,7 +71,7 @@ export const AddLocationButton = ({ {...props} > {path ? ( -
    +
    Date: Sat, 19 Oct 2024 12:45:57 -0300 Subject: [PATCH 204/218] Fix onbording test --- interface/app/onboarding/prerelease.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/interface/app/onboarding/prerelease.tsx b/interface/app/onboarding/prerelease.tsx index 156d6e5a5..5432ebec5 100644 --- a/interface/app/onboarding/prerelease.tsx +++ b/interface/app/onboarding/prerelease.tsx @@ -17,7 +17,7 @@ export default function OnboardingPreRelease() {
    From 2c91484dd460dc17bba8c791e03dfb2666ac934f Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Sun, 20 Oct 2024 22:26:06 -0300 Subject: [PATCH 205/218] Some tweaks on sync messages send and compression --- core/crates/cloud-services/src/sync/send.rs | 39 ++++++++++++++++---- crates/sync/src/compressed.rs | 41 +++++++++++++++------ 2 files changed, 61 insertions(+), 19 deletions(-) diff --git a/core/crates/cloud-services/src/sync/send.rs b/core/crates/cloud-services/src/sync/send.rs index 4fd3842da..c0ab06e88 100644 --- a/core/crates/cloud-services/src/sync/send.rs +++ b/core/crates/cloud-services/src/sync/send.rs @@ -34,14 +34,14 @@ use tokio::{ sync::{broadcast, Notify}, time::sleep, }; -use tracing::error; +use tracing::{debug, error}; use uuid::Uuid; use super::{SyncActors, ONE_MINUTE}; const TEN_SECONDS: Duration = Duration::from_secs(10); -const MESSAGES_COLLECTION_SIZE: u32 = 100_000; +const MESSAGES_COLLECTION_SIZE: u32 = 10_000; enum RaceNotifiedOrStopped { Notified, @@ -147,6 +147,8 @@ impl Sender { } async fn run_loop_iteration(&mut self) -> Result { + debug!("Starting cloud sender actor loop iteration"); + let current_device_pub_id = devices::PubId(Uuid::from(&self.sync.device_pub_id)); let (key_hash, secret_key) = self @@ -166,6 +168,11 @@ impl Sender { let mut status = LoopStatus::Idle; let mut new_latest_timestamp = current_latest_timestamp; + + debug!( + chunk_size = MESSAGES_COLLECTION_SIZE, + "Trying to fetch chunk of sync messages from the database" + ); while let Some(ops_res) = crdt_ops_stream.next().await { let ops = ops_res?; @@ -173,9 +180,13 @@ impl Sender { break; }; + debug!("Got first and last sync messages"); + #[allow(clippy::cast_possible_truncation)] let operations_count = ops.len() as u32; + debug!(operations_count, "Got chunk of sync messages"); + new_latest_timestamp = last.timestamp; let start_time = timestamp_to_datetime(first.timestamp); @@ -188,6 +199,16 @@ impl Sender { let messages_bytes = 
rmp_serde::to_vec_named(&compressed_ops) .map_err(Error::SerializationFailureToPushSyncMessages)?; + let encrypted_messages = + encrypt_messages(&secret_key, &mut self.rng, messages_bytes).await?; + + let encrypted_messages_size = encrypted_messages.len(); + + debug!( + operations_count, + encrypted_messages_size, "Sending sync messages to cloud", + ); + self.cloud_client .sync() .messages() @@ -202,20 +223,22 @@ impl Sender { key_hash: key_hash.clone(), operations_count, time_range: (start_time, end_time), - encrypted_messages: encrypt_messages( - &secret_key, - &mut self.rng, - messages_bytes, - ) - .await?, + encrypted_messages, }) .await??; + debug!( + operations_count, + encrypted_messages_size, "Sent sync messages to cloud", + ); + status = LoopStatus::SentMessages; } self.maybe_latest_timestamp = Some(new_latest_timestamp); + debug!("Finished cloud sender actor loop iteration"); + Ok(status) } diff --git a/crates/sync/src/compressed.rs b/crates/sync/src/compressed.rs index aa084c4b7..47c38e7fe 100644 --- a/crates/sync/src/compressed.rs +++ b/crates/sync/src/compressed.rs @@ -1,6 +1,6 @@ use crate::{CRDTOperation, CRDTOperationData, DevicePubId, ModelId, RecordId}; -use std::collections::BTreeMap; +use std::collections::{hash_map::Entry, BTreeMap, HashMap}; use serde::{Deserialize, Serialize}; use uhlc::NTP64; @@ -17,11 +17,16 @@ pub struct CompressedCRDTOperationsPerModelPerDevice( ); impl CompressedCRDTOperationsPerModelPerDevice { + /// Creates a new [`CompressedCRDTOperationsPerModelPerDevice`] from a vector of [`CRDTOperation`]s. + /// + /// # Panics + /// + /// Will panic if for some reason `rmp_serde::to_vec` fails to serialize a `rmpv::Value` to bytes. 
#[must_use] pub fn new(ops: Vec) -> Self { let mut compressed_map = BTreeMap::< DevicePubId, - BTreeMap)>>, + BTreeMap, (RecordId, Vec)>>, >::new(); for CRDTOperation { @@ -38,14 +43,21 @@ impl CompressedCRDTOperationsPerModelPerDevice { .entry(model_id) .or_default(); - // Can't use RecordId as a key because rmpv::Value doesn't implement Hash + Eq - if let Some((_, ops)) = records - .iter_mut() - .find(|(current_record_id, _)| *current_record_id == record_id) - { - ops.push(CompressedCRDTOperation { timestamp, data }); - } else { - records.push((record_id, vec![CompressedCRDTOperation { timestamp, data }])); + // Can't use RecordId as a key because rmpv::Value doesn't implement Hash + Eq. + // So we use it's serialized bytes as a key. + let record_id_bytes = + rmp_serde::to_vec(&record_id).expect("already serialized to Value"); + + match records.entry(record_id_bytes) { + Entry::Occupied(mut entry) => { + entry + .get_mut() + .1 + .push(CompressedCRDTOperation { timestamp, data }); + } + Entry::Vacant(entry) => { + entry.insert((record_id, vec![CompressedCRDTOperation { timestamp, data }])); + } } } @@ -55,7 +67,14 @@ impl CompressedCRDTOperationsPerModelPerDevice { .map(|(device_pub_id, model_map)| { ( device_pub_id, - CompressedCRDTOperationsPerModel(model_map.into_iter().collect()), + CompressedCRDTOperationsPerModel( + model_map + .into_iter() + .map(|(model_id, ops_per_record_map)| { + (model_id, ops_per_record_map.into_values().collect()) + }) + .collect(), + ), ) }) .collect(), From 17d82ee20d3e93d514c6c86ec48880c67a8f4ca7 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Mon, 21 Oct 2024 00:32:00 -0300 Subject: [PATCH 206/218] Ensure single cloud bootstrap ever --- core/crates/cloud-services/src/client.rs | 10 +++----- core/src/api/cloud/mod.rs | 29 +++++++++++++++++++----- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/core/crates/cloud-services/src/client.rs b/core/crates/cloud-services/src/client.rs index 71d4da975..d9ec361e1 100644 
--- a/core/crates/cloud-services/src/client.rs +++ b/core/crates/cloud-services/src/client.rs @@ -2,11 +2,7 @@ use crate::p2p::{NotifyUser, UserResponse}; use sd_cloud_schema::{Client, Service, ServicesALPN}; -use std::{ - net::SocketAddr, - sync::{atomic::AtomicBool, Arc}, - time::Duration, -}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; use futures::Stream; use iroh_net::relay::RelayUrl; @@ -15,7 +11,7 @@ use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Endpoint}; use reqwest::{IntoUrl, Url}; use reqwest_middleware::{reqwest, ClientBuilder, ClientWithMiddleware}; use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; -use tokio::sync::RwLock; +use tokio::sync::{Mutex, RwLock}; use tracing::warn; use super::{ @@ -53,7 +49,7 @@ pub struct CloudServices { notify_user_rx: flume::Receiver, user_response_tx: flume::Sender, pub(crate) user_response_rx: flume::Receiver, - pub has_bootstrapped: Arc, + pub has_bootstrapped: Arc>, } impl CloudServices { diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index a47ced682..dd2bde419 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -15,7 +15,7 @@ use sd_cloud_schema::{ use sd_crypto::{CryptoRng, SeedableRng}; use sd_utils::error::report_error; -use std::{pin::pin, sync::atomic::Ordering}; +use std::pin::pin; use async_stream::stream; use futures::{FutureExt, StreamExt}; @@ -50,7 +50,19 @@ pub(crate) fn mount() -> AlphaRouter { |node, (access_token, refresh_token): (auth::AccessToken, auth::RefreshToken)| async move { use sd_cloud_schema::devices; - if node.cloud_services.has_bootstrapped.load(Ordering::Acquire) { + // Only allow a single bootstrap request in flight at a time + let mut has_bootstrapped_lock = node + .cloud_services + .has_bootstrapped + .try_lock() + .map_err(|_| { + rspc::Error::new( + rspc::ErrorCode::Conflict, + String::from("Bootstrap in progress"), + ) + })?; + + if *has_bootstrapped_lock { return Err(rspc::Error::new( 
rspc::ErrorCode::Conflict, String::from("Already bootstrapped"), @@ -210,9 +222,7 @@ pub(crate) fn mount() -> AlphaRouter { .try_join() .await?; - node.cloud_services - .has_bootstrapped - .store(true, Ordering::Release); + *has_bootstrapped_lock = true; Ok(()) }, @@ -242,7 +252,14 @@ pub(crate) fn mount() -> AlphaRouter { .procedure( "hasBootstrapped", R.query(|node, _: ()| async move { - Ok(node.cloud_services.has_bootstrapped.load(Ordering::Relaxed)) + // If we can't lock immediately, it means that there is a bootstrap in progress + // so we didn't bootstrapped yet + Ok(node + .cloud_services + .has_bootstrapped + .try_lock() + .map(|lock| *lock) + .unwrap_or(false)) }), ) } From 177763c536c4fdf49fcb214111dd1a795ac59346 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADtor=20Vasconcellos?= Date: Mon, 21 Oct 2024 14:49:02 -0300 Subject: [PATCH 207/218] Autoformat --- .../src/screens/settings/client/AccountSettings/Login.tsx | 2 +- interface/components/Authentication.tsx | 2 +- interface/components/Login.tsx | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx index 464a701a5..fddd07041 100644 --- a/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx +++ b/apps/mobile/src/screens/settings/client/AccountSettings/Login.tsx @@ -1,6 +1,6 @@ -import { RSPCError } from '@spacedrive/rspc-client'; import AsyncStorage from '@react-native-async-storage/async-storage'; import { useNavigation } from '@react-navigation/native'; +import { RSPCError } from '@spacedrive/rspc-client'; import { UseMutationResult } from '@tanstack/react-query'; import { useState } from 'react'; import { Controller } from 'react-hook-form'; diff --git a/interface/components/Authentication.tsx b/interface/components/Authentication.tsx index 4669eb0c6..22c4c7269 100644 --- a/interface/components/Authentication.tsx +++ 
b/interface/components/Authentication.tsx @@ -1,6 +1,6 @@ -import { RSPCError } from '@spacedrive/rspc-client'; import { GoogleLogo, Icon } from '@phosphor-icons/react'; import { Apple, Github } from '@sd/assets/svgs/brands'; +import { RSPCError } from '@spacedrive/rspc-client'; import { UseMutationResult } from '@tanstack/react-query'; import { open } from '@tauri-apps/plugin-shell'; import clsx from 'clsx'; diff --git a/interface/components/Login.tsx b/interface/components/Login.tsx index 0a9a6d386..a4ac8bf63 100644 --- a/interface/components/Login.tsx +++ b/interface/components/Login.tsx @@ -1,5 +1,5 @@ -import { RSPCError } from '@spacedrive/rspc-client'; import { ArrowLeft } from '@phosphor-icons/react'; +import { RSPCError } from '@spacedrive/rspc-client'; import { UseMutationResult } from '@tanstack/react-query'; import clsx from 'clsx'; import { Dispatch, SetStateAction, useState } from 'react'; From 2fa90d64632accea4b2c00e05e84b60f44a42101 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADtor=20Vasconcellos?= Date: Mon, 21 Oct 2024 19:44:08 -0300 Subject: [PATCH 208/218] Update iroh and sd-cloud-schema - Fix usage of deprecated functions - Implement clippy beta suggestions --- Cargo.lock | Bin 332951 -> 331411 bytes Cargo.toml | 2 +- core/crates/cloud-services/Cargo.toml | 2 +- .../src/p2p/new_sync_messages_notifier.rs | 2 +- core/crates/cloud-services/src/p2p/runner.rs | 2 +- .../heavy-lifting/src/file_identifier/job.rs | 8 ++++---- .../heavy-lifting/src/file_identifier/mod.rs | 2 +- .../src/media_processor/shallow.rs | 2 +- core/crates/indexer-rules/src/serde_impl.rs | 2 +- core/crates/prisma-helpers/src/lib.rs | 2 +- core/src/api/utils/invalidate.rs | 1 + core/src/api/utils/library.rs | 5 ++++- core/src/util/mpscrr.rs | 2 +- crates/ai/Cargo.toml | 1 + crates/crypto/src/ct.rs | 2 +- crates/ffmpeg/src/dict.rs | 2 +- crates/images/src/consts.rs | 2 +- crates/media-metadata/src/exif/datetime.rs | 2 +- crates/p2p/src/smart_guards.rs | 6 +++--- 19 files 
changed, 26 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b04ea2fcab529017e2a76465f9690ce71a16a464..082cbc3c36e6e6886d58a9c73488ea16165bf231 100644 GIT binary patch delta 1193 zcmYL|TZorM7{+tf_$QFBzm6YH87LSf-z z1ZrRHK&UMeM0c_aO@fD(y67q|f@E(Zy9om&3+vKGH}Y;6W}fHed46yFva>$ldFh<0 zZ*5o*f4Kr}ar4K}IXqiH3L(t*heIjo7n+_5SMUSTSzlAbb58D~@|6tHqu(Mm`75t=nx)Tkk? ziW`oW(`%Y~v%dZH>^RWg=qe8!*&olg72V_WE>C5Y zgSa*pIqk5;MtiALcFu+sHI|u53Zt-9oC?WBz5VHC{9zBwh$jYMVeGgMv*O)l1U`%~9$=v@}?{9W!yr-$Kfs5h@)5Wjf_x<@|951%^) zo61L}rgI;{tfF3Rcf_k_;AH)d>WLGl;f1(!tT8iPJqvST>fLW2!4627gG11<;@OVn zEA|{ap@|bhnu-}Bq*aOq>#)m} z1sdn=f@Yc=gXK@eOYOy+`o_xVTO_A0gcBYa>Pc|gupGA`*l=ciZj(`Msm%>*VyzDf zRh~(t9i=$qR8JgirqRo=ub7f-O>Ws}vvHsZ5#q5G8lzy5_DFN1Gw_&VBP=zFxC{#q z+$y1sRy6pEX3K|)anrTjhu!4>%Qk$!wo!Iq5-=g(3y9x`Q)^Smx5kCcfw zj3R<7Z!<4}T4Iz7EJ&>g6)1QrJf?Y*x@#n6q;1z=?|*6ExDM0P&)4D3+BoN@#?o~A zE^Oi_#L{zyl&ZWBj_HaZtb>W#LU&qy xr0gvgPT@2(bZE_>KCs(ArfZ74;iKizIm3c1SVfhqt1 delta 1802 zcmZ9NTWFVM9LIV7?|)OL(-zzQ&eZ*#P?_%Ub3VxQzd)gj4%3KOczTXn~LIO{QT4lzL` zB2)+E8S#R|^CP87{@XUfIS81137j8Ll_^idxLo%yv3^RuCscEz^mK*xB; z#jieymltIN>x35GI?JSzfon?A!8C)d@)&b&yh-zpx?lq4gd&L5ak>TY%;)yV^;t@ z4cl#VckbJ_rwVJhhH!rVr-gA#0PBmo2d{O^Mk`a4x#DGn_p2zi}ib4VEP zCAKit%p#LAObiQ3bM7%`nk!;_Jo8uzx%zdfWu~3Du?MU!&K?E7f^vi1_m%-vTp0!H zvl;xh7hT6dYpI;brV*?Jo7PDSC4tz>pb23n@o0pURtOdX<6dY;wC6MgnG!-}!#Nnd zH3E{f9EZQird)~r_0p_({W$1|RRr4N)xL7eDx7kVQp_jWnvs+LzI)AXcOY$|H6E9| z1vbP(CjiQ-JtJ}3YOo|e^)6_NUB|(V=IK2rMAn{PUOQ)6aYZ_#C*?P-&-S=sq+E?x zNqKf03JHQWpMlomo3}ybo?XR1?|{vqeM;g}&K%g|_cnAMyT2 zKpkYmg-rVdSpvtI3*-qlLRlpoM_xgU156TN-e9Ac;WW+J*mfYR z{lrSj6Ed8Oj;yzIy1=@i}nBX`EVQk Result<(), Error> { let client = Client::new(RpcClient::new(QuinnConnection::::from_connection( endpoint - .connect_by_node_id(*connection_id, CloudP2PALPN::LATEST) + .connect(*connection_id, CloudP2PALPN::LATEST) .await .map_err(Error::ConnectToCloudP2PNode)?, ))); diff --git a/core/crates/cloud-services/src/p2p/runner.rs b/core/crates/cloud-services/src/p2p/runner.rs index 
298a6700d..3dfc33be2 100644 --- a/core/crates/cloud-services/src/p2p/runner.rs +++ b/core/crates/cloud-services/src/p2p/runner.rs @@ -601,7 +601,7 @@ async fn connect_to_first_available_client( ) -> Result, Service>, CloudP2PError> { for (device_pub_id, device_connection_id) in devices_in_group { if let Ok(connection) = endpoint - .connect_by_node_id(*device_connection_id, CloudP2PALPN::LATEST) + .connect(*device_connection_id, CloudP2PALPN::LATEST) .await .map_err( |e| error!(?e, %device_pub_id, "Failed to connect to authorizor device candidate"), diff --git a/core/crates/heavy-lifting/src/file_identifier/job.rs b/core/crates/heavy-lifting/src/file_identifier/job.rs index 249ea57f2..dc2d6866c 100644 --- a/core/crates/heavy-lifting/src/file_identifier/job.rs +++ b/core/crates/heavy-lifting/src/file_identifier/job.rs @@ -374,7 +374,7 @@ impl FileIdentifier { self.last_orphan_file_path_id = None; self.dispatch_deep_identifier_tasks( - &maybe_sub_iso_file_path, + maybe_sub_iso_file_path.as_ref(), ctx, device_id, dispatcher, @@ -419,7 +419,7 @@ impl FileIdentifier { self.last_orphan_file_path_id = None; self.dispatch_deep_identifier_tasks( - &maybe_sub_iso_file_path, + maybe_sub_iso_file_path.as_ref(), ctx, device_id, dispatcher, @@ -433,7 +433,7 @@ impl FileIdentifier { Phase::SearchingOrphans => { self.dispatch_deep_identifier_tasks( - &maybe_sub_iso_file_path, + maybe_sub_iso_file_path.as_ref(), ctx, device_id, dispatcher, @@ -752,7 +752,7 @@ impl FileIdentifier { async fn dispatch_deep_identifier_tasks( &mut self, - maybe_sub_iso_file_path: &Option>, + maybe_sub_iso_file_path: Option<&IsolatedFilePathData<'static>>, ctx: &impl JobContext, device_id: device::id::Type, dispatcher: &JobTaskDispatcher, diff --git a/core/crates/heavy-lifting/src/file_identifier/mod.rs b/core/crates/heavy-lifting/src/file_identifier/mod.rs index a44afbc40..f777c118d 100644 --- a/core/crates/heavy-lifting/src/file_identifier/mod.rs +++ 
b/core/crates/heavy-lifting/src/file_identifier/mod.rs @@ -176,7 +176,7 @@ fn orphan_path_filters_shallow( fn orphan_path_filters_deep( location_id: location::id::Type, file_path_id: Option, - maybe_sub_iso_file_path: &Option>, + maybe_sub_iso_file_path: Option<&IsolatedFilePathData<'_>>, ) -> Vec { sd_utils::chain_optional_iter( [ diff --git a/core/crates/heavy-lifting/src/media_processor/shallow.rs b/core/crates/heavy-lifting/src/media_processor/shallow.rs index fd7caac14..675dcd791 100644 --- a/core/crates/heavy-lifting/src/media_processor/shallow.rs +++ b/core/crates/heavy-lifting/src/media_processor/shallow.rs @@ -220,7 +220,7 @@ async fn dispatch_media_data_extractor_tasks( async fn dispatch_thumbnailer_tasks( parent_iso_file_path: &IsolatedFilePathData<'_>, should_regenerate: bool, - location_path: &PathBuf, + location_path: &Path, dispatcher: &BaseTaskDispatcher, ctx: &impl OuterContext, ) -> Result>, Error> { diff --git a/core/crates/indexer-rules/src/serde_impl.rs b/core/crates/indexer-rules/src/serde_impl.rs index a0b24dd23..461630669 100644 --- a/core/crates/indexer-rules/src/serde_impl.rs +++ b/core/crates/indexer-rules/src/serde_impl.rs @@ -60,7 +60,7 @@ impl<'de> Deserialize<'de> for RulePerKind { struct FieldsVisitor; - impl<'de> de::Visitor<'de> for FieldsVisitor { + impl de::Visitor<'_> for FieldsVisitor { type Value = Fields; fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { diff --git a/core/crates/prisma-helpers/src/lib.rs b/core/crates/prisma-helpers/src/lib.rs index ee8f11bb9..311d81947 100644 --- a/core/crates/prisma-helpers/src/lib.rs +++ b/core/crates/prisma-helpers/src/lib.rs @@ -338,7 +338,7 @@ impl Clone for CasId<'_> { } } -impl<'cas_id> CasId<'cas_id> { +impl CasId<'_> { #[must_use] pub fn as_str(&self) -> &str { self.0.as_ref() diff --git a/core/src/api/utils/invalidate.rs b/core/src/api/utils/invalidate.rs index 8df2eea6d..e888b08a2 100644 --- a/core/src/api/utils/invalidate.rs +++ 
b/core/src/api/utils/invalidate.rs @@ -121,6 +121,7 @@ impl InvalidRequests { } /// `invalidate_query` is a macro which stores a list of all of it's invocations so it can ensure all of the queries match the queries attached to the router. +/// /// This allows invalidate to be type-safe even when the router keys are stringly typed. /// ```ignore /// invalidate_query!( diff --git a/core/src/api/utils/library.rs b/core/src/api/utils/library.rs index effdb89ba..001943f55 100644 --- a/core/src/api/utils/library.rs +++ b/core/src/api/utils/library.rs @@ -22,7 +22,10 @@ pub(crate) struct LibraryArgs { pub(crate) struct LibraryArgsLike; impl MwArgMapper for LibraryArgsLike { - type Input = LibraryArgs where T: Type + DeserializeOwned + 'static; + type Input + = LibraryArgs + where + T: Type + DeserializeOwned + 'static; type State = Uuid; fn map( diff --git a/core/src/util/mpscrr.rs b/core/src/util/mpscrr.rs index 4c7826bea..72daf3441 100644 --- a/core/src/util/mpscrr.rs +++ b/core/src/util/mpscrr.rs @@ -230,7 +230,7 @@ impl<'a> Bomb<'a> { } } -impl<'a> Drop for Bomb<'a> { +impl Drop for Bomb<'_> { fn drop(&mut self) { self.0.store(false, Ordering::Relaxed); } diff --git a/crates/ai/Cargo.toml b/crates/ai/Cargo.toml index b521ee93d..35f9dc198 100644 --- a/crates/ai/Cargo.toml +++ b/crates/ai/Cargo.toml @@ -40,6 +40,7 @@ uuid = { workspace = true, features = ["serde", "v4"] } # Note: half and ndarray version must be the same as used in ort half = { version = "2.4", features = ['num-traits'] } ndarray = "0.15" +ort-sys = '=2.0.0-rc.0' # lock sys crate to the same version as ort url = '2.5' # Microsoft does not provide a release for osx-gpu. 
See: https://github.com/microsoft/onnxruntime/releases diff --git a/crates/crypto/src/ct.rs b/crates/crypto/src/ct.rs index e7edf6a89..8ce937ab9 100644 --- a/crates/crypto/src/ct.rs +++ b/crates/crypto/src/ct.rs @@ -87,7 +87,7 @@ impl ConstantTimeEq for String { } } -impl<'a> ConstantTimeEq for &'a str { +impl ConstantTimeEq for &str { fn ct_eq(&self, rhs: &Self) -> Choice { // Here we are just able to convert both values to bytes and use the // appropriate methods to compare the two in constant-time. diff --git a/crates/ffmpeg/src/dict.rs b/crates/ffmpeg/src/dict.rs index 7d1d5726b..feb84184f 100644 --- a/crates/ffmpeg/src/dict.rs +++ b/crates/ffmpeg/src/dict.rs @@ -87,7 +87,7 @@ pub struct FFmpegDictIter<'a> { _lifetime: std::marker::PhantomData<&'a ()>, } -impl<'a> Iterator for FFmpegDictIter<'a> { +impl Iterator for FFmpegDictIter<'_> { type Item = (String, Option); fn next(&mut self) -> Option<(String, Option)> { diff --git a/crates/images/src/consts.rs b/crates/images/src/consts.rs index cc844e19f..4afd8da84 100644 --- a/crates/images/src/consts.rs +++ b/crates/images/src/consts.rs @@ -159,7 +159,7 @@ impl serde::Serialize for ConvertibleExtension { struct ExtensionVisitor; #[cfg(feature = "serde")] -impl<'de> serde::de::Visitor<'de> for ExtensionVisitor { +impl serde::de::Visitor<'_> for ExtensionVisitor { type Value = ConvertibleExtension; fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { diff --git a/crates/media-metadata/src/exif/datetime.rs b/crates/media-metadata/src/exif/datetime.rs index 39c6a40b6..b238dcd4f 100644 --- a/crates/media-metadata/src/exif/datetime.rs +++ b/crates/media-metadata/src/exif/datetime.rs @@ -77,7 +77,7 @@ impl serde::Serialize for MediaDate { struct MediaDateVisitor; -impl<'de> Visitor<'de> for MediaDateVisitor { +impl Visitor<'_> for MediaDateVisitor { type Value = MediaDate; fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { diff --git 
a/crates/p2p/src/smart_guards.rs b/crates/p2p/src/smart_guards.rs index 6177ed930..a920508aa 100644 --- a/crates/p2p/src/smart_guards.rs +++ b/crates/p2p/src/smart_guards.rs @@ -28,7 +28,7 @@ impl<'a, T: Clone> SmartWriteGuard<'a, T> { } } -impl<'a, T> Deref for SmartWriteGuard<'a, T> { +impl Deref for SmartWriteGuard<'_, T> { type Target = T; fn deref(&self) -> &Self::Target { @@ -36,13 +36,13 @@ impl<'a, T> Deref for SmartWriteGuard<'a, T> { } } -impl<'a, T> DerefMut for SmartWriteGuard<'a, T> { +impl DerefMut for SmartWriteGuard<'_, T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.lock } } -impl<'a, T> Drop for SmartWriteGuard<'a, T> { +impl Drop for SmartWriteGuard<'_, T> { fn drop(&mut self) { (self.save)( self.p2p, From f2f9f91e626b8cc324e059bd705fad80fd5d2a72 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Mon, 21 Oct 2024 22:27:09 -0300 Subject: [PATCH 209/218] Optimize and fix attempt at cloud ingester --- core/crates/cloud-services/src/sync/ingest.rs | 85 +++----- core/crates/sync/src/ingest_utils.rs | 19 +- core/crates/sync/src/lib.rs | 4 +- core/crates/sync/src/manager.rs | 200 +++++++++++++----- core/src/location/mod.rs | 2 + crates/sync/src/compressed.rs | 2 +- 6 files changed, 199 insertions(+), 113 deletions(-) diff --git a/core/crates/cloud-services/src/sync/ingest.rs b/core/crates/cloud-services/src/sync/ingest.rs index 065ceb964..9592b64ab 100644 --- a/core/crates/cloud-services/src/sync/ingest.rs +++ b/core/crates/cloud-services/src/sync/ingest.rs @@ -1,10 +1,8 @@ use crate::Error; -use sd_core_sync::{from_cloud_crdt_ops, CompressedCRDTOperationsPerModelPerDevice, SyncManager}; +use sd_core_sync::SyncManager; use sd_actors::{Actor, Stopper}; -use sd_prisma::prisma::{cloud_crdt_operation, SortOrder}; -use sd_utils::timestamp_to_datetime; use std::{ future::IntoFuture, @@ -12,7 +10,6 @@ use std::{ atomic::{AtomicBool, Ordering}, Arc, }, - time::SystemTime, }; use futures::FutureExt; @@ -22,8 +19,6 @@ use tracing::{debug, error}; 
use super::{ReceiveAndIngestNotifiers, SyncActors, ONE_MINUTE}; -const BATCH_SIZE: i64 = 1000; - /// Responsible for taking sync operations received from the cloud, /// and applying them to the local database via the sync system's ingest actor. @@ -43,20 +38,14 @@ impl Actor for Ingester { Stopped, } - 'outer: loop { + loop { self.active.store(true, Ordering::Relaxed); self.active_notify.notify_waiters(); - loop { - match self.run_loop_iteration().await { - Ok(IngestStatus::Completed) => break, - Ok(IngestStatus::InProgress) => {} - Err(e) => { - error!(?e, "Error during cloud sync ingester actor iteration"); - sleep(ONE_MINUTE).await; - continue 'outer; - } - } + if let Err(e) = self.run_loop_iteration().await { + error!(?e, "Error during cloud sync ingester actor iteration"); + sleep(ONE_MINUTE).await; + continue; } self.active.store(false, Ordering::Relaxed); @@ -79,11 +68,6 @@ impl Actor for Ingester { } } -enum IngestStatus { - Completed, - InProgress, -} - impl Ingester { pub const fn new( sync: SyncManager, @@ -99,48 +83,33 @@ impl Ingester { } } - async fn run_loop_iteration(&self) -> Result { - let (ops_ids, ops) = self + async fn run_loop_iteration(&self) -> Result<(), Error> { + let operations_to_ingest_count = self .sync .db .cloud_crdt_operation() - .find_many(vec![]) - .take(BATCH_SIZE) - .order_by(cloud_crdt_operation::timestamp::order(SortOrder::Asc)) - .exec() - .await - .map_err(sd_core_sync::Error::from)? 
- .into_iter() - .map(from_cloud_crdt_ops) - .collect::, Vec<_>), _>>()?; - - if ops_ids.is_empty() { - return Ok(IngestStatus::Completed); - } - - debug!( - messages_count = ops.len(), - first_message = ?ops - .first() - .map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)), - last_message = ?ops - .last() - .map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)), - "Messages to ingest", - ); - - self.sync - .ingest_ops(CompressedCRDTOperationsPerModelPerDevice::new(ops)) - .await?; - - self.sync - .db - .cloud_crdt_operation() - .delete_many(vec![cloud_crdt_operation::id::in_vec(ops_ids)]) + .count(vec![]) .exec() .await .map_err(sd_core_sync::Error::from)?; - Ok(IngestStatus::InProgress) + if operations_to_ingest_count == 0 { + debug!("Nothing to ingest, early finishing ingester loop"); + return Ok(()); + } + + debug!( + operations_to_ingest_count, + "Starting sync messages cloud ingestion loop" + ); + + self.sync.ingest_ops().await?; + + debug!( + operations_to_ingest_count, + "Finished sync messages cloud ingestion loop" + ); + + Ok(()) } } diff --git a/core/crates/sync/src/ingest_utils.rs b/core/crates/sync/src/ingest_utils.rs index 6c77a96b7..e63f317ed 100644 --- a/core/crates/sync/src/ingest_utils.rs +++ b/core/crates/sync/src/ingest_utils.rs @@ -298,7 +298,24 @@ async fn handle_crdt_deletion( record_id: rmpv::Value, delete_op: &CompressedCRDTOperation, ) -> Result<(), Error> { - // deletes are the be all and end all, no need to check anything + // deletes are the be all and end all, except if we never created the object to begin with + // in this case we don't need to delete anything + + if db + .crdt_operation() + .count(vec![ + crdt_operation::model::equals(i32::from(model)), + crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?), + ]) + .exec() + .await? 
+ == 0 + { + // This means that in the other device this entry was created and deleted, before this + // device here could even take notice of it. So we don't need to do anything here. + return Ok(()); + } + let op = CRDTOperation { device_pub_id: device_pub_id.into(), model_id: model, diff --git a/core/crates/sync/src/lib.rs b/core/crates/sync/src/lib.rs index 56822509c..5b8d90efe 100644 --- a/core/crates/sync/src/lib.rs +++ b/core/crates/sync/src/lib.rs @@ -35,7 +35,7 @@ use sd_utils::uuid_to_bytes; use std::{collections::HashMap, sync::Arc}; -use tokio::sync::RwLock; +use tokio::{sync::RwLock, task::JoinError}; pub mod backfill; mod db_operation; @@ -77,6 +77,8 @@ pub enum Error { EmptyOperations, #[error("device not found: {0}")] DeviceNotFound(DevicePubId), + #[error("processes crdt task panicked")] + ProcessCrdtPanic(JoinError), } impl From for rspc::Error { diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index 382261b9d..a01b6716b 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -1,36 +1,39 @@ use sd_core_prisma_helpers::DevicePubId; use sd_prisma::{ - prisma::{crdt_operation, device, PrismaClient, SortOrder}, + prisma::{cloud_crdt_operation, crdt_operation, device, PrismaClient, SortOrder}, prisma_sync, }; -use sd_sync::{ - CRDTOperation, CompressedCRDTOperationsPerModel, CompressedCRDTOperationsPerModelPerDevice, - ModelId, OperationFactory, -}; +use sd_sync::{CRDTOperation, CompressedCRDTOperation, ModelId, OperationFactory, RecordId}; use sd_utils::timestamp_to_datetime; use std::{ - collections::BTreeMap, + collections::{hash_map::Entry, BTreeMap, HashMap}, fmt, num::NonZeroU128, sync::{ atomic::{self, AtomicBool}, Arc, }, + time::SystemTime, }; use async_stream::stream; use futures::Stream; -use futures_concurrency::future::TryJoin; -use tokio::sync::{broadcast, Mutex, Notify, RwLock}; +use futures_concurrency::future::{Join, TryJoin}; +use tokio::{ + spawn, + sync::{broadcast, Mutex, 
Notify, RwLock, Semaphore}, +}; use tracing::{debug, warn}; use uhlc::{HLCBuilder, HLC}; use uuid::Uuid; use super::{ - crdt_op_db, db_operation::from_crdt_ops, ingest_utils::process_crdt_operations, Error, - SyncEvent, TimestampPerDevice, NTP64, + crdt_op_db, + db_operation::{from_cloud_crdt_ops, from_crdt_ops}, + ingest_utils::process_crdt_operations, + Error, SyncEvent, TimestampPerDevice, NTP64, }; /// Wrapper that spawns the ingest actor and provides utilities for reading and writing sync operations. @@ -44,7 +47,8 @@ pub struct Manager { pub clock: Arc, pub active: Arc, pub active_notify: Arc, - pub sync_lock: Arc>, + pub(crate) sync_lock: Arc>, + pub(crate) available_parallelism: usize, } impl fmt::Debug for Manager { @@ -131,62 +135,154 @@ impl Manager { active: Arc::default(), active_notify: Arc::default(), sync_lock: Arc::new(Mutex::default()), + available_parallelism: std::thread::available_parallelism() + .map_or(1, std::num::NonZero::get), }, rx, )) } - pub async fn ingest_ops( + async fn fetch_cloud_crdt_ops( &self, - CompressedCRDTOperationsPerModelPerDevice(compressed_ops): CompressedCRDTOperationsPerModelPerDevice, - ) -> Result<(), Error> { - // WARN: this order here exists because sync messages MUST be processed in this exact order - // due to relationship dependencies between these tables. 
- const INGEST_ORDER: &[ModelId] = &[ - prisma_sync::device::MODEL_ID, - prisma_sync::storage_statistics::MODEL_ID, - prisma_sync::tag::MODEL_ID, - prisma_sync::location::MODEL_ID, - prisma_sync::object::MODEL_ID, - prisma_sync::exif_data::MODEL_ID, - prisma_sync::file_path::MODEL_ID, - prisma_sync::label::MODEL_ID, - prisma_sync::tag_on_object::MODEL_ID, - prisma_sync::label_on_object::MODEL_ID, - ]; + model_id: ModelId, + batch_size: i64, + ) -> Result<(Vec, Vec), Error> { + self.db + .cloud_crdt_operation() + .find_many(vec![cloud_crdt_operation::model::equals(i32::from( + model_id, + ))]) + .take(batch_size) + .order_by(cloud_crdt_operation::timestamp::order(SortOrder::Asc)) + .exec() + .await? + .into_iter() + .map(from_cloud_crdt_ops) + .collect::, Vec<_>), _>>() + } - let _lock_guard = self.sync_lock.lock().await; + async fn ingest_by_model(&self, model_id: ModelId) -> Result<(), Error> { + let (ops_ids, ops) = self.fetch_cloud_crdt_ops(model_id, 10_000).await?; + if ops_ids.is_empty() { + return Ok(()); + } - let mut ops_fut_by_model = INGEST_ORDER - .iter() - .map(|&model_id| (model_id, vec![])) - .collect::>(); + debug!( + messages_count = ops.len(), + first_message = ?ops + .first() + .map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)), + last_message = ?ops + .last() + .map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)), + model_id, + "Messages by model to ingest", + ); - for (device_pub_id, CompressedCRDTOperationsPerModel(ops_per_model)) in compressed_ops { - for (model_id, ops_per_record) in ops_per_model { - for (record_id, ops) in ops_per_record { - ops_fut_by_model - .get_mut(&model_id) - .ok_or(Error::InvalidModelId(model_id))? 
- .push(process_crdt_operations( - &self.clock, - &self.timestamp_per_device, - &self.db, - device_pub_id.into(), - model_id, - record_id, - ops, - )); + let mut compressed_map = + BTreeMap::, (RecordId, Vec)>>::new(); + + for CRDTOperation { + device_pub_id, + timestamp, + model_id: _, // Ignoring model_id as we know it already + record_id, + data, + } in ops + { + let records = compressed_map.entry(device_pub_id).or_default(); + + // Can't use RecordId as a key because rmpv::Value doesn't implement Hash + Eq. + // So we use it's serialized bytes as a key. + let record_id_bytes = + rmp_serde::to_vec_named(&record_id).expect("already serialized to Value"); + + match records.entry(record_id_bytes) { + Entry::Occupied(mut entry) => { + entry + .get_mut() + .1 + .push(CompressedCRDTOperation { timestamp, data }); + } + Entry::Vacant(entry) => { + entry.insert((record_id, vec![CompressedCRDTOperation { timestamp, data }])); } } } - for model_id in INGEST_ORDER { - if let Some(futs) = ops_fut_by_model.remove(model_id) { - futs.try_join().await?; - } + let _lock_guard = self.sync_lock.lock().await; + + let semaphore = &Arc::new(Semaphore::new(self.available_parallelism)); + + let handles = compressed_map + .into_iter() + .flat_map(|(device_pub_id, records)| { + records.into_values().map(move |(record_id, ops)| { + // We can process each record in parallel as they are independent + spawn({ + let clock = Arc::clone(&self.clock); + let timestamp_per_device = Arc::clone(&self.timestamp_per_device); + let db = Arc::clone(&self.db); + let device_pub_id = device_pub_id.into(); + let semaphore = Arc::clone(semaphore); + + async move { + let _permit = + semaphore.acquire().await.expect("semaphore never closes"); + + process_crdt_operations( + &clock, + ×tamp_per_device, + &db, + device_pub_id, + model_id, + record_id, + ops, + ) + .await + } + }) + }) + }) + .collect::>(); + + for res in handles.join().await { + res.map_err(Error::ProcessCrdtPanic)??; } + self.db + 
.cloud_crdt_operation() + .delete_many(vec![cloud_crdt_operation::id::in_vec(ops_ids)]) + .exec() + .await?; + + Ok(()) + } + + pub async fn ingest_ops(&self) -> Result<(), Error> { + // WARN: this order here exists because sync messages MUST be processed in this exact order + // due to relationship dependencies between these tables. + self.ingest_by_model(prisma_sync::device::MODEL_ID).await?; + + ( + self.ingest_by_model(prisma_sync::storage_statistics::MODEL_ID), + self.ingest_by_model(prisma_sync::tag::MODEL_ID), + self.ingest_by_model(prisma_sync::location::MODEL_ID), + self.ingest_by_model(prisma_sync::object::MODEL_ID), + self.ingest_by_model(prisma_sync::label::MODEL_ID), + ) + .try_join() + .await?; + + ( + self.ingest_by_model(prisma_sync::exif_data::MODEL_ID), + self.ingest_by_model(prisma_sync::file_path::MODEL_ID), + self.ingest_by_model(prisma_sync::tag_on_object::MODEL_ID), + self.ingest_by_model(prisma_sync::label_on_object::MODEL_ID), + ) + .try_join() + .await?; + if self.tx.send(SyncEvent::Ingested).is_err() { warn!("failed to send ingested message on `ingest_ops`"); } diff --git a/core/src/location/mod.rs b/core/src/location/mod.rs index a4a998995..d3baf8532 100644 --- a/core/src/location/mod.rs +++ b/core/src/location/mod.rs @@ -466,6 +466,7 @@ pub async fn scan_location( ) .await? } + ScanState::Indexed => { node.job_system .dispatch( @@ -478,6 +479,7 @@ pub async fn scan_location( ) .await? } + ScanState::FilesIdentified => { node.job_system .dispatch( diff --git a/crates/sync/src/compressed.rs b/crates/sync/src/compressed.rs index 47c38e7fe..a2e3a147d 100644 --- a/crates/sync/src/compressed.rs +++ b/crates/sync/src/compressed.rs @@ -46,7 +46,7 @@ impl CompressedCRDTOperationsPerModelPerDevice { // Can't use RecordId as a key because rmpv::Value doesn't implement Hash + Eq. // So we use it's serialized bytes as a key. 
let record_id_bytes = - rmp_serde::to_vec(&record_id).expect("already serialized to Value"); + rmp_serde::to_vec_named(&record_id).expect("already serialized to Value"); match records.entry(record_id_bytes) { Entry::Occupied(mut entry) => { From 3eba4bb31e30c3340a66cbddba5dacec36d9d0ab Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Mon, 21 Oct 2024 23:20:34 -0300 Subject: [PATCH 210/218] bruh --- core/crates/sync/src/ingest_utils.rs | 3 + core/crates/sync/src/manager.rs | 166 ++++++++++++++------------- 2 files changed, 92 insertions(+), 77 deletions(-) diff --git a/core/crates/sync/src/ingest_utils.rs b/core/crates/sync/src/ingest_utils.rs index e63f317ed..297dd3e5c 100644 --- a/core/crates/sync/src/ingest_utils.rs +++ b/core/crates/sync/src/ingest_utils.rs @@ -159,6 +159,7 @@ pub async fn process_crdt_operations( Ok(()) } +#[instrument(skip_all, err)] async fn handle_crdt_updates( db: &PrismaClient, device_pub_id: &DevicePubId, @@ -213,6 +214,7 @@ async fn handle_crdt_updates( .await } +#[instrument(skip_all, err)] async fn handle_crdt_create_and_updates( db: &PrismaClient, device_pub_id: &DevicePubId, @@ -291,6 +293,7 @@ async fn handle_crdt_create_and_updates( .await } +#[instrument(skip_all, err)] async fn handle_crdt_deletion( db: &PrismaClient, device_pub_id: &DevicePubId, diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index a01b6716b..d330852d4 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -25,7 +25,7 @@ use tokio::{ spawn, sync::{broadcast, Mutex, Notify, RwLock, Semaphore}, }; -use tracing::{debug, warn}; +use tracing::{debug, instrument, warn}; use uhlc::{HLCBuilder, HLC}; use uuid::Uuid; @@ -161,100 +161,112 @@ impl Manager { .collect::, Vec<_>), _>>() } + #[instrument(skip(self))] async fn ingest_by_model(&self, model_id: ModelId) -> Result<(), Error> { - let (ops_ids, ops) = self.fetch_cloud_crdt_ops(model_id, 10_000).await?; - if ops_ids.is_empty() { - return Ok(()); - } + 
let mut total_count = 0; - debug!( - messages_count = ops.len(), - first_message = ?ops - .first() - .map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)), - last_message = ?ops - .last() - .map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)), - model_id, - "Messages by model to ingest", - ); + loop { + let (ops_ids, ops) = self.fetch_cloud_crdt_ops(model_id, 10_000).await?; + if ops_ids.is_empty() { + break; + } - let mut compressed_map = - BTreeMap::, (RecordId, Vec)>>::new(); + debug!( + messages_count = ops.len(), + first_message = ?ops + .first() + .map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)), + last_message = ?ops + .last() + .map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)), + "Messages by model to ingest", + ); - for CRDTOperation { - device_pub_id, - timestamp, - model_id: _, // Ignoring model_id as we know it already - record_id, - data, - } in ops - { - let records = compressed_map.entry(device_pub_id).or_default(); + let mut compressed_map = + BTreeMap::, (RecordId, Vec)>>::new(); - // Can't use RecordId as a key because rmpv::Value doesn't implement Hash + Eq. - // So we use it's serialized bytes as a key. - let record_id_bytes = - rmp_serde::to_vec_named(&record_id).expect("already serialized to Value"); + for CRDTOperation { + device_pub_id, + timestamp, + model_id: _, // Ignoring model_id as we know it already + record_id, + data, + } in ops + { + let records = compressed_map.entry(device_pub_id).or_default(); - match records.entry(record_id_bytes) { - Entry::Occupied(mut entry) => { - entry - .get_mut() - .1 - .push(CompressedCRDTOperation { timestamp, data }); - } - Entry::Vacant(entry) => { - entry.insert((record_id, vec![CompressedCRDTOperation { timestamp, data }])); + // Can't use RecordId as a key because rmpv::Value doesn't implement Hash + Eq. + // So we use it's serialized bytes as a key. 
+ let record_id_bytes = + rmp_serde::to_vec_named(&record_id).expect("already serialized to Value"); + + match records.entry(record_id_bytes) { + Entry::Occupied(mut entry) => { + entry + .get_mut() + .1 + .push(CompressedCRDTOperation { timestamp, data }); + } + Entry::Vacant(entry) => { + entry + .insert((record_id, vec![CompressedCRDTOperation { timestamp, data }])); + } } } - } - let _lock_guard = self.sync_lock.lock().await; + let _lock_guard = self.sync_lock.lock().await; - let semaphore = &Arc::new(Semaphore::new(self.available_parallelism)); + let semaphore = &Arc::new(Semaphore::new(self.available_parallelism)); - let handles = compressed_map - .into_iter() - .flat_map(|(device_pub_id, records)| { - records.into_values().map(move |(record_id, ops)| { - // We can process each record in parallel as they are independent - spawn({ - let clock = Arc::clone(&self.clock); - let timestamp_per_device = Arc::clone(&self.timestamp_per_device); - let db = Arc::clone(&self.db); - let device_pub_id = device_pub_id.into(); - let semaphore = Arc::clone(semaphore); + let handles = compressed_map + .into_iter() + .flat_map(|(device_pub_id, records)| { + records.into_values().map(move |(record_id, ops)| { + // We can process each record in parallel as they are independent + spawn({ + let clock = Arc::clone(&self.clock); + let timestamp_per_device = Arc::clone(&self.timestamp_per_device); + let db = Arc::clone(&self.db); + let device_pub_id = device_pub_id.into(); + let semaphore = Arc::clone(semaphore); - async move { - let _permit = - semaphore.acquire().await.expect("semaphore never closes"); + async move { + let _permit = + semaphore.acquire().await.expect("semaphore never closes"); - process_crdt_operations( - &clock, - ×tamp_per_device, - &db, - device_pub_id, - model_id, - record_id, - ops, - ) - .await - } + let count = ops.len(); + + process_crdt_operations( + &clock, + ×tamp_per_device, + &db, + device_pub_id, + model_id, + record_id, + ops, + ) + .await + .map(|()| 
count) + } + }) }) }) - }) - .collect::>(); + .collect::>(); - for res in handles.join().await { - res.map_err(Error::ProcessCrdtPanic)??; + for res in handles.join().await { + let count = res.map_err(Error::ProcessCrdtPanic)??; + debug!(count, "Ingested operations of model"); + total_count += count; + } + + self.db + .cloud_crdt_operation() + .delete_many(vec![cloud_crdt_operation::id::in_vec(ops_ids)]) + .exec() + .await?; } - self.db - .cloud_crdt_operation() - .delete_many(vec![cloud_crdt_operation::id::in_vec(ops_ids)]) - .exec() - .await?; + debug!(total_count, "Ingested all operations of this model"); Ok(()) } From 114d0df4bb67ebcf43dd7daf010520f8249a36e8 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 22 Oct 2024 00:33:34 -0300 Subject: [PATCH 211/218] New ingester optimization attempt --- core/crates/sync/src/manager.rs | 93 +++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 34 deletions(-) diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index d330852d4..108ade013 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -9,7 +9,7 @@ use sd_utils::timestamp_to_datetime; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, - fmt, + fmt, mem, num::NonZeroU128, sync::{ atomic::{self, AtomicBool}, @@ -19,11 +19,11 @@ use std::{ }; use async_stream::stream; -use futures::Stream; -use futures_concurrency::future::{Join, TryJoin}; +use futures::{stream::FuturesUnordered, Stream, TryStreamExt}; +use futures_concurrency::future::TryJoin; use tokio::{ spawn, - sync::{broadcast, Mutex, Notify, RwLock, Semaphore}, + sync::{broadcast, Mutex, Notify, RwLock}, }; use tracing::{debug, instrument, warn}; use uhlc::{HLCBuilder, HLC}; @@ -36,6 +36,8 @@ use super::{ Error, SyncEvent, TimestampPerDevice, NTP64, }; +const INGESTION_BATCH_SIZE: i64 = 10_000; + /// Wrapper that spawns the ingest actor and provides utilities for reading and writing sync operations. 
#[derive(Clone)] pub struct Manager { @@ -165,14 +167,22 @@ impl Manager { async fn ingest_by_model(&self, model_id: ModelId) -> Result<(), Error> { let mut total_count = 0; + let mut buckets = (0..self.available_parallelism) + .map(|_| FuturesUnordered::new()) + .collect::>(); + loop { - let (ops_ids, ops) = self.fetch_cloud_crdt_ops(model_id, 10_000).await?; + let (ops_ids, ops) = self + .fetch_cloud_crdt_ops(model_id, INGESTION_BATCH_SIZE) + .await?; if ops_ids.is_empty() { break; } + let messages_count = ops.len(); + debug!( - messages_count = ops.len(), + messages_count, first_message = ?ops .first() .map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)), @@ -216,45 +226,60 @@ impl Manager { let _lock_guard = self.sync_lock.lock().await; - let semaphore = &Arc::new(Semaphore::new(self.available_parallelism)); - - let handles = compressed_map + compressed_map .into_iter() .flat_map(|(device_pub_id, records)| { records.into_values().map(move |(record_id, ops)| { // We can process each record in parallel as they are independent - spawn({ - let clock = Arc::clone(&self.clock); - let timestamp_per_device = Arc::clone(&self.timestamp_per_device); - let db = Arc::clone(&self.db); - let device_pub_id = device_pub_id.into(); - let semaphore = Arc::clone(semaphore); - async move { - let _permit = - semaphore.acquire().await.expect("semaphore never closes"); + let clock = Arc::clone(&self.clock); + let timestamp_per_device = Arc::clone(&self.timestamp_per_device); + let db = Arc::clone(&self.db); + let device_pub_id = device_pub_id.into(); - let count = ops.len(); + async move { + let count = ops.len(); - process_crdt_operations( - &clock, - ×tamp_per_device, - &db, - device_pub_id, - model_id, - record_id, - ops, - ) - .await - .map(|()| count) - } - }) + process_crdt_operations( + &clock, + ×tamp_per_device, + &db, + device_pub_id, + model_id, + record_id, + ops, + ) + .await + .map(|()| count) + } + }) + }) + .enumerate() + 
.for_each(|(idx, fut)| buckets[idx % self.available_parallelism].push(fut)); + + let handles = buckets + .iter_mut() + .enumerate() + .filter(|(_idx, bucket)| !bucket.is_empty()) + .map(|(idx, bucket)| { + let mut bucket = mem::take(bucket); + + spawn(async move { + let mut ops_count = 0; + while let Some(count) = bucket.try_next().await? { + ops_count += count; + } + + Ok::<_, Error>((ops_count, idx, bucket)) }) }) .collect::>(); - for res in handles.join().await { - let count = res.map_err(Error::ProcessCrdtPanic)??; + for res in handles.try_join().await.map_err(Error::ProcessCrdtPanic)? { + let (count, idx, bucket) = res?; + + buckets[idx] = bucket; + debug!(count, "Ingested operations of model"); total_count += count; } From 2313a7f2e3a0a9c0ebc71ade9c0cc5ec4118ac50 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 22 Oct 2024 00:51:49 -0300 Subject: [PATCH 212/218] Small bootstrap fix --- core/src/api/cloud/mod.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/core/src/api/cloud/mod.rs b/core/src/api/cloud/mod.rs index dd2bde419..611fb0c31 100644 --- a/core/src/api/cloud/mod.rs +++ b/core/src/api/cloud/mod.rs @@ -215,7 +215,15 @@ pub(crate) fn mount() -> AlphaRouter { }| { let node = &node; - async move { initialize_cloud_sync(pub_id, library, node).await } + async move { + match initialize_cloud_sync(pub_id, library, node).await { + // If we don't have this library locally, we didn't joined this group yet + Ok(()) | Err(LibraryManagerError::LibraryNotFound) => { + Ok(()) + } + Err(e) => Err(e), + } + } }, ) .collect::>() From 86e4d21d1c5777751cc7c813cfb81034b981138a Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 22 Oct 2024 01:39:44 -0300 Subject: [PATCH 213/218] More optimization attempts and collecting metrics --- core/crates/sync/src/ingest_utils.rs | 40 +++++++++++++++++++---- core/crates/sync/src/manager.rs | 49 ++++++++++++++++++++++++---- 2 files changed, 75 insertions(+), 14 deletions(-) diff --git 
a/core/crates/sync/src/ingest_utils.rs b/core/crates/sync/src/ingest_utils.rs index 297dd3e5c..8fa2a89b0 100644 --- a/core/crates/sync/src/ingest_utils.rs +++ b/core/crates/sync/src/ingest_utils.rs @@ -8,9 +8,10 @@ use sd_sync::{ CRDTOperation, CRDTOperationData, CompressedCRDTOperation, ModelId, OperationKind, RecordId, }; -use std::{collections::BTreeMap, num::NonZeroU128}; +use std::{collections::BTreeMap, num::NonZeroU128, sync::Arc}; use futures_concurrency::future::TryJoin; +use tokio::sync::Mutex; use tracing::{debug, instrument, trace, warn}; use uhlc::{Timestamp, HLC, NTP64}; use uuid::Uuid; @@ -24,11 +25,11 @@ crdt_operation::select!(crdt_operation_id { id }); pub async fn process_crdt_operations( clock: &HLC, timestamp_per_device: &TimestampPerDevice, + sync_lock: Arc>, db: &PrismaClient, device_pub_id: DevicePubId, model_id: ModelId, - record_id: RecordId, - mut ops: Vec, + (record_id, mut ops): (RecordId, Vec), ) -> Result<(), Error> { ops.sort_by_key(|op| op.timestamp); @@ -52,7 +53,15 @@ pub async fn process_crdt_operations( .find(|op| matches!(op.data, CRDTOperationData::Delete)) { trace!("Deleting operation"); - handle_crdt_deletion(db, &device_pub_id, model_id, record_id, delete_op).await?; + handle_crdt_deletion( + db, + &sync_lock, + &device_pub_id, + model_id, + record_id, + delete_op, + ) + .await?; } // Create + > 0 Update - overwrites the create's data with the updates else if let Some(timestamp) = ops @@ -78,8 +87,16 @@ pub async fn process_crdt_operations( return Ok(()); } - handle_crdt_create_and_updates(db, &device_pub_id, model_id, record_id, ops, timestamp) - .await?; + handle_crdt_create_and_updates( + db, + &sync_lock, + &device_pub_id, + model_id, + record_id, + ops, + timestamp, + ) + .await?; } // > 0 Update - batches updates with a fake Create op else { @@ -138,7 +155,7 @@ pub async fn process_crdt_operations( } } - handle_crdt_updates(db, &device_pub_id, model_id, record_id, data).await?; + handle_crdt_updates(db, &sync_lock, 
&device_pub_id, model_id, record_id, data).await?; } // read the timestamp for the operation's device, or insert one if it doesn't exist @@ -162,6 +179,7 @@ pub async fn process_crdt_operations( #[instrument(skip_all, err)] async fn handle_crdt_updates( db: &PrismaClient, + sync_lock: &Mutex<()>, device_pub_id: &DevicePubId, model_id: ModelId, record_id: rmpv::Value, @@ -169,6 +187,8 @@ async fn handle_crdt_updates( ) -> Result<(), Error> { let device_pub_id = sd_sync::DevicePubId::from(device_pub_id); + let _lock_guard = sync_lock.lock().await; + db._transaction() .with_timeout(30 * 10000) .with_max_wait(30 * 10000) @@ -217,6 +237,7 @@ async fn handle_crdt_updates( #[instrument(skip_all, err)] async fn handle_crdt_create_and_updates( db: &PrismaClient, + sync_lock: &Mutex<()>, device_pub_id: &DevicePubId, model_id: ModelId, record_id: rmpv::Value, @@ -251,6 +272,8 @@ async fn handle_crdt_create_and_updates( } } + let _lock_guard = sync_lock.lock().await; + db._transaction() .with_timeout(30 * 10000) .with_max_wait(30 * 10000) @@ -296,6 +319,7 @@ async fn handle_crdt_create_and_updates( #[instrument(skip_all, err)] async fn handle_crdt_deletion( db: &PrismaClient, + sync_lock: &Mutex<()>, device_pub_id: &DevicePubId, model: u16, record_id: rmpv::Value, @@ -327,6 +351,8 @@ async fn handle_crdt_deletion( data: CRDTOperationData::Delete, }; + let _lock_guard = sync_lock.lock().await; + db._transaction() .with_timeout(30 * 10000) .with_max_wait(30 * 10000) diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index 108ade013..c70e31ba5 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -15,7 +15,7 @@ use std::{ atomic::{self, AtomicBool}, Arc, }, - time::SystemTime, + time::{Duration, SystemTime}, }; use async_stream::stream; @@ -24,6 +24,7 @@ use futures_concurrency::future::TryJoin; use tokio::{ spawn, sync::{broadcast, Mutex, Notify, RwLock}, + time::Instant, }; use tracing::{debug, instrument, warn}; use 
uhlc::{HLCBuilder, HLC}; @@ -171,7 +172,14 @@ impl Manager { .map(|_| FuturesUnordered::new()) .collect::>(); + let mut total_fetch_time = Duration::ZERO; + let mut total_compression_time = Duration::ZERO; + let mut total_work_distribution_time = Duration::ZERO; + let mut total_process_time = Duration::ZERO; + loop { + let fetching_start = Instant::now(); + let (ops_ids, ops) = self .fetch_cloud_crdt_ops(model_id, INGESTION_BATCH_SIZE) .await?; @@ -179,6 +187,8 @@ impl Manager { break; } + total_fetch_time += fetching_start.elapsed(); + let messages_count = ops.len(); debug!( @@ -192,6 +202,8 @@ impl Manager { "Messages by model to ingest", ); + let compression_start = Instant::now(); + let mut compressed_map = BTreeMap::, (RecordId, Vec)>>::new(); @@ -224,7 +236,9 @@ impl Manager { } } - let _lock_guard = self.sync_lock.lock().await; + total_compression_time += compression_start.elapsed(); + + let work_distribution_start = Instant::now(); compressed_map .into_iter() @@ -236,6 +250,7 @@ impl Manager { let timestamp_per_device = Arc::clone(&self.timestamp_per_device); let db = Arc::clone(&self.db); let device_pub_id = device_pub_id.into(); + let sync_lock = Arc::clone(&self.sync_lock); async move { let count = ops.len(); @@ -243,11 +258,11 @@ impl Manager { process_crdt_operations( &clock, ×tamp_per_device, + sync_lock, &db, device_pub_id, model_id, - record_id, - ops, + (record_id, ops), ) .await .map(|()| count) @@ -257,6 +272,10 @@ impl Manager { .enumerate() .for_each(|(idx, fut)| buckets[idx % self.available_parallelism].push(fut)); + total_work_distribution_time += work_distribution_start.elapsed(); + + let processing_start = Instant::now(); + let handles = buckets .iter_mut() .enumerate() @@ -266,21 +285,30 @@ impl Manager { spawn(async move { let mut ops_count = 0; + let processing_start = Instant::now(); while let Some(count) = bucket.try_next().await? 
{ ops_count += count; } + debug!( + "Ingested {ops_count} operations in {:?}", + processing_start.elapsed() + ); + Ok::<_, Error>((ops_count, idx, bucket)) }) }) .collect::>(); - for res in handles.try_join().await.map_err(Error::ProcessCrdtPanic)? { + let results = handles.try_join().await.map_err(Error::ProcessCrdtPanic)?; + + total_process_time += processing_start.elapsed(); + + for res in results { let (count, idx, bucket) = res?; buckets[idx] = bucket; - debug!(count, "Ingested operations of model"); total_count += count; } @@ -291,7 +319,14 @@ impl Manager { .await?; } - debug!(total_count, "Ingested all operations of this model"); + debug!( + total_count, + ?total_fetch_time, + ?total_compression_time, + ?total_work_distribution_time, + ?total_process_time, + "Ingested all operations of this model" + ); Ok(()) } From b8d9a44da72bdadc6b67eaf5fb01f8c2ce257de1 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 22 Oct 2024 12:52:23 -0300 Subject: [PATCH 214/218] Introducing partial indexes on crdt_operations and more optimization on ingestion --- core/crates/sync/src/ingest_utils.rs | 70 +++++++++++++++++++--------- core/src/library/manager/mod.rs | 66 ++++++++++++++++++++++++-- 2 files changed, 110 insertions(+), 26 deletions(-) diff --git a/core/crates/sync/src/ingest_utils.rs b/core/crates/sync/src/ingest_utils.rs index 8fa2a89b0..7cf7606a2 100644 --- a/core/crates/sync/src/ingest_utils.rs +++ b/core/crates/sync/src/ingest_utils.rs @@ -114,30 +114,40 @@ pub async fn process_crdt_operations( } } + let earlier_time = data.values().fold( + NTP64(u64::from(u32::MAX)), + |earlier_time, (_, timestamp)| { + if timestamp.0 < earlier_time.0 { + *timestamp + } else { + earlier_time + } + }, + ); + // conflict resolution - let (create, newer_updates_count) = db + let (create, possible_newer_updates_count) = db ._batch(( db.crdt_operation().count(vec![ crdt_operation::model::equals(i32::from(model_id)), 
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?), crdt_operation::kind::equals(OperationKind::Create.to_string()), ]), - data.iter() - .map(|(k, (_, timestamp))| { - Ok(db.crdt_operation().count(vec![ - crdt_operation::timestamp::gt({ - #[allow(clippy::cast_possible_wrap)] - // SAFETY: we had to store using i64 due to SQLite limitations - { - timestamp.as_u64() as i64 - } - }), - crdt_operation::model::equals(i32::from(model_id)), - crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?), - crdt_operation::kind::contains(format!(":{k}:")), - ])) - }) - .collect::, Error>>()?, + // Fetching all update operations newer than our current earlier timestamp + db.crdt_operation() + .find_many(vec![ + crdt_operation::timestamp::gt({ + #[allow(clippy::cast_possible_wrap)] + // SAFETY: we had to store using i64 due to SQLite limitations + { + earlier_time.as_u64() as i64 + } + }), + crdt_operation::model::equals(i32::from(model_id)), + crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?), + crdt_operation::kind::starts_with("u".to_string()), + ]) + .select(crdt_operation::select!({ kind timestamp })), )) .await?; @@ -146,12 +156,28 @@ pub async fn process_crdt_operations( return Ok(()); } - let keys = data.keys().cloned().collect::>(); + for candidate in possible_newer_updates_count { + // The first element is "u" meaning that this is an update, so we skip it + for key in candidate + .kind + .split(':') + .filter(|field| !field.is_empty()) + .skip(1) + { + // remove entries if we possess locally more recent updates for this field + if data.get(key).is_some_and(|(_, new_timestamp)| { + #[allow(clippy::cast_sign_loss)] + { + // we need to store as i64 due to SQLite limitations + *new_timestamp < NTP64(candidate.timestamp as u64) + } + }) { + data.remove(key); + } + } - // remove entries if we possess locally more recent updates for this field - for (update, key) in newer_updates_count.into_iter().zip(keys) { - if update > 0 { - 
data.remove(&key); + if data.is_empty() { + break; } } diff --git a/core/src/library/manager/mod.rs b/core/src/library/manager/mod.rs index 06aa0b8c9..2cd6652c3 100644 --- a/core/src/library/manager/mod.rs +++ b/core/src/library/manager/mod.rs @@ -9,8 +9,13 @@ use crate::{ }; use sd_core_sync::{SyncEvent, SyncManager}; + use sd_p2p::{Identity, RemoteIdentity}; -use sd_prisma::prisma::{device, instance, location}; +use sd_prisma::{ + prisma::{self, device, instance, location, PrismaClient}, + prisma_sync, +}; +use sd_sync::ModelId; use sd_utils::{ db, error::{FileIOError, NonUtf8PathError}, @@ -28,6 +33,7 @@ use std::{ use chrono::Utc; use futures_concurrency::future::{Join, TryJoin}; +use prisma_client_rust::Raw; use tokio::{ fs, io, spawn, sync::{broadcast, RwLock}, @@ -458,6 +464,10 @@ impl Libraries { ); let db = Arc::new(db::load_and_migrate(&db_url).await?); + // Configure database + configure_pragmas(&db).await?; + special_sync_indexes(&db).await?; + if let Some(create) = maybe_create_device { create.to_query(&db).exec().await?; } @@ -552,9 +562,6 @@ impl Libraries { ) .await?; - // Configure database - configure_pragmas(&db).await?; - let library = Library::new(id, config, instance_id, identity, db, node, sync).await; // This is an exception. Generally subscribe to this by `self.tx.subscribe`. 
@@ -635,3 +642,54 @@ async fn sync_rx_actor( } } } + +async fn special_sync_indexes(db: &PrismaClient) -> Result<(), LibraryManagerError> { + async fn create_index( + db: &PrismaClient, + model_id: ModelId, + model_name: &str, + ) -> Result<(), LibraryManagerError> { + db._execute_raw(Raw::new( + &format!( + "CREATE INDEX IF NOT EXISTS partial_index_model_{model_name} \ + ON crdt_operation(model,record_id,kind,timestamp) \ + WHERE model = {model_id} + " + ), + vec![], + )) + .exec() + .await?; + + debug!(model_name, "Created sync partial index"); + + Ok(()) + } + + for (model_id, model_name) in [ + (prisma_sync::device::MODEL_ID, prisma::device::NAME), + ( + prisma_sync::storage_statistics::MODEL_ID, + prisma::storage_statistics::NAME, + ), + (prisma_sync::tag::MODEL_ID, prisma::tag::NAME), + (prisma_sync::location::MODEL_ID, prisma::location::NAME), + (prisma_sync::object::MODEL_ID, prisma::object::NAME), + (prisma_sync::label::MODEL_ID, prisma::label::NAME), + (prisma_sync::exif_data::MODEL_ID, prisma::exif_data::NAME), + (prisma_sync::file_path::MODEL_ID, prisma::file_path::NAME), + ( + prisma_sync::tag_on_object::MODEL_ID, + prisma::tag_on_object::NAME, + ), + ( + prisma_sync::label_on_object::MODEL_ID, + prisma::label_on_object::NAME, + ), + ] { + // Creating indexes sequentially just in case + create_index(db, model_id, model_name).await?; + } + + Ok(()) +} From 3190a7b822497b0545140e5464db2f69b8b5440c Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Tue, 22 Oct 2024 13:26:55 -0300 Subject: [PATCH 215/218] Separate timestamp keepers per sync group --- core/crates/cloud-services/src/error.rs | 2 ++ core/crates/cloud-services/src/sync/receive.rs | 16 ++++++++++++---- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/core/crates/cloud-services/src/error.rs b/core/crates/cloud-services/src/error.rs index 5fb4691be..f90ee028e 100644 --- a/core/crates/cloud-services/src/error.rs +++ b/core/crates/cloud-services/src/error.rs @@ -123,6 +123,8 @@ pub 
enum Error { EndUpdatePushSyncMessages(io::Error), #[error("Unexpected end of stream while encrypting sync messages")] UnexpectedEndOfStream, + #[error("Failed to create directory to store timestamp keeper files")] + FailedToCreateTimestampKeepersDirectory(io::Error), #[error("Failed to read last timestamp keeper for pulling sync messages: {0}")] FailedToReadLastTimestampKeeper(io::Error), #[error("Failed to handle last timestamp keeper serialization: {0}")] diff --git a/core/crates/cloud-services/src/sync/receive.rs b/core/crates/cloud-services/src/sync/receive.rs index af915c9a9..f4db4b4c5 100644 --- a/core/crates/cloud-services/src/sync/receive.rs +++ b/core/crates/cloud-services/src/sync/receive.rs @@ -40,7 +40,7 @@ use uuid::Uuid; use super::{ReceiveAndIngestNotifiers, SyncActors, ONE_MINUTE}; -const CLOUD_SYNC_DATA_KEEPER_FILE: &str = "cloud_sync_data_keeper.bin"; +const CLOUD_SYNC_DATA_KEEPER_DIRECTORY: &str = "cloud_sync_data_keeper"; /// Responsible for downloading sync operations from the cloud to be processed by the ingester @@ -111,7 +111,7 @@ impl Receiver { active_notify: Arc, ) -> Result { let (keeper, cloud_client, key_manager) = ( - LastTimestampKeeper::load(data_dir.as_ref()), + LastTimestampKeeper::load(data_dir.as_ref(), sync_group_pub_id), cloud_services.client(), cloud_services.key_manager(), ) @@ -317,8 +317,16 @@ struct LastTimestampKeeper { } impl LastTimestampKeeper { - async fn load(data_dir: &Path) -> Result { - let file_path = data_dir.join(CLOUD_SYNC_DATA_KEEPER_FILE).into_boxed_path(); + async fn load(data_dir: &Path, sync_group_pub_id: groups::PubId) -> Result { + let cloud_sync_data_directory = data_dir.join(CLOUD_SYNC_DATA_KEEPER_DIRECTORY); + + fs::create_dir_all(&cloud_sync_data_directory) + .await + .map_err(Error::FailedToCreateTimestampKeepersDirectory)?; + + let file_path = cloud_sync_data_directory + .join(format!("{sync_group_pub_id}.bin")) + .into_boxed_path(); match fs::read(&file_path).await { Ok(bytes) => Ok(Self { From 
9913b62ea267e8dc20902688c4f9aa3cf0f83adf Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Wed, 23 Oct 2024 03:01:04 -0300 Subject: [PATCH 216/218] Next attempt at ingestion optimization --- Cargo.lock | Bin 331411 -> 331432 bytes core/crates/sync/Cargo.toml | 1 + core/crates/sync/src/ingest_utils.rs | 170 +++++++++++++++++++++++---- core/crates/sync/src/manager.rs | 118 ++++++++++++++++++- 4 files changed, 262 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 082cbc3c36e6e6886d58a9c73488ea16165bf231..5395ee416cca6d75bc05cc2f78838f10b66a7968 100644 GIT binary patch delta 39 vcmbO{S7gOpk%kt=7N#xC-)koy@fM$+F@f1)dQ}lKU%P!B^LG0>mXHYmJ_!#F delta 40 wcmZ26S7h>Bk%kt=7N#xC-)pD0, + sync_lock: Arc>, +) -> Result<(), Error> { + let latest_timestamp = ops.iter().fold(NTP64(0), |latest, (_, op)| { + if latest < op.timestamp { + op.timestamp + } else { + latest + } + }); + + update_clock(clock, latest_timestamp, &device_pub_id); + + let ops = ops + .into_iter() + .map(|(record_id, op)| { + rmp_serde::to_vec(&record_id) + .map(|serialized_record_id| (record_id, serialized_record_id, op)) + }) + .collect::, _>>()?; + + // conflict resolution + let delete_counts = db + ._batch( + ops.iter() + .map(|(_, serialized_record_id, _)| { + db.crdt_operation().count(vec![ + crdt_operation::model::equals(i32::from(model_id)), + crdt_operation::record_id::equals(serialized_record_id.clone()), + crdt_operation::kind::equals(OperationKind::Delete.to_string()), + ]) + }) + .collect::>(), + ) + .await?; + + let lock_guard = sync_lock.lock().await; + + db._transaction() + .with_timeout(30 * 10000) + .with_max_wait(30 * 10000) + .run(|db| { + let device_pub_id = device_pub_id.clone(); + + async move { + // complying with borrowck + let device_pub_id = &device_pub_id; + + let (crdt_creates, model_sync_data) = ops + .into_iter() + .zip(delete_counts) + .filter_map(|(data, delete_count)| (delete_count == 0).then_some(data)) + .map( + |( + record_id, + serialized_record_id, + 
CompressedCRDTOperation { timestamp, data }, + )| { + let crdt_create = crdt_operation::CreateUnchecked { + timestamp: { + #[allow(clippy::cast_possible_wrap)] + // SAFETY: we have to store using i64 due to SQLite limitations + { + timestamp.0 as i64 + } + }, + model: i32::from(model_id), + record_id: serialized_record_id, + kind: "c".to_string(), + data: rmp_serde::to_vec(&data)?, + device_pub_id: device_pub_id.to_db(), + _params: vec![], + }; + + // NOTE(@fogodev): I wish I could do a create many here instead of creating separately each + // entry, but it's not supported by PCR + let model_sync_data = ModelSyncData::from_op(CRDTOperation { + device_pub_id: Uuid::from(device_pub_id), + model_id, + record_id, + timestamp, + data, + })? + .exec(&db); + + Ok::<_, Error>((crdt_create, model_sync_data)) + }, + ) + .collect::, _>>()? + .into_iter() + .unzip::<_, _, Vec<_>, Vec<_>>(); + + model_sync_data.try_join().await?; + + db.crdt_operation().create_many(crdt_creates).exec().await?; + + Ok::<_, Error>(()) + } + }) + .await?; + + drop(lock_guard); + + update_timestamp_per_device(timestamp_per_device, device_pub_id, latest_timestamp).await; Ok(()) } @@ -389,3 +481,37 @@ async fn handle_crdt_deletion( }) .await } + +fn update_clock(clock: &HLC, latest_timestamp: NTP64, device_pub_id: &DevicePubId) { + // first, we update the HLC's timestamp with the incoming one. 
+ // this involves a drift check + sets the last time of the clock + clock + .update_with_timestamp(&Timestamp::new( + latest_timestamp, + uhlc::ID::from( + NonZeroU128::new(Uuid::from(device_pub_id).to_u128_le()).expect("Non zero id"), + ), + )) + .expect("timestamp has too much drift!"); +} + +async fn update_timestamp_per_device( + timestamp_per_device: &TimestampPerDevice, + device_pub_id: DevicePubId, + latest_timestamp: NTP64, +) { + // read the timestamp for the operation's device, or insert one if it doesn't exist + let current_last_timestamp = timestamp_per_device + .read() + .await + .get(&device_pub_id) + .copied(); + + // update the stored timestamp for this device - will be derived from the crdt operations table on restart + let new_ts = NTP64::max(current_last_timestamp.unwrap_or_default(), latest_timestamp); + + timestamp_per_device + .write() + .await + .insert(device_pub_id, new_ts); +} diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index c70e31ba5..e4d830bcc 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -4,7 +4,9 @@ use sd_prisma::{ prisma::{cloud_crdt_operation, crdt_operation, device, PrismaClient, SortOrder}, prisma_sync, }; -use sd_sync::{CRDTOperation, CompressedCRDTOperation, ModelId, OperationFactory, RecordId}; +use sd_sync::{ + CRDTOperation, CRDTOperationData, CompressedCRDTOperation, ModelId, OperationFactory, RecordId, +}; use sd_utils::timestamp_to_datetime; use std::{ @@ -21,6 +23,7 @@ use std::{ use async_stream::stream; use futures::{stream::FuturesUnordered, Stream, TryStreamExt}; use futures_concurrency::future::TryJoin; +use itertools::Itertools; use tokio::{ spawn, sync::{broadcast, Mutex, Notify, RwLock}, @@ -33,7 +36,7 @@ use uuid::Uuid; use super::{ crdt_op_db, db_operation::{from_cloud_crdt_ops, from_crdt_ops}, - ingest_utils::process_crdt_operations, + ingest_utils::{bulk_ingest_create_only_ops, process_crdt_operations}, Error, SyncEvent, 
TimestampPerDevice, NTP64, }; @@ -236,6 +239,32 @@ impl Manager { } } + // Now that we separated all operations by their record_ids, we can do an optimization + // to process all records that only posses a single create operation, batching them together + let mut create_only_ops: BTreeMap> = + BTreeMap::new(); + for (device_pub_id, records) in &mut compressed_map { + for (record_id, ops) in records.values_mut() { + if ops.len() == 1 && matches!(ops[0].data, CRDTOperationData::Create(_)) { + create_only_ops + .entry(*device_pub_id) + .or_default() + .push((mem::replace(record_id, rmpv::Value::Nil), ops.remove(0))); + } + } + } + + bulk_process_of_create_only_ops( + self.available_parallelism, + Arc::clone(&self.clock), + Arc::clone(&self.timestamp_per_device), + Arc::clone(&self.db), + Arc::clone(&self.sync_lock), + model_id, + create_only_ops, + ) + .await?; + total_compression_time += compression_start.elapsed(); let work_distribution_start = Instant::now(); @@ -243,7 +272,11 @@ impl Manager { compressed_map .into_iter() .flat_map(|(device_pub_id, records)| { - records.into_values().map(move |(record_id, ops)| { + records.into_values().filter_map(move |(record_id, ops)| { + if record_id.is_nil() { + return None; + } + // We can process each record in parallel as they are independent let clock = Arc::clone(&self.clock); @@ -252,7 +285,7 @@ impl Manager { let device_pub_id = device_pub_id.into(); let sync_lock = Arc::clone(&self.sync_lock); - async move { + Some(async move { let count = ops.len(); process_crdt_operations( @@ -266,7 +299,7 @@ impl Manager { ) .await .map(|()| count) - } + }) }) }) .enumerate() @@ -618,6 +651,81 @@ impl Manager { // } } +async fn bulk_process_of_create_only_ops( + available_parallelism: usize, + clock: Arc, + timestamp_per_device: TimestampPerDevice, + db: Arc, + sync_lock: Arc>, + model_id: ModelId, + create_only_ops: BTreeMap>, +) -> Result { + let buckets = (0..available_parallelism) + .map(|_| FuturesUnordered::new()) + 
.collect::>(); + + let mut bucket_idx = 0; + + for (device_pub_id, records) in create_only_ops { + records + .into_iter() + .chunks(100) + .into_iter() + .for_each(|chunk| { + let ops = chunk.collect::>(); + + buckets[bucket_idx % available_parallelism].push({ + let clock = Arc::clone(&clock); + let timestamp_per_device = Arc::clone(×tamp_per_device); + let db = Arc::clone(&db); + let device_pub_id = device_pub_id.into(); + let sync_lock = Arc::clone(&sync_lock); + + async move { + let count = ops.len(); + bulk_ingest_create_only_ops( + &clock, + ×tamp_per_device, + &db, + device_pub_id, + model_id, + ops, + sync_lock, + ) + .await + .map(|()| count) + } + }); + + bucket_idx += 1; + }); + } + + let handles = buckets + .into_iter() + .map(|mut bucket| { + spawn(async move { + let mut total_count = 0; + + while let Some(count) = bucket.try_next().await? { + total_count += count; + } + + Ok::<_, Error>(total_count) + }) + }) + .collect::>(); + + Ok(handles + .try_join() + .await + .map_err(Error::ProcessCrdtPanic)? + .into_iter() + .collect::, _>>()? 
+ .into_iter() + .sum()) +} + impl OperationFactory for Manager { fn get_clock(&self) -> &HLC { &self.clock From e4e6a116cca38f36fba3514349059f8418fd5030 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Wed, 23 Oct 2024 13:14:18 -0300 Subject: [PATCH 217/218] More ingestion metrics --- core/crates/cloud-services/src/p2p/mod.rs | 39 +++++++++++++----- core/crates/cloud-services/src/sync/ingest.rs | 14 +++++-- core/crates/sync/src/manager.rs | 41 ++++++++++++------- 3 files changed, 65 insertions(+), 29 deletions(-) diff --git a/core/crates/cloud-services/src/p2p/mod.rs b/core/crates/cloud-services/src/p2p/mod.rs index 316b6f028..0f31f977c 100644 --- a/core/crates/cloud-services/src/p2p/mod.rs +++ b/core/crates/cloud-services/src/p2p/mod.rs @@ -9,20 +9,20 @@ use sd_cloud_schema::{ }; use sd_crypto::{CryptoRng, SeedableRng}; -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use iroh_net::{ discovery::{ dns::DnsDiscovery, local_swarm_discovery::LocalSwarmDiscovery, pkarr::dht::DhtDiscovery, - ConcurrentDiscovery, + ConcurrentDiscovery, Discovery, }, relay::{RelayMap, RelayMode, RelayUrl}, Endpoint, NodeId, }; use reqwest::Url; use serde::{Deserialize, Serialize}; -use tokio::{spawn, sync::oneshot}; -use tracing::error; +use tokio::{spawn, sync::oneshot, time::sleep}; +use tracing::{debug, error, warn}; mod new_sync_messages_notifier; mod runner; @@ -110,6 +110,12 @@ impl CloudP2P { dns_pkarr_url: Url, relay_url: RelayUrl, ) -> Result { + let dht_discovery = DhtDiscovery::builder() + .secret_key(iroh_secret_key.clone()) + .pkarr_relay(dns_pkarr_url) + .build() + .map_err(Error::DhtDiscoveryInit)?; + let endpoint = Endpoint::builder() .alpns(vec![CloudP2PALPN::LATEST.to_vec()]) .discovery(Box::new(ConcurrentDiscovery::from_services(vec![ @@ -118,13 +124,7 @@ impl CloudP2P { LocalSwarmDiscovery::new(iroh_secret_key.public()) .map_err(Error::LocalSwarmDiscoveryInit)?, ), - Box::new( - DhtDiscovery::builder() - .secret_key(iroh_secret_key.clone()) - 
.pkarr_relay(dns_pkarr_url) - .build() - .map_err(Error::DhtDiscoveryInit)?, - ), + Box::new(dht_discovery.clone()), ]))) .secret_key(iroh_secret_key) .relay_mode(RelayMode::Custom(RelayMap::from_url(relay_url))) @@ -132,6 +132,23 @@ impl CloudP2P { .await .map_err(Error::CreateCloudP2PEndpoint)?; + spawn({ + let endpoint = endpoint.clone(); + async move { + loop { + let Ok(node_addr) = endpoint.node_addr().await.map_err(|e| { + warn!(?e, "Failed to get direct addresses to force publish on DHT"); + }) else { + sleep(Duration::from_secs(5)).await; + continue; + }; + + debug!("Force publishing peer on DHT"); + return dht_discovery.publish(&node_addr.info); + } + } + }); + let (msgs_tx, msgs_rx) = flume::bounded(16); spawn({ diff --git a/core/crates/cloud-services/src/sync/ingest.rs b/core/crates/cloud-services/src/sync/ingest.rs index 9592b64ab..a7dd65af3 100644 --- a/core/crates/cloud-services/src/sync/ingest.rs +++ b/core/crates/cloud-services/src/sync/ingest.rs @@ -14,7 +14,10 @@ use std::{ use futures::FutureExt; use futures_concurrency::future::Race; -use tokio::{sync::Notify, time::sleep}; +use tokio::{ + sync::Notify, + time::{sleep, Instant}, +}; use tracing::{debug, error}; use super::{ReceiveAndIngestNotifiers, SyncActors, ONE_MINUTE}; @@ -84,6 +87,8 @@ impl Ingester { } async fn run_loop_iteration(&self) -> Result<(), Error> { + let start = Instant::now(); + let operations_to_ingest_count = self .sync .db @@ -103,11 +108,12 @@ impl Ingester { "Starting sync messages cloud ingestion loop" ); - self.sync.ingest_ops().await?; + let ingested_count = self.sync.ingest_ops().await?; debug!( - operations_to_ingest_count, - "Finished sync messages cloud ingestion loop" + ingested_count, + "Finished sync messages cloud ingestion loop in {:?}", + start.elapsed() ); Ok(()) diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs index e4d830bcc..47460afac 100644 --- a/core/crates/sync/src/manager.rs +++ b/core/crates/sync/src/manager.rs @@ -168,7 
+168,7 @@ impl Manager { } #[instrument(skip(self))] - async fn ingest_by_model(&self, model_id: ModelId) -> Result<(), Error> { + async fn ingest_by_model(&self, model_id: ModelId) -> Result { let mut total_count = 0; let mut buckets = (0..self.available_parallelism) @@ -254,7 +254,7 @@ impl Manager { } } - bulk_process_of_create_only_ops( + total_count += bulk_process_of_create_only_ops( self.available_parallelism, Arc::clone(&self.clock), Arc::clone(&self.timestamp_per_device), @@ -361,38 +361,44 @@ impl Manager { "Ingested all operations of this model" ); - Ok(()) + Ok(total_count) } - pub async fn ingest_ops(&self) -> Result<(), Error> { + pub async fn ingest_ops(&self) -> Result { + let mut total_count = 0; + // WARN: this order here exists because sync messages MUST be processed in this exact order // due to relationship dependencies between these tables. - self.ingest_by_model(prisma_sync::device::MODEL_ID).await?; + total_count += self.ingest_by_model(prisma_sync::device::MODEL_ID).await?; - ( + total_count += [ self.ingest_by_model(prisma_sync::storage_statistics::MODEL_ID), self.ingest_by_model(prisma_sync::tag::MODEL_ID), self.ingest_by_model(prisma_sync::location::MODEL_ID), self.ingest_by_model(prisma_sync::object::MODEL_ID), self.ingest_by_model(prisma_sync::label::MODEL_ID), - ) - .try_join() - .await?; + ] + .try_join() + .await? + .into_iter() + .sum::(); - ( + total_count += [ self.ingest_by_model(prisma_sync::exif_data::MODEL_ID), self.ingest_by_model(prisma_sync::file_path::MODEL_ID), self.ingest_by_model(prisma_sync::tag_on_object::MODEL_ID), self.ingest_by_model(prisma_sync::label_on_object::MODEL_ID), - ) - .try_join() - .await?; + ] + .try_join() + .await? 
+ .into_iter() + .sum::(); if self.tx.send(SyncEvent::Ingested).is_err() { warn!("failed to send ingested message on `ingest_ops`"); } - Ok(()) + Ok(total_count) } #[must_use] @@ -707,10 +713,17 @@ async fn bulk_process_of_create_only_ops( spawn(async move { let mut total_count = 0; + let process_creates_batch_start = Instant::now(); + while let Some(count) = bucket.try_next().await? { total_count += count; } + debug!( + "Processed {total_count} creates in {:?}", + process_creates_batch_start.elapsed() + ); + Ok::<_, Error>(total_count) }) }) From 538af492204f044fe2e1a3a49b3fb324933bc191 Mon Sep 17 00:00:00 2001 From: Ericson Soares Date: Thu, 24 Oct 2024 01:31:42 -0300 Subject: [PATCH 218/218] Properly sync file_path deletion when deleting directories --- core/src/location/mod.rs | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/core/src/location/mod.rs b/core/src/location/mod.rs index d3baf8532..e89639285 100644 --- a/core/src/location/mod.rs +++ b/core/src/location/mod.rs @@ -880,11 +880,9 @@ pub async fn delete_directory( library: &Library, location_id: location::id::Type, parent_iso_file_path: Option<&IsolatedFilePathData<'_>>, -) -> Result<(), QueryError> { +) -> Result<(), sd_core_sync::Error> { let Library { db, .. } = library; - // This is NOT sync-compatible! - // Sync requires having sync ids available. let children_params = sd_utils::chain_optional_iter( [file_path::location_id::equals(Some(location_id))], [parent_iso_file_path.and_then(|parent| { @@ -899,7 +897,39 @@ pub async fn delete_directory( })], ); - db.file_path().delete_many(children_params).exec().await?; + let pub_ids = library + .db + .file_path() + .find_many(children_params.clone()) + .select(file_path::select!({ pub_id })) + .exec() + .await? 
+ .into_iter() + .map(|fp| fp.pub_id) + .collect::>(); + + if pub_ids.is_empty() { + debug!("No file paths to delete"); + return Ok(()); + } + + library + .sync + .write_ops( + &library.db, + ( + pub_ids + .into_iter() + .map(|pub_id| { + library + .sync + .shared_delete(prisma_sync::file_path::SyncId { pub_id }) + }) + .collect(), + db.file_path().delete_many(children_params), + ), + ) + .await?; // library.orphan_remover.invoke().await;