diff --git a/Cargo.lock b/Cargo.lock
index a453654f5..5395ee416 100644
Binary files a/Cargo.lock and b/Cargo.lock differ
diff --git a/Cargo.toml b/Cargo.toml
index 2a215dfbf..9ea80477d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,6 +19,9 @@ repository = "https://github.com/spacedriveapp/spacedrive"
rust-version = "1.81"
[workspace.dependencies]
+# First party dependencies
+sd-cloud-schema = { git = "https://github.com/spacedriveapp/cloud-services-schema", rev = "bbc69c5cb2" }
+
# Third party dependencies used by one or more of our crates
async-channel = "2.3"
async-stream = "0.3.6"
@@ -26,23 +29,25 @@ async-trait = "0.1.83"
axum = "0.7.7"
axum-extra = "0.9.4"
base64 = "0.22.1"
-blake3 = "1.5"
+blake3 = "1.5.4"
+bytes = "1.7.1" # Update blocked by hyper
chrono = "0.4.38"
ed25519-dalek = "2.1"
+flume = "0.11.0"
futures = "0.3.31"
futures-concurrency = "7.6"
globset = "0.4.15"
http = "1.1"
hyper = "1.5"
-image = "0.24.9" # Update blocked due to https://github.com/image-rs/image/issues/2230
+image = "0.25.4"
itertools = "0.13.0"
lending-stream = "1.0"
-libc = "0.2"
+libc = "0.2.159"
mimalloc = "0.1.43"
normpath = "1.3"
pin-project-lite = "0.2.14"
rand = "0.9.0-alpha.2"
-regex = "1"
+regex = "1.11"
reqwest = { version = "0.12.8", default-features = false }
rmp = "0.8.14"
rmp-serde = "1.3"
@@ -62,7 +67,8 @@ tracing-subscriber = "0.3.18"
tracing-test = "0.2.5"
uhlc = "0.8.0" # Must follow version used by specta
uuid = "1.10" # Must follow version used by specta
-webp = "0.2.6" # Update blocked by image
+webp = "0.3.0"
+zeroize = "1.8"
[workspace.dependencies.rspc]
git = "https://github.com/spacedriveapp/rspc.git"
diff --git a/apps/desktop/package.json b/apps/desktop/package.json
index 999a28bae..31b5cdee0 100644
--- a/apps/desktop/package.json
+++ b/apps/desktop/package.json
@@ -20,26 +20,28 @@
"@sd/ui": "workspace:*",
"@t3-oss/env-core": "^0.7.1",
"@tanstack/react-query": "^5.59",
- "@tauri-apps/api": "=2.0.2",
- "@tauri-apps/plugin-dialog": "2.0.0",
+ "@tauri-apps/api": "=2.0.3",
+ "@tauri-apps/plugin-dialog": "2.0.1",
+ "@tauri-apps/plugin-http": "2.0.1",
"@tauri-apps/plugin-os": "2.0.0",
- "@tauri-apps/plugin-shell": "2.0.0",
+ "@tauri-apps/plugin-shell": "2.0.1",
"consistent-hash": "^1.2.2",
"immer": "^10.0.3",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-router-dom": "=6.20.1",
- "sonner": "^1.0.3"
+ "sonner": "^1.0.3",
+ "supertokens-web-js": "^0.13.0"
},
"devDependencies": {
"@sd/config": "workspace:*",
"@sentry/vite-plugin": "^2.16.0",
- "@tauri-apps/cli": "2.0.1",
+ "@tauri-apps/cli": "2.0.4",
"@types/react": "^18.2.67",
"@types/react-dom": "^18.2.22",
"sass": "^1.72.0",
"typescript": "^5.6.2",
- "vite": "^5.2.0",
- "vite-tsconfig-paths": "^4.3.2"
+ "vite": "^5.4.9",
+ "vite-tsconfig-paths": "^5.0.1"
}
}
diff --git a/apps/desktop/src-tauri/Cargo.toml b/apps/desktop/src-tauri/Cargo.toml
index 2d0572fb0..6963addb8 100644
--- a/apps/desktop/src-tauri/Cargo.toml
+++ b/apps/desktop/src-tauri/Cargo.toml
@@ -35,12 +35,15 @@ uuid = { workspace = true, features = ["serde"] }
# Specific Desktop dependencies
# WARNING: Do NOT enable default features, as that vendors dbus (see below)
-opener = { version = "0.7.1", features = ["reveal"], default-features = false }
-specta-typescript = "=0.0.7"
-tauri-plugin-dialog = "=2.0.2"
-tauri-plugin-os = "=2.0.1"
-tauri-plugin-shell = "=2.0.2"
-tauri-plugin-updater = "=2.0.2"
+opener = { version = "0.7.1", features = ["reveal"], default-features = false }
+specta-typescript = "=0.0.7"
+tauri-plugin-clipboard-manager = "=2.0.1"
+tauri-plugin-deep-link = "=2.0.1"
+tauri-plugin-dialog = "=2.0.3"
+tauri-plugin-http = "=2.0.3"
+tauri-plugin-os = "=2.0.1"
+tauri-plugin-shell = "=2.0.2"
+tauri-plugin-updater = "=2.0.2"
# memory allocator
mimalloc = { workspace = true }
diff --git a/apps/desktop/src-tauri/capabilities/default.json b/apps/desktop/src-tauri/capabilities/default.json
index cc710d277..5b68a580b 100644
--- a/apps/desktop/src-tauri/capabilities/default.json
+++ b/apps/desktop/src-tauri/capabilities/default.json
@@ -17,6 +17,7 @@
"dialog:allow-open",
"dialog:allow-save",
"dialog:allow-confirm",
+ "deep-link:default",
"os:allow-os-type",
"core:window:allow-close",
"core:window:allow-create",
@@ -24,6 +25,32 @@
"core:window:allow-minimize",
"core:window:allow-toggle-maximize",
"core:window:allow-start-dragging",
- "core:webview:allow-internal-toggle-devtools"
+ "core:webview:allow-internal-toggle-devtools",
+ {
+ "identifier": "http:default",
+ "allow": [
+ {
+ "url": "http://ipc.localhost"
+ },
+ {
+ "url": "http://asset.localhost"
+ },
+ {
+ "url": "http://localhost:8001"
+ },
+ {
+ "url": "http://tauri.localhost"
+ },
+ {
+ "url": "http://localhost:9420"
+ },
+ {
+ "url": "https://auth.spacedrive.com"
+ },
+ {
+ "url": "https://plausible.io"
+ }
+ ]
+ }
]
}
diff --git a/apps/desktop/src-tauri/src/main.rs b/apps/desktop/src-tauri/src/main.rs
index a1701893e..ffa95903b 100644
--- a/apps/desktop/src-tauri/src/main.rs
+++ b/apps/desktop/src-tauri/src/main.rs
@@ -14,13 +14,13 @@ use sd_core::{Node, NodeError};
use sd_fda::DiskAccess;
use serde::{Deserialize, Serialize};
use specta_typescript::Typescript;
-use tauri::Emitter;
use tauri::{async_runtime::block_on, webview::PlatformWebview, AppHandle, Manager, WindowEvent};
+use tauri::{Emitter, Listener};
use tauri_plugins::{sd_error_plugin, sd_server_plugin};
use tauri_specta::{collect_events, Builder};
use tokio::task::block_in_place;
use tokio::time::sleep;
-use tracing::error;
+use tracing::{debug, error};
mod file;
mod menu;
@@ -179,7 +179,11 @@ pub enum DragAndDropEvent {
Cancelled,
}
-const CLIENT_ID: &str = "2abb241e-40b8-4517-a3e3-5594375c8fbb";
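+// Payload forwarded to the frontend whenever the OS opens a `spacedrive://` deep link;
+// `data` carries the raw JSON payload emitted by the deep-link plugin.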
+#[derive(Debug, Clone, Serialize, Deserialize, specta::Type, tauri_specta::Event)]
+#[serde(rename_all = "camelCase")]
+pub struct DeepLinkEvent {
+ data: String,
+}
#[tokio::main]
async fn main() -> tauri::Result<()> {
@@ -221,9 +225,20 @@ async fn main() -> tauri::Result<()> {
tauri::Builder::default()
.invoke_handler(builder.invoke_handler())
+ .plugin(tauri_plugin_deep_link::init())
.setup(move |app| {
// We need the app handle to determine the data directory now.
// This means all the setup code has to be within `setup`; however, it doesn't support async, so we `block_on`.
+ let handle = app.handle().clone();
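+ // The deep-link plugin emits "deep-link://new-url" with a JSON-encoded array of
+ // the opened URLs; re-emit it to the webview as a plain "deeplink" event, which
+ // App.tsx parses.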
+ app.listen("deep-link://new-url", move |event| {
+ let deep_link_event = DeepLinkEvent {
+ data: event.payload().to_string(),
+ };
+ println!("Deep link event={:?}", deep_link_event);
+
+ handle.emit("deeplink", deep_link_event).unwrap();
+ });
+
block_in_place(|| {
block_on(async move {
builder.mount_events(app);
@@ -239,10 +254,7 @@ async fn main() -> tauri::Result<()> {
// The `_guard` must be assigned to variable for flushing remaining logs on main exit through Drop
let (_guard, result) = match Node::init_logger(&data_dir) {
- Ok(guard) => (
- Some(guard),
- Node::new(data_dir, sd_core::Env::new(CLIENT_ID)).await,
- ),
+ Ok(guard) => (Some(guard), Node::new(data_dir).await),
Err(err) => (None, Err(NodeError::Logger(err))),
};
@@ -256,7 +268,7 @@ async fn main() -> tauri::Result<()> {
}
};
- let should_clear_localstorage = node.libraries.get_all().await.is_empty();
+ let should_clear_local_storage = node.libraries.get_all().await.is_empty();
handle.plugin(rspc::integrations::tauri::plugin(router, {
let node = node.clone();
@@ -266,8 +278,8 @@ async fn main() -> tauri::Result<()> {
handle.manage(node.clone());
handle.windows().iter().for_each(|(_, window)| {
- if should_clear_localstorage {
- println!("cleaning localStorage");
+ if should_clear_local_storage {
+ debug!("cleaning localStorage");
for webview in window.webviews() {
webview.eval("localStorage.clear();").ok();
}
@@ -344,6 +356,7 @@ async fn main() -> tauri::Result<()> {
.plugin(tauri_plugin_dialog::init())
.plugin(tauri_plugin_os::init())
.plugin(tauri_plugin_shell::init())
+ .plugin(tauri_plugin_http::init())
// TODO: Bring back Tauri Plugin Window State - it was buggy so we removed it.
.plugin(tauri_plugin_updater::Builder::new().build())
.plugin(updater::plugin())
diff --git a/apps/desktop/src-tauri/tauri.conf.json b/apps/desktop/src-tauri/tauri.conf.json
index 85c60bc0c..1bf5fa3b9 100644
--- a/apps/desktop/src-tauri/tauri.conf.json
+++ b/apps/desktop/src-tauri/tauri.conf.json
@@ -1,5 +1,5 @@
{
- "$schema": "https://raw.githubusercontent.com/tauri-apps/tauri/tauri-v2.0.0-rc.2/core/tauri-config-schema/schema.json",
+ "$schema": "https://raw.githubusercontent.com/tauri-apps/tauri/tauri-v2.0.0-rc.8/crates/tauri-cli/tauri.config.schema.json",
"productName": "Spacedrive",
"identifier": "com.spacedrive.desktop",
"build": {
@@ -36,7 +36,12 @@
}
],
"security": {
- "csp": "default-src webkit-pdfjs-viewer: asset: https://asset.localhost blob: data: filesystem: ws: wss: http: https: tauri: 'unsafe-eval' 'unsafe-inline' 'self' img-src: 'self'"
+ "csp": {
+ "default-src": "'self' webkit-pdfjs-viewer: asset: http://asset.localhost blob: data: filesystem: http: https: tauri:",
+ "connect-src": "'self' ipc: http://ipc.localhost ws: wss: http: https: tauri:",
+ "img-src": "'self' asset: http://asset.localhost blob: data: filesystem: http: https: tauri:",
+ "style-src": "'self' 'unsafe-inline' http: https: tauri:"
+ }
}
},
"bundle": {
@@ -100,6 +105,12 @@
"endpoints": [
"https://spacedrive.com/api/releases/tauri/{{version}}/{{target}}/{{arch}}"
]
+ },
+ "deep-link": {
+ "mobile": [],
+ "desktop": {
+ "schemes": ["spacedrive"]
+ }
}
}
}
diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx
index 141458057..097240280 100644
--- a/apps/desktop/src/App.tsx
+++ b/apps/desktop/src/App.tsx
@@ -3,9 +3,10 @@ import { QueryClientProvider } from '@tanstack/react-query';
import { listen } from '@tauri-apps/api/event';
import { PropsWithChildren, startTransition, useEffect, useMemo, useRef, useState } from 'react';
import { createPortal } from 'react-dom';
-import { RspcProvider } from '@sd/client';
+import { RspcProvider, useBridgeMutation } from '@sd/client';
import {
createRoutes,
+ DeeplinkEvent,
ErrorPage,
KeybindEvent,
PlatformProvider,
@@ -17,14 +18,11 @@ import { RouteTitleContext } from '@sd/interface/hooks/useRouteTitle';
import '@sd/ui/style/style.scss';
-import { useLocale } from '@sd/interface/hooks';
-
-import { commands } from './commands';
-import { platform } from './platform';
-import { queryClient } from './query';
-import { createMemoryRouterWithHistory } from './router';
-import { createUpdater } from './updater';
-
+import SuperTokens from 'supertokens-web-js';
+import EmailPassword from 'supertokens-web-js/recipe/emailpassword';
+import Passwordless from 'supertokens-web-js/recipe/passwordless';
+import Session from 'supertokens-web-js/recipe/session';
+import ThirdParty from 'supertokens-web-js/recipe/thirdparty';
// TODO: Bring this back once upstream is fixed up.
// const client = hooks.createClient({
// links: [
@@ -34,6 +32,32 @@ import { createUpdater } from './updater';
// tauriLink()
// ]
// });
+import getCookieHandler from '@sd/interface/app/$libraryId/settings/client/account/handlers/cookieHandler';
+import getWindowHandler from '@sd/interface/app/$libraryId/settings/client/account/handlers/windowHandler';
+import { useLocale } from '@sd/interface/hooks';
+import { AUTH_SERVER_URL, getTokens } from '@sd/interface/util';
+
+import { commands } from './commands';
+import { platform } from './platform';
+import { queryClient } from './query';
+import { createMemoryRouterWithHistory } from './router';
+import { createUpdater } from './updater';
+
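+// Initialize SuperTokens for the desktop app. Sessions use header-based tokens rather
+// than cookies, with cookie/window handlers customized for the Tauri webview.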
+SuperTokens.init({
+ appInfo: {
+ apiDomain: AUTH_SERVER_URL,
+ apiBasePath: '/api/auth',
+ appName: 'Spacedrive Auth Service'
+ },
+ cookieHandler: getCookieHandler,
+ windowHandler: getWindowHandler,
+ recipeList: [
+ Session.init({ tokenTransferMethod: 'header' }),
+ EmailPassword.init(),
+ ThirdParty.init(),
+ Passwordless.init()
+ ]
+});
const startupError = (window as any).__SD_ERROR__ as string | undefined;
@@ -41,15 +65,31 @@ export default function App() {
useEffect(() => {
// This tells Tauri to show the current window because it's finished loading
commands.appReady();
+ // .then(() => {
+ // if (import.meta.env.PROD) window.fetch = fetch;
+ // });
}, []);
useEffect(() => {
const keybindListener = listen('keybind', (input) => {
document.dispatchEvent(new KeybindEvent(input.payload as string));
});
+ const deeplinkListener = listen('deeplink', async (data) => {
+ const payload = (data.payload as any).data as string;
+ if (!payload) return;
+ const json = JSON.parse(payload)[0];
+ if (!json) return;
+ // JSON output: "spacedrive://-/URL"
+ if (typeof json !== 'string') return;
+ if (!json.startsWith('spacedrive://-')) return;
+ const url = (json as string).split('://-/')[1];
+ if (!url) return;
+ document.dispatchEvent(new DeeplinkEvent(url));
+ });
return () => {
keybindListener.then((unlisten) => unlisten());
+ deeplinkListener.then((unlisten) => unlisten());
};
}, []);
@@ -79,6 +119,15 @@ type RedirectPath = { pathname: string; search: string | undefined };
function AppInner() {
const [tabs, setTabs] = useState(() => [createTab()]);
const [selectedTabIndex, setSelectedTabIndex] = useState(0);
+ const tokens = getTokens();
+ const cloudBootstrap = useBridgeMutation('cloud.bootstrap');
+
+ useEffect(() => {
+ // If the access token and/or refresh token are missing, we need to skip the cloud bootstrap
+ if (tokens.accessToken.length === 0 || tokens.refreshToken.length === 0) return;
+ cloudBootstrap.mutate([tokens.accessToken, tokens.refreshToken]);
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, []);
const selectedTab = tabs[selectedTabIndex]!;
diff --git a/apps/desktop/tsconfig.json b/apps/desktop/tsconfig.json
index d3855c6fb..11d32a210 100644
--- a/apps/desktop/tsconfig.json
+++ b/apps/desktop/tsconfig.json
@@ -5,7 +5,8 @@
"declarationDir": "dist",
"paths": {
"~/*": ["./src/*"]
- }
+ },
+ "moduleResolution": "bundler"
},
"include": ["src"],
"references": [
diff --git a/apps/landing/src/components/mdx/Pre.tsx b/apps/landing/src/components/mdx/Pre.tsx
index 9400737dc..b90328a70 100644
--- a/apps/landing/src/components/mdx/Pre.tsx
+++ b/apps/landing/src/components/mdx/Pre.tsx
@@ -19,18 +19,6 @@ const Pre: FC<{ children: React.ReactNode }> = ({ children }) => {
return (
- {/*
*/}
{JSON.stringify(featureFlags)}
{JSON.stringify(debugState)}
-
{
navigation.popToTop();
navigation.replace('Settings');
@@ -39,13 +65,13 @@ const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => {
}}
>
Disable Debug Mode
-
-
*/}
+ {/* {
const url =
- origin.data === 'https://app.spacedrive.com'
+ origin.data === 'https://api.spacedrive.com'
? 'http://localhost:3000'
- : 'https://app.spacedrive.com';
+ : 'https://api.spacedrive.com';
setOrigin.mutateAsync(url).then(async () => {
await auth.logout();
await queryClient.invalidateQueries();
@@ -53,7 +79,7 @@ const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => {
}}
>
Toggle API Route ({origin.data})
-
+ */}
{
navigation.popToTop();
@@ -64,12 +90,53 @@ const DebugScreen = ({ navigation }: SettingsStackScreenProps<'Debug'>) => {
>
Go to Backfill Waiting Page
-
{
await auth.logout();
}}
>
Logout
+ */}
+
{
+ const tokens = await getTokens();
+ cloudBootstrap.mutate([tokens.accessToken, tokens.refreshToken]);
+ }}
+ >
+ Cloud Bootstrap
+
+
{
+ addLibraryToCloud.mutate(null);
+ }}
+ >
+ Add Library to Cloud
+
+
{
+ createSyncGroup.mutate(null);
+ }}
+ >
+ Create Sync Group
+
+
{
+ if (
+ currentDevice.data &&
+ getGroup.data &&
+ getGroup.data.kind === 'WithDevices'
+ ) {
+ currentDevice.refetch();
+ console.log('Current Device: ', currentDevice.data);
+ console.log('Get Group: ', getGroup.data.data);
+ requestJoinSyncGroup.mutate({
+ sync_group: getGroup.data.data,
+ asking_device: currentDevice.data
+ });
+ }
+ }}
+ >
+ Request Join Sync Group
diff --git a/apps/mobile/src/screens/settings/library/CloudSettings/CloudSettings.tsx b/apps/mobile/src/screens/settings/library/CloudSettings/CloudSettings.tsx
deleted file mode 100644
index 892556d0e..000000000
--- a/apps/mobile/src/screens/settings/library/CloudSettings/CloudSettings.tsx
+++ /dev/null
@@ -1,130 +0,0 @@
-import { useMemo } from 'react';
-import { ActivityIndicator, FlatList, Text, View } from 'react-native';
-import { useLibraryContext, useLibraryMutation, useLibraryQuery } from '@sd/client';
-import { Icon } from '~/components/icons/Icon';
-import Card from '~/components/layout/Card';
-import Empty from '~/components/layout/Empty';
-import ScreenContainer from '~/components/layout/ScreenContainer';
-import VirtualizedListWrapper from '~/components/layout/VirtualizedListWrapper';
-import { Button } from '~/components/primitive/Button';
-import { Divider } from '~/components/primitive/Divider';
-import { styled, tw, twStyle } from '~/lib/tailwind';
-import { useAuthStateSnapshot } from '~/stores/auth';
-
-import Instance from './Instance';
-import Library from './Library';
-import Login from './Login';
-import ThisInstance from './ThisInstance';
-
-export const InfoBox = styled(View, 'rounded-md border gap-1 border-app bg-transparent p-2');
-
-const CloudSettings = () => {
- return (
-
-
-
- );
-};
-
-const AuthSensitiveChild = () => {
- const authState = useAuthStateSnapshot();
- if (authState.status === 'loggedIn') return
;
- if (authState.status === 'notLoggedIn' || authState.status === 'loggingIn') return
;
-
- return null;
-};
-
-const Authenticated = () => {
- const { library } = useLibraryContext();
- const cloudLibrary = useLibraryQuery(['cloud.library.get'], { retry: false });
- const createLibrary = useLibraryMutation(['cloud.library.create']);
-
- const cloudInstances = useMemo(
- () =>
- cloudLibrary.data?.instances.filter(
- (instance) => instance.uuid !== library.instance_id
- ),
- [cloudLibrary.data, library.instance_id]
- );
-
- if (cloudLibrary.isLoading) {
- return (
-
-
-
- );
- }
-
- return (
-
- {cloudLibrary.data ? (
-
-
-
-
-
-
-
- {cloudInstances?.length}
-
-
- Instances
-
-
-
-
- }
- contentContainerStyle={twStyle(
- cloudInstances?.length === 0 && 'flex-row'
- )}
- showsHorizontalScrollIndicator={false}
- ItemSeparatorComponent={() => }
- renderItem={({ item }) => }
- keyExtractor={(item) => item.id}
- numColumns={1}
- />
-
-
-
- ) : (
-
-
-
-
- Uploading your library to the cloud will allow you to access your
- library from other devices using your account & importing.
-
- await createLibrary.mutateAsync(null)}
- >
- {createLibrary.isPending ? (
- Connecting library...
- ) : (
- Connect library
- )}
-
-
-
- )}
-
- );
-};
-
-export default CloudSettings;
diff --git a/apps/mobile/src/screens/settings/library/CloudSettings/Instance.tsx b/apps/mobile/src/screens/settings/library/CloudSettings/Instance.tsx
deleted file mode 100644
index dbac4a60a..000000000
--- a/apps/mobile/src/screens/settings/library/CloudSettings/Instance.tsx
+++ /dev/null
@@ -1,64 +0,0 @@
-import { Text, View } from 'react-native';
-import { CloudInstance, HardwareModel } from '@sd/client';
-import { Icon } from '~/components/icons/Icon';
-import { hardwareModelToIcon } from '~/components/overview/Devices';
-import { tw } from '~/lib/tailwind';
-
-import { InfoBox } from './CloudSettings';
-
-interface Props {
- data: CloudInstance;
-}
-
-const Instance = ({ data }: Props) => {
- return (
-
-
-
-
-
-
- {data.metadata.name}
-
-
-
- Id:
-
- {data.id}
-
-
-
-
-
-
-
- UUID:
-
- {data.uuid}
-
-
-
-
-
-
-
- Public key:
-
- {data.identity}
-
-
-
-
-
- );
-};
-
-export default Instance;
diff --git a/apps/mobile/src/screens/settings/library/CloudSettings/Library.tsx b/apps/mobile/src/screens/settings/library/CloudSettings/Library.tsx
deleted file mode 100644
index 9f848173f..000000000
--- a/apps/mobile/src/screens/settings/library/CloudSettings/Library.tsx
+++ /dev/null
@@ -1,66 +0,0 @@
-import { CheckCircle, XCircle } from 'phosphor-react-native';
-import { useMemo } from 'react';
-import { Text, View } from 'react-native';
-import { CloudLibrary, useLibraryContext, useLibraryMutation } from '@sd/client';
-import Card from '~/components/layout/Card';
-import { Button } from '~/components/primitive/Button';
-import { Divider } from '~/components/primitive/Divider';
-import { SettingsTitle } from '~/components/settings/SettingsContainer';
-import { tw } from '~/lib/tailwind';
-import { logout, useAuthStateSnapshot } from '~/stores/auth';
-
-import { InfoBox } from './CloudSettings';
-
-interface LibraryProps {
- cloudLibrary?: CloudLibrary;
-}
-
-const Library = ({ cloudLibrary }: LibraryProps) => {
- const authState = useAuthStateSnapshot();
- const { library } = useLibraryContext();
- const syncLibrary = useLibraryMutation(['cloud.library.sync']);
- const thisInstance = useMemo(
- () => cloudLibrary?.instances.find((instance) => instance.uuid === library.instance_id),
- [cloudLibrary, library.instance_id]
- );
-
- return (
-
-
- Library
- {authState.status === 'loggedIn' && (
-
- Logout
-
- )}
-
-
- Name
-
- {cloudLibrary?.name}
-
- syncLibrary.mutate(null)}
- style={tw`mt-2 flex-row gap-1 py-2`}
- >
- {thisInstance ? (
-
- ) : (
-
- )}
-
- {thisInstance !== undefined ? 'Library synced' : 'Library not synced'}
-
-
-
- );
-};
-
-export default Library;
diff --git a/apps/mobile/src/screens/settings/library/CloudSettings/Login.tsx b/apps/mobile/src/screens/settings/library/CloudSettings/Login.tsx
deleted file mode 100644
index 88738c329..000000000
--- a/apps/mobile/src/screens/settings/library/CloudSettings/Login.tsx
+++ /dev/null
@@ -1,45 +0,0 @@
-import { Text, View } from 'react-native';
-import { Icon } from '~/components/icons/Icon';
-import Card from '~/components/layout/Card';
-import { Button } from '~/components/primitive/Button';
-import { tw } from '~/lib/tailwind';
-import { cancel, login, useAuthStateSnapshot } from '~/stores/auth';
-
-const Login = () => {
- const authState = useAuthStateSnapshot();
- const buttonText = {
- notLoggedIn: 'Login',
- loggingIn: 'Cancel'
- };
- return (
-
-
-
-
-
- Cloud Sync will upload your library to the cloud so you can access your
- library from other devices by importing it from the cloud.
-
-
- {(authState.status === 'notLoggedIn' || authState.status === 'loggingIn') && (
- {
- e.preventDefault();
- if (authState.status === 'loggingIn') {
- await cancel();
- } else {
- await login();
- }
- }}
- >
- {buttonText[authState.status]}
-
- )}
-
-
- );
-};
-
-export default Login;
diff --git a/apps/mobile/src/screens/settings/library/CloudSettings/ThisInstance.tsx b/apps/mobile/src/screens/settings/library/CloudSettings/ThisInstance.tsx
deleted file mode 100644
index 041d6591c..000000000
--- a/apps/mobile/src/screens/settings/library/CloudSettings/ThisInstance.tsx
+++ /dev/null
@@ -1,76 +0,0 @@
-import { useMemo } from 'react';
-import { Text, View } from 'react-native';
-import { CloudLibrary, HardwareModel, useLibraryContext } from '@sd/client';
-import { Icon } from '~/components/icons/Icon';
-import Card from '~/components/layout/Card';
-import { hardwareModelToIcon } from '~/components/overview/Devices';
-import { Divider } from '~/components/primitive/Divider';
-import { tw } from '~/lib/tailwind';
-
-import { InfoBox } from './CloudSettings';
-
-interface ThisInstanceProps {
- cloudLibrary?: CloudLibrary;
-}
-
-const ThisInstance = ({ cloudLibrary }: ThisInstanceProps) => {
- const { library } = useLibraryContext();
- const thisInstance = useMemo(
- () => cloudLibrary?.instances.find((instance) => instance.uuid === library.instance_id),
- [cloudLibrary, library.instance_id]
- );
-
- if (!thisInstance) return null;
-
- return (
-
-
- This Instance
-
-
-
-
-
- {thisInstance.metadata.name}
-
-
-
-
-
- Id:
- {thisInstance.id}
-
-
-
-
-
-
- UUID:
-
- {thisInstance.uuid}
-
-
-
-
-
-
-
- Publc Key:
-
- {thisInstance.identity}
-
-
-
-
-
- );
-};
-
-export default ThisInstance;
diff --git a/apps/mobile/src/screens/settings/library/SyncSettings.tsx b/apps/mobile/src/screens/settings/library/SyncSettings.tsx
deleted file mode 100644
index 0097625c2..000000000
--- a/apps/mobile/src/screens/settings/library/SyncSettings.tsx
+++ /dev/null
@@ -1,158 +0,0 @@
-import { useIsFocused } from '@react-navigation/native';
-import { inferSubscriptionResult } from '@spacedrive/rspc-client';
-import { MotiView } from 'moti';
-import { Circle } from 'phosphor-react-native';
-import React, { useEffect, useRef, useState } from 'react';
-import { Text, View } from 'react-native';
-import {
- Procedures,
- useLibraryMutation,
- useLibraryQuery,
- useLibrarySubscription
-} from '@sd/client';
-import { Icon } from '~/components/icons/Icon';
-import Card from '~/components/layout/Card';
-import { ModalRef } from '~/components/layout/Modal';
-import ScreenContainer from '~/components/layout/ScreenContainer';
-import CloudModal from '~/components/modal/cloud/CloudModal';
-import { Button } from '~/components/primitive/Button';
-import { tw } from '~/lib/tailwind';
-import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack';
-
-const SyncSettingsScreen = ({ navigation }: SettingsStackScreenProps<'SyncSettings'>) => {
- const syncEnabled = useLibraryQuery(['sync.enabled']);
- const [data, setData] = useState
>({});
- const modalRef = useRef(null);
-
- const [startBackfill, setStart] = useState(false);
- const pageFocused = useIsFocused();
- const [showCloudModal, setShowCloudModal] = useState(false);
-
- useLibrarySubscription(['library.actors'], { onData: setData });
-
- useEffect(() => {
- if (startBackfill === true) {
- navigation.navigate('BackfillWaitingStack', {
- screen: 'BackfillWaiting'
- });
- setTimeout(() => setShowCloudModal(true), 1000);
- }
- }, [startBackfill, navigation]);
-
- useEffect(() => {
- if (pageFocused && showCloudModal) modalRef.current?.present();
- return () => {
- if (showCloudModal) setShowCloudModal(false);
- };
- }, [pageFocused, showCloudModal]);
-
- return (
-
- {syncEnabled.data === false ? (
-
-
-
-
-
- With Sync, you can share your library with other devices using P2P
- technology.
-
-
- Additionally, allowing you to enable Cloud services to upload your
- library to the cloud, making it accessible on any of your devices.
-
-
- setStart(true)}
- >
- Start
-
-
-
- ) : (
-
- {Object.keys(data).map((key) => {
- return (
-
-
-
- {key}
-
- {data[key] ? : }
-
- );
- })}
-
- )}
-
-
- );
-};
-
-export default SyncSettingsScreen;
-
-function OnlineIndicator({ online }: { online: boolean }) {
- const size = 6;
- return (
-
- {online ? (
-
-
-
-
- ) : (
-
- )}
-
- );
-}
-
-function StartButton({ name }: { name: string }) {
- const startActor = useLibraryMutation(['library.startActor']);
- return (
- startActor.mutate(name)}
- >
-
- {startActor.isPending ? 'Starting' : 'Start'}
-
-
- );
-}
-
-function StopButton({ name }: { name: string }) {
- const stopActor = useLibraryMutation(['library.stopActor']);
- return (
- stopActor.mutate(name)}
- >
-
- {stopActor.isPending ? 'Stopping' : 'Stop'}
-
-
- );
-}
diff --git a/apps/mobile/src/stores/auth.ts b/apps/mobile/src/stores/auth.ts
index 336b3ff22..3f99024dd 100644
--- a/apps/mobile/src/stores/auth.ts
+++ b/apps/mobile/src/stores/auth.ts
@@ -18,16 +18,16 @@ export function useAuthStateSnapshot() {
return useSolidStore(store).state;
}
-nonLibraryClient
- .query(['auth.me'])
- .then(() => (store.state = { status: 'loggedIn' }))
- .catch((e) => {
- if (e instanceof RSPCError && e.code === 401) {
- // TODO: handle error?
- console.error('error', e);
- }
- store.state = { status: 'notLoggedIn' };
- });
+// nonLibraryClient
+// .query(['auth.me'])
+// .then(() => (store.state = { status: 'loggedIn' }))
+// .catch((e) => {
+// if (e instanceof RSPCError && e.code === 401) {
+// // TODO: handle error?
+// console.error('error', e);
+// }
+// store.state = { status: 'notLoggedIn' };
+// });
type CallbackStatus = 'success' | { error: string } | 'cancel';
const loginCallbacks = new Set<(status: CallbackStatus) => void>();
@@ -41,29 +41,29 @@ export function login() {
store.state = { status: 'loggingIn' };
- let authCleanup = nonLibraryClient.addSubscription(['auth.loginSession'], {
- onData(data) {
- if (data === 'Complete') {
- loginCallbacks.forEach((cb) => cb('success'));
- } else if ('Error' in data) {
- console.error('[auth] error: ', data.Error);
- onError(data.Error);
- } else {
- console.log('[auth] verification url: ', data.Start.verification_url_complete);
- Promise.resolve()
- .then(() => Linking.openURL(data.Start.verification_url_complete))
- .then(
- (res) => {
- authCleanup = res;
- },
- (e) => onError(e.message)
- );
- }
- },
- onError(e) {
- onError(e.message);
- }
- });
+ // let authCleanup = nonLibraryClient.addSubscription(['auth.loginSession'], {
+ // onData(data) {
+ // if (data === 'Complete') {
+ // loginCallbacks.forEach((cb) => cb('success'));
+ // } else if ('Error' in data) {
+ // console.error('[auth] error: ', data.Error);
+ // onError(data.Error);
+ // } else {
+ // console.log('[auth] verification url: ', data.Start.verification_url_complete);
+ // Promise.resolve()
+ // .then(() => Linking.openURL(data.Start.verification_url_complete))
+ // .then(
+ // (res) => {
+ // authCleanup = res;
+ // },
+ // (e) => onError(e.message)
+ // );
+ // }
+ // },
+ // onError(e) {
+ // onError(e.message);
+ // }
+ // });
return new Promise((res, rej) => {
const cb = async (status: CallbackStatus) => {
@@ -71,7 +71,7 @@ export function login() {
if (status === 'success') {
store.state = { status: 'loggedIn' };
- nonLibraryClient.query(['auth.me']);
+ // nonLibraryClient.query(['auth.me']);
res();
} else {
store.state = { status: 'notLoggedIn' };
@@ -88,8 +88,8 @@ export function set_logged_in() {
export function logout() {
store.state = { status: 'loggingOut' };
- nonLibraryClient.mutation(['auth.logout']);
- nonLibraryClient.query(['auth.me']);
+ // nonLibraryClient.mutation(['auth.logout']);
+ // nonLibraryClient.query(['auth.me']);
store.state = { status: 'notLoggedIn' };
}
diff --git a/apps/mobile/src/stores/userStore.ts b/apps/mobile/src/stores/userStore.ts
new file mode 100644
index 000000000..8a1bfe6a7
--- /dev/null
+++ b/apps/mobile/src/stores/userStore.ts
@@ -0,0 +1,22 @@
+import { proxy, useSnapshot } from 'valtio';
+
+export type User = {
+ id: string;
+ email: string;
+ timeJoined: number;
+ tenantIds: string[];
+};
+
+const state = {
+ userInfo: undefined as User | undefined
+};
+
+const store = proxy({
+ ...state
+});
+
+// for reading
+export const useUserStore = () => useSnapshot(store);
+
+// for writing
+export const getUserStore = () => store;
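+
+// Usage sketch (hypothetical call sites): a login handler writes
+// `getUserStore().userInfo = user`; components read `useUserStore().userInfo`.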
diff --git a/apps/mobile/src/utils/index.ts b/apps/mobile/src/utils/index.ts
new file mode 100644
index 000000000..8d4e4077b
--- /dev/null
+++ b/apps/mobile/src/utils/index.ts
@@ -0,0 +1,13 @@
+import AsyncStorage from '@react-native-async-storage/async-storage';
+
+export async function getTokens() {
+ const fetchedToken = await AsyncStorage.getItem('access_token');
+ const fetchedRefreshToken = await AsyncStorage.getItem('refresh_token');
+ return {
+ accessToken: fetchedToken ?? '',
+ refreshToken: fetchedRefreshToken ?? ''
+ };
+}
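+
+// Callers feed these straight into the core, e.g.
+// `cloudBootstrap.mutate([tokens.accessToken, tokens.refreshToken])`
+// (see the mobile Debug screen and the desktop App.tsx).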
+
+// export const AUTH_SERVER_URL = __DEV__ ? 'http://localhost:9420' : 'https://auth.spacedrive.com';
+export const AUTH_SERVER_URL = 'https://auth.spacedrive.com';
diff --git a/apps/server/src/main.rs b/apps/server/src/main.rs
index 5a6304bf3..4a4246312 100644
--- a/apps/server/src/main.rs
+++ b/apps/server/src/main.rs
@@ -144,19 +144,7 @@ async fn main() {
let state = AppState { auth };
- let (node, router) = match Node::new(
- data_dir,
- sd_core::Env {
- api_url: tokio::sync::Mutex::new(
- std::env::var("SD_API_URL")
- .unwrap_or_else(|_| "https://app.spacedrive.com".to_string()),
- ),
- client_id: std::env::var("SD_CLIENT_ID")
- .unwrap_or_else(|_| "04701823-a498-406e-aef9-22081c1dae34".to_string()),
- },
- )
- .await
- {
+ let (node, router) = match Node::new(data_dir).await {
Ok(d) => d,
Err(e) => {
panic!("{}", e.to_string())
diff --git a/apps/storybook/package.json b/apps/storybook/package.json
index 0f5786227..5dfe9f15f 100644
--- a/apps/storybook/package.json
+++ b/apps/storybook/package.json
@@ -30,6 +30,6 @@
"storybook": "^8.0.1",
"tailwindcss": "^3.4.10",
"typescript": "^5.6.2",
- "vite": "^5.2.0"
+ "vite": "^5.4.9"
}
}
diff --git a/apps/web/package.json b/apps/web/package.json
index ad2a51ebf..85ab05a77 100644
--- a/apps/web/package.json
+++ b/apps/web/package.json
@@ -41,7 +41,7 @@
"rollup-plugin-visualizer": "^5.12.0",
"start-server-and-test": "^2.0.3",
"typescript": "^5.6.2",
- "vite": "^5.2.0",
- "vite-tsconfig-paths": "^4.3.2"
+ "vite": "^5.4.9",
+ "vite-tsconfig-paths": "^5.0.1"
}
}
diff --git a/core/Cargo.toml b/core/Cargo.toml
index 3d3762464..d2880a0e8 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -20,6 +20,7 @@ heif = ["sd-images/heif"]
[dependencies]
# Inner Core Sub-crates
+sd-core-cloud-services = { path = "./crates/cloud-services" }
sd-core-file-path-helper = { path = "./crates/file-path-helper" }
sd-core-heavy-lifting = { path = "./crates/heavy-lifting" }
sd-core-indexer-rules = { path = "./crates/indexer-rules" }
@@ -29,7 +30,8 @@ sd-core-sync = { path = "./crates/sync" }
# Spacedrive Sub-crates
sd-actors = { path = "../crates/actors" }
sd-ai = { path = "../crates/ai", optional = true }
-sd-cloud-api = { path = "../crates/cloud-api" }
+sd-crypto = { path = "../crates/crypto" }
+sd-ffmpeg = { path = "../crates/ffmpeg", optional = true }
sd-file-ext = { path = "../crates/file-ext" }
sd-images = { path = "../crates/images", features = ["rspc", "serde", "specta"] }
sd-media-metadata = { path = "../crates/media-metadata" }
@@ -49,6 +51,7 @@ async-trait = { workspace = true }
axum = { workspace = true, features = ["ws"] }
base64 = { workspace = true }
blake3 = { workspace = true }
+bytes = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
futures = { workspace = true }
futures-concurrency = { workspace = true }
@@ -64,6 +67,7 @@ reqwest = { workspace = true, features = ["json", "native-tls-vendor
rmp-serde = { workspace = true }
rmpv = { workspace = true }
rspc = { workspace = true, features = ["alpha", "axum", "chrono", "unstable", "uuid"] }
+sd-cloud-schema = { workspace = true }
serde = { workspace = true, features = ["derive", "rc"] }
serde_json = { workspace = true }
specta = { workspace = true }
@@ -75,12 +79,11 @@ tokio-stream = { workspace = true, features = ["fs"] }
tokio-util = { workspace = true, features = ["io"] }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
-uuid = { workspace = true, features = ["serde", "v4"] }
+uuid = { workspace = true, features = ["serde", "v4", "v7"] }
# Specific Core dependencies
async-recursion = "1.1"
base91 = "0.1.0"
-bytes = "1.6"
ctor = "0.2.8"
directories = "5.0"
flate2 = "1.0"
@@ -98,6 +101,7 @@ sysinfo = "0.29.11" # Update blocked
tar = "0.4.41"
tower-service = "0.3.2"
tracing-appender = "0.2.3"
+whoami = "1.5.2"
[dependencies.tokio]
features = ["io-util", "macros", "process", "rt-multi-thread", "sync", "time"]
diff --git a/core/crates/cloud-services/Cargo.toml b/core/crates/cloud-services/Cargo.toml
new file mode 100644
index 000000000..baffe812d
--- /dev/null
+++ b/core/crates/cloud-services/Cargo.toml
@@ -0,0 +1,55 @@
+[package]
+name = "sd-core-cloud-services"
+version = "0.1.0"
+
+edition = "2021"
+
+[dependencies]
+# Core Spacedrive Sub-crates
+sd-core-sync = { path = "../sync" }
+
+# Spacedrive Sub-crates
+sd-actors = { path = "../../../crates/actors" }
+sd-cloud-schema = { workspace = true }
+sd-crypto = { path = "../../../crates/crypto" }
+sd-prisma = { path = "../../../crates/prisma" }
+sd-utils = { path = "../../../crates/utils" }
+
+# Workspace dependencies
+async-stream = { workspace = true }
+base64 = { workspace = true }
+blake3 = { workspace = true }
+chrono = { workspace = true, features = ["serde"] }
+flume = { workspace = true }
+futures = { workspace = true }
+futures-concurrency = { workspace = true }
+rmp-serde = { workspace = true }
+rspc = { workspace = true }
+serde = { workspace = true, features = ["derive"] }
+serde_json = { workspace = true }
+specta = { workspace = true }
+thiserror = { workspace = true }
+tokio = { workspace = true, features = ["sync", "time"] }
+tokio-stream = { workspace = true }
+tokio-util = { workspace = true }
+tracing = { workspace = true }
+uuid = { workspace = true, features = ["serde"] }
+zeroize = { workspace = true }
+
+# External dependencies
+anyhow = "1.0.86"
+dashmap = "6.1.0"
+iroh-net = { version = "0.27", features = ["discovery-local-network", "iroh-relay"] }
+paste = "=1.0.15"
+quic-rpc = { version = "0.12.1", features = ["quinn-transport"] }
+quinn = { package = "iroh-quinn", version = "0.11" }
+# Use whatever reqwest version reqwest-middleware uses; this entry just enables extra features
+reqwest = { version = "0.12", features = ["json", "native-tls-vendored", "stream"] }
+reqwest-middleware = { version = "0.3", features = ["json"] }
+reqwest-retry = "0.6"
+rustls = { version = "=0.23.15", default-features = false, features = ["brotli", "ring", "std"] }
+rustls-platform-verifier = "0.3.3"
+
+
+[dev-dependencies]
+tokio = { workspace = true, features = ["rt", "sync", "time"] }
diff --git a/core/crates/cloud-services/src/client.rs b/core/crates/cloud-services/src/client.rs
new file mode 100644
index 000000000..d9ec361e1
--- /dev/null
+++ b/core/crates/cloud-services/src/client.rs
@@ -0,0 +1,358 @@
+use crate::p2p::{NotifyUser, UserResponse};
+
+use sd_cloud_schema::{Client, Service, ServicesALPN};
+
+use std::{net::SocketAddr, sync::Arc, time::Duration};
+
+use futures::Stream;
+use iroh_net::relay::RelayUrl;
+use quic_rpc::{transport::quinn::QuinnConnection, RpcClient};
+use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Endpoint};
+use reqwest::{IntoUrl, Url};
+use reqwest_middleware::{reqwest, ClientBuilder, ClientWithMiddleware};
+use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware};
+use tokio::sync::{Mutex, RwLock};
+use tracing::warn;
+
+use super::{
+ error::Error, key_manager::KeyManager, p2p::CloudP2P, token_refresher::TokenRefresher,
+};
+
+#[derive(Debug, Default, Clone)]
+enum ClientState {
+ #[default]
+ NotConnected,
+ Connected(Client<QuinnConnection<Service>, Service>),
+}
+
+/// Cloud services are an optional feature that allows you to interact with Spacedrive's
+/// cloud services.
+/// They're optional in two different ways:
+/// - The cloud services depend on a user being logged in with our server.
+/// - The user must be connected to the internet to begin with.
+///
+/// As we don't want to force the user to be connected to the internet, we have to make sure
+/// that the core can always operate without the cloud services.
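+///
+/// A minimal usage sketch (mirroring the ignored test at the bottom of this file;
+/// the URLs are local-dev placeholders):
+///
+/// ```ignore
+/// let cloud_services = CloudServices::new(
+///     "http://localhost:9420/cloud-api-address",
+///     "http://relay.localhost:9999/",
+///     "http://pkarr.localhost:9999/",
+///     "dns.localhost:9999".to_string(),
+///     "localhost".to_string(),
+/// )
+/// .await?;
+/// let client = cloud_services.client().await?;
+/// ```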
+#[derive(Debug)]
+pub struct CloudServices {
+ client_state: Arc<RwLock<ClientState>>,
+ get_cloud_api_address: Url,
+ http_client: ClientWithMiddleware,
+ domain_name: String,
+ pub cloud_p2p_dns_origin_name: String,
+ pub cloud_p2p_relay_url: RelayUrl,
+ pub cloud_p2p_dns_pkarr_url: Url,
+ pub token_refresher: TokenRefresher,
+ key_manager: Arc<RwLock<Option<Arc<KeyManager>>>>,
+ cloud_p2p: Arc<RwLock<Option<Arc<CloudP2P>>>>,
+ pub(crate) notify_user_tx: flume::Sender<NotifyUser>,
+ notify_user_rx: flume::Receiver<NotifyUser>,
+ user_response_tx: flume::Sender<UserResponse>,
+ pub(crate) user_response_rx: flume::Receiver<UserResponse>,
+ pub has_bootstrapped: Arc<Mutex<bool>>,
+}
+
+impl CloudServices {
+ /// Creates a new cloud services client that can be used to interact with the cloud services.
+ /// The client will try to connect to the cloud services on a best effort basis, as the user
+ /// might not be connected to the internet.
+ /// If the client fails to connect, it will try again the next time it's used.
+ pub async fn new(
+ get_cloud_api_address: impl IntoUrl + Send,
+ cloud_p2p_relay_url: impl IntoUrl + Send,
+ cloud_p2p_dns_pkarr_url: impl IntoUrl + Send,
+ cloud_p2p_dns_origin_name: String,
+ domain_name: String,
+ ) -> Result<Self, Error> {
+ #[cfg_attr(debug_assertions, allow(unused_mut))]
+ let mut http_client_builder = reqwest::Client::builder().timeout(Duration::from_secs(3));
+
+ #[cfg(not(debug_assertions))]
+ {
+ http_client_builder = http_client_builder.https_only(true);
+ }
+
+ let cloud_p2p_relay_url = cloud_p2p_relay_url
+ .into_url()
+ .map_err(Error::InvalidUrl)?
+ .into();
+
+ let cloud_p2p_dns_pkarr_url = cloud_p2p_dns_pkarr_url
+ .into_url()
+ .map_err(Error::InvalidUrl)?;
+
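+ // Wrap the reqwest client in middleware that retries transient failures with
+ // exponential backoff, up to 3 retries.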
+ let http_client =
+ ClientBuilder::new(http_client_builder.build().map_err(Error::HttpClientInit)?)
+ .with(RetryTransientMiddleware::new_with_policy(
+ ExponentialBackoff::builder().build_with_max_retries(3),
+ ))
+ .build();
+ let get_cloud_api_address = get_cloud_api_address
+ .into_url()
+ .map_err(Error::InvalidUrl)?;
+
+ let client_state = match Self::init_client(
+ &http_client,
+ get_cloud_api_address.clone(),
+ domain_name.clone(),
+ )
+ .await
+ {
+ Ok(client) => Arc::new(RwLock::new(ClientState::Connected(client))),
+ Err(e) => {
+ warn!(
+ ?e,
+ "Failed to initialize cloud services client; \
+ This is a best effort and we will continue in Not Connected mode"
+ );
+ Arc::new(RwLock::new(ClientState::NotConnected))
+ }
+ };
+
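+ // Bounded channels connecting the Cloud P2P actor to the UI layer: notifications
+ // for the user flow out through notify_user_*, and their responses come back
+ // through user_response_* (see send_user_response below).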
+ let (notify_user_tx, notify_user_rx) = flume::bounded(16);
+ let (user_response_tx, user_response_rx) = flume::bounded(16);
+
+ Ok(Self {
+ client_state,
+ token_refresher: TokenRefresher::new(
+ http_client.clone(),
+ get_cloud_api_address.clone(),
+ ),
+ get_cloud_api_address,
+ http_client,
+ cloud_p2p_dns_origin_name,
+ cloud_p2p_relay_url,
+ cloud_p2p_dns_pkarr_url,
+ domain_name,
+ key_manager: Arc::default(),
+ cloud_p2p: Arc::default(),
+ notify_user_tx,
+ notify_user_rx,
+ user_response_tx,
+ user_response_rx,
+ has_bootstrapped: Arc::default(),
+ })
+ }
+
+ pub fn stream_user_notifications(&self) -> impl Stream<Item = NotifyUser> + '_ {
+ self.notify_user_rx.stream()
+ }
+
+ #[must_use]
+ pub const fn http_client(&self) -> &ClientWithMiddleware {
+ &self.http_client
+ }
+
+ /// Send back a user response to the Cloud P2P actor
+ ///
+ /// # Panics
+ /// Will panic if the channel is closed, which should never happen
+ pub async fn send_user_response(&self, response: UserResponse) {
+ self.user_response_tx
+ .send_async(response)
+ .await
+ .expect("user response channel must never close");
+ }
+
+ async fn init_client(
+ http_client: &ClientWithMiddleware,
+ get_cloud_api_address: Url,
+ domain_name: String,
+ ) -> Result<Client<QuinnConnection<Service>, Service>, Error> {
+ let cloud_api_address = http_client
+ .get(get_cloud_api_address)
+ .send()
+ .await
+ .map_err(Error::FailedToRequestApiAddress)?
+ .error_for_status()
+ .map_err(Error::AuthServerError)?
+ .text()
+ .await
+ .map_err(Error::FailedToExtractApiAddress)?
+ .parse::<SocketAddr>()?;
+
+ let mut crypto_config = {
+ #[cfg(debug_assertions)]
+ {
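+ // Debug-only escape hatch: accept any server certificate so a locally running
+ // cloud backend with a self-signed certificate can be used. Release builds (the
+ // cfg(not(debug_assertions)) branch below) verify certificates against the OS
+ // trust store via rustls-platform-verifier.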
+ #[derive(Debug)]
+ struct SkipServerVerification;
+ impl rustls::client::danger::ServerCertVerifier for SkipServerVerification {
+ fn verify_server_cert(
+ &self,
+ _end_entity: &rustls::pki_types::CertificateDer<'_>,
+ _intermediates: &[rustls::pki_types::CertificateDer<'_>],
+ _server_name: &rustls::pki_types::ServerName<'_>,
+ _ocsp_response: &[u8],
+ _now: rustls::pki_types::UnixTime,
+ ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
+ Ok(rustls::client::danger::ServerCertVerified::assertion())
+ }
+
+ fn verify_tls12_signature(
+ &self,
+ _message: &[u8],
+ _cert: &rustls::pki_types::CertificateDer<'_>,
+ _dss: &rustls::DigitallySignedStruct,
+ ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error>
+ {
+ Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
+ }
+
+ fn verify_tls13_signature(
+ &self,
+ _message: &[u8],
+ _cert: &rustls::pki_types::CertificateDer<'_>,
+ _dss: &rustls::DigitallySignedStruct,
+ ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error>
+ {
+ Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
+ }
+
+ fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
+ vec![
+ rustls::SignatureScheme::RSA_PKCS1_SHA1,
+ rustls::SignatureScheme::ECDSA_SHA1_Legacy,
+ rustls::SignatureScheme::RSA_PKCS1_SHA256,
+ rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
+ rustls::SignatureScheme::RSA_PKCS1_SHA384,
+ rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
+ rustls::SignatureScheme::RSA_PKCS1_SHA512,
+ rustls::SignatureScheme::ECDSA_NISTP521_SHA512,
+ rustls::SignatureScheme::RSA_PSS_SHA256,
+ rustls::SignatureScheme::RSA_PSS_SHA384,
+ rustls::SignatureScheme::RSA_PSS_SHA512,
+ rustls::SignatureScheme::ED25519,
+ rustls::SignatureScheme::ED448,
+ ]
+ }
+ }
+
+ rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13])
+ .dangerous()
+ .with_custom_certificate_verifier(Arc::new(SkipServerVerification))
+ .with_no_client_auth()
+ }
+
+ #[cfg(not(debug_assertions))]
+ {
+ rustls::ClientConfig::builder_with_protocol_versions(&[&rustls::version::TLS13])
+ .dangerous()
+ .with_custom_certificate_verifier(Arc::new(
+ rustls_platform_verifier::Verifier::new(),
+ ))
+ .with_no_client_auth()
+ }
+ };
+
+ crypto_config
+ .alpn_protocols
+ .extend([ServicesALPN::LATEST.to_vec()]);
+
+ let client_config = ClientConfig::new(Arc::new(
+ QuicClientConfig::try_from(crypto_config)
+ .expect("misconfigured TLS client config, this is a bug and should crash"),
+ ));
+
+ let mut endpoint = Endpoint::client("[::]:0".parse().expect("hardcoded address"))
+ .map_err(Error::FailedToCreateEndpoint)?;
+ endpoint.set_default_client_config(client_config);
+
+ // TODO(@fogodev): It's possible that we can't keep the connection alive all the time,
+ // and need to use single shot connections. I will only be sure when we have
+ // actually battle-tested the cloud services in core.
+ Ok(Client::new(RpcClient::new(QuinnConnection::new(
+ endpoint,
+ cloud_api_address,
+ domain_name,
+ ))))
+ }
+
+ /// Returns a client to the cloud services.
+ ///
+ /// If the client is not connected, it will try to connect to the cloud services.
+ /// Available routes documented in
+ /// [`sd_cloud_schema::Service`](https://github.com/spacedriveapp/cloud-services-schema).
+ pub async fn client(&self) -> Result<Client<QuinnConnection<Service>, Service>, Error> {
+ if let ClientState::Connected(client) = { self.client_state.read().await.clone() } {
+ return Ok(client);
+ }
+
+ // If we're not connected, we need to try to connect.
+ let client = Self::init_client(
+ &self.http_client,
+ self.get_cloud_api_address.clone(),
+ self.domain_name.clone(),
+ )
+ .await?;
+ *self.client_state.write().await = ClientState::Connected(client.clone());
+
+ Ok(client)
+ }
+
+ pub async fn set_key_manager(&self, key_manager: KeyManager) {
+ self.key_manager
+ .write()
+ .await
+ .replace(Arc::new(key_manager));
+ }
+
+ pub async fn key_manager(&self) -> Result<Arc<KeyManager>, Error> {
+ self.key_manager
+ .read()
+ .await
+ .as_ref()
+ .map_or(Err(Error::KeyManagerNotInitialized), |key_manager| {
+ Ok(Arc::clone(key_manager))
+ })
+ }
+
+ pub async fn set_cloud_p2p(&self, cloud_p2p: CloudP2P) {
+ self.cloud_p2p.write().await.replace(Arc::new(cloud_p2p));
+ }
+
+ pub async fn cloud_p2p(&self) -> Result<Arc<CloudP2P>, Error> {
+ self.cloud_p2p
+ .read()
+ .await
+ .as_ref()
+ .map_or(Err(Error::CloudP2PNotInitialized), |cloud_p2p| {
+ Ok(Arc::clone(cloud_p2p))
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use sd_cloud_schema::{auth, devices};
+
+ use super::*;
+
+ #[ignore]
+ #[tokio::test]
+ async fn test_client() {
+ let response = CloudServices::new(
+ "http://localhost:9420/cloud-api-address",
+ "http://relay.localhost:9999/",
+ "http://pkarr.localhost:9999/",
+ "dns.localhost:9999".to_string(),
+ "localhost".to_string(),
+ )
+ .await
+ .unwrap()
+ .client()
+ .await
+ .unwrap()
+ .devices()
+ .list(devices::list::Request {
+ access_token: auth::AccessToken("invalid".to_string()),
+ })
+ .await
+ .unwrap();
+
+ assert!(matches!(
+ response,
+ Err(sd_cloud_schema::Error::Client(
+ sd_cloud_schema::error::ClientSideError::Unauthorized
+ ))
+ ));
+ }
+}
diff --git a/core/crates/cloud-services/src/error.rs b/core/crates/cloud-services/src/error.rs
new file mode 100644
index 000000000..f90ee028e
--- /dev/null
+++ b/core/crates/cloud-services/src/error.rs
@@ -0,0 +1,170 @@
+use sd_cloud_schema::{cloud_p2p, sync::groups, Service};
+use sd_utils::error::FileIOError;
+
+use std::{io, net::AddrParseError};
+
+use quic_rpc::{
+ pattern::{bidi_streaming, rpc, server_streaming},
+ transport::quinn::QuinnConnection,
+};
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+ // Setup errors
+ #[error("Couldn't parse Cloud Services API address URL: {0}")]
+ InvalidUrl(reqwest::Error),
+ #[error("Failed to parse Cloud Services API address URL")]
+ FailedToParseRelayUrl,
+ #[error("Failed to initialize http client: {0}")]
+ HttpClientInit(reqwest::Error),
+ #[error("Failed to request Cloud Services API address from Auth Server route: {0}")]
+ FailedToRequestApiAddress(reqwest_middleware::Error),
+ #[error("Auth Server's Cloud Services API address route returned an error: {0}")]
+ AuthServerError(reqwest::Error),
+ #[error(
+ "Failed to extract response body from Auth Server's Cloud Services API address route: {0}"
+ )]
+ FailedToExtractApiAddress(reqwest::Error),
+ #[error("Failed to parse auth server's Cloud Services API address: {0}")]
+ FailedToParseApiAddress(#[from] AddrParseError),
+ #[error("Failed to create endpoint: {0}")]
+ FailedToCreateEndpoint(io::Error),
+
+ // Token refresher errors
+ #[error("Invalid token format, missing claims")]
+ MissingClaims,
+ #[error("Failed to decode access token data: {0}")]
+ DecodeAccessTokenData(#[from] base64::DecodeError),
+ #[error("Failed to deserialize access token json data: {0}")]
+ DeserializeAccessTokenData(#[from] serde_json::Error),
+ #[error("Token expired")]
+ TokenExpired,
+ #[error("Failed to request refresh token: {0}")]
+ RefreshTokenRequest(reqwest_middleware::Error),
+ #[error("Missing tokens on refresh response")]
+ MissingTokensOnRefreshResponse,
+ #[error("Failed to parse token header value to string: {0}")]
+ FailedToParseTokenHeaderValueToString(#[from] reqwest::header::ToStrError),
+
+ // Key Manager errors
+ #[error("Failed to handle File on KeyManager: {0}")]
+ FileIO(#[from] FileIOError),
+ #[error("Failed to handle key store serialization: {0}")]
+ KeyStoreSerialization(rmp_serde::encode::Error),
+ #[error("Failed to handle key store deserialization: {0}")]
+ KeyStoreDeserialization(rmp_serde::decode::Error),
+ #[error("Key store encryption related error: {{context: \"{context}\", source: {source}}}")]
+ KeyStoreCrypto {
+ #[source]
+ source: sd_crypto::Error,
+ context: &'static str,
+ },
+ #[error("Key manager not initialized")]
+ KeyManagerNotInitialized,
+
+ // Cloud P2P errors
+ #[error("Failed to create Cloud P2P endpoint: {0}")]
+ CreateCloudP2PEndpoint(anyhow::Error),
+ #[error("Failed to connect to Cloud P2P node: {0}")]
+ ConnectToCloudP2PNode(anyhow::Error),
+ #[error("Communication error with Cloud P2P node: {0}")]
+ CloudP2PRpcCommunication(#[from] rpc::Error<QuinnConnection<cloud_p2p::Service>>),
+ #[error("Cloud P2P not initialized")]
+ CloudP2PNotInitialized,
+ #[error("Failed to initialize LocalSwarmDiscovery: {0}")]
+ LocalSwarmDiscoveryInit(anyhow::Error),
+ #[error("Failed to initialize DhtDiscovery: {0}")]
+ DhtDiscoveryInit(anyhow::Error),
+
+ // Communication errors
+ #[error("Failed to communicate with RPC backend: {0}")]
+ RpcCommunication(#[from] rpc::Error<QuinnConnection<Service>>),
+ #[error("Failed to communicate with Server Streaming RPC backend: {0}")]
+ ServerStreamCommunication(#[from] server_streaming::Error<QuinnConnection<Service>>),
+ #[error("Failed to receive next response from Server Streaming RPC backend: {0}")]
+ ServerStreamRecv(#[from] server_streaming::ItemError<QuinnConnection<Service>>),
+ #[error("Failed to communicate with Bidi Streaming RPC backend: {0}")]
+ BidiStreamCommunication(#[from] bidi_streaming::Error<QuinnConnection<Service>>),
+ #[error("Failed to receive next response from Bidi Streaming RPC backend: {0}")]
+ BidiStreamRecv(#[from] bidi_streaming::ItemError<QuinnConnection<Service>>),
+ #[error("Error from backend: {0}")]
+ Backend(#[from] sd_cloud_schema::Error),
+ #[error("Failed to get access token from refresher: {0}")]
+ GetToken(#[from] GetTokenError),
+ #[error("Unexpected empty response from backend, context: {0}")]
+ EmptyResponse(&'static str),
+ #[error("Unexpected response from backend, context: {0}")]
+ UnexpectedResponse(&'static str),
+
+ // Sync error
+ #[error("Sync error: {0}")]
+ Sync(#[from] sd_core_sync::Error),
+ #[error("Tried to sync messages with a group without having needed key")]
+ MissingSyncGroupKey(groups::PubId),
+ #[error("Failed to encrypt sync messages: {0}")]
+ Encrypt(sd_crypto::Error),
+ #[error("Failed to decrypt sync messages: {0}")]
+ Decrypt(sd_crypto::Error),
+ #[error("Failed to upload sync messages: {0}")]
+ UploadSyncMessages(reqwest_middleware::Error),
+ #[error("Failed to download sync messages: {0}")]
+ DownloadSyncMessages(reqwest_middleware::Error),
+ #[error("Received an error response from uploading sync messages: {0}")]
+ ErrorResponseUploadSyncMessages(reqwest::Error),
+ #[error("Received an error response from downloading sync messages: {0}")]
+ ErrorResponseDownloadSyncMessages(reqwest::Error),
+ #[error(
+ "Received an error response from downloading sync messages while reading its bytes: {0}"
+ )]
+ ErrorResponseDownloadReadBytesSyncMessages(reqwest::Error),
+ #[error("Critical error while uploading sync messages")]
+ CriticalErrorWhileUploadingSyncMessages,
+ #[error("Failed to send End update to push sync messages")]
+ EndUpdatePushSyncMessages(io::Error),
+ #[error("Unexpected end of stream while encrypting sync messages")]
+ UnexpectedEndOfStream,
+ #[error("Failed to create directory to store timestamp keeper files")]
+ FailedToCreateTimestampKeepersDirectory(io::Error),
+ #[error("Failed to read last timestamp keeper for pulling sync messages: {0}")]
+ FailedToReadLastTimestampKeeper(io::Error),
+ #[error("Failed to handle last timestamp keeper serialization: {0}")]
+ LastTimestampKeeperSerialization(rmp_serde::encode::Error),
+ #[error("Failed to handle last timestamp keeper deserialization: {0}")]
+ LastTimestampKeeperDeserialization(rmp_serde::decode::Error),
+ #[error("Failed to write last timestamp keeper for pulling sync messages: {0}")]
+ FailedToWriteLastTimestampKeeper(io::Error),
+ #[error("Sync messages download and decrypt task panicked")]
+ SyncMessagesDownloadAndDecryptTaskPanicked,
+ #[error("Serialization failure to push sync messages: {0}")]
+ SerializationFailureToPushSyncMessages(rmp_serde::encode::Error),
+ #[error("Deserialization failure to pull sync messages: {0}")]
+ DeserializationFailureToPullSyncMessages(rmp_serde::decode::Error),
+ #[error("Read nonce stream decryption: {0}")]
+ ReadNonceStreamDecryption(io::Error),
+ #[error("Incomplete download bytes sync messages")]
+ IncompleteDownloadBytesSyncMessages,
+
+ // Temporary errors
+ #[error("Device missing secret key for decrypting sync messages")]
+ MissingKeyHash,
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum GetTokenError {
+ #[error("Token refresher not initialized")]
+ RefresherNotInitialized,
+ #[error("Token refresher failed to refresh and need to be initialized again")]
+ FailedToRefresh,
+}
+
+impl From<Error> for rspc::Error {
+ fn from(e: Error) -> Self {
+ Self::with_cause(rspc::ErrorCode::InternalServerError, e.to_string(), e)
+ }
+}
+
+impl From<GetTokenError> for rspc::Error {
+ fn from(e: GetTokenError) -> Self {
+ Self::with_cause(rspc::ErrorCode::InternalServerError, e.to_string(), e)
+ }
+}
diff --git a/core/crates/cloud-services/src/key_manager/key_store.rs b/core/crates/cloud-services/src/key_manager/key_store.rs
new file mode 100644
index 000000000..acf97dad9
--- /dev/null
+++ b/core/crates/cloud-services/src/key_manager/key_store.rs
@@ -0,0 +1,331 @@
+use crate::Error;
+
+use sd_cloud_schema::{
+ sync::{groups, KeyHash},
+ NodeId, SecretKey as IrohSecretKey,
+};
+use sd_crypto::{
+ cloud::{decrypt, encrypt, secret_key::SecretKey},
+ primitives::{EncryptedBlock, OneShotNonce, StreamNonce},
+ CryptoRng,
+};
+use sd_utils::error::FileIOError;
+use tracing::debug;
+
+use std::{
+ collections::{BTreeMap, VecDeque},
+ fs::Metadata,
+ path::PathBuf,
+ pin::pin,
+};
+
+use futures::StreamExt;
+use serde::{Deserialize, Serialize};
+use tokio::{
+ fs,
+ io::{AsyncReadExt, AsyncWriteExt, BufWriter},
+};
+use zeroize::{Zeroize, ZeroizeOnDrop};
+
+type KeyStack = VecDeque<(KeyHash, SecretKey)>;
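+// Per-group keys are kept newest-first: the add_* methods push to the front and
+// get_latest_key reads the front, so rotating a group key is just pushing a new one.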
+
+#[derive(Serialize, Deserialize)]
+pub struct KeyStore {
+ iroh_secret_key: IrohSecretKey,
+ keys: BTreeMap<groups::PubId, KeyStack>,
+}
+
+impl KeyStore {
+ pub const fn new(iroh_secret_key: IrohSecretKey) -> Self {
+ Self {
+ iroh_secret_key,
+ keys: BTreeMap::new(),
+ }
+ }
+
+ pub fn add_key(&mut self, group_pub_id: groups::PubId, key: SecretKey) {
+ self.keys.entry(group_pub_id).or_default().push_front((
+ KeyHash(blake3::hash(key.as_ref()).to_hex().to_string()),
+ key,
+ ));
+ }
+
+ pub fn add_key_with_hash(
+ &mut self,
+ group_pub_id: groups::PubId,
+ key: SecretKey,
+ key_hash: KeyHash,
+ ) {
+ debug!(
+ key_hash = key_hash.0,
+ ?group_pub_id,
+ "Added single cloud sync key to key manager"
+ );
+
+ self.keys
+ .entry(group_pub_id)
+ .or_default()
+ .push_front((key_hash, key));
+ }
+
+ pub fn add_many_keys(
+ &mut self,
+ group_pub_id: groups::PubId,
+ keys: impl IntoIterator<Item = SecretKey>,
+ ) {
+ let group_entry = self.keys.entry(group_pub_id).or_default();
+
+ // We reverse the secret keys as an implementation detail, so they end up in
+ // the same order they were added, since each group's keys form a stack
+ for key in keys.into_iter().rev() {
+ let key_hash = blake3::hash(key.as_ref()).to_hex().to_string();
+
+ debug!(
+ key_hash,
+ ?group_pub_id,
+ "Added cloud sync key to key manager"
+ );
+
+ group_entry.push_front((KeyHash(key_hash), key));
+ }
+ }
+
+ pub fn remove_group(&mut self, group_pub_id: groups::PubId) {
+ self.keys.remove(&group_pub_id);
+ }
+
+ pub fn iroh_secret_key(&self) -> IrohSecretKey {
+ self.iroh_secret_key.clone()
+ }
+
+ pub fn node_id(&self) -> NodeId {
+ self.iroh_secret_key.public()
+ }
+
+ pub fn get_key(&self, group_pub_id: groups::PubId, hash: &KeyHash) -> Option {
+ self.keys.get(&group_pub_id).and_then(|group| {
+ group
+ .iter()
+ .find_map(|(key_hash, key)| (key_hash == hash).then(|| key.clone()))
+ })
+ }
+
+ pub fn get_latest_key(&self, group_pub_id: groups::PubId) -> Option<(KeyHash, SecretKey)> {
+ self.keys
+ .get(&group_pub_id)
+ .and_then(|group| group.front().cloned())
+ }
+
+ pub fn get_group_keys(&self, group_pub_id: groups::PubId) -> Vec {
+ self.keys
+ .get(&group_pub_id)
+ .map(|group| group.iter().map(|(_key_hash, key)| key.clone()).collect())
+ .unwrap_or_default()
+ }
+
+ pub async fn encrypt(
+ &self,
+ key: &SecretKey,
+ rng: &mut CryptoRng,
+ keys_file_path: &PathBuf,
+ ) -> Result<(), Error> {
+ let plain_text_bytes =
+ rmp_serde::to_vec_named(self).map_err(Error::KeyStoreSerialization)?;
+ let mut file = BufWriter::with_capacity(
+ EncryptedBlock::CIPHER_TEXT_SIZE,
+ fs::OpenOptions::new()
+ .create(true)
+ .write(true)
+ .truncate(true)
+ .open(&keys_file_path)
+ .await
+ .map_err(|e| {
+ FileIOError::from((
+ &keys_file_path,
+ e,
+ "Failed to open space keys file to encrypt",
+ ))
+ })?,
+ );
+
+ if plain_text_bytes.len() < EncryptedBlock::PLAIN_TEXT_SIZE {
+ use encrypt::OneShotEncryption;
+
+ let EncryptedBlock { nonce, cipher_text } = key
+ .encrypt(&plain_text_bytes, rng)
+ .map_err(|e| Error::KeyStoreCrypto {
+ source: e,
+ context: "Failed to oneshot encrypt key store",
+ })?;
+
+ file.write_all(nonce.as_slice()).await.map_err(|e| {
+ FileIOError::from((
+ &keys_file_path,
+ e,
+ "Failed to write space keys file oneshot nonce",
+ ))
+ })?;
+
+ file.write_all(cipher_text.as_slice()).await.map_err(|e| {
+ FileIOError::from((
+ &keys_file_path,
+ e,
+ "Failed to write space keys file oneshot cipher text",
+ ))
+ })?;
+ } else {
+ use encrypt::StreamEncryption;
+
+ let (nonce, stream) = key.encrypt(plain_text_bytes.as_slice(), rng);
+
+ file.write_all(nonce.as_slice()).await.map_err(|e| {
+ FileIOError::from((
+ &keys_file_path,
+ e,
+ "Failed to write space keys file stream nonce",
+ ))
+ })?;
+
+ let mut stream = pin!(stream);
+ while let Some(res) = stream.next().await {
+ file.write_all(&res.map_err(|e| Error::KeyStoreCrypto {
+ source: e,
+ context: "Failed to stream encrypt key store",
+ })?)
+ .await
+ .map_err(|e| {
+ FileIOError::from((
+ &keys_file_path,
+ e,
+ "Failed to write space keys file stream cipher text",
+ ))
+ })?;
+ }
+ };
+
+ file.flush().await.map_err(|e| {
+ FileIOError::from((&keys_file_path, e, "Failed to flush space keys file")).into()
+ })
+ }
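+
+ // On-disk layout sketch: the file is `[nonce | cipher text]`. Payloads
+ // smaller than one encrypted block get a `OneShotNonce`, larger ones a
+ // `StreamNonce`; `decrypt` below infers the mode from the file length alone.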
+
+ pub async fn decrypt(
+ key: &SecretKey,
+ metadata: Metadata,
+ keys_file_path: &PathBuf,
+ ) -> Result<Self, Error> {
+ let mut file = fs::File::open(&keys_file_path).await.map_err(|e| {
+ FileIOError::from((
+ keys_file_path,
+ e,
+ "Failed to open space keys file to decrypt",
+ ))
+ })?;
+
+ let usize_file_len =
+ usize::try_from(metadata.len()).expect("Failed to convert metadata length to usize");
+
+ let key_store_bytes =
+ if usize_file_len <= EncryptedBlock::CIPHER_TEXT_SIZE + size_of::<OneShotNonce>() {
+ use decrypt::OneShotDecryption;
+
+ let mut nonce = OneShotNonce::default();
+
+ file.read_exact(&mut nonce).await.map_err(|e| {
+ FileIOError::from((
+ keys_file_path,
+ e,
+ "Failed to read space keys file oneshot nonce",
+ ))
+ })?;
+
+ let mut cipher_text = vec![0u8; usize_file_len - size_of::<OneShotNonce>()];
+
+ file.read_exact(&mut cipher_text).await.map_err(|e| {
+ FileIOError::from((
+ keys_file_path,
+ e,
+ "Failed to read space keys file oneshot cipher text",
+ ))
+ })?;
+
+ key.decrypt_owned(&EncryptedBlock { nonce, cipher_text })
+ .map_err(|e| Error::KeyStoreCrypto {
+ source: e,
+ context: "Failed to oneshot decrypt space keys file",
+ })?
+ } else {
+ use decrypt::StreamDecryption;
+
+ let mut nonce = StreamNonce::default();
+
+ let mut key_store_bytes = Vec::with_capacity(
+ (usize_file_len - size_of::<StreamNonce>()) / EncryptedBlock::CIPHER_TEXT_SIZE
+ * EncryptedBlock::PLAIN_TEXT_SIZE,
+ );
+
+ file.read_exact(&mut nonce).await.map_err(|e| {
+ FileIOError::from((
+ keys_file_path,
+ e,
+ "Failed to read space keys file stream nonce",
+ ))
+ })?;
+
+ key.decrypt(&nonce, &mut file, &mut key_store_bytes)
+ .await
+ .map_err(|e| Error::KeyStoreCrypto {
+ source: e,
+ context: "Failed to stream decrypt space keys file",
+ })?;
+
+ key_store_bytes
+ };
+
+ let this = rmp_serde::from_slice::<Self>(&key_store_bytes)
+ .map_err(Error::KeyStoreDeserialization)?;
+
+ #[cfg(debug_assertions)]
+ {
+ use std::fmt::Write;
+ let mut key_hashes_log = String::new();
+
+ this.keys.iter().for_each(|(group_pub_id, key_stack)| {
+ writeln!(
+ key_hashes_log,
+ "Group: {group_pub_id:?} => KeyHashes: {:?}",
+ key_stack
+ .iter()
+ .map(|(KeyHash(key_hash), _)| key_hash)
+ .collect::<Vec<_>>()
+ )
+ .expect("Failed to write to key hashes log");
+ });
+
+ tracing::info!("Loaded key hashes: {key_hashes_log}");
+ }
+
+ Ok(this)
+ }
+}
+
+/// Zeroizes our secret keys and scrambles iroh's secret key, which doesn't implement `Zeroize`
+impl Zeroize for KeyStore {
+ fn zeroize(&mut self) {
+ self.iroh_secret_key = IrohSecretKey::generate();
+ self.keys.values_mut().for_each(|group| {
+ group
+ .iter_mut()
+ .map(|(_key_hash, key)| key)
+ .for_each(Zeroize::zeroize);
+ });
+ self.keys = BTreeMap::new();
+ }
+}
+
+impl Drop for KeyStore {
+ fn drop(&mut self) {
+ self.zeroize();
+ }
+}
+
+impl ZeroizeOnDrop for KeyStore {}
diff --git a/core/crates/cloud-services/src/key_manager/mod.rs b/core/crates/cloud-services/src/key_manager/mod.rs
new file mode 100644
index 000000000..64007a190
--- /dev/null
+++ b/core/crates/cloud-services/src/key_manager/mod.rs
@@ -0,0 +1,183 @@
+use crate::Error;
+
+use sd_cloud_schema::{
+ sync::{groups, KeyHash},
+ NodeId, SecretKey as IrohSecretKey,
+};
+use sd_crypto::{cloud::secret_key::SecretKey, CryptoRng};
+use sd_utils::error::FileIOError;
+
+use std::{
+ fmt,
+ path::{Path, PathBuf},
+};
+
+use tokio::{fs, sync::RwLock};
+
+mod key_store;
+
+use key_store::KeyStore;
+
+const KEY_FILE_NAME: &str = "space.keys";
+
+pub struct KeyManager {
+ master_key: SecretKey,
+ keys_file_path: PathBuf,
+ store: RwLock<KeyStore>,
+}
+
+impl KeyManager {
+ pub async fn new(
+ master_key: SecretKey,
+ iroh_secret_key: IrohSecretKey,
+ data_directory: impl AsRef<Path> + Send,
+ rng: &mut CryptoRng,
+ ) -> Result<Self, Error> {
+ async fn inner(
+ master_key: SecretKey,
+ iroh_secret_key: IrohSecretKey,
+ keys_file_path: PathBuf,
+ rng: &mut CryptoRng,
+ ) -> Result<KeyManager, Error> {
+ let store = KeyStore::new(iroh_secret_key);
+ store.encrypt(&master_key, rng, &keys_file_path).await?;
+
+ Ok(KeyManager {
+ master_key,
+ keys_file_path,
+ store: RwLock::new(store),
+ })
+ }
+
+ inner(
+ master_key,
+ iroh_secret_key,
+ data_directory.as_ref().join(KEY_FILE_NAME),
+ rng,
+ )
+ .await
+ }
+
+ pub async fn load(
+ master_key: SecretKey,
+ data_directory: impl AsRef<Path> + Send,
+ ) -> Result<Self, Error> {
+ async fn inner(
+ master_key: SecretKey,
+ keys_file_path: PathBuf,
+ ) -> Result<KeyManager, Error> {
+ Ok(KeyManager {
+ store: RwLock::new(
+ KeyStore::decrypt(
+ &master_key,
+ fs::metadata(&keys_file_path).await.map_err(|e| {
+ FileIOError::from((
+ &keys_file_path,
+ e,
+ "Failed to read space keys file",
+ ))
+ })?,
+ &keys_file_path,
+ )
+ .await?,
+ ),
+ master_key,
+ keys_file_path,
+ })
+ }
+
+ inner(master_key, data_directory.as_ref().join(KEY_FILE_NAME)).await
+ }
+
+ pub async fn iroh_secret_key(&self) -> IrohSecretKey {
+ self.store.read().await.iroh_secret_key()
+ }
+
+ pub async fn node_id(&self) -> NodeId {
+ self.store.read().await.node_id()
+ }
+
+ pub async fn add_key(
+ &self,
+ group_pub_id: groups::PubId,
+ key: SecretKey,
+ rng: &mut CryptoRng,
+ ) -> Result<(), Error> {
+ let mut store = self.store.write().await;
+ store.add_key(group_pub_id, key);
+ // Keeping the write lock here, this way we ensure that we can't corrupt the file
+ store
+ .encrypt(&self.master_key, rng, &self.keys_file_path)
+ .await
+ }
+
+ pub async fn add_key_with_hash(
+ &self,
+ group_pub_id: groups::PubId,
+ key: SecretKey,
+ key_hash: KeyHash,
+ rng: &mut CryptoRng,
+ ) -> Result<(), Error> {
+ let mut store = self.store.write().await;
+ store.add_key_with_hash(group_pub_id, key, key_hash);
+ // Keeping the write lock here, this way we ensure that we can't corrupt the file
+ store
+ .encrypt(&self.master_key, rng, &self.keys_file_path)
+ .await
+ }
+
+ pub async fn remove_group(
+ &self,
+ group_pub_id: groups::PubId,
+ rng: &mut CryptoRng,
+ ) -> Result<(), Error> {
+ let mut store = self.store.write().await;
+ store.remove_group(group_pub_id);
+ // Keeping the write lock here, this way we ensure that we can't corrupt the file
+ store
+ .encrypt(&self.master_key, rng, &self.keys_file_path)
+ .await
+ }
+
+ pub async fn add_many_keys(
+ &self,
+ group_pub_id: groups::PubId,
+ keys: impl IntoIterator<
+ Item = SecretKey,
+ IntoIter = impl DoubleEndedIterator<Item = SecretKey> + Send,
+ > + Send,
+ rng: &mut CryptoRng,
+ ) -> Result<(), Error> {
+ let mut store = self.store.write().await;
+ store.add_many_keys(group_pub_id, keys);
+ // Keeping the write lock here, this way we ensure that we can't corrupt the file
+ store
+ .encrypt(&self.master_key, rng, &self.keys_file_path)
+ .await
+ }
+
+ pub async fn get_latest_key(
+ &self,
+ group_pub_id: groups::PubId,
+ ) -> Option<(KeyHash, SecretKey)> {
+ self.store.read().await.get_latest_key(group_pub_id)
+ }
+
+ pub async fn get_key(&self, group_pub_id: groups::PubId, hash: &KeyHash) -> Option {
+ self.store.read().await.get_key(group_pub_id, hash)
+ }
+
+ pub async fn get_group_keys(&self, group_pub_id: groups::PubId) -> Vec<SecretKey> {
+ self.store.read().await.get_group_keys(group_pub_id)
+ }
+}
+
+impl fmt::Debug for KeyManager {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("KeyManager")
+ .field("master_key", &"[REDACTED]")
+ .field("keys_file_path", &self.keys_file_path)
+ .field("store", &"[REDACTED]")
+ .finish()
+ }
+}
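+
+// Usage sketch (hypothetical values; `master_key`, `iroh_secret_key`, `rng`,
+// `data_dir` and `group_pub_id` are assumed to come from the caller):
+//
+//     let manager = KeyManager::new(master_key, iroh_secret_key, &data_dir, &mut rng).await?;
+//     manager.add_key(group_pub_id, secret_key, &mut rng).await?;
+//     let latest = manager.get_latest_key(group_pub_id).await;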
diff --git a/core/crates/cloud-services/src/lib.rs b/core/crates/cloud-services/src/lib.rs
new file mode 100644
index 000000000..615d5397d
--- /dev/null
+++ b/core/crates/cloud-services/src/lib.rs
@@ -0,0 +1,55 @@
+#![recursion_limit = "256"]
+#![warn(
+ clippy::all,
+ clippy::pedantic,
+ clippy::correctness,
+ clippy::perf,
+ clippy::style,
+ clippy::suspicious,
+ clippy::complexity,
+ clippy::nursery,
+ clippy::unwrap_used,
+ unused_qualifications,
+ rust_2018_idioms,
+ trivial_casts,
+ trivial_numeric_casts,
+ unused_allocation,
+ clippy::unnecessary_cast,
+ clippy::cast_lossless,
+ clippy::cast_possible_truncation,
+ clippy::cast_possible_wrap,
+ clippy::cast_precision_loss,
+ clippy::cast_sign_loss,
+ clippy::dbg_macro,
+ clippy::deprecated_cfg_attr,
+ clippy::separated_literal_suffix,
+ deprecated
+)]
+#![forbid(deprecated_in_future)]
+#![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)]
+
+mod error;
+
+mod client;
+mod key_manager;
+mod p2p;
+mod sync;
+mod token_refresher;
+
+pub use client::CloudServices;
+pub use error::{Error, GetTokenError};
+pub use key_manager::KeyManager;
+pub use p2p::{
+ CloudP2P, JoinSyncGroupResponse, JoinedLibraryCreateArgs, NotifyUser, Ticket, UserResponse,
+};
+pub use sync::{
+ declare_actors as declare_cloud_sync, SyncActors as CloudSyncActors,
+ SyncActorsState as CloudSyncActorsState,
+};
+
+// Re-exports
+pub use quic_rpc::transport::quinn::QuinnConnection;
+
+// Export URL for the auth server
+pub const AUTH_SERVER_URL: &str = "https://auth.spacedrive.com";
+// pub const AUTH_SERVER_URL: &str = "http://localhost:9420";
diff --git a/core/crates/cloud-services/src/p2p/mod.rs b/core/crates/cloud-services/src/p2p/mod.rs
new file mode 100644
index 000000000..0f31f977c
--- /dev/null
+++ b/core/crates/cloud-services/src/p2p/mod.rs
@@ -0,0 +1,242 @@
+use crate::{sync::ReceiveAndIngestNotifiers, CloudServices, Error};
+
+use sd_cloud_schema::{
+ cloud_p2p::{authorize_new_device_in_sync_group, CloudP2PALPN, CloudP2PError},
+ devices::{self, Device},
+ libraries,
+ sync::groups::{self, GroupWithDevices},
+ SecretKey as IrohSecretKey,
+};
+use sd_crypto::{CryptoRng, SeedableRng};
+
+use std::{sync::Arc, time::Duration};
+
+use iroh_net::{
+ discovery::{
+ dns::DnsDiscovery, local_swarm_discovery::LocalSwarmDiscovery, pkarr::dht::DhtDiscovery,
+ ConcurrentDiscovery, Discovery,
+ },
+ relay::{RelayMap, RelayMode, RelayUrl},
+ Endpoint, NodeId,
+};
+use reqwest::Url;
+use serde::{Deserialize, Serialize};
+use tokio::{spawn, sync::oneshot, time::sleep};
+use tracing::{debug, error, warn};
+
+mod new_sync_messages_notifier;
+mod runner;
+
+use runner::Runner;
+
+#[derive(Debug)]
+pub struct JoinedLibraryCreateArgs {
+ pub pub_id: libraries::PubId,
+ pub name: String,
+ pub description: Option<String>,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, specta::Type)]
+#[serde(transparent)]
+#[repr(transparent)]
+#[specta(rename = "CloudP2PTicket")]
+pub struct Ticket(u64);
+
+#[derive(Debug, Serialize, specta::Type)]
+#[serde(tag = "kind", content = "data")]
+#[specta(rename = "CloudP2PNotifyUser")]
+pub enum NotifyUser {
+ ReceivedJoinSyncGroupRequest {
+ ticket: Ticket,
+ asking_device: Device,
+ sync_group: GroupWithDevices,
+ },
+ ReceivedJoinSyncGroupResponse {
+ response: JoinSyncGroupResponse,
+ sync_group: GroupWithDevices,
+ },
+ SendingJoinSyncGroupResponseError {
+ error: JoinSyncGroupError,
+ sync_group: GroupWithDevices,
+ },
+ TimedOutJoinRequest {
+ device: Device,
+ succeeded: bool,
+ },
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, specta::Type)]
+pub enum JoinSyncGroupError {
+ Communication,
+ InternalServer,
+ Auth,
+}
+
+#[derive(Debug, Serialize, specta::Type)]
+pub enum JoinSyncGroupResponse {
+ Accepted { authorizor_device: Device },
+ Failed(CloudP2PError),
+ CriticalError,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, specta::Type)]
+pub struct BasicLibraryCreationArgs {
+ pub id: libraries::PubId,
+ pub name: String,
+ pub description: Option<String>,
+}
+
+#[derive(Debug, Deserialize, specta::Type)]
+#[serde(tag = "kind", content = "data")]
+#[specta(rename = "CloudP2PUserResponse")]
+pub enum UserResponse {
+ AcceptDeviceInSyncGroup {
+ ticket: Ticket,
+ accepted: Option<BasicLibraryCreationArgs>,
+ },
+}
+
+#[derive(Debug, Clone)]
+pub struct CloudP2P {
+ msgs_tx: flume::Sender<runner::Message>,
+}
+
+impl CloudP2P {
+ pub async fn new(
+ current_device_pub_id: devices::PubId,
+ cloud_services: &CloudServices,
+ mut rng: CryptoRng,
+ iroh_secret_key: IrohSecretKey,
+ dns_origin_domain: String,
+ dns_pkarr_url: Url,
+ relay_url: RelayUrl,
+ ) -> Result<Self, Error> {
+ let dht_discovery = DhtDiscovery::builder()
+ .secret_key(iroh_secret_key.clone())
+ .pkarr_relay(dns_pkarr_url)
+ .build()
+ .map_err(Error::DhtDiscoveryInit)?;
+
+ let endpoint = Endpoint::builder()
+ .alpns(vec![CloudP2PALPN::LATEST.to_vec()])
+ .discovery(Box::new(ConcurrentDiscovery::from_services(vec![
+ Box::new(DnsDiscovery::new(dns_origin_domain)),
+ Box::new(
+ LocalSwarmDiscovery::new(iroh_secret_key.public())
+ .map_err(Error::LocalSwarmDiscoveryInit)?,
+ ),
+ Box::new(dht_discovery.clone()),
+ ])))
+ .secret_key(iroh_secret_key)
+ .relay_mode(RelayMode::Custom(RelayMap::from_url(relay_url)))
+ .bind()
+ .await
+ .map_err(Error::CreateCloudP2PEndpoint)?;
+
+ spawn({
+ let endpoint = endpoint.clone();
+ async move {
+ loop {
+ let Ok(node_addr) = endpoint.node_addr().await.map_err(|e| {
+ warn!(?e, "Failed to get direct addresses to force publish on DHT");
+ }) else {
+ sleep(Duration::from_secs(5)).await;
+ continue;
+ };
+
+ debug!("Force publishing peer on DHT");
+ return dht_discovery.publish(&node_addr.info);
+ }
+ }
+ });
+
+ let (msgs_tx, msgs_rx) = flume::bounded(16);
+
+ spawn({
+ let runner = Runner::new(
+ current_device_pub_id,
+ cloud_services,
+ msgs_tx.clone(),
+ endpoint,
+ )
+ .await?;
+ let user_response_rx = cloud_services.user_response_rx.clone();
+
+ async move {
+ // All cloned runners share a single state with internal mutability
+ while let Err(e) = spawn(runner.clone().run(
+ msgs_rx.clone(),
+ user_response_rx.clone(),
+ CryptoRng::from_seed(rng.generate_fixed()),
+ ))
+ .await
+ {
+ if e.is_panic() {
+ error!("Cloud P2P runner panicked");
+ } else {
+ break;
+ }
+ }
+ }
+ });
+
+ Ok(Self { msgs_tx })
+ }
+
+ /// Asks the first reachable device in the group for permission for the current device
+ /// to join the sync group
+ ///
+ /// # Panics
+ /// Will panic if the actor channel is closed, which should never happen
+ pub async fn request_join_sync_group(
+ &self,
+ devices_in_group: Vec<(devices::PubId, NodeId)>,
+ req: authorize_new_device_in_sync_group::Request,
+ tx: oneshot::Sender<JoinedLibraryCreateArgs>,
+ ) {
+ self.msgs_tx
+ .send_async(runner::Message::Request(runner::Request::JoinSyncGroup {
+ req,
+ devices_in_group,
+ tx,
+ }))
+ .await
+ .expect("Channel closed");
+ }
+
+ /// Register a notifier for the desired sync group, which will notify the receiver actor when
+ /// new sync messages arrive through cloud p2p notification requests.
+ ///
+ /// # Panics
+ /// Will panic if the actor channel is closed, which should never happen
+ pub async fn register_sync_messages_receiver_notifier(
+ &self,
+ sync_group_pub_id: groups::PubId,
+ notifier: Arc<ReceiveAndIngestNotifiers>,
+ ) {
+ self.msgs_tx
+ .send_async(runner::Message::RegisterSyncMessageNotifier((
+ sync_group_pub_id,
+ notifier,
+ )))
+ .await
+ .expect("Channel closed");
+ }
+
+ /// Emit a notification that new sync messages were sent to cloud, so other devices should pull
+ /// them as soon as possible.
+ ///
+ /// # Panics
+ /// Will panic if the actor channel is closed, which should never happen
+ pub async fn notify_new_sync_messages(&self, group_pub_id: groups::PubId) {
+ self.msgs_tx
+ .send_async(runner::Message::NotifyPeersSyncMessages(group_pub_id))
+ .await
+ .expect("Channel closed");
+ }
+}
+
+impl Drop for CloudP2P {
+ fn drop(&mut self) {
+ self.msgs_tx.send(runner::Message::Stop).ok();
+ }
+}
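+
+// Usage sketch (hypothetical endpoint parameters, not part of this diff):
+//
+//     let cloud_p2p = CloudP2P::new(device_pub_id, &cloud_services, rng, iroh_secret_key,
+//         dns_origin_domain, dns_pkarr_url, relay_url).await?;
+//     cloud_p2p.notify_new_sync_messages(group_pub_id).await;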
diff --git a/core/crates/cloud-services/src/p2p/new_sync_messages_notifier.rs b/core/crates/cloud-services/src/p2p/new_sync_messages_notifier.rs
new file mode 100644
index 000000000..f4d0a3751
--- /dev/null
+++ b/core/crates/cloud-services/src/p2p/new_sync_messages_notifier.rs
@@ -0,0 +1,156 @@
+use crate::{token_refresher::TokenRefresher, Error};
+
+use sd_cloud_schema::{
+ cloud_p2p::{Client, CloudP2PALPN, Service},
+ devices,
+ sync::groups,
+};
+
+use std::time::Duration;
+
+use futures_concurrency::future::Join;
+use iroh_net::{Endpoint, NodeId};
+use quic_rpc::{transport::quinn::QuinnConnection, RpcClient};
+use tokio::time::Instant;
+use tracing::{debug, error, instrument, warn};
+
+use super::runner::Message;
+
+const CACHED_MAX_DURATION: Duration = Duration::from_secs(60 * 5);
+
+pub async fn dispatch_notifier(
+ group_pub_id: groups::PubId,
+ device_pub_id: devices::PubId,
+ devices: Option<(Instant, Vec<(devices::PubId, NodeId)>)>,
+ msgs_tx: flume::Sender<Message>,
+ cloud_services: sd_cloud_schema::Client<
+ QuinnConnection<sd_cloud_schema::Service>,
+ sd_cloud_schema::Service,
+ >,
+ token_refresher: TokenRefresher,
+ endpoint: Endpoint,
+) {
+ match notify_peers(
+ group_pub_id,
+ device_pub_id,
+ devices,
+ cloud_services,
+ token_refresher,
+ endpoint,
+ )
+ .await
+ {
+ Ok((true, devices)) => {
+ if msgs_tx
+ .send_async(Message::UpdateCachedDevices((group_pub_id, devices)))
+ .await
+ .is_err()
+ {
+ warn!("Failed to send update cached devices message to update cached devices");
+ }
+ }
+
+ Ok((false, _)) => {}
+
+ Err(e) => {
+ error!(?e, "Failed to notify peers");
+ }
+ }
+}
+
+#[instrument(skip(cloud_services, token_refresher, endpoint))]
+async fn notify_peers(
+ group_pub_id: groups::PubId,
+ device_pub_id: devices::PubId,
+ devices: Option<(Instant, Vec<(devices::PubId, NodeId)>)>,
+ cloud_services: sd_cloud_schema::Client<
+ QuinnConnection<sd_cloud_schema::Service>,
+ sd_cloud_schema::Service,
+ >,
+ token_refresher: TokenRefresher,
+ endpoint: Endpoint,
+) -> Result<(bool, Vec<(devices::PubId, NodeId)>), Error> {
+ let (devices, update_cache) = match devices {
+ Some((when, devices)) if when.elapsed() < CACHED_MAX_DURATION => (devices, false),
+ _ => {
+ debug!("Fetching devices connection ids for group");
+ let groups::get::Response(groups::get::ResponseKind::DevicesConnectionIds(devices)) =
+ cloud_services
+ .sync()
+ .groups()
+ .get(groups::get::Request {
+ access_token: token_refresher.get_access_token().await?,
+ pub_id: group_pub_id,
+ kind: groups::get::RequestKind::DevicesConnectionIds,
+ })
+ .await??
+ else {
+ unreachable!("Only DevicesConnectionIds response is expected, as we requested it");
+ };
+
+ (devices, true)
+ }
+ };
+
+ send_notifications(group_pub_id, device_pub_id, &devices, &endpoint).await;
+
+ Ok((update_cache, devices))
+}
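+
+// Cache semantics sketch: a cached device list is reused while younger than
+// CACHED_MAX_DURATION (5 minutes); only a fresh fetch returns `update_cache ==
+// true`, prompting the runner to store it via `Message::UpdateCachedDevices`.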
+
+async fn send_notifications(
+ group_pub_id: groups::PubId,
+ device_pub_id: devices::PubId,
+ devices: &[(devices::PubId, NodeId)],
+ endpoint: &Endpoint,
+) {
+ devices
+ .iter()
+ .filter(|(peer_device_pub_id, _)| *peer_device_pub_id != device_pub_id)
+ .map(|(peer_device_pub_id, connection_id)| async move {
+ if let Err(e) =
+ connect_and_send_notification(group_pub_id, device_pub_id, connection_id, endpoint)
+ .await
+ {
+ // Using just a debug log here because we don't want to spam the logs with
+ // every single notification failure, as this is more of a nice-to-have feature
+ // than a critical one
+ debug!(?e, %peer_device_pub_id, "Failed to send new sync messages notification to peer");
+ } else {
+ debug!(%peer_device_pub_id, "Sent new sync messages notification to peer");
+ }
+ })
+ .collect::<Vec<_>>()
+ .join()
+ .await;
+}
+
+async fn connect_and_send_notification(
+ group_pub_id: groups::PubId,
+ device_pub_id: devices::PubId,
+ connection_id: &NodeId,
+ endpoint: &Endpoint,
+) -> Result<(), Error> {
+ let client = Client::new(RpcClient::new(QuinnConnection::<Service>::from_connection(
+ endpoint
+ .connect(*connection_id, CloudP2PALPN::LATEST)
+ .await
+ .map_err(Error::ConnectToCloudP2PNode)?,
+ )));
+
+ if let Err(e) = client
+ .notify_new_sync_messages(
+ sd_cloud_schema::cloud_p2p::notify_new_sync_messages::Request {
+ sync_group_pub_id: group_pub_id,
+ device_pub_id,
+ },
+ )
+ .await?
+ {
+ warn!(
+ ?e,
+ "This route shouldn't return an error, it's just a notification",
+ );
+ };
+
+ Ok(())
+}
diff --git a/core/crates/cloud-services/src/p2p/runner.rs b/core/crates/cloud-services/src/p2p/runner.rs
new file mode 100644
index 000000000..3dfc33be2
--- /dev/null
+++ b/core/crates/cloud-services/src/p2p/runner.rs
@@ -0,0 +1,651 @@
+use crate::{
+ p2p::JoinSyncGroupError, sync::ReceiveAndIngestNotifiers, token_refresher::TokenRefresher,
+ CloudServices, Error, KeyManager,
+};
+
+use sd_cloud_schema::{
+ cloud_p2p::{
+ self, authorize_new_device_in_sync_group, notify_new_sync_messages, Client, CloudP2PALPN,
+ CloudP2PError, Service,
+ },
+ devices::{self, Device},
+ sync::groups,
+};
+use sd_crypto::{CryptoRng, SeedableRng};
+
+use std::{
+ collections::HashMap,
+ pin::pin,
+ sync::{
+ atomic::{AtomicU64, Ordering},
+ Arc,
+ },
+ time::Duration,
+};
+
+use dashmap::DashMap;
+use flume::SendError;
+use futures::StreamExt;
+use futures_concurrency::stream::Merge;
+use iroh_net::{Endpoint, NodeId};
+use quic_rpc::{
+ server::{Accepting, RpcChannel, RpcServerError},
+ transport::quinn::{QuinnConnection, QuinnServerEndpoint},
+ RpcClient, RpcServer,
+};
+use tokio::{
+ spawn,
+ sync::{oneshot, Mutex},
+ task::JoinHandle,
+ time::{interval, Instant, MissedTickBehavior},
+};
+use tokio_stream::wrappers::IntervalStream;
+use tracing::{debug, error, warn};
+
+use super::{
+ new_sync_messages_notifier::dispatch_notifier, BasicLibraryCreationArgs, JoinSyncGroupResponse,
+ JoinedLibraryCreateArgs, NotifyUser, Ticket, UserResponse,
+};
+
+const TEN_SECONDS: Duration = Duration::from_secs(10);
+const FIVE_MINUTES: Duration = Duration::from_secs(60 * 5);
+
+#[allow(clippy::large_enum_variant)] // Ignoring because the Stop variant will only ever be sent once
+pub enum Message {
+ Request(Request),
+ RegisterSyncMessageNotifier((groups::PubId, Arc<ReceiveAndIngestNotifiers>)),
+ NotifyPeersSyncMessages(groups::PubId),
+ UpdateCachedDevices((groups::PubId, Vec<(devices::PubId, NodeId)>)),
+ Stop,
+}
+
+pub enum Request {
+ JoinSyncGroup {
+ req: authorize_new_device_in_sync_group::Request,
+ devices_in_group: Vec<(devices::PubId, NodeId)>,
+ tx: oneshot::Sender<JoinedLibraryCreateArgs>,
+ },
+}
+
+/// We use internal mutability here, but don't worry because there will always be a single
+/// [`Runner`] running at a time, so the lock is never contended
+pub struct Runner {
+ current_device_pub_id: devices::PubId,
+ token_refresher: TokenRefresher,
+ cloud_services: sd_cloud_schema::Client<
+ QuinnConnection<sd_cloud_schema::Service>,
+ sd_cloud_schema::Service,
+ >,
+ msgs_tx: flume::Sender<Message>,
+ endpoint: Endpoint,
+ key_manager: Arc<KeyManager>,
+ ticketer: Arc<AtomicU64>,
+ notify_user_tx: flume::Sender<NotifyUser>,
+ sync_messages_receiver_notifiers_map:
+ Arc<DashMap<groups::PubId, Arc<ReceiveAndIngestNotifiers>>>,
+ pending_sync_group_join_requests: Arc<Mutex<HashMap<Ticket, PendingSyncGroupJoin>>>,
+ cached_devices_per_group: HashMap<groups::PubId, (Instant, Vec<(devices::PubId, NodeId)>)>,
+ timeout_checker_buffer: Vec<(Ticket, PendingSyncGroupJoin)>,
+}
+
+impl Clone for Runner {
+ fn clone(&self) -> Self {
+ Self {
+ current_device_pub_id: self.current_device_pub_id,
+ token_refresher: self.token_refresher.clone(),
+ cloud_services: self.cloud_services.clone(),
+ msgs_tx: self.msgs_tx.clone(),
+ endpoint: self.endpoint.clone(),
+ key_manager: Arc::clone(&self.key_manager),
+ ticketer: Arc::clone(&self.ticketer),
+ notify_user_tx: self.notify_user_tx.clone(),
+ sync_messages_receiver_notifiers_map: Arc::clone(
+ &self.sync_messages_receiver_notifiers_map,
+ ),
+ pending_sync_group_join_requests: Arc::clone(&self.pending_sync_group_join_requests),
+ // Cache of devices and their node_ids per group; each clone starts with an empty one
+ cached_devices_per_group: HashMap::new(),
+ // This one is a temporary buffer only used by the timeout checker
+ timeout_checker_buffer: vec![],
+ }
+ }
+}
+
+struct PendingSyncGroupJoin {
+ channel: RpcChannel<Service, QuinnServerEndpoint<Service>>,
+ request: authorize_new_device_in_sync_group::Request,
+ this_device: Device,
+ since: Instant,
+}
+
+impl Runner {
+ pub async fn new(
+ current_device_pub_id: devices::PubId,
+ cloud_services: &CloudServices,
+ msgs_tx: flume::Sender<Message>,
+ endpoint: Endpoint,
+ ) -> Result<Self, Error> {
+ Ok(Self {
+ current_device_pub_id,
+ token_refresher: cloud_services.token_refresher.clone(),
+ cloud_services: cloud_services.client().await?,
+ msgs_tx,
+ endpoint,
+ key_manager: cloud_services.key_manager().await?,
+ ticketer: Arc::default(),
+ notify_user_tx: cloud_services.notify_user_tx.clone(),
+ sync_messages_receiver_notifiers_map: Arc::default(),
+ pending_sync_group_join_requests: Arc::default(),
+ cached_devices_per_group: HashMap::new(),
+ timeout_checker_buffer: vec![],
+ })
+ }
+
+ pub async fn run(
+ mut self,
+ msgs_rx: flume::Receiver<Message>,
+ user_response_rx: flume::Receiver<UserResponse>,
+ mut rng: CryptoRng,
+ ) {
+ // Ignoring because this is only used internally and I think that boxing will be more expensive than wasting
+ // some extra bytes for smaller variants
+ #[allow(clippy::large_enum_variant)]
+ enum StreamMessage {
+ AcceptResult(
+ Result<
+ Accepting<Service, QuinnServerEndpoint<Service>>,
+ RpcServerError<QuinnServerEndpoint<Service>>,
+ >,
+ ),
+ Message(Message),
+ UserResponse(UserResponse),
+ Tick,
+ }
+
+ let mut ticker = interval(TEN_SECONDS);
+ ticker.set_missed_tick_behavior(MissedTickBehavior::Skip);
+
+ // FIXME(@fogodev): Update this function to use iroh-net transport instead of quinn
+ // when it's implemented
+ let (server, server_handle) = setup_server_endpoint(self.endpoint.clone());
+
+ let mut msg_stream = pin!((
+ async_stream::stream! {
+ loop {
+ yield StreamMessage::AcceptResult(server.accept().await);
+ }
+ },
+ msgs_rx.stream().map(StreamMessage::Message),
+ user_response_rx.stream().map(StreamMessage::UserResponse),
+ IntervalStream::new(ticker).map(|_| StreamMessage::Tick),
+ )
+ .merge());
+
+ while let Some(msg) = msg_stream.next().await {
+ match msg {
+ StreamMessage::AcceptResult(Ok(accepting)) => {
+ let Ok((request, channel)) = accepting.read_first().await.map_err(|e| {
+ error!(?e, "Failed to read first request from a new connection;");
+ }) else {
+ continue;
+ };
+
+ self.handle_request(request, channel).await;
+ }
+
+ StreamMessage::AcceptResult(Err(e)) => {
+ // TODO(@fogodev): Maybe report this error to the user on a toast?
+ error!(?e, "Error accepting connection;");
+ }
+
+ StreamMessage::Message(Message::Request(Request::JoinSyncGroup {
+ req,
+ devices_in_group,
+ tx,
+ })) => self.dispatch_join_requests(req, devices_in_group, &mut rng, tx),
+
+ StreamMessage::Message(Message::RegisterSyncMessageNotifier((
+ group_pub_id,
+ notifier,
+ ))) => {
+ self.sync_messages_receiver_notifiers_map
+ .insert(group_pub_id, notifier);
+ }
+
+ StreamMessage::Message(Message::NotifyPeersSyncMessages(group_pub_id)) => {
+ spawn(dispatch_notifier(
+ group_pub_id,
+ self.current_device_pub_id,
+ self.cached_devices_per_group.get(&group_pub_id).cloned(),
+ self.msgs_tx.clone(),
+ self.cloud_services.clone(),
+ self.token_refresher.clone(),
+ self.endpoint.clone(),
+ ));
+ }
+
+ StreamMessage::Message(Message::UpdateCachedDevices((
+ group_pub_id,
+ devices_connections_ids,
+ ))) => {
+ self.cached_devices_per_group
+ .insert(group_pub_id, (Instant::now(), devices_connections_ids));
+ }
+
+ StreamMessage::UserResponse(UserResponse::AcceptDeviceInSyncGroup {
+ ticket,
+ accepted,
+ }) => {
+ self.handle_join_response(ticket, accepted).await;
+ }
+
+ StreamMessage::Tick => self.tick().await,
+
+ StreamMessage::Message(Message::Stop) => {
+ server_handle.abort();
+ break;
+ }
+ }
+ }
+ }
+
+ fn dispatch_join_requests(
+ &self,
+ req: authorize_new_device_in_sync_group::Request,
+ devices_in_group: Vec<(devices::PubId, NodeId)>,
+ rng: &mut CryptoRng,
+ tx: oneshot::Sender<JoinedLibraryCreateArgs>,
+ ) {
+ async fn inner(
+ key_manager: Arc<KeyManager>,
+ endpoint: Endpoint,
+ mut rng: CryptoRng,
+ req: authorize_new_device_in_sync_group::Request,
+ devices_in_group: Vec<(devices::PubId, NodeId)>,
+ tx: oneshot::Sender<JoinedLibraryCreateArgs>,
+ ) -> Result<JoinSyncGroupResponse, Error> {
+ let group_pub_id = req.sync_group.pub_id;
+ loop {
+ let client =
+ match connect_to_first_available_client(&endpoint, &devices_in_group).await {
+ Ok(client) => client,
+ Err(e) => {
+ return Ok(JoinSyncGroupResponse::Failed(e));
+ }
+ };
+
+ match client
+ .authorize_new_device_in_sync_group(req.clone())
+ .await?
+ {
+ Ok(authorize_new_device_in_sync_group::Response {
+ authorizor_device,
+ keys,
+ library_pub_id,
+ library_name,
+ library_description,
+ }) => {
+ debug!(
+ device_pub_id = %authorizor_device.pub_id,
+ %group_pub_id,
+ keys_count = keys.len(),
+ %library_pub_id,
+ library_name,
+ "Received join sync group response"
+ );
+
+ key_manager
+ .add_many_keys(
+ group_pub_id,
+ keys.into_iter().map(|key| {
+ key.as_slice()
+ .try_into()
+ .expect("critical error, backend has invalid secret keys")
+ }),
+ &mut rng,
+ )
+ .await?;
+
+ if tx
+ .send(JoinedLibraryCreateArgs {
+ pub_id: library_pub_id,
+ name: library_name,
+ description: library_description,
+ })
+ .is_err()
+ {
+ error!("Failed to handle library creation locally from received library data");
+ return Ok(JoinSyncGroupResponse::CriticalError);
+ }
+
+ return Ok(JoinSyncGroupResponse::Accepted { authorizor_device });
+ }
+
+ // In case of timeout, we will try again
+ Err(CloudP2PError::TimedOut) => continue,
+
+ Err(e) => return Ok(JoinSyncGroupResponse::Failed(e)),
+ }
+ }
+ }
+
+ spawn({
+ let endpoint = self.endpoint.clone();
+ let notify_user_tx = self.notify_user_tx.clone();
+ let key_manager = Arc::clone(&self.key_manager);
+ let rng = CryptoRng::from_seed(rng.generate_fixed());
+ async move {
+ let sync_group = req.sync_group.clone();
+
+ if let Err(SendError(response)) = notify_user_tx
+ .send_async(NotifyUser::ReceivedJoinSyncGroupResponse {
+ response: inner(key_manager, endpoint, rng, req, devices_in_group, tx)
+ .await
+ .unwrap_or_else(|e| {
+ error!(
+ ?e,
+ "Failed to issue authorize new device in sync group request;"
+ );
+ JoinSyncGroupResponse::CriticalError
+ }),
+ sync_group,
+ })
+ .await
+ {
+ error!(?response, "Failed to send response to user;");
+ }
+ }
+ });
+ }
+
+ async fn handle_request(
+ &self,
+ request: cloud_p2p::Request,
+ channel: RpcChannel<Service, QuinnServerEndpoint<Service>>,
+ ) {
+ match request {
+ cloud_p2p::Request::AuthorizeNewDeviceInSyncGroup(
+ authorize_new_device_in_sync_group::Request {
+ sync_group,
+ asking_device,
+ },
+ ) => {
+ let ticket = Ticket(self.ticketer.fetch_add(1, Ordering::Relaxed));
+ let this_device = sync_group
+ .devices
+ .iter()
+ .find(|device| device.pub_id == self.current_device_pub_id)
+ .expect(
+ "current device must be in the sync group, otherwise we wouldn't be here",
+ )
+ .clone();
+
+ self.notify_user_tx
+ .send_async(NotifyUser::ReceivedJoinSyncGroupRequest {
+ ticket,
+ asking_device: asking_device.clone(),
+ sync_group: sync_group.clone(),
+ })
+ .await
+ .expect("notify_user_tx must never closes!");
+
+ self.pending_sync_group_join_requests.lock().await.insert(
+ ticket,
+ PendingSyncGroupJoin {
+ channel,
+ request: authorize_new_device_in_sync_group::Request {
+ sync_group,
+ asking_device,
+ },
+ this_device,
+ since: Instant::now(),
+ },
+ );
+ }
+
+ cloud_p2p::Request::NotifyNewSyncMessages(req) => {
+ if let Err(e) = channel
+ .rpc(
+ req,
+ (),
+ |(),
+ notify_new_sync_messages::Request {
+ sync_group_pub_id,
+ device_pub_id,
+ }| async move {
+ debug!(%sync_group_pub_id, %device_pub_id, "Received new sync messages notification");
+ if let Some(notifier) = self
+ .sync_messages_receiver_notifiers_map
+ .get(&sync_group_pub_id)
+ {
+ notifier.notify_receiver();
+ } else {
+ warn!("Received new sync messages notification for unknown sync group");
+ }
+
+ Ok(notify_new_sync_messages::Response)
+ },
+ )
+ .await
+ {
+ error!(
+ ?e,
+ "Failed to reply to new sync messages notification request"
+ );
+ }
+ }
+ }
+ }
+
+ async fn handle_join_response(
+ &self,
+ ticket: Ticket,
+ accepted: Option<BasicLibraryCreationArgs>,
+ ) {
+ let Some(PendingSyncGroupJoin {
+ channel,
+ request,
+ this_device,
+ ..
+ }) = self
+ .pending_sync_group_join_requests
+ .lock()
+ .await
+ .remove(&ticket)
+ else {
+ warn!("Received join response for unknown ticket; We probably timed out this request already");
+ return;
+ };
+
+ let sync_group = request.sync_group.clone();
+ let asking_device_pub_id = request.asking_device.pub_id;
+
+ let was_accepted = accepted.is_some();
+
+ let response = if let Some(BasicLibraryCreationArgs {
+ id: library_pub_id,
+ name: library_name,
+ description: library_description,
+ }) = accepted
+ {
+ Ok(authorize_new_device_in_sync_group::Response {
+ authorizor_device: this_device,
+ keys: self
+ .key_manager
+ .get_group_keys(request.sync_group.pub_id)
+ .await
+ .into_iter()
+ .map(Into::into)
+ .collect(),
+ library_pub_id,
+ library_name,
+ library_description,
+ })
+ } else {
+ Err(CloudP2PError::Rejected)
+ };
+
+ if let Err(e) = channel
+ .rpc(request, (), |(), _req| async move { response })
+ .await
+ {
+ error!(?e, "Failed to send response to user;");
+ self.notify_join_error(sync_group, JoinSyncGroupError::Communication)
+ .await;
+
+ return;
+ }
+
+ if was_accepted {
+ let Ok(access_token) = self
+ .token_refresher
+ .get_access_token()
+ .await
+ .map_err(|e| error!(?e, "Failed to get access token;"))
+ else {
+ self.notify_join_error(sync_group, JoinSyncGroupError::Auth)
+ .await;
+ return;
+ };
+
+ match self
+ .cloud_services
+ .sync()
+ .groups()
+ .reply_join_request(groups::reply_join_request::Request {
+ access_token,
+ group_pub_id: sync_group.pub_id,
+ authorized_device_pub_id: asking_device_pub_id,
+ authorizor_device_pub_id: self.current_device_pub_id,
+ })
+ .await
+ {
+ Ok(Ok(groups::reply_join_request::Response)) => {
+ // Everything is Awesome!
+ }
+ Ok(Err(e)) => {
+ error!(?e, "Failed to reply to join request");
+ self.notify_join_error(sync_group, JoinSyncGroupError::InternalServer)
+ .await;
+ }
+ Err(e) => {
+ error!(?e, "Failed to send reply to join request");
+ self.notify_join_error(sync_group, JoinSyncGroupError::Communication)
+ .await;
+ }
+ }
+ }
+ }
+
+ async fn notify_join_error(
+ &self,
+ sync_group: groups::GroupWithDevices,
+ error: JoinSyncGroupError,
+ ) {
+ self.notify_user_tx
+ .send_async(NotifyUser::SendingJoinSyncGroupResponseError { error, sync_group })
+ .await
+ .expect("notify_user_tx must never closes!");
+ }
+
+ async fn tick(&mut self) {
+ self.timeout_checker_buffer.clear();
+
+ let mut pending_sync_group_join_requests =
+ self.pending_sync_group_join_requests.lock().await;
+
+ for (ticket, pending_sync_group_join) in pending_sync_group_join_requests.drain() {
+ if pending_sync_group_join.since.elapsed() > FIVE_MINUTES {
+ let PendingSyncGroupJoin {
+ channel, request, ..
+ } = pending_sync_group_join;
+
+ let asking_device = request.asking_device.clone();
+
+ let notify_message = match channel
+ .rpc(request, (), |(), _req| async move {
+ Err(CloudP2PError::TimedOut)
+ })
+ .await
+ {
+ Ok(()) => NotifyUser::TimedOutJoinRequest {
+ device: asking_device,
+ succeeded: true,
+ },
+ Err(e) => {
+ error!(?e, "Failed to send timed out response to user;");
+ NotifyUser::TimedOutJoinRequest {
+ device: asking_device,
+ succeeded: false,
+ }
+ }
+ };
+
+ self.notify_user_tx
+ .send_async(notify_message)
+ .await
+ .expect("notify_user_tx must never closes!");
+ } else {
+ self.timeout_checker_buffer
+ .push((ticket, pending_sync_group_join));
+ }
+ }
+
+ pending_sync_group_join_requests.extend(self.timeout_checker_buffer.drain(..));
+ }
+}
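+
+// Tick cycle sketch: every TEN_SECONDS the runner drains the pending join
+// requests, replies `CloudP2PError::TimedOut` to those older than FIVE_MINUTES,
+// and pushes the rest back through the reusable `timeout_checker_buffer`,
+// avoiding a fresh allocation on every tick.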
+
+async fn connect_to_first_available_client(
+ endpoint: &Endpoint,
+ devices_in_group: &[(devices::PubId, NodeId)],
+) -> Result<Client<QuinnConnection<Service>, Service>, CloudP2PError> {
+ for (device_pub_id, device_connection_id) in devices_in_group {
+ if let Ok(connection) = endpoint
+ .connect(*device_connection_id, CloudP2PALPN::LATEST)
+ .await
+ .map_err(
+ |e| error!(?e, %device_pub_id, "Failed to connect to authorizor device candidate"),
+ ) {
+ debug!(%device_pub_id, "Connected to authorizor device candidate");
+ return Ok(Client::new(RpcClient::new(
+ QuinnConnection::<Service>::from_connection(connection),
+ )));
+ }
+ }
+
+ Err(CloudP2PError::UnableToConnect)
+}
+
+fn setup_server_endpoint(
+ endpoint: Endpoint,
+) -> (
+ RpcServer<Service, QuinnServerEndpoint<Service>>,
+ JoinHandle<()>,
+) {
+ let local_addr = {
+ let (ipv4_addr, maybe_ipv6_addr) = endpoint.bound_sockets();
+ // Trying to give preference to IPv6 addresses because it's 2024
+ maybe_ipv6_addr.unwrap_or(ipv4_addr)
+ };
+
+ let (connections_tx, connections_rx) = flume::bounded(16);
+
+ (
+ RpcServer::new(QuinnServerEndpoint::<Service>::handle_connections(
+ connections_rx,
+ local_addr,
+ )),
+ spawn(async move {
+ while let Some(connecting) = endpoint.accept().await {
+ if let Ok(connection) = connecting.await.map_err(|e| {
+ warn!(?e, "Cloud P2P failed to accept connection");
+ }) {
+ if connections_tx.send_async(connection).await.is_err() {
+ warn!("Connection receiver dropped");
+ break;
+ }
+ }
+ }
+ }),
+ )
+}
diff --git a/core/crates/cloud-services/src/sync/ingest.rs b/core/crates/cloud-services/src/sync/ingest.rs
new file mode 100644
index 000000000..a7dd65af3
--- /dev/null
+++ b/core/crates/cloud-services/src/sync/ingest.rs
@@ -0,0 +1,121 @@
+use crate::Error;
+
+use sd_core_sync::SyncManager;
+
+use sd_actors::{Actor, Stopper};
+
+use std::{
+ future::IntoFuture,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+ },
+};
+
+use futures::FutureExt;
+use futures_concurrency::future::Race;
+use tokio::{
+ sync::Notify,
+ time::{sleep, Instant},
+};
+use tracing::{debug, error};
+
+use super::{ReceiveAndIngestNotifiers, SyncActors, ONE_MINUTE};
+
+/// Responsible for taking sync operations received from the cloud
+/// and applying them to the local database via the sync system's ingest actor.
+pub struct Ingester {
+ sync: SyncManager,
+ notifiers: Arc<ReceiveAndIngestNotifiers>,
+ active: Arc<AtomicBool>,
+ active_notify: Arc<Notify>,
+}
+
+impl Actor for Ingester {
+ const IDENTIFIER: SyncActors = SyncActors::Ingester;
+
+ async fn run(&mut self, stop: Stopper) {
+ enum Race {
+ Notified,
+ Stopped,
+ }
+
+ loop {
+ self.active.store(true, Ordering::Relaxed);
+ self.active_notify.notify_waiters();
+
+ if let Err(e) = self.run_loop_iteration().await {
+ error!(?e, "Error during cloud sync ingester actor iteration");
+ sleep(ONE_MINUTE).await;
+ continue;
+ }
+
+ self.active.store(false, Ordering::Relaxed);
+ self.active_notify.notify_waiters();
+
+ if matches!(
+ (
+ self.notifiers
+ .wait_notification_to_ingest()
+ .map(|()| Race::Notified),
+ stop.into_future().map(|()| Race::Stopped),
+ )
+ .race()
+ .await,
+ Race::Stopped
+ ) {
+ break;
+ }
+ }
+ }
+}
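+
+// Scheduling sketch: the ingester runs one iteration, then parks until either
+// the receiver signals freshly downloaded operations (`Race::Notified`) or
+// shutdown is requested (`Race::Stopped`); iteration errors back off for
+// ONE_MINUTE before retrying.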
+
+impl Ingester {
+ pub const fn new(
+ sync: SyncManager,
+ notifiers: Arc<ReceiveAndIngestNotifiers>,
+ active: Arc<AtomicBool>,
+ active_notify: Arc<Notify>,
+ ) -> Self {
+ Self {
+ sync,
+ notifiers,
+ active,
+ active_notify,
+ }
+ }
+
+ async fn run_loop_iteration(&self) -> Result<(), Error> {
+ let start = Instant::now();
+
+ let operations_to_ingest_count = self
+ .sync
+ .db
+ .cloud_crdt_operation()
+ .count(vec![])
+ .exec()
+ .await
+ .map_err(sd_core_sync::Error::from)?;
+
+ if operations_to_ingest_count == 0 {
+ debug!("Nothing to ingest, early finishing ingester loop");
+ return Ok(());
+ }
+
+ debug!(
+ operations_to_ingest_count,
+ "Starting sync messages cloud ingestion loop"
+ );
+
+ let ingested_count = self.sync.ingest_ops().await?;
+
+ debug!(
+ ingested_count,
+ "Finished sync messages cloud ingestion loop in {:?}",
+ start.elapsed()
+ );
+
+ Ok(())
+ }
+}
diff --git a/core/crates/cloud-services/src/sync/mod.rs b/core/crates/cloud-services/src/sync/mod.rs
new file mode 100644
index 000000000..b694befb4
--- /dev/null
+++ b/core/crates/cloud-services/src/sync/mod.rs
@@ -0,0 +1,136 @@
+use crate::{CloudServices, Error};
+
+use sd_core_sync::SyncManager;
+
+use sd_actors::{ActorsCollection, IntoActor};
+use sd_cloud_schema::sync::groups;
+use sd_crypto::CryptoRng;
+
+use std::{
+ fmt,
+ path::Path,
+ sync::{atomic::AtomicBool, Arc},
+ time::Duration,
+};
+
+use futures_concurrency::future::TryJoin;
+use tokio::sync::Notify;
+
+mod ingest;
+mod receive;
+mod send;
+
+use ingest::Ingester;
+use receive::Receiver;
+use send::Sender;
+
+const ONE_MINUTE: Duration = Duration::from_secs(60);
+
+#[derive(Default)]
+pub struct SyncActorsState {
+ pub send_active: Arc<AtomicBool>,
+ pub receive_active: Arc<AtomicBool>,
+ pub ingest_active: Arc<AtomicBool>,
+ pub state_change_notifier: Arc<Notify>,
+ receiver_and_ingester_notifiers: Arc<ReceiveAndIngestNotifiers>,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, specta::Type)]
+#[specta(rename = "CloudSyncActors")]
+pub enum SyncActors {
+ Ingester,
+ Sender,
+ Receiver,
+}
+
+impl fmt::Display for SyncActors {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Ingester => write!(f, "Cloud Sync Ingester"),
+ Self::Sender => write!(f, "Cloud Sync Sender"),
+ Self::Receiver => write!(f, "Cloud Sync Receiver"),
+ }
+ }
+}
+
+#[derive(Debug, Default)]
+pub struct ReceiveAndIngestNotifiers {
+ ingester: Notify,
+ receiver: Notify,
+}
+
+impl ReceiveAndIngestNotifiers {
+ pub fn notify_receiver(&self) {
+ self.receiver.notify_one();
+ }
+
+ async fn wait_notification_to_receive(&self) {
+ self.receiver.notified().await;
+ }
+
+ fn notify_ingester(&self) {
+ self.ingester.notify_one();
+ }
+
+ async fn wait_notification_to_ingest(&self) {
+ self.ingester.notified().await;
+ }
+}
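+
+// Notification flow sketch: Cloud P2P calls `notify_receiver` when a peer
+// announces new messages; the receiver downloads them and calls
+// `notify_ingester` after persisting each collection, waking the ingester.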
+
+pub async fn declare_actors(
+ data_dir: Box<Path>,
+ cloud_services: Arc<CloudServices>,
+ actors: &ActorsCollection<SyncActors>,
+ actors_state: &SyncActorsState,
+ sync_group_pub_id: groups::PubId,
+ sync: SyncManager,
+ rng: CryptoRng,
+) -> Result<Arc<ReceiveAndIngestNotifiers>, Error> {
+ let (sender, receiver) = (
+ Sender::new(
+ sync_group_pub_id,
+ sync.clone(),
+ Arc::clone(&cloud_services),
+ Arc::clone(&actors_state.send_active),
+ Arc::clone(&actors_state.state_change_notifier),
+ rng,
+ ),
+ Receiver::new(
+ data_dir,
+ sync_group_pub_id,
+ cloud_services.clone(),
+ sync.clone(),
+ Arc::clone(&actors_state.receiver_and_ingester_notifiers),
+ Arc::clone(&actors_state.receive_active),
+ Arc::clone(&actors_state.state_change_notifier),
+ ),
+ )
+ .try_join()
+ .await?;
+
+ let ingester = Ingester::new(
+ sync,
+ Arc::clone(&actors_state.receiver_and_ingester_notifiers),
+ Arc::clone(&actors_state.ingest_active),
+ Arc::clone(&actors_state.state_change_notifier),
+ );
+
+ actors
+ .declare_many_boxed([
+ sender.into_actor(),
+ receiver.into_actor(),
+ ingester.into_actor(),
+ ])
+ .await;
+
+ cloud_services
+ .cloud_p2p()
+ .await?
+ .register_sync_messages_receiver_notifier(
+ sync_group_pub_id,
+ Arc::clone(&actors_state.receiver_and_ingester_notifiers),
+ )
+ .await;
+
+ Ok(Arc::clone(&actors_state.receiver_and_ingester_notifiers))
+}
diff --git a/core/crates/cloud-services/src/sync/receive.rs b/core/crates/cloud-services/src/sync/receive.rs
new file mode 100644
index 000000000..f4db4b4c5
--- /dev/null
+++ b/core/crates/cloud-services/src/sync/receive.rs
@@ -0,0 +1,356 @@
+use crate::{CloudServices, Error, KeyManager};
+
+use sd_cloud_schema::{
+ devices,
+ sync::{
+ groups,
+ messages::{pull, MessagesCollection},
+ },
+ Client, Service,
+};
+use sd_core_sync::{
+ cloud_crdt_op_db, CRDTOperation, CompressedCRDTOperationsPerModel, SyncManager,
+};
+
+use sd_actors::{Actor, Stopper};
+use sd_crypto::{
+ cloud::{OneShotDecryption, SecretKey, StreamDecryption},
+ primitives::{EncryptedBlock, StreamNonce},
+};
+use sd_prisma::prisma::PrismaClient;
+
+use std::{
+ collections::{hash_map::Entry, HashMap},
+ future::IntoFuture,
+ path::Path,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+ },
+};
+
+use chrono::{DateTime, Utc};
+use futures::{FutureExt, StreamExt};
+use futures_concurrency::future::{Race, TryJoin};
+use quic_rpc::transport::quinn::QuinnConnection;
+use serde::{Deserialize, Serialize};
+use tokio::{fs, io, sync::Notify, time::sleep};
+use tracing::{debug, error, instrument, warn};
+use uuid::Uuid;
+
+use super::{ReceiveAndIngestNotifiers, SyncActors, ONE_MINUTE};
+
+const CLOUD_SYNC_DATA_KEEPER_DIRECTORY: &str = "cloud_sync_data_keeper";
+
+/// Responsible for downloading sync operations from the cloud to be processed by the ingester
+pub struct Receiver {
+ keeper: LastTimestampKeeper,
+ sync_group_pub_id: groups::PubId,
+ device_pub_id: devices::PubId,
+ cloud_services: Arc<CloudServices>,
+ cloud_client: Client<QuinnConnection<Service>, Service>,
+ key_manager: Arc<KeyManager>,
+ sync: SyncManager,
+ notifiers: Arc<ReceiveAndIngestNotifiers>,
+ active: Arc<AtomicBool>,
+ active_notifier: Arc<Notify>,
+}
+
+impl Actor for Receiver {
+ const IDENTIFIER: SyncActors = SyncActors::Receiver;
+
+ async fn run(&mut self, stop: Stopper) {
+ enum Race {
+ Continue,
+ Stop,
+ }
+
+ loop {
+ self.active.store(true, Ordering::Relaxed);
+ self.active_notifier.notify_waiters();
+
+ let res = self.run_loop_iteration().await;
+
+ self.active.store(false, Ordering::Relaxed);
+
+ if let Err(e) = res {
+ error!(?e, "Error during cloud sync receiver actor iteration");
+ sleep(ONE_MINUTE).await;
+ continue;
+ }
+
+ self.active_notifier.notify_waiters();
+
+ if matches!(
+ (
+ sleep(ONE_MINUTE).map(|()| Race::Continue),
+ self.notifiers
+ .wait_notification_to_receive()
+ .map(|()| Race::Continue),
+ stop.into_future().map(|()| Race::Stop),
+ )
+ .race()
+ .await,
+ Race::Stop
+ ) {
+ break;
+ }
+ }
+ }
+}
+
+impl Receiver {
+ pub async fn new(
+ data_dir: impl AsRef<Path> + Send,
+ sync_group_pub_id: groups::PubId,
+ cloud_services: Arc<CloudServices>,
+ sync: SyncManager,
+ notifiers: Arc<ReceiveAndIngestNotifiers>,
+ active: Arc<AtomicBool>,
+ active_notify: Arc<Notify>,
+ ) -> Result<Self, Error> {
+ let (keeper, cloud_client, key_manager) = (
+ LastTimestampKeeper::load(data_dir.as_ref(), sync_group_pub_id),
+ cloud_services.client(),
+ cloud_services.key_manager(),
+ )
+ .try_join()
+ .await?;
+
+ Ok(Self {
+ keeper,
+ sync_group_pub_id,
+ device_pub_id: devices::PubId(Uuid::from(&sync.device_pub_id)),
+ cloud_services,
+ cloud_client,
+ key_manager,
+ sync,
+ notifiers,
+ active,
+ active_notifier: active_notify,
+ })
+ }
+
+ async fn run_loop_iteration(&mut self) -> Result<(), Error> {
+ let mut responses_stream = self
+ .cloud_client
+ .sync()
+ .messages()
+ .pull(pull::Request {
+ access_token: self
+ .cloud_services
+ .token_refresher
+ .get_access_token()
+ .await?,
+ group_pub_id: self.sync_group_pub_id,
+ current_device_pub_id: self.device_pub_id,
+ start_time_per_device: self
+ .keeper
+ .timestamps
+ .iter()
+ .map(|(device_pub_id, timestamp)| (*device_pub_id, *timestamp))
+ .collect(),
+ })
+ .await?;
+
+ while let Some(new_messages_res) = responses_stream.next().await {
+ let pull::Response(new_messages) = new_messages_res??;
+ if new_messages.is_empty() {
+ break;
+ }
+
+ self.handle_new_messages(new_messages).await?;
+ }
+
+ debug!("Finished sync messages receiver actor iteration");
+
+ self.keeper.save().await
+ }
+
+ async fn handle_new_messages(
+ &mut self,
+ new_messages: Vec<MessagesCollection>,
+ ) -> Result<(), Error> {
+ debug!(
+ new_messages_collections_count = new_messages.len(),
+ start_time = ?new_messages.first().map(|c| c.start_time),
+ end_time = ?new_messages.first().map(|c| c.end_time),
+ "Handling new sync messages collections",
+ );
+
+ for message in new_messages.into_iter().filter(|message| {
+ if message.original_device_pub_id == self.device_pub_id {
+ warn!("Received sync message from the current device, need to check backend, this is a bug!");
+ false
+ } else {
+ true
+ }
+ }) {
+ debug!(
+ new_messages_count = message.operations_count,
+ start_time = ?message.start_time,
+ end_time = ?message.end_time,
+ "Handling new sync messages",
+ );
+
+ let (device_pub_id, timestamp) = handle_single_message(
+ self.sync_group_pub_id,
+ message,
+ &self.key_manager,
+ &self.sync,
+ )
+ .await?;
+
+ match self.keeper.timestamps.entry(device_pub_id) {
+ Entry::Occupied(mut entry) => {
+ if entry.get() < &timestamp {
+ *entry.get_mut() = timestamp;
+ }
+ }
+
+ Entry::Vacant(entry) => {
+ entry.insert(timestamp);
+ }
+ }
+
+ // To ingest after each sync message collection is received, we MUST download and
+ // store the messages SEQUENTIALLY, otherwise we might ingest messages out of order
+ // due to parallel downloads
+ self.notifiers.notify_ingester();
+ }
+
+ Ok(())
+ }
+}
+
+#[instrument(
+ skip_all,
+ fields(%sync_group_pub_id, %original_device_pub_id, operations_count, ?key_hash, %end_time),
+)]
+async fn handle_single_message(
+ sync_group_pub_id: groups::PubId,
+ MessagesCollection {
+ original_device_pub_id,
+ end_time,
+ operations_count,
+ key_hash,
+ encrypted_messages,
+ ..
+ }: MessagesCollection,
+ key_manager: &KeyManager,
+ sync: &SyncManager,
+) -> Result<(devices::PubId, DateTime<Utc>), Error> {
+ // FIXME(@fogodev): If we don't have the key hash, we need to fetch it from another device in the group if possible
+ let Some(secret_key) = key_manager.get_key(sync_group_pub_id, &key_hash).await else {
+ return Err(Error::MissingKeyHash);
+ };
+
+ debug!(
+ size = encrypted_messages.len(),
+ "Received encrypted sync messages collection"
+ );
+
+ let crdt_ops = decrypt_messages(encrypted_messages, secret_key, original_device_pub_id).await?;
+
+ assert_eq!(
+ crdt_ops.len(),
+ operations_count as usize,
+ "Sync messages count mismatch"
+ );
+
+ write_cloud_ops_to_db(crdt_ops, &sync.db).await?;
+
+ Ok((original_device_pub_id, end_time))
+}
+
+#[instrument(skip(encrypted_messages, secret_key), fields(messages_size = %encrypted_messages.len()), err)]
+async fn decrypt_messages(
+ encrypted_messages: Vec<u8>,
+ secret_key: SecretKey,
+ devices::PubId(device_pub_id): devices::PubId,
+) -> Result<Vec<CRDTOperation>, Error> {
+ let plain_text = if encrypted_messages.len() <= EncryptedBlock::CIPHER_TEXT_SIZE {
+ OneShotDecryption::decrypt(&secret_key, encrypted_messages.as_slice().into())
+ .map_err(Error::Decrypt)?
+ } else {
+ let (nonce, cipher_text) = encrypted_messages.split_at(size_of::<StreamNonce>());
+
+ let mut plain_text = Vec::with_capacity(cipher_text.len());
+
+ StreamDecryption::decrypt(
+ &secret_key,
+ nonce.try_into().expect("we split the correct amount"),
+ cipher_text,
+ &mut plain_text,
+ )
+ .await
+ .map_err(Error::Decrypt)?;
+
+ plain_text
+ };
+
+ rmp_serde::from_slice::<CompressedCRDTOperationsPerModel>(&plain_text)
+ .map(|compressed_ops| compressed_ops.into_ops(device_pub_id))
+ .map_err(Error::DeserializationFailureToPullSyncMessages)
+}
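+
+// Wire format sketch, mirroring the sender: payloads up to one encrypted block
+// are a single one-shot `EncryptedBlock`; anything larger is
+// `[StreamNonce | stream cipher text]`, split and decrypted above.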
+
+#[instrument(skip_all, err)]
+pub async fn write_cloud_ops_to_db(
+ ops: Vec,
+ db: &PrismaClient,
+) -> Result<(), sd_core_sync::Error> {
+ db._batch(
+ ops.into_iter()
+ .map(|op| cloud_crdt_op_db(&op).map(|op| op.to_query(db)))
+ .collect::<Result<Vec<_>, _>>()?,
+ )
+ .await?;
+
+ Ok(())
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+struct LastTimestampKeeper {
+ timestamps: HashMap<devices::PubId, DateTime<Utc>>,
+ file_path: Box<Path>,
+}
+
+impl LastTimestampKeeper {
+ async fn load(data_dir: &Path, sync_group_pub_id: groups::PubId) -> Result<Self, Error> {
+ let cloud_sync_data_directory = data_dir.join(CLOUD_SYNC_DATA_KEEPER_DIRECTORY);
+
+ fs::create_dir_all(&cloud_sync_data_directory)
+ .await
+ .map_err(Error::FailedToCreateTimestampKeepersDirectory)?;
+
+ let file_path = cloud_sync_data_directory
+ .join(format!("{sync_group_pub_id}.bin"))
+ .into_boxed_path();
+
+ match fs::read(&file_path).await {
+ Ok(bytes) => Ok(Self {
+ timestamps: rmp_serde::from_slice(&bytes)
+ .map_err(Error::LastTimestampKeeperDeserialization)?,
+ file_path,
+ }),
+
+ Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(Self {
+ timestamps: HashMap::new(),
+ file_path,
+ }),
+
+ Err(e) => Err(Error::FailedToReadLastTimestampKeeper(e)),
+ }
+ }
+
+ async fn save(&self) -> Result<(), Error> {
+ fs::write(
+ &self.file_path,
+ &rmp_serde::to_vec_named(&self.timestamps)
+ .map_err(Error::LastTimestampKeeperSerialization)?,
+ )
+ .await
+ .map_err(Error::FailedToWriteLastTimestampKeeper)
+ }
+}
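+
+// Persistence sketch: each sync group gets its own `{sync_group_pub_id}.bin`
+// file under `cloud_sync_data_keeper/`, holding an rmp-encoded map of device
+// pub id to the timestamp of the last pulled message, so pulls resume where
+// the previous run stopped.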
diff --git a/core/crates/cloud-services/src/sync/send.rs b/core/crates/cloud-services/src/sync/send.rs
new file mode 100644
index 000000000..c0ab06e88
--- /dev/null
+++ b/core/crates/cloud-services/src/sync/send.rs
@@ -0,0 +1,337 @@
+use crate::{CloudServices, Error, KeyManager};
+
+use sd_core_sync::{CompressedCRDTOperationsPerModelPerDevice, SyncEvent, SyncManager, NTP64};
+
+use sd_actors::{Actor, Stopper};
+use sd_cloud_schema::{
+ devices,
+ error::{ClientSideError, NotFoundError},
+ sync::{groups, messages},
+ Client, Service,
+};
+use sd_crypto::{
+ cloud::{OneShotEncryption, SecretKey, StreamEncryption},
+ primitives::EncryptedBlock,
+ CryptoRng, SeedableRng,
+};
+use sd_utils::{datetime_to_timestamp, timestamp_to_datetime};
+
+use std::{
+ future::IntoFuture,
+ pin::pin,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+ },
+ time::{Duration, UNIX_EPOCH},
+};
+
+use chrono::{DateTime, Utc};
+use futures::{FutureExt, StreamExt, TryStreamExt};
+use futures_concurrency::future::{Race, TryJoin};
+use quic_rpc::transport::quinn::QuinnConnection;
+use tokio::{
+ sync::{broadcast, Notify},
+ time::sleep,
+};
+use tracing::{debug, error};
+use uuid::Uuid;
+
+use super::{SyncActors, ONE_MINUTE};
+
+const TEN_SECONDS: Duration = Duration::from_secs(10);
+
+const MESSAGES_COLLECTION_SIZE: u32 = 10_000;
+
+enum RaceNotifiedOrStopped {
+ Notified,
+ Stopped,
+}
+
+enum LoopStatus {
+ SentMessages,
+ Idle,
+}
+
+type LatestTimestamp = NTP64;
+
+#[derive(Debug)]
+pub struct Sender {
+ sync_group_pub_id: groups::PubId,
+ sync: SyncManager,
+ cloud_services: Arc<CloudServices>,
+ cloud_client: Client<QuinnConnection<Service>, Service>,
+ key_manager: Arc<KeyManager>,
+ is_active: Arc<AtomicBool>,
+ state_notify: Arc<Notify>,
+ rng: CryptoRng,
+ maybe_latest_timestamp: Option,
+}
+
+impl Actor for Sender {
+ const IDENTIFIER: SyncActors = SyncActors::Sender;
+
+ async fn run(&mut self, stop: Stopper) {
+ loop {
+ self.is_active.store(true, Ordering::Relaxed);
+ self.state_notify.notify_waiters();
+
+ let res = self.run_loop_iteration().await;
+
+ self.is_active.store(false, Ordering::Relaxed);
+
+ match res {
+ Ok(LoopStatus::SentMessages) => {
+ if let Ok(cloud_p2p) = self.cloud_services.cloud_p2p().await.map_err(|e| {
+ error!(?e, "Failed to get cloud p2p client on sender actor");
+ }) {
+ cloud_p2p
+ .notify_new_sync_messages(self.sync_group_pub_id)
+ .await;
+ }
+ }
+
+ Ok(LoopStatus::Idle) => {}
+
+ Err(e) => {
+ error!(?e, "Error during cloud sync sender actor iteration");
+ sleep(ONE_MINUTE).await;
+ continue;
+ }
+ }
+
+ self.state_notify.notify_waiters();
+
+ if matches!(
+ (
+ // recreate subscription each time so that existing messages are dropped
+ wait_notification(self.sync.subscribe()),
+ stop.into_future().map(|()| RaceNotifiedOrStopped::Stopped),
+ )
+ .race()
+ .await,
+ RaceNotifiedOrStopped::Stopped
+ ) {
+ break;
+ }
+
+ sleep(TEN_SECONDS).await;
+ }
+ }
+}
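+
+// Send cycle sketch: each iteration drains local CRDT operations in chunks of
+// MESSAGES_COLLECTION_SIZE, then parks until a local sync event arrives or the
+// actor is stopped; the trailing TEN_SECONDS sleep coalesces bursts of events
+// into a single push.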
+
+impl Sender {
+ pub async fn new(
+ sync_group_pub_id: groups::PubId,
+ sync: SyncManager,
+ cloud_services: Arc<CloudServices>,
+ is_active: Arc<AtomicBool>,
+ state_notify: Arc<Notify>,
+ rng: CryptoRng,
+ ) -> Result<Self, Error> {
+ let (cloud_client, key_manager) = (cloud_services.client(), cloud_services.key_manager())
+ .try_join()
+ .await?;
+
+ Ok(Self {
+ sync_group_pub_id,
+ sync,
+ cloud_services,
+ cloud_client,
+ key_manager,
+ is_active,
+ state_notify,
+ rng,
+ maybe_latest_timestamp: None,
+ })
+ }
+
+ async fn run_loop_iteration(&mut self) -> Result<LoopStatus, Error> {
+ debug!("Starting cloud sender actor loop iteration");
+
+ let current_device_pub_id = devices::PubId(Uuid::from(&self.sync.device_pub_id));
+
+ let (key_hash, secret_key) = self
+ .key_manager
+ .get_latest_key(self.sync_group_pub_id)
+ .await
+ .ok_or(Error::MissingSyncGroupKey(self.sync_group_pub_id))?;
+
+ let current_latest_timestamp = self.get_latest_timestamp(current_device_pub_id).await?;
+
+ let mut crdt_ops_stream = pin!(self.sync.stream_device_ops(
+ &self.sync.device_pub_id,
+ MESSAGES_COLLECTION_SIZE,
+ current_latest_timestamp
+ ));
+
+ let mut status = LoopStatus::Idle;
+
+ let mut new_latest_timestamp = current_latest_timestamp;
+
+ debug!(
+ chunk_size = MESSAGES_COLLECTION_SIZE,
+ "Trying to fetch chunk of sync messages from the database"
+ );
+ while let Some(ops_res) = crdt_ops_stream.next().await {
+ let ops = ops_res?;
+
+ let (Some(first), Some(last)) = (ops.first(), ops.last()) else {
+ break;
+ };
+
+ debug!("Got first and last sync messages");
+
+ #[allow(clippy::cast_possible_truncation)]
+ let operations_count = ops.len() as u32;
+
+ debug!(operations_count, "Got chunk of sync messages");
+
+ new_latest_timestamp = last.timestamp;
+
+ let start_time = timestamp_to_datetime(first.timestamp);
+ let end_time = timestamp_to_datetime(last.timestamp);
+
+ // Ignoring this device_pub_id here as we already know it
+ let (_device_pub_id, compressed_ops) =
+ CompressedCRDTOperationsPerModelPerDevice::new_single_device(ops);
+
+ let messages_bytes = rmp_serde::to_vec_named(&compressed_ops)
+ .map_err(Error::SerializationFailureToPushSyncMessages)?;
+
+ let encrypted_messages =
+ encrypt_messages(&secret_key, &mut self.rng, messages_bytes).await?;
+
+ let encrypted_messages_size = encrypted_messages.len();
+
+ debug!(
+ operations_count,
+ encrypted_messages_size, "Sending sync messages to cloud",
+ );
+
+ self.cloud_client
+ .sync()
+ .messages()
+ .push(messages::push::Request {
+ access_token: self
+ .cloud_services
+ .token_refresher
+ .get_access_token()
+ .await?,
+ group_pub_id: self.sync_group_pub_id,
+ device_pub_id: current_device_pub_id,
+ key_hash: key_hash.clone(),
+ operations_count,
+ time_range: (start_time, end_time),
+ encrypted_messages,
+ })
+ .await??;
+
+ debug!(
+ operations_count,
+ encrypted_messages_size, "Sent sync messages to cloud",
+ );
+
+ status = LoopStatus::SentMessages;
+ }
+
+ self.maybe_latest_timestamp = Some(new_latest_timestamp);
+
+ debug!("Finished cloud sender actor loop iteration");
+
+ Ok(status)
+ }
+
+ async fn get_latest_timestamp(
+ &self,
+ current_device_pub_id: devices::PubId,
+ ) -> Result<LatestTimestamp, Error> {
+ if let Some(latest_timestamp) = &self.maybe_latest_timestamp {
+ Ok(*latest_timestamp)
+ } else {
+ let latest_time = match self
+ .cloud_client
+ .sync()
+ .messages()
+ .get_latest_time(messages::get_latest_time::Request {
+ access_token: self
+ .cloud_services
+ .token_refresher
+ .get_access_token()
+ .await?,
+ group_pub_id: self.sync_group_pub_id,
+ kind: messages::get_latest_time::Kind::ForCurrentDevice(current_device_pub_id),
+ })
+ .await?
+ {
+ Ok(messages::get_latest_time::Response {
+ latest_time,
+ latest_device_pub_id,
+ }) => {
+ assert_eq!(latest_device_pub_id, current_device_pub_id);
+ latest_time
+ }
+
+ Err(sd_cloud_schema::Error::Client(ClientSideError::NotFound(
+ NotFoundError::LatestSyncMessageTime,
+ ))) => DateTime::<Utc>::from(UNIX_EPOCH),
+
+ Err(e) => return Err(e.into()),
+ };
+
+ Ok(datetime_to_timestamp(latest_time))
+ }
+ }
+}
+
+async fn encrypt_messages(
+ secret_key: &SecretKey,
+ rng: &mut CryptoRng,
+ messages_bytes: Vec<u8>,
+ ) -> Result<Vec<u8>, Error> {
+ if messages_bytes.len() <= EncryptedBlock::PLAIN_TEXT_SIZE {
+ let mut nonce_and_cipher_text = Vec::with_capacity(OneShotEncryption::cipher_text_size(
+ secret_key,
+ messages_bytes.len(),
+ ));
+
+ let EncryptedBlock { nonce, cipher_text } =
+ OneShotEncryption::encrypt(secret_key, messages_bytes.as_slice(), rng)
+ .map_err(Error::Encrypt)?;
+
+ nonce_and_cipher_text.extend_from_slice(nonce.as_slice());
+ nonce_and_cipher_text.extend(&cipher_text);
+
+ Ok(nonce_and_cipher_text)
+ } else {
+ let mut rng = CryptoRng::from_seed(rng.generate_fixed());
+ let mut nonce_and_cipher_text = Vec::with_capacity(StreamEncryption::cipher_text_size(
+ secret_key,
+ messages_bytes.len(),
+ ));
+
+ let (nonce, cipher_stream) =
+ StreamEncryption::encrypt(secret_key, messages_bytes.as_slice(), &mut rng);
+
+ nonce_and_cipher_text.extend_from_slice(nonce.as_slice());
+
+ let mut cipher_stream = pin!(cipher_stream);
+
+ while let Some(ciphered_chunk) = cipher_stream.try_next().await.map_err(Error::Encrypt)? {
+ nonce_and_cipher_text.extend(ciphered_chunk);
+ }
+
+ Ok(nonce_and_cipher_text)
+ }
+}
+
+async fn wait_notification(mut rx: broadcast::Receiver<SyncEvent>) -> RaceNotifiedOrStopped {
+ // wait until Created message comes in
+ loop {
+ if matches!(rx.recv().await, Ok(SyncEvent::Created)) {
+ break;
+ };
+ }
+
+ RaceNotifiedOrStopped::Notified
+}
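One detail of `encrypt_messages` worth calling out: both the one-shot and the streaming paths emit the nonce first and the ciphertext immediately after it, so a consumer can recover both parts without any extra framing metadata. A consumer-side sketch; `NONCE_SIZE` here is a placeholder, since the actual nonce length is dictated by the sd_crypto primitives used above:

    // Sketch: split the nonce-prefixed payload produced by `encrypt_messages`.
    // NONCE_SIZE is a stand-in; the real length comes from the sd_crypto
    // primitives (EncryptedBlock / the stream cipher) used in the code above.
    const NONCE_SIZE: usize = 24;

    fn split_nonce_and_cipher_text(payload: &[u8]) -> Option<(&[u8], &[u8])> {
        // Reject payloads too short to hold a nonce instead of panicking.
        if payload.len() < NONCE_SIZE {
            return None;
        }
        Some(payload.split_at(NONCE_SIZE))
    }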
diff --git a/core/crates/cloud-services/src/token_refresher.rs b/core/crates/cloud-services/src/token_refresher.rs
new file mode 100644
index 000000000..ae11e15db
--- /dev/null
+++ b/core/crates/cloud-services/src/token_refresher.rs
@@ -0,0 +1,468 @@
+use sd_cloud_schema::auth::{AccessToken, RefreshToken};
+
+use std::{pin::pin, time::Duration};
+
+use base64::prelude::{Engine, BASE64_URL_SAFE_NO_PAD};
+use chrono::{DateTime, Utc};
+use futures::StreamExt;
+use futures_concurrency::stream::Merge;
+use reqwest::Url;
+use reqwest_middleware::{reqwest::header, ClientWithMiddleware};
+use tokio::{
+ spawn,
+ sync::oneshot,
+ time::{interval, sleep, MissedTickBehavior},
+};
+use tokio_stream::wrappers::IntervalStream;
+use tracing::{error, warn};
+
+use super::{Error, GetTokenError};
+
+const ONE_MINUTE: Duration = Duration::from_secs(60);
+const TEN_SECONDS: Duration = Duration::from_secs(10);
+
+enum Message {
+ Init(
+ (
+ AccessToken,
+ RefreshToken,
+ oneshot::Sender<Result<(), Error>>,
+ ),
+ ),
+ CheckInitialization(oneshot::Sender<Result<(), GetTokenError>>),
+ RequestToken(oneshot::Sender<Result<AccessToken, GetTokenError>>),
+ RefreshTime,
+ Tick,
+}
+
+#[derive(Debug, Clone)]
+pub struct TokenRefresher {
+ tx: flume::Sender<Message>,
+}
+
+impl TokenRefresher {
+ pub(crate) fn new(http_client: ClientWithMiddleware, auth_server_url: Url) -> Self {
+ let (tx, rx) = flume::bounded(8);
+
+ spawn(async move {
+ let refresh_url = auth_server_url
+ .join("/api/auth/session/refresh")
+ .expect("hardcoded refresh url path");
+
+ while let Err(e) = spawn(Runner::run(
+ http_client.clone(),
+ refresh_url.clone(),
+ rx.clone(),
+ ))
+ .await
+ {
+ if e.is_panic() {
+ if let Some(msg) = e.into_panic().downcast_ref::<&str>() {
+ error!(?msg, "Panic in request handler!");
+ } else {
+ error!("Some unknown panic in request handler!");
+ }
+ }
+ }
+ });
+
+ Self { tx }
+ }
+
+ pub async fn init(
+ &self,
+ access_token: AccessToken,
+ refresh_token: RefreshToken,
+ ) -> Result<(), Error> {
+ let (tx, rx) = oneshot::channel();
+ self.tx
+ .send_async(Message::Init((access_token, refresh_token, tx)))
+ .await
+ .expect("Token refresher channel closed");
+
+ rx.await.expect("Token refresher channel closed")
+ }
+
+ pub async fn check_initialization(&self) -> Result<(), GetTokenError> {
+ let (tx, rx) = oneshot::channel();
+ self.tx
+ .send_async(Message::CheckInitialization(tx))
+ .await
+ .expect("Token refresher channel closed");
+
+ rx.await.expect("Token refresher channel closed")
+ }
+
+ pub async fn get_access_token(&self) -> Result<AccessToken, GetTokenError> {
+ let (tx, rx) = oneshot::channel();
+ self.tx
+ .send_async(Message::RequestToken(tx))
+ .await
+ .expect("Token refresher channel closed");
+
+ rx.await.expect("Token refresher channel closed")
+ }
+}
+
+struct Runner {
+ initialized: bool,
+ http_client: ClientWithMiddleware,
+ refresh_url: Url,
+ current_token: Option<AccessToken>,
+ current_refresh_token: Option<RefreshToken>,
+ token_decoding_buffer: Vec<u8>,
+ refresh_tx: flume::Sender<Message>,
+}
+
+impl Runner {
+ async fn run(
+ http_client: ClientWithMiddleware,
+ refresh_url: Url,
+ msgs_rx: flume::Receiver<Message>,
+ ) {
+ let (refresh_tx, refresh_rx) = flume::bounded(1);
+
+ let mut ticker = interval(TEN_SECONDS);
+ ticker.set_missed_tick_behavior(MissedTickBehavior::Skip);
+
+ let mut msg_stream = pin!((
+ msgs_rx.into_stream(),
+ refresh_rx.into_stream(),
+ IntervalStream::new(ticker).map(|_| Message::Tick)
+ )
+ .merge());
+
+ let mut runner = Self {
+ initialized: false,
+ http_client,
+ refresh_url,
+ current_token: None,
+ current_refresh_token: None,
+ token_decoding_buffer: Vec::new(),
+ refresh_tx,
+ };
+
+ while let Some(msg) = msg_stream.next().await {
+ match msg {
+ Message::Init((access_token, refresh_token, ack)) => {
+ if ack
+ .send(runner.init(access_token, refresh_token).await)
+ .is_err()
+ {
+ error!("Failed to send init token refresher response, receiver dropped;");
+ }
+ }
+
+ Message::CheckInitialization(ack) => runner.check_initialization(ack),
+
+ Message::RequestToken(ack) => runner.reply_token(ack),
+
+ Message::RefreshTime => {
+ if let Err(e) = runner.refresh().await {
+ error!(?e, "Failed to refresh token: {e}");
+ }
+ }
+
+ Message::Tick => runner.tick().await,
+ }
+ }
+ }
+
+ async fn init(
+ &mut self,
+ access_token: AccessToken,
+ refresh_token: RefreshToken,
+ ) -> Result<(), Error> {
+ let access_token_duration =
+ Self::extract_access_token_duration(&mut self.token_decoding_buffer, &access_token)?;
+
+ self.initialized = true;
+ self.current_token = Some(access_token);
+ self.current_refresh_token = Some(refresh_token);
+
+ // If the token has an expiration smaller than a minute, we need to refresh it immediately.
+ if access_token_duration < ONE_MINUTE {
+ self.refresh_tx
+ .send_async(Message::RefreshTime)
+ .await
+ .expect("refresh channel never closes");
+ } else {
+ // This task will be mostly parked, waiting on a sleep
+ spawn(Self::schedule_refresh(
+ self.refresh_tx.clone(),
+ access_token_duration - ONE_MINUTE,
+ ));
+ }
+
+ Ok(())
+ }
+
+ fn reply_token(&self, ack: oneshot::Sender<Result<AccessToken, GetTokenError>>) {
+ if ack
+ .send(self.current_token.clone().ok_or({
+ if self.initialized {
+ GetTokenError::FailedToRefresh
+ } else {
+ GetTokenError::RefresherNotInitialized
+ }
+ }))
+ .is_err()
+ {
+ warn!("Failed to send access token response, receiver dropped;");
+ }
+ }
+
+ async fn refresh(&mut self) -> Result<(), Error> {
+ let RefreshToken(refresh_token) = self
+ .current_refresh_token
+ .clone()
+ .expect("refresh token is set otherwise we wouldn't be here");
+
+ let response = self
+ .http_client
+ .post(self.refresh_url.clone())
+ .header("rid", "session")
+ .header(header::AUTHORIZATION, format!("Bearer {refresh_token}"))
+ .send()
+ .await
+ .map_err(Error::RefreshTokenRequest)?
+ .error_for_status()
+ .map_err(Error::AuthServerError)?;
+
+ if let (Some(access_token), Some(refresh_token)) = (
+ response.headers().get("st-access-token"),
+ response.headers().get("st-refresh-token"),
+ ) {
+ // Only set values if we can parse both of them to strings
+ let (access_token, refresh_token) = (
+ Self::token_header_value_to_string(access_token)?,
+ Self::token_header_value_to_string(refresh_token)?,
+ );
+
+ self.current_token = Some(AccessToken(access_token));
+ self.current_refresh_token = Some(RefreshToken(refresh_token));
+ } else {
+ return Err(Error::MissingTokensOnRefreshResponse);
+ }
+
+ Ok(())
+ }
+
+ fn extract_access_token_duration(
+ token_decoding_buffer: &mut Vec<u8>,
+ AccessToken(token): &AccessToken,
+ ) -> Result<Duration, Error> {
+ #[derive(serde::Deserialize)]
+ struct Token {
+ #[serde(with = "chrono::serde::ts_seconds")]
+ exp: DateTime,
+ }
+
+ token_decoding_buffer.clear();
+
+ // The format of a JWT token is simple:
+ // ".."
+ BASE64_URL_SAFE_NO_PAD.decode_vec(
+ token.split('.').nth(1).ok_or(Error::MissingClaims)?,
+ token_decoding_buffer,
+ )?;
+
+ serde_json::from_slice::<Token>(token_decoding_buffer)?
+ .exp
+ .signed_duration_since(Utc::now())
+ .to_std()
+ .map_err(|_| Error::TokenExpired)
+ }
+
+ async fn schedule_refresh(refresh_tx: flume::Sender<Message>, wait_time: Duration) {
+ sleep(wait_time).await;
+ refresh_tx
+ .send_async(Message::RefreshTime)
+ .await
+ .expect("Refresh channel closed");
+ }
+
+ fn token_header_value_to_string(token: &header::HeaderValue) -> Result<String, Error> {
+ token.to_str().map(str::to_string).map_err(Into::into)
+ }
+
+ fn check_initialization(&self, ack: oneshot::Sender<Result<(), GetTokenError>>) {
+ if ack
+ .send(if self.initialized {
+ Ok(())
+ } else {
+ Err(GetTokenError::RefresherNotInitialized)
+ })
+ .is_err()
+ {
+ warn!("Failed to send access token response, receiver dropped;");
+ }
+ }
+
+ /// This method is a safeguard to make sure we try to keep refreshing tokens even if they
+ /// already expired, as the refresh token has a bigger expiration than the access token.
+ async fn tick(&mut self) {
+ if let Some(access_token) = &self.current_token {
+ if matches!(
+ Self::extract_access_token_duration(&mut self.token_decoding_buffer, access_token),
+ Err(Error::TokenExpired)
+ ) {
+ if let Err(e) = self.refresh().await {
+ error!(?e, "Failed to refresh expired token on tick method;");
+ }
+ }
+ }
+ }
+}
+
+/// These tests are here for documentation purposes only; they are not meant to be run.
+/// They're just examples of how to sign up/sign in and refresh tokens.
+#[cfg(test)]
+mod tests {
+ use reqwest::header;
+ use reqwest_middleware::ClientBuilder;
+ use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware};
+ use serde_json::json;
+
+ use crate::AUTH_SERVER_URL;
+
+ use super::*;
+
+ async fn get_tokens() -> (AccessToken, RefreshToken) {
+ let client = reqwest::Client::new();
+
+ let req_body = json!({
+ "formFields": [
+ {
+ "id": "email",
+ "value": "johndoe@gmail.com"
+ },
+ {
+ "id": "password",
+ "value": "testPass123"
+ }
+ ]
+ });
+
+ let response = client
+ .post(format!("{AUTH_SERVER_URL}/api/auth/public/signup"))
+ .header("rid", "emailpassword")
+ .header("st-auth-mode", "header")
+ .json(&req_body)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(response.status(), 200);
+
+ if let (Some(access_token), Some(refresh_token)) = (
+ response.headers().get("st-access-token"),
+ response.headers().get("st-refresh-token"),
+ ) {
+ (
+ AccessToken(access_token.to_str().unwrap().to_string()),
+ RefreshToken(refresh_token.to_str().unwrap().to_string()),
+ )
+ } else {
+ let response = client
+ .post(format!("{AUTH_SERVER_URL}/api/auth/public/signin"))
+ .header("rid", "emailpassword")
+ .header("st-auth-mode", "header")
+ .json(&req_body)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(response.status(), 200);
+
+ (
+ AccessToken(
+ response
+ .headers()
+ .get("st-access-token")
+ .unwrap()
+ .to_str()
+ .unwrap()
+ .to_string(),
+ ),
+ RefreshToken(
+ response
+ .headers()
+ .get("st-refresh-token")
+ .unwrap()
+ .to_str()
+ .unwrap()
+ .to_string(),
+ ),
+ )
+ }
+ }
+
+ #[ignore = "Documentation only"]
+ #[tokio::test]
+ async fn test_refresh_token() {
+ let (AccessToken(access_token), RefreshToken(refresh_token)) = get_tokens().await;
+
+ let client = reqwest::Client::new();
+ let response = client
+ .post(format!("{AUTH_SERVER_URL}/api/auth/session/refresh"))
+ .header("rid", "session")
+ .header(header::AUTHORIZATION, format!("Bearer {refresh_token}"))
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(response.status(), 200);
+
+ assert_ne!(
+ response
+ .headers()
+ .get("st-access-token")
+ .unwrap()
+ .to_str()
+ .unwrap(),
+ access_token.as_str()
+ );
+
+ assert_ne!(
+ response
+ .headers()
+ .get("st-refresh-token")
+ .unwrap()
+ .to_str()
+ .unwrap(),
+ refresh_token.as_str()
+ );
+ }
+
+ #[ignore = "Needs an actual SuperTokens auth server running"]
+ #[tokio::test]
+ async fn test_refresher_runner() {
+ let http_client_builder = reqwest::Client::builder().timeout(Duration::from_secs(3));
+
+ let http_client = ClientBuilder::new(http_client_builder.build().unwrap())
+ .with(RetryTransientMiddleware::new_with_policy(
+ ExponentialBackoff::builder().build_with_max_retries(3),
+ ))
+ .build();
+
+ let (refresh_tx, _refresh_rx) = flume::bounded(1);
+
+ let mut runner = Runner {
+ initialized: false,
+ http_client,
+ refresh_url: Url::parse(&format!("{AUTH_SERVER_URL}/api/auth/session/refresh"))
+ .unwrap(),
+ current_token: None,
+ current_refresh_token: None,
+ token_decoding_buffer: Vec::new(),
+ refresh_tx,
+ };
+
+ let (access_token, refresh_token) = get_tokens().await;
+
+ runner.init(access_token, refresh_token).await.unwrap();
+
+ runner.refresh().await.unwrap();
+ }
+}
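The refresher's scheduling hinges on `extract_access_token_duration`, which reads the `exp` claim straight out of the JWT instead of asking the server. A standalone sketch of the same decoding idea, using the same base64 and serde_json crates as the code above (note this deliberately skips signature validation, which is acceptable here only because the token comes directly from the trusted auth server):

    use base64::prelude::{Engine, BASE64_URL_SAFE_NO_PAD};

    // Sketch of the claim decoding in `extract_access_token_duration`: take
    // the middle (claims) segment of a JWT, base64url-decode it, and read the
    // `exp` field as a UNIX timestamp in seconds.
    fn jwt_exp_seconds(token: &str) -> Option<i64> {
        // A JWT is "<header>.<claims>.<signature>"; claims live in segment 1.
        let claims_b64 = token.split('.').nth(1)?;
        let claims = BASE64_URL_SAFE_NO_PAD.decode(claims_b64).ok()?;
        let json: serde_json::Value = serde_json::from_slice(&claims).ok()?;
        json.get("exp")?.as_i64()
    }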
diff --git a/core/crates/file-path-helper/src/isolated_file_path_data.rs b/core/crates/file-path-helper/src/isolated_file_path_data.rs
index 3e89cce0f..fe83bbee9 100644
--- a/core/crates/file-path-helper/src/isolated_file_path_data.rs
+++ b/core/crates/file-path-helper/src/isolated_file_path_data.rs
@@ -2,7 +2,7 @@ use sd_core_prisma_helpers::{
file_path_for_file_identifier, file_path_for_media_processor, file_path_for_object_validator,
file_path_to_full_path, file_path_to_handle_custom_uri, file_path_to_handle_p2p_serve_file,
file_path_to_isolate, file_path_to_isolate_with_id, file_path_to_isolate_with_pub_id,
- file_path_walker, file_path_with_object,
+ file_path_walker, file_path_watcher_remove, file_path_with_object,
};
use sd_prisma::prisma::{file_path, location};
@@ -506,7 +506,8 @@ impl_from_db!(
file_path_to_isolate_with_pub_id,
file_path_walker,
file_path_to_isolate_with_id,
- file_path_with_object
+ file_path_with_object,
+ file_path_watcher_remove
);
impl_from_db_without_location_id!(
diff --git a/core/crates/heavy-lifting/src/file_identifier/job.rs b/core/crates/heavy-lifting/src/file_identifier/job.rs
index a90c2ea6a..dc2d6866c 100644
--- a/core/crates/heavy-lifting/src/file_identifier/job.rs
+++ b/core/crates/heavy-lifting/src/file_identifier/job.rs
@@ -14,7 +14,11 @@ use crate::{
use sd_core_file_path_helper::IsolatedFilePathData;
use sd_core_prisma_helpers::{file_path_for_file_identifier, CasId};
-use sd_prisma::prisma::{file_path, location, SortOrder};
+use sd_prisma::{
+ prisma::{device, file_path, location, SortOrder},
+ prisma_sync,
+};
+use sd_sync::{sync_db_not_null_entry, OperationFactory};
use sd_task_system::{
AnyTaskOutput, IntoTask, SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId,
TaskOutput, TaskStatus,
@@ -128,14 +132,14 @@ impl Job for FileIdentifier {
match task_kind {
TaskKind::Identifier => tasks::Identifier::deserialize(
&task_bytes,
- (Arc::clone(ctx.db()), Arc::clone(ctx.sync())),
+ (Arc::clone(ctx.db()), ctx.sync().clone()),
)
.await
.map(IntoTask::into_task),
TaskKind::ObjectProcessor => tasks::ObjectProcessor::deserialize(
&task_bytes,
- (Arc::clone(ctx.db()), Arc::clone(ctx.sync())),
+ (Arc::clone(ctx.db()), ctx.sync().clone()),
)
.await
.map(IntoTask::into_task),
@@ -173,8 +177,21 @@ impl Job for FileIdentifier {
) -> Result<ReturnStatus, Error> {
let mut pending_running_tasks = FuturesUnordered::new();
+ let device_pub_id = &ctx.sync().device_pub_id;
+ let device_id = ctx
+ .db()
+ .device()
+ .find_unique(device::pub_id::equals(device_pub_id.to_db()))
+ .exec()
+ .await
+ .map_err(file_identifier::Error::from)?
+ .ok_or(file_identifier::Error::DeviceNotFound(
+ device_pub_id.clone(),
+ ))?
+ .id;
+
match self
- .init_or_resume(&mut pending_running_tasks, &ctx, &dispatcher)
+ .init_or_resume(&mut pending_running_tasks, &ctx, device_id, &dispatcher)
.await
{
Ok(()) => { /* Everything is awesome! */ }
@@ -201,7 +218,7 @@ impl Job for FileIdentifier {
match task {
Ok(TaskStatus::Done((task_id, TaskOutput::Out(out)))) => {
match self
- .process_task_output(task_id, out, &ctx, &dispatcher)
+ .process_task_output(task_id, out, &ctx, device_id, &dispatcher)
.await
{
Ok(tasks) => pending_running_tasks.extend(tasks),
@@ -254,15 +271,25 @@ impl Job for FileIdentifier {
..
} = self;
- ctx.db()
- .location()
- .update(
- location::id::equals(location.id),
- vec![location::scan_state::set(
- LocationScanState::FilesIdentified as i32,
- )],
+ let (sync_param, db_param) = sync_db_not_null_entry!(
+ LocationScanState::FilesIdentified as i32,
+ location::scan_state
+ );
+
+ ctx.sync()
+ .write_op(
+ ctx.db(),
+ ctx.sync().shared_update(
+ prisma_sync::location::SyncId {
+ pub_id: location.pub_id.clone(),
+ },
+ [sync_param],
+ ),
+ ctx.db()
+ .location()
+ .update(location::id::equals(location.id), vec![db_param])
+ .select(location::select!({ id })),
)
- .exec()
.await
.map_err(file_identifier::Error::from)?;
@@ -302,6 +329,7 @@ impl FileIdentifier {
&mut self,
pending_running_tasks: &mut FuturesUnordered<TaskHandle<Error>>,
ctx: &impl JobContext,
+ device_id: device::id::Type,
dispatcher: &JobTaskDispatcher,
) -> Result<(), JobErrorOrDispatcherError<file_identifier::Error>> {
// if we don't have any pending task, then this is a fresh job
@@ -335,6 +363,7 @@ impl FileIdentifier {
.as_ref()
.unwrap_or(&location_root_iso_file_path),
ctx,
+ device_id,
dispatcher,
pending_running_tasks,
)
@@ -345,8 +374,9 @@ impl FileIdentifier {
self.last_orphan_file_path_id = None;
self.dispatch_deep_identifier_tasks(
- &maybe_sub_iso_file_path,
+ maybe_sub_iso_file_path.as_ref(),
ctx,
+ device_id,
dispatcher,
pending_running_tasks,
)
@@ -378,6 +408,7 @@ impl FileIdentifier {
.as_ref()
.unwrap_or(&location_root_iso_file_path),
ctx,
+ device_id,
dispatcher,
pending_running_tasks,
)
@@ -388,8 +419,9 @@ impl FileIdentifier {
self.last_orphan_file_path_id = None;
self.dispatch_deep_identifier_tasks(
- &maybe_sub_iso_file_path,
+ maybe_sub_iso_file_path.as_ref(),
ctx,
+ device_id,
dispatcher,
pending_running_tasks,
)
@@ -401,8 +433,9 @@ impl FileIdentifier {
Phase::SearchingOrphans => {
self.dispatch_deep_identifier_tasks(
- &maybe_sub_iso_file_path,
+ maybe_sub_iso_file_path.as_ref(),
ctx,
+ device_id,
dispatcher,
pending_running_tasks,
)
@@ -447,6 +480,7 @@ impl FileIdentifier {
task_id: TaskId,
any_task_output: Box<dyn AnyTaskOutput>,
ctx: &impl JobContext,
+ device_id: device::id::Type,
dispatcher: &JobTaskDispatcher,
) -> Result<Vec<TaskHandle<Error>>, DispatcherError> {
if any_task_output.is::<identifier::Output>() {
@@ -457,6 +491,7 @@ impl FileIdentifier {
.downcast::<identifier::Output>()
.expect("just checked"),
ctx,
+ device_id,
dispatcher,
)
.await;
@@ -501,6 +536,7 @@ impl FileIdentifier {
errors,
}: identifier::Output,
ctx: &impl JobContext,
+ device_id: device::id::Type,
dispatcher: &JobTaskDispatcher,
) -> Result<Vec<TaskHandle<Error>>, DispatcherError> {
self.metadata.mean_extract_metadata_time += extract_metadata_time;
@@ -548,6 +584,7 @@ impl FileIdentifier {
let (tasks_count, res) = match dispatch_object_processor_tasks(
self.file_paths_accumulator.drain(),
ctx,
+ device_id,
dispatcher,
false,
)
@@ -636,6 +673,7 @@ impl FileIdentifier {
&mut self,
sub_iso_file_path: &IsolatedFilePathData<'static>,
ctx: &impl JobContext,
+ device_id: device::id::Type,
dispatcher: &JobTaskDispatcher,
pending_running_tasks: &FuturesUnordered<TaskHandle<Error>>,
) -> Result<(), JobErrorOrDispatcherError<file_identifier::Error>> {
@@ -702,7 +740,8 @@ impl FileIdentifier {
orphan_paths,
true,
Arc::clone(ctx.db()),
- Arc::clone(ctx.sync()),
+ ctx.sync().clone(),
+ device_id,
))
.await?,
);
@@ -713,8 +752,9 @@ impl FileIdentifier {
async fn dispatch_deep_identifier_tasks(
&mut self,
- maybe_sub_iso_file_path: &Option<IsolatedFilePathData<'static>>,
+ maybe_sub_iso_file_path: Option<&IsolatedFilePathData<'static>>,
ctx: &impl JobContext,
+ device_id: device::id::Type,
dispatcher: &JobTaskDispatcher,
pending_running_tasks: &FuturesUnordered<TaskHandle<Error>>,
) -> Result<(), JobErrorOrDispatcherError<file_identifier::Error>> {
@@ -785,7 +825,8 @@ impl FileIdentifier {
orphan_paths,
false,
Arc::clone(ctx.db()),
- Arc::clone(ctx.sync()),
+ ctx.sync().clone(),
+ device_id,
))
.await?,
);
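This job (and the indexer below) now opens with the same resolve-or-fail lookup: fetch the local device row by its public id and surface `DeviceNotFound` when it is absent, so every task receives a plain integer `device_id` to stamp onto its sync entries. The shape of that lookup, sketched with a `HashMap` standing in for the Prisma `device` table:

    use std::collections::HashMap;

    // Sketch of the device-id resolution repeated at the top of these jobs.
    // A HashMap stands in for the Prisma `device` table here.
    fn resolve_device_id(
        devices_by_pub_id: &HashMap<Vec<u8>, i32>,
        device_pub_id: &[u8],
    ) -> Result<i32, String> {
        devices_by_pub_id
            .get(device_pub_id)
            .copied()
            .ok_or_else(|| format!("device not found: {device_pub_id:?}"))
    }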
diff --git a/core/crates/heavy-lifting/src/file_identifier/mod.rs b/core/crates/heavy-lifting/src/file_identifier/mod.rs
index 9d7d2833a..f777c118d 100644
--- a/core/crates/heavy-lifting/src/file_identifier/mod.rs
+++ b/core/crates/heavy-lifting/src/file_identifier/mod.rs
@@ -2,9 +2,10 @@ use crate::{utils::sub_path, OuterContext};
use sd_core_file_path_helper::{FilePathError, IsolatedFilePathData};
use sd_core_prisma_helpers::CasId;
+use sd_core_sync::DevicePubId;
use sd_file_ext::{extensions::Extension, kind::ObjectKind};
-use sd_prisma::prisma::{file_path, location};
+use sd_prisma::prisma::{device, file_path, location};
use sd_task_system::{TaskDispatcher, TaskHandle};
use sd_utils::{db::MissingFieldError, error::FileIOError};
@@ -41,6 +42,8 @@ const CHUNK_SIZE: usize = 100;
#[derive(thiserror::Error, Debug)]
pub enum Error {
+ #[error("device not found: ,
- maybe_sub_iso_file_path: &Option>,
+ maybe_sub_iso_file_path: Option<&IsolatedFilePathData<'_>>,
) -> Vec {
sd_utils::chain_optional_iter(
[
@@ -197,6 +200,7 @@ fn orphan_path_filters_deep(
async fn dispatch_object_processor_tasks<Iter, Dispatcher>(
file_paths_by_cas_id: Iter,
ctx: &impl OuterContext,
+ device_id: device::id::Type,
dispatcher: &Dispatcher,
with_priority: bool,
) -> Result<Vec<TaskHandle<Error>>, Dispatcher::DispatchError>
@@ -217,7 +221,8 @@ where
.dispatch(tasks::ObjectProcessor::new(
HashMap::from([(cas_id, objects_to_create_or_link)]),
Arc::clone(ctx.db()),
- Arc::clone(ctx.sync()),
+ ctx.sync().clone(),
+ device_id,
with_priority,
))
.await?,
@@ -239,7 +244,8 @@ where
.dispatch(tasks::ObjectProcessor::new(
mem::take(&mut current_batch),
Arc::clone(ctx.db()),
- Arc::clone(ctx.sync()),
+ ctx.sync().clone(),
+ device_id,
with_priority,
))
.await?,
@@ -256,7 +262,8 @@ where
.dispatch(tasks::ObjectProcessor::new(
current_batch,
Arc::clone(ctx.db()),
- Arc::clone(ctx.sync()),
+ ctx.sync().clone(),
+ device_id,
with_priority,
))
.await?,
diff --git a/core/crates/heavy-lifting/src/file_identifier/shallow.rs b/core/crates/heavy-lifting/src/file_identifier/shallow.rs
index cd165867d..4c00882da 100644
--- a/core/crates/heavy-lifting/src/file_identifier/shallow.rs
+++ b/core/crates/heavy-lifting/src/file_identifier/shallow.rs
@@ -6,7 +6,7 @@ use crate::{
use sd_core_file_path_helper::IsolatedFilePathData;
use sd_core_prisma_helpers::file_path_for_file_identifier;
-use sd_prisma::prisma::{file_path, location, SortOrder};
+use sd_prisma::prisma::{device, file_path, location, SortOrder};
use sd_task_system::{
BaseTaskDispatcher, CancelTaskOnDrop, TaskDispatcher, TaskHandle, TaskOutput, TaskStatus,
};
@@ -66,6 +66,19 @@ pub async fn shallow(
Ok,
)?;
+ let device_pub_id = &ctx.sync().device_pub_id;
+ let device_id = ctx
+ .db()
+ .device()
+ .find_unique(device::pub_id::equals(device_pub_id.to_db()))
+ .exec()
+ .await
+ .map_err(file_identifier::Error::from)?
+ .ok_or(file_identifier::Error::DeviceNotFound(
+ device_pub_id.clone(),
+ ))?
+ .id;
+
let mut orphans_count = 0;
let mut last_orphan_file_path_id = None;
@@ -103,7 +116,8 @@ pub async fn shallow(
orphan_paths,
true,
Arc::clone(ctx.db()),
- Arc::clone(ctx.sync()),
+ ctx.sync().clone(),
+ device_id,
))
.await
else {
@@ -119,13 +133,14 @@ pub async fn shallow(
return Ok(vec![]);
}
- process_tasks(identifier_tasks, dispatcher, ctx).await
+ process_tasks(identifier_tasks, dispatcher, ctx, device_id).await
}
async fn process_tasks(
identifier_tasks: Vec<TaskHandle<Error>>,
dispatcher: &BaseTaskDispatcher<Error>,
ctx: &impl OuterContext,
+ device_id: device::id::Type,
) -> Result<Vec<NonCriticalError>, Error> {
let total_identifier_tasks = identifier_tasks.len();
@@ -169,6 +184,7 @@ async fn process_tasks(
let Ok(tasks) = dispatch_object_processor_tasks(
file_paths_accumulator.drain(),
ctx,
+ device_id,
dispatcher,
true,
)
diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs b/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs
index 11fc8a753..125a72713 100644
--- a/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs
+++ b/core/crates/heavy-lifting/src/file_identifier/tasks/identifier.rs
@@ -5,18 +5,18 @@ use crate::{
use sd_core_file_path_helper::IsolatedFilePathData;
use sd_core_prisma_helpers::{file_path_for_file_identifier, CasId, FilePathPubId};
-use sd_core_sync::Manager as SyncManager;
+use sd_core_sync::SyncManager;
use sd_file_ext::kind::ObjectKind;
use sd_prisma::{
- prisma::{file_path, location, PrismaClient},
+ prisma::{device, file_path, location, PrismaClient},
prisma_sync,
};
-use sd_sync::OperationFactory;
+use sd_sync::{sync_db_entry, OperationFactory};
use sd_task_system::{
ExecStatus, Interrupter, InterruptionKind, IntoAnyTaskOutput, SerializableTask, Task, TaskId,
};
-use sd_utils::{error::FileIOError, msgpack};
+use sd_utils::error::FileIOError;
use std::{
collections::HashMap, convert::identity, future::IntoFuture, mem, path::PathBuf, pin::pin,
@@ -64,6 +64,7 @@ pub struct Identifier {
file_paths_by_id: HashMap<FilePathPubId, file_path_for_file_identifier::Data>,
// Inner state
+ device_id: device::id::Type,
identified_files: HashMap<FilePathPubId, IdentifiedFile>,
file_paths_without_cas_id: Vec<(FilePathToCreateOrLinkObject, ObjectKind)>,
@@ -72,7 +73,7 @@ pub struct Identifier {
// Dependencies
db: Arc<PrismaClient>,
- sync: Arc<SyncManager>,
+ sync: SyncManager,
}
/// Output from the `[Identifier]` task
@@ -135,6 +136,7 @@ impl Task for Identifier {
let Self {
location,
location_path,
+ device_id,
file_paths_by_id,
file_paths_without_cas_id,
identified_files,
@@ -255,6 +257,7 @@ impl Task for Identifier {
file_paths_without_cas_id.drain(..),
&self.db,
&self.sync,
+ *device_id,
),
)
.try_join()
@@ -301,6 +304,7 @@ impl Task for Identifier {
file_paths_without_cas_id.drain(..),
&self.db,
&self.sync,
+ *device_id,
)
.await?;
@@ -324,7 +328,8 @@ impl Identifier {
file_paths: Vec<file_path_for_file_identifier::Data>,
with_priority: bool,
db: Arc<PrismaClient>,
- sync: Arc<SyncManager>,
+ sync: SyncManager,
+ device_id: device::id::Type,
) -> Self {
let mut output = Output::default();
@@ -377,6 +382,7 @@ impl Identifier {
id: TaskId::new_v4(),
location,
location_path,
+ device_id,
identified_files: HashMap::with_capacity(file_paths_count - directories_count),
file_paths_without_cas_id,
file_paths_by_id,
@@ -394,33 +400,31 @@ async fn assign_cas_id_to_file_paths(
db: &PrismaClient,
sync: &SyncManager,
) -> Result<(), file_identifier::Error> {
- // Assign cas_id to each file path
- sync.write_ops(
- db,
- identified_files
- .iter()
- .map(|(pub_id, IdentifiedFile { cas_id, .. })| {
- (
- sync.shared_update(
- prisma_sync::file_path::SyncId {
- pub_id: pub_id.to_db(),
- },
- file_path::cas_id::NAME,
- msgpack!(cas_id),
- ),
- db.file_path()
- .update(
- file_path::pub_id::equals(pub_id.to_db()),
- vec![file_path::cas_id::set(cas_id.into())],
- )
- // We don't need any data here, just the id avoids receiving the entire object
- // as we can't pass an empty select macro call
- .select(file_path::select!({ id })),
- )
- })
- .unzip::<_, _, _, Vec<_>>(),
- )
- .await?;
+ let (ops, queries) = identified_files
+ .iter()
+ .map(|(pub_id, IdentifiedFile { cas_id, .. })| {
+ let (sync_param, db_param) = sync_db_entry!(cas_id, file_path::cas_id);
+
+ (
+ sync.shared_update(
+ prisma_sync::file_path::SyncId {
+ pub_id: pub_id.to_db(),
+ },
+ [sync_param],
+ ),
+ db.file_path()
+ .update(file_path::pub_id::equals(pub_id.to_db()), vec![db_param])
+ // We don't need any data here, just the id avoids receiving the entire object
+ // as we can't pass an empty select macro call
+ .select(file_path::select!({ id })),
+ )
+ })
+ .unzip::<_, _, Vec<_>, Vec<_>>();
+
+ if !ops.is_empty() && !queries.is_empty() {
+ // Assign cas_id to each file path
+ sync.write_ops(db, (ops, queries)).await?;
+ }
Ok(())
}
@@ -500,6 +504,7 @@ struct SaveState {
id: TaskId,
location: Arc<location::Data>,
location_path: Arc<PathBuf>,
+ device_id: device::id::Type,
file_paths_by_id: HashMap<FilePathPubId, file_path_for_file_identifier::Data>,
identified_files: HashMap<FilePathPubId, IdentifiedFile>,
file_paths_without_cas_id: Vec<(FilePathToCreateOrLinkObject, ObjectKind)>,
@@ -512,13 +517,14 @@ impl SerializableTask for Identifier {
type DeserializeError = rmp_serde::decode::Error;
- type DeserializeCtx = (Arc<PrismaClient>, Arc<SyncManager>);
+ type DeserializeCtx = (Arc<PrismaClient>, SyncManager);
async fn serialize(self) -> Result<Vec<u8>, Self::SerializeError> {
let Self {
id,
location,
location_path,
+ device_id,
file_paths_by_id,
identified_files,
file_paths_without_cas_id,
@@ -530,6 +536,7 @@ impl SerializableTask for Identifier {
id,
location,
location_path,
+ device_id,
file_paths_by_id,
identified_files,
file_paths_without_cas_id,
@@ -547,6 +554,7 @@ impl SerializableTask for Identifier {
id,
location,
location_path,
+ device_id,
file_paths_by_id,
identified_files,
file_paths_without_cas_id,
@@ -558,6 +566,7 @@ impl SerializableTask for Identifier {
location,
location_path,
file_paths_by_id,
+ device_id,
identified_files,
file_paths_without_cas_id,
output,
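The `assign_cas_id_to_file_paths` rewrite above follows a pattern used throughout this PR: collect the paired (sync operation, database query) values in one pass, then guard the `write_ops` call behind an emptiness check so no empty transaction is ever issued. The skeleton of that pattern, with generic placeholders for the CRDT op and Prisma query types:

    // Sketch of the unzip-then-guard pattern from assign_cas_id_to_file_paths.
    // Op and Query are placeholders for CRDT operations and Prisma queries.
    fn unzip_and_guard<Op, Query>(pairs: Vec<(Op, Query)>) -> Option<(Vec<Op>, Vec<Query>)> {
        let (ops, queries): (Vec<_>, Vec<_>) = pairs.into_iter().unzip();
        // Mirrors the `!ops.is_empty() && !queries.is_empty()` check above.
        (!ops.is_empty()).then_some((ops, queries))
    }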
diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs b/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs
index f74a03b4a..59f75d0a9 100644
--- a/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs
+++ b/core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs
@@ -1,15 +1,15 @@
use crate::file_identifier;
use sd_core_prisma_helpers::{file_path_id, FilePathPubId, ObjectPubId};
-use sd_core_sync::Manager as SyncManager;
+use sd_core_sync::SyncManager;
use sd_file_ext::kind::ObjectKind;
use sd_prisma::{
- prisma::{file_path, object, PrismaClient},
+ prisma::{device, file_path, object, PrismaClient},
prisma_sync,
};
-use sd_sync::{CRDTOperation, OperationFactory};
-use sd_utils::msgpack;
+use sd_sync::{option_sync_db_entry, sync_db_entry, sync_entry, CRDTOperation, OperationFactory};
+use sd_utils::chain_optional_iter;
use std::collections::{HashMap, HashSet};
@@ -47,10 +47,12 @@ fn connect_file_path_to_object<'db>(
prisma_sync::file_path::SyncId {
pub_id: file_path_pub_id.to_db(),
},
- file_path::object::NAME,
- msgpack!(prisma_sync::object::SyncId {
- pub_id: object_pub_id.to_db(),
- }),
+ [sync_entry!(
+ prisma_sync::object::SyncId {
+ pub_id: object_pub_id.to_db(),
+ },
+ file_path::object
+ )],
),
db.file_path()
.update(
@@ -69,6 +71,7 @@ async fn create_objects_and_update_file_paths(
files_and_kinds: impl IntoIterator<Item = (FilePathToCreateOrLinkObject, ObjectKind)>
+ Send,
db: &PrismaClient,
sync: &SyncManager,
+ device_id: device::id::Type,
) -> Result<HashMap<file_path_id::Type, ObjectPubId>, file_identifier::Error> {
trace!("Preparing objects");
let (object_create_args, file_path_args) = files_and_kinds
@@ -84,16 +87,23 @@ async fn create_objects_and_update_file_paths(
let kind = kind as i32;
- let (sync_params, db_params) = [
- (
- (object::date_created::NAME, msgpack!(created_at)),
- object::date_created::set(created_at),
- ),
- (
- (object::kind::NAME, msgpack!(kind)),
- object::kind::set(Some(kind)),
- ),
- ]
+ let device_pub_id = sync.device_pub_id.to_db();
+
+ let (sync_params, db_params) = chain_optional_iter(
+ [
+ (
+ sync_entry!(
+ prisma_sync::device::SyncId {
+ pub_id: device_pub_id,
+ },
+ object::device
+ ),
+ object::device_id::set(Some(device_id)),
+ ),
+ sync_db_entry!(kind, object::kind),
+ ],
+ [option_sync_db_entry!(created_at, object::date_created)],
+ )
.into_iter()
.unzip::<_, _, Vec<_>, Vec<_>>();
@@ -121,51 +131,57 @@ async fn create_objects_and_update_file_paths(
.unzip::<_, _, HashMap<_, _>, Vec<_>>(
);
- trace!(
- new_objects_count = object_create_args.len(),
- "Creating new Objects!;",
- );
+ let new_objects_count = object_create_args.len();
+ if new_objects_count > 0 {
+ trace!(new_objects_count, "Creating new Objects!;",);
- // create new object records with assembled values
- let created_objects_count = sync
- .write_ops(db, {
- let (sync, db_params) = object_create_args
- .into_iter()
- .unzip::<_, _, Vec<_>, Vec<_>>();
-
- (
- sync.into_iter().flatten().collect(),
- db.object().create_many(db_params),
- )
- })
- .await?;
-
- trace!(%created_objects_count, "Created new Objects;");
-
- if created_objects_count > 0 {
- trace!("Updating file paths with created objects");
-
- let updated_file_path_ids = sync
- .write_ops(
- db,
- file_path_update_args
+ // create new object records with assembled values
+ let created_objects_count = sync
+ .write_ops(db, {
+ let (sync, db_params) = object_create_args
.into_iter()
- .unzip::<_, _, Vec<_>, Vec<_>>(),
- )
- .await
- .map(|file_paths| {
- file_paths
- .into_iter()
- .map(|file_path_id::Data { id }| id)
- .collect::<HashSet<_>>()
- })?;
+ .unzip::<_, _, Vec<_>, Vec<_>>();
- object_pub_id_by_file_path_id
- .retain(|file_path_id, _| updated_file_path_ids.contains(file_path_id));
+ (sync, db.object().create_many(db_params))
+ })
+ .await?;
- Ok(object_pub_id_by_file_path_id)
+ trace!(%created_objects_count, "Created new Objects;");
+
+ if created_objects_count > 0 {
+ let file_paths_to_update_count = file_path_update_args.len();
+ if file_paths_to_update_count > 0 {
+ trace!(
+ file_paths_to_update_count,
+ "Updating file paths with created objects"
+ );
+
+ let updated_file_path_ids = sync
+ .write_ops(
+ db,
+ file_path_update_args
+ .into_iter()
+ .unzip::<_, _, Vec<_>, Vec<_>>(),
+ )
+ .await
+ .map(|file_paths| {
+ file_paths
+ .into_iter()
+ .map(|file_path_id::Data { id }| id)
+ .collect::<HashSet<_>>()
+ })?;
+
+ object_pub_id_by_file_path_id
+ .retain(|file_path_id, _| updated_file_path_ids.contains(file_path_id));
+ }
+
+ Ok(object_pub_id_by_file_path_id)
+ } else {
+ trace!("No objects created, skipping file path updates");
+ Ok(HashMap::new())
+ }
} else {
- trace!("No objects created, skipping file path updates");
+ trace!("No objects to create, skipping file path updates");
Ok(HashMap::new())
}
}
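`create_objects_and_update_file_paths` now assembles its sync parameters with `chain_optional_iter`: required entries (the device link and `kind`) come first, and optional ones such as `date_created` are appended only when present. Assuming that helper chains a required collection with optional items and drops the `None`s, its shape is roughly:

    // Sketch of the chain_optional_iter shape, with Vecs instead of the
    // generic iterators sd_utils actually accepts (an assumption here).
    fn chain_optional<T>(required: Vec<T>, optional: Vec<Option<T>>) -> Vec<T> {
        required
            .into_iter()
            .chain(optional.into_iter().flatten())
            .collect()
    }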
diff --git a/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs b/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs
index 9569c1563..a99d89d8d 100644
--- a/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs
+++ b/core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs
@@ -1,9 +1,9 @@
use crate::{file_identifier, Error};
use sd_core_prisma_helpers::{file_path_id, object_for_file_identifier, CasId, ObjectPubId};
-use sd_core_sync::Manager as SyncManager;
+use sd_core_sync::SyncManager;
-use sd_prisma::prisma::{file_path, object, PrismaClient};
+use sd_prisma::prisma::{device, file_path, object, PrismaClient};
use sd_task_system::{
check_interruption, ExecStatus, Interrupter, IntoAnyTaskOutput, SerializableTask, Task, TaskId,
};
@@ -29,13 +29,14 @@ pub struct ObjectProcessor {
// Inner state
stage: Stage,
+ device_id: device::id::Type,
// Out collector
output: Output,
// Dependencies
db: Arc<PrismaClient>,
- sync: Arc<SyncManager>,
+ sync: SyncManager,
}
#[derive(Debug, Serialize, Deserialize)]
@@ -93,6 +94,7 @@ impl Task for ObjectProcessor {
let Self {
db,
sync,
+ device_id,
file_paths_by_cas_id,
stage,
output:
@@ -167,8 +169,13 @@ impl Task for ObjectProcessor {
);
let start = Instant::now();
let (more_file_paths_with_new_object, more_linked_objects_count) =
- assign_objects_to_duplicated_orphans(file_paths_by_cas_id, db, sync)
- .await?;
+ assign_objects_to_duplicated_orphans(
+ file_paths_by_cas_id,
+ db,
+ sync,
+ *device_id,
+ )
+ .await?;
*create_object_time = start.elapsed();
file_path_ids_with_new_object.extend(more_file_paths_with_new_object);
*linked_objects_count += more_linked_objects_count;
@@ -194,7 +201,8 @@ impl ObjectProcessor {
pub fn new(
file_paths_by_cas_id: HashMap<CasId<'static>, Vec<FilePathToCreateOrLinkObject>>,
db: Arc<PrismaClient>,
- sync: Arc<SyncManager>,
+ sync: SyncManager,
+ device_id: device::id::Type,
with_priority: bool,
) -> Self {
Self {
@@ -202,6 +210,7 @@ impl ObjectProcessor {
db,
sync,
file_paths_by_cas_id,
+ device_id,
stage: Stage::Starting,
output: Output::default(),
with_priority,
@@ -270,45 +279,44 @@ async fn assign_existing_objects_to_file_paths(
db: &PrismaClient,
sync: &SyncManager,
) -> Result<Vec<file_path_id::Type>, file_identifier::Error> {
- sync.write_ops(
- db,
- objects_by_cas_id
- .iter()
- .flat_map(|(cas_id, object_pub_id)| {
- file_paths_by_cas_id
- .remove(cas_id)
- .map(|file_paths| {
- file_paths.into_iter().map(
- |FilePathToCreateOrLinkObject {
- file_path_pub_id, ..
- }| {
- connect_file_path_to_object(
- &file_path_pub_id,
- object_pub_id,
- db,
- sync,
- )
- },
- )
- })
- .expect("must be here")
- })
- .unzip::<_, _, Vec<_>, Vec<_>>(),
- )
- .await
- .map(|file_paths| {
- file_paths
- .into_iter()
- .map(|file_path_id::Data { id }| id)
- .collect()
- })
- .map_err(Into::into)
+ let (ops, queries) = objects_by_cas_id
+ .iter()
+ .flat_map(|(cas_id, object_pub_id)| {
+ file_paths_by_cas_id
+ .remove(cas_id)
+ .map(|file_paths| {
+ file_paths.into_iter().map(
+ |FilePathToCreateOrLinkObject {
+ file_path_pub_id, ..
+ }| {
+ connect_file_path_to_object(&file_path_pub_id, object_pub_id, db, sync)
+ },
+ )
+ })
+ .expect("must be here")
+ })
+ .unzip::<_, _, Vec<_>, Vec<_>>();
+
+ if ops.is_empty() && queries.is_empty() {
+ return Ok(vec![]);
+ }
+
+ sync.write_ops(db, (ops, queries))
+ .await
+ .map(|file_paths| {
+ file_paths
+ .into_iter()
+ .map(|file_path_id::Data { id }| id)
+ .collect()
+ })
+ .map_err(Into::into)
}
async fn assign_objects_to_duplicated_orphans(
file_paths_by_cas_id: &mut HashMap<CasId<'static>, Vec<FilePathToCreateOrLinkObject>>,
db: &PrismaClient,
sync: &SyncManager,
+ device_id: device::id::Type,
) -> Result<(Vec<file_path_id::Type>, u64), file_identifier::Error> {
// at least 1 file path per cas_id
let mut selected_file_paths = Vec::with_capacity(file_paths_by_cas_id.len());
@@ -327,7 +335,7 @@ async fn assign_objects_to_duplicated_orphans(
});
let (mut file_paths_with_new_object, objects_by_cas_id) =
- create_objects_and_update_file_paths(selected_file_paths, db, sync)
+ create_objects_and_update_file_paths(selected_file_paths, db, sync, device_id)
.await?
.into_iter()
.map(|(file_path_id, object_pub_id)| {
@@ -365,6 +373,7 @@ async fn assign_objects_to_duplicated_orphans(
pub struct SaveState {
id: TaskId,
file_paths_by_cas_id: HashMap<CasId<'static>, Vec<FilePathToCreateOrLinkObject>>,
+ device_id: device::id::Type,
stage: Stage,
output: Output,
with_priority: bool,
@@ -375,12 +384,13 @@ impl SerializableTask for ObjectProcessor {
type DeserializeError = rmp_serde::decode::Error;
- type DeserializeCtx = (Arc<PrismaClient>, Arc<SyncManager>);
+ type DeserializeCtx = (Arc<PrismaClient>, SyncManager);
async fn serialize(self) -> Result<Vec<u8>, Self::SerializeError> {
let Self {
id,
file_paths_by_cas_id,
+ device_id,
stage,
output,
with_priority,
@@ -390,6 +400,7 @@ impl SerializableTask for ObjectProcessor {
rmp_serde::to_vec_named(&SaveState {
id,
file_paths_by_cas_id,
+ device_id,
stage,
output,
with_priority,
@@ -404,6 +415,7 @@ impl SerializableTask for ObjectProcessor {
|SaveState {
id,
file_paths_by_cas_id,
+ device_id,
stage,
output,
with_priority,
@@ -412,6 +424,7 @@ impl SerializableTask for ObjectProcessor {
with_priority,
file_paths_by_cas_id,
stage,
+ device_id,
output,
db,
sync,
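`assign_objects_to_duplicated_orphans` above works in two phases: pick one representative file path per `cas_id`, create objects for those, then link every remaining duplicate to the object just created for its hash. The selection phase, sketched with `String`/`u32` standing in for `CasId` and the file-path id type:

    use std::collections::HashMap;

    // Sketch of the representative-selection step; the leftovers stay in the
    // map so they can be linked to the freshly created objects afterwards.
    fn pick_representatives(by_cas_id: &mut HashMap<String, Vec<u32>>) -> Vec<(String, u32)> {
        by_cas_id
            .iter_mut()
            .filter_map(|(cas_id, file_paths)| file_paths.pop().map(|fp| (cas_id.clone(), fp)))
            .collect()
    }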
diff --git a/core/crates/heavy-lifting/src/indexer/job.rs b/core/crates/heavy-lifting/src/indexer/job.rs
index 22546950e..cf19fbb90 100644
--- a/core/crates/heavy-lifting/src/indexer/job.rs
+++ b/core/crates/heavy-lifting/src/indexer/job.rs
@@ -16,7 +16,11 @@ use sd_core_file_path_helper::IsolatedFilePathData;
use sd_core_indexer_rules::{IndexerRule, IndexerRuler};
use sd_core_prisma_helpers::location_with_indexer_rules;
-use sd_prisma::prisma::location;
+use sd_prisma::{
+ prisma::{device, location},
+ prisma_sync,
+};
+use sd_sync::{sync_db_not_null_entry, OperationFactory};
use sd_task_system::{
AnyTaskOutput, IntoTask, SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId,
TaskOutput, TaskStatus,
@@ -116,13 +120,13 @@ impl Job for Indexer {
TaskKind::Save => tasks::Saver::deserialize(
&task_bytes,
- (Arc::clone(ctx.db()), Arc::clone(ctx.sync())),
+ (Arc::clone(ctx.db()), ctx.sync().clone()),
)
.await
.map(IntoTask::into_task),
TaskKind::Update => tasks::Updater::deserialize(
&task_bytes,
- (Arc::clone(ctx.db()), Arc::clone(ctx.sync())),
+ (Arc::clone(ctx.db()), ctx.sync().clone()),
)
.await
.map(IntoTask::into_task),
@@ -161,6 +165,17 @@ impl Job for Indexer {
) -> Result {
let mut pending_running_tasks = FuturesUnordered::new();
+ let device_pub_id = &ctx.sync().device_pub_id;
+ let device_id = ctx
+ .db()
+ .device()
+ .find_unique(device::pub_id::equals(device_pub_id.to_db()))
+ .exec()
+ .await
+ .map_err(indexer::Error::from)?
+ .ok_or(indexer::Error::DeviceNotFound(device_pub_id.clone()))?
+ .id;
+
match self
.init_or_resume(&mut pending_running_tasks, &ctx, &dispatcher)
.await
@@ -191,21 +206,26 @@ impl Job for Indexer {
}
if let Some(res) = self
- .process_handles(&mut pending_running_tasks, &ctx, &dispatcher)
+ .process_handles(&mut pending_running_tasks, &ctx, device_id, &dispatcher)
.await
{
return res;
}
if let Some(res) = self
- .dispatch_last_save_and_update_tasks(&mut pending_running_tasks, &ctx, &dispatcher)
+ .dispatch_last_save_and_update_tasks(
+ &mut pending_running_tasks,
+ &ctx,
+ device_id,
+ &dispatcher,
+ )
.await
{
return res;
}
if let Some(res) = self
- .index_pending_ancestors(&mut pending_running_tasks, &ctx, &dispatcher)
+ .index_pending_ancestors(&mut pending_running_tasks, &ctx, device_id, &dispatcher)
.await
{
return res;
@@ -253,7 +273,7 @@ impl Job for Indexer {
.await?;
}
- update_location_size(location.id, ctx.db(), &ctx).await?;
+ update_location_size(location.id, location.pub_id.clone(), &ctx).await?;
metadata.mean_db_write_time += start_size_update_time.elapsed();
}
@@ -271,13 +291,23 @@ impl Job for Indexer {
"all tasks must be completed here"
);
- ctx.db()
- .location()
- .update(
- location::id::equals(location.id),
- vec![location::scan_state::set(LocationScanState::Indexed as i32)],
+ let (sync_param, db_param) =
+ sync_db_not_null_entry!(LocationScanState::Indexed as i32, location::scan_state);
+
+ ctx.sync()
+ .write_op(
+ ctx.db(),
+ ctx.sync().shared_update(
+ prisma_sync::location::SyncId {
+ pub_id: location.pub_id.clone(),
+ },
+ [sync_param],
+ ),
+ ctx.db()
+ .location()
+ .update(location::id::equals(location.id), vec![db_param])
+ .select(location::select!({ id })),
)
- .exec()
.await
.map_err(indexer::Error::from)?;
@@ -338,6 +368,7 @@ impl Indexer {
task_id: TaskId,
any_task_output: Box<dyn AnyTaskOutput>,
ctx: &impl JobContext,
+ device_id: device::id::Type,
dispatcher: &JobTaskDispatcher,
) -> Result<Vec<TaskHandle<Error>>, JobErrorOrDispatcherError<indexer::Error>> {
self.metadata.completed_tasks += 1;
@@ -349,6 +380,7 @@ impl Indexer {
.downcast::<walker::Output<WalkerDBProxy, IsoFilePathFactory>>()
.expect("just checked"),
ctx,
+ device_id,
dispatcher,
)
.await;
@@ -403,6 +435,7 @@ impl Indexer {
..
}: walker::Output<WalkerDBProxy, IsoFilePathFactory>,
ctx: &impl JobContext