Merge branch 'eng-1828-migration-to-new-cloud-api-system' into eng-294-volume-awareness

Jamie Pine
2024-10-23 21:34:16 -07:00
267 changed files with 3384 additions and 6863 deletions

View File

@@ -9,9 +9,7 @@ runs:
using: 'composite'
steps:
- name: Install pnpm
uses: pnpm/action-setup@v3
with:
version: 9.0.6
uses: pnpm/action-setup@v4
- name: Install Node.js
uses: actions/setup-node@v4

View File

@@ -29,7 +29,7 @@ runs:
- name: Install LLVM and Clang
if: ${{ runner.os == 'Windows' }}
uses: KyleMayes/install-llvm-action@v1
uses: KyleMayes/install-llvm-action@v2
with:
cached: ${{ steps.cache-llvm-restore.outputs.cache-hit }}
version: '15'

View File

@@ -35,3 +35,5 @@ package*.json
# Don't format locales json
interface/locales
scripts/utils/.tmp/*

Cargo.lock (generated)
View File

Binary file not shown.

View File

@@ -20,38 +20,38 @@ rust-version = "1.81"
[workspace.dependencies]
# First party dependencies
sd-cloud-schema = { git = "https://github.com/spacedriveapp/cloud-services-schema", rev = "fb41a3c4eb" }
sd-cloud-schema = { git = "https://github.com/spacedriveapp/cloud-services-schema", rev = "bbc69c5cb2" }
# Third party dependencies used by one or more of our crates
async-channel = "2.3"
async-stream = "0.3.6"
async-trait = "0.1.83"
axum = "0.6.20" # Update blocked by hyper
axum = "0.7.7"
axum-extra = "0.9.4"
base64 = "0.22.1"
blake3 = "1.5.4"
bytes = "1.7.1" # Update blocked by hyper
bytes = "1.7.1" # Update blocked by hyper
chrono = "0.4.38"
ed25519-dalek = "2.1"
flume = "0.11.0"
futures = "0.3.30"
futures = "0.3.31"
futures-concurrency = "7.6"
globset = "0.4.15"
http = "0.2" # Update blocked by axum
hyper = "0.14" # Update blocked due to API breaking changes
image = "0.24.9" # Update blocked due to https://github.com/image-rs/image/issues/2230
http = "1.1"
hyper = "1.5"
image = "0.25.4"
itertools = "0.13.0"
lending-stream = "1.0"
libc = "0.2.159"
mimalloc = "0.1.43"
normpath = "1.2"
normpath = "1.3"
pin-project-lite = "0.2.14"
rand = "0.9.0-alpha.2"
regex = "1.11"
reqwest = { version = "0.11", default-features = false } # Update blocked by hyper
reqwest = { version = "0.12.8", default-features = false }
rmp = "0.8.14"
rmp-serde = "1.3"
rmpv = { version = "1.3", features = ["with-serde"] }
rspc = "0.1.4" # Update blocked by custom patch below
serde = "1.0"
serde_json = "1.0"
specta = "=2.0.0-rc.20"
@@ -65,46 +65,48 @@ tokio-util = "0.7.12"
tracing = "0.1.40"
tracing-subscriber = "0.3.18"
tracing-test = "0.2.5"
uhlc = "0.8.0" # Must follow version used by specta
uuid = "1.10" # Must follow version used by specta
webp = "0.2.6" # Update blocked by image
uhlc = "0.8.0" # Must follow version used by specta
uuid = "1.10" # Must follow version used by specta
webp = "0.3.0"
zeroize = "1.8"
[workspace.dependencies.rspc]
git = "https://github.com/spacedriveapp/rspc.git"
rev = "6a77167495"
[workspace.dependencies.prisma-client-rust]
default-features = false
features = ["migrations", "specta", "sqlite", "sqlite-create-many"]
git = "https://github.com/brendonovich/prisma-client-rust"
rev = "4f9ef9d38c"
git = "https://github.com/spacedriveapp/prisma-client-rust"
rev = "b22ad7dc7d"
[workspace.dependencies.prisma-client-rust-sdk]
default-features = false
features = ["sqlite"]
git = "https://github.com/brendonovich/prisma-client-rust"
rev = "4f9ef9d38c"
git = "https://github.com/spacedriveapp/prisma-client-rust"
rev = "b22ad7dc7d"
# Proper IOS Support
[patch.crates-io.if-watch]
git = "https://github.com/spacedriveapp/if-watch.git"
rev = "a92c17d3f8"
# We use our own version of rspc
[patch.crates-io.rspc]
git = "https://github.com/spacedriveapp/rspc.git"
rev = "bc882f4724"
# Add `Control::open_stream_with_addrs`
[patch.crates-io.libp2p]
git = "https://github.com/spacedriveapp/rust-libp2p.git"
rev = "a005656df7"
git = "https://github.com/spacedriveapp/rust-libp2p"
rev = "1024411ffa"
[patch.crates-io.libp2p-core]
git = "https://github.com/spacedriveapp/rust-libp2p.git"
rev = "a005656df7"
git = "https://github.com/spacedriveapp/rust-libp2p"
rev = "1024411ffa"
[patch.crates-io.libp2p-identity]
git = "https://github.com/spacedriveapp/rust-libp2p"
rev = "1024411ffa"
[patch.crates-io.libp2p-swarm]
git = "https://github.com/spacedriveapp/rust-libp2p.git"
rev = "a005656df7"
git = "https://github.com/spacedriveapp/rust-libp2p"
rev = "1024411ffa"
[patch.crates-io.libp2p-stream]
git = "https://github.com/spacedriveapp/rust-libp2p.git"
rev = "a005656df7"
git = "https://github.com/spacedriveapp/rust-libp2p"
rev = "1024411ffa"
[profile.dev]
# Make compilation faster on macOS

View File

@@ -12,19 +12,19 @@
"lint": "eslint src --cache"
},
"dependencies": {
"@oscartbeaumont-sd/rspc-client": "github:spacedriveapp/rspc#path:packages/client&bc882f4724",
"@oscartbeaumont-sd/rspc-tauri": "github:spacedriveapp/rspc#path:packages/tauri&bc882f4724",
"@spacedrive/rspc-client": "github:spacedriveapp/rspc#path:packages/client&6a77167495",
"@spacedrive/rspc-tauri": "github:spacedriveapp/rspc#path:packages/tauri&6a77167495",
"@remix-run/router": "=1.13.1",
"@sd/client": "workspace:*",
"@sd/interface": "workspace:*",
"@sd/ui": "workspace:*",
"@t3-oss/env-core": "^0.7.1",
"@tanstack/react-query": "^4.36.1",
"@tauri-apps/api": "=2.0.1",
"@tauri-apps/plugin-dialog": "2.0.0",
"@tauri-apps/plugin-http": "2.0.0",
"@tanstack/react-query": "^5.59",
"@tauri-apps/api": "=2.0.3",
"@tauri-apps/plugin-dialog": "2.0.1",
"@tauri-apps/plugin-http": "2.0.1",
"@tauri-apps/plugin-os": "2.0.0",
"@tauri-apps/plugin-shell": "2.0.0",
"@tauri-apps/plugin-shell": "2.0.1",
"consistent-hash": "^1.2.2",
"immer": "^10.0.3",
"react": "^18.2.0",
@@ -36,12 +36,12 @@
"devDependencies": {
"@sd/config": "workspace:*",
"@sentry/vite-plugin": "^2.16.0",
"@tauri-apps/cli": "2.0.1",
"@tauri-apps/cli": "2.0.4",
"@types/react": "^18.2.67",
"@types/react-dom": "^18.2.22",
"sass": "^1.72.0",
"typescript": "^5.6.2",
"vite": "^5.2.0",
"vite-tsconfig-paths": "^4.3.2"
"vite": "^5.4.9",
"vite-tsconfig-paths": "^5.0.1"
}
}

View File

@@ -16,7 +16,8 @@ sd-fda = { path = "../../../crates/fda" }
sd-prisma = { path = "../../../crates/prisma" }
# Workspace dependencies
axum = { workspace = true, features = ["headers", "query"] }
axum = { workspace = true, features = ["query"] }
axum-extra = { workspace = true, features = ["typed-header"] }
futures = { workspace = true }
http = { workspace = true }
hyper = { workspace = true }
@@ -38,10 +39,10 @@ opener = { version = "0.7.1", features = ["reveal"], def
specta-typescript = "=0.0.7"
tauri-plugin-clipboard-manager = "=2.0.1"
tauri-plugin-deep-link = "=2.0.1"
tauri-plugin-dialog = "=2.0.1"
tauri-plugin-http = "=2.0.1"
tauri-plugin-dialog = "=2.0.3"
tauri-plugin-http = "=2.0.3"
tauri-plugin-os = "=2.0.1"
tauri-plugin-shell = "=2.0.1"
tauri-plugin-shell = "=2.0.2"
tauri-plugin-updater = "=2.0.2"
# memory allocator
@@ -49,12 +50,12 @@ mimalloc = { workspace = true }
[dependencies.tauri]
features = ["linux-libxdo", "macos-private-api", "native-tls-vendored", "unstable"]
version = "=2.0.1"
version = "=2.0.6"
[dependencies.tauri-specta]
features = ["derive", "typescript"]
git = "https://github.com/spacedriveapp/tauri-specta"
rev = "1baf68be47"
rev = "8c85d40eb9"
[target.'cfg(target_os = "linux")'.dependencies]
# Spacedrive Sub-crates
@@ -76,7 +77,7 @@ sd-desktop-windows = { path = "../crates/windows" }
[build-dependencies]
# Specific Desktop dependencies
tauri-build = "=2.0.1"
tauri-build = "=2.0.2"
[features]
ai-models = ["sd-core/ai"]

View File

@@ -1,20 +1,18 @@
use std::{
net::Ipv4Addr,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use std::{net::Ipv4Addr, sync::Arc};
use axum::{
extract::{Query, State, TypedHeader},
headers::authorization::{Authorization, Bearer},
body::Body,
extract::{Query, State},
http::{Request, StatusCode},
middleware::{self, Next},
response::Response,
RequestPartsExt,
};
use axum_extra::{
headers::authorization::{Authorization, Bearer},
TypedHeader,
};
use http::Method;
use hyper::server::{accept::Accept, conn::AddrIncoming};
use rand::{distr::Alphanumeric, Rng};
use sd_core::{custom_uri, Node, NodeError};
use serde::Deserialize;
@@ -66,29 +64,14 @@ pub async fn sd_server_plugin<R: Runtime>(
.fallback(|| async { "404 Not Found: We're past the event horizon..." });
// Only allow current device to access it
let listenera = TcpListener::bind((Ipv4Addr::LOCALHOST, 0)).await?;
let listen_addra = listenera.local_addr()?;
let listenerb = TcpListener::bind((Ipv4Addr::LOCALHOST, 0)).await?;
let listen_addrb = listenerb.local_addr()?;
let listenerc = TcpListener::bind((Ipv4Addr::LOCALHOST, 0)).await?;
let listen_addrc = listenerc.local_addr()?;
let listenerd = TcpListener::bind((Ipv4Addr::LOCALHOST, 0)).await?;
let listen_addrd = listenerd.local_addr()?;
// let listen_addr = listener.local_addr()?; // We get it from a listener so `0` is turned into a random port
let listener = TcpListener::bind((Ipv4Addr::LOCALHOST, 0)).await?;
let listen_addr = listener.local_addr()?; // We get it from a listener so `0` is turned into a random port
let (tx, mut rx) = tokio::sync::mpsc::channel(1);
info!("Internal server listening on: http://{listen_addra:?} http://{listen_addrb:?} http://{listen_addrc:?} http://{listen_addrd:?}");
let server = axum::Server::builder(CombinedIncoming {
a: AddrIncoming::from_listener(listenera)?,
b: AddrIncoming::from_listener(listenerb)?,
c: AddrIncoming::from_listener(listenerc)?,
d: AddrIncoming::from_listener(listenerd)?,
});
info!("Internal server listening on: http://{listen_addr:?}");
tokio::spawn(async move {
server
.serve(app.into_make_service())
.with_graceful_shutdown(async {
axum::serve(listener, app)
.with_graceful_shutdown(async move {
rx.recv().await;
})
.await
@@ -96,12 +79,7 @@ pub async fn sd_server_plugin<R: Runtime>(
});
let script = format!(
r#"window.__SD_CUSTOM_SERVER_AUTH_TOKEN__ = "{auth_token}"; window.__SD_CUSTOM_URI_SERVER__ = [{}];"#,
[listen_addra, listen_addrb, listen_addrc, listen_addrd]
.iter()
.map(|addr| format!("'http://{addr}'"))
.collect::<Vec<_>>()
.join(","),
r#"window.__SD_CUSTOM_SERVER_AUTH_TOKEN__ = "{auth_token}"; window.__SD_CUSTOM_URI_SERVER__ = ['http://{listen_addr}'];"#,
);
Ok(tauri::plugin::Builder::new("sd-server")
@@ -127,15 +105,12 @@ struct QueryParams {
token: Option<String>,
}
async fn auth_middleware<B>(
async fn auth_middleware(
Query(query): Query<QueryParams>,
State(auth_token): State<String>,
request: Request<B>,
next: Next<B>,
) -> Result<Response, StatusCode>
where
B: Send,
{
request: Request<Body>,
next: Next,
) -> Result<Response, StatusCode> {
let req = if query.token.as_ref() != Some(&auth_token) {
let (mut parts, body) = request.into_parts();
@@ -158,38 +133,3 @@ where
Ok(next.run(req).await)
}
struct CombinedIncoming {
a: AddrIncoming,
b: AddrIncoming,
c: AddrIncoming,
d: AddrIncoming,
}
impl Accept for CombinedIncoming {
type Conn = <AddrIncoming as Accept>::Conn;
type Error = <AddrIncoming as Accept>::Error;
fn poll_accept(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
if let Poll::Ready(Some(value)) = Pin::new(&mut self.a).poll_accept(cx) {
return Poll::Ready(Some(value));
}
if let Poll::Ready(Some(value)) = Pin::new(&mut self.b).poll_accept(cx) {
return Poll::Ready(Some(value));
}
if let Poll::Ready(Some(value)) = Pin::new(&mut self.c).poll_accept(cx) {
return Poll::Ready(Some(value));
}
if let Poll::Ready(Some(value)) = Pin::new(&mut self.d).poll_accept(cx) {
return Poll::Ready(Some(value));
}
Poll::Pending
}
}
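
The hunk above is the axum 0.6 → 0.7 migration in the desktop server plugin: the four localhost listeners multiplexed through the hand-rolled `CombinedIncoming` acceptor collapse into a single `TcpListener` handed to `axum::serve`, `TypedHeader` moves to `axum-extra`, and middleware drops its `<B>` generic because requests are now always `Request<Body>` and `Next` is no longer parameterized. A minimal sketch of the 0.7 shape — the route and header check are illustrative, not the plugin's real token logic:

```rust
use std::net::Ipv4Addr;

use axum::{
    body::Body,
    http::{Request, StatusCode},
    middleware::{self, Next},
    response::Response,
    routing::get,
    Router,
};
use tokio::net::TcpListener;

// axum 0.7: the body type is fixed to `axum::body::Body`, and `Next` takes
// no generic parameter, so the old `async fn mw<B>(..., next: Next<B>)`
// signatures simplify to this.
async fn require_token(request: Request<Body>, next: Next) -> Result<Response, StatusCode> {
    if request.headers().get("x-illustrative-token").is_none() {
        return Err(StatusCode::UNAUTHORIZED);
    }
    Ok(next.run(request).await)
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let app = Router::new()
        .route("/", get(|| async { "ok" }))
        .layer(middleware::from_fn(require_token));

    // `axum::Server` / `AddrIncoming` are gone in 0.7: bind a Tokio listener
    // (port 0 picks a free port) and pass it straight to `axum::serve`.
    let listener = TcpListener::bind((Ipv4Addr::LOCALHOST, 0)).await?;
    println!("listening on http://{}", listener.local_addr()?);
    axum::serve(listener, app).await
}
```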

View File

@@ -1,4 +1,4 @@
import { tauriLink } from '@oscartbeaumont-sd/rspc-tauri/src/v2';
import { tauriLink } from '@spacedrive/rspc-tauri/src/v2';
globalThis.isDev = import.meta.env.DEV;
globalThis.rspcLinks = [

View File

@@ -49,7 +49,7 @@
"three": "^0.161.0",
"tsparticles": "^3.3.0",
"unist-util-visit": "^5.0.0",
"zod": "~3.22.4"
"zod": "^3.23"
},
"devDependencies": {
"@next/bundle-analyzer": "^13.5.6",

View File

@@ -7,7 +7,6 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
# Spacedrive Sub-crates
[target.'cfg(target_os = "ios")'.dependencies]
sd-core = { default-features = false, features = [

View File

@@ -1,4 +1,4 @@
import { AlphaRSPCError, Link, RspcRequest } from '@oscartbeaumont-sd/rspc-client/src/v2';
import { Link, RSPCError, RspcRequest } from '@spacedrive/rspc-client';
import { EventEmitter, requireNativeModule } from 'expo-modules-core';
// It loads the native module object from the JSI or falls back to
@@ -15,7 +15,7 @@ export function reactNativeLink(): Link {
string,
{
resolve: (result: any) => void;
reject: (error: Error | AlphaRSPCError) => void;
reject: (error: Error | RSPCError) => void;
}
>();
@@ -29,7 +29,7 @@ export function reactNativeLink(): Link {
activeMap.delete(id);
} else if (result.type === 'error') {
const { message, code } = result.data;
activeMap.get(id)?.reject(new AlphaRSPCError(code, message));
activeMap.get(id)?.reject(new RSPCError(code, message));
activeMap.delete(id);
} else {
console.error(`rspc: received event of unknown type '${result.type}'`);

View File

@@ -21,18 +21,17 @@
"@dr.pogodin/react-native-fs": "^2.24.1",
"@gorhom/bottom-sheet": "^4.6.1",
"@hookform/resolvers": "^3.1.0",
"@oscartbeaumont-sd/rspc-client": "github:spacedriveapp/rspc#path:packages/client&bc882f4724",
"@oscartbeaumont-sd/rspc-react": "github:spacedriveapp/rspc#path:packages/react&bc882f4724",
"@spacedrive/rspc-client": "github:spacedriveapp/rspc#path:packages/client&6a77167495",
"@react-native-async-storage/async-storage": "~1.23.1",
"@react-native-masked-view/masked-view": "^0.3.1",
"@react-navigation/bottom-tabs": "^6.5.19",
"@react-navigation/drawer": "^6.6.14",
"@react-navigation/drawer": "^6.6.15",
"@react-navigation/native": "^6.1.16",
"@react-navigation/native-stack": "^6.9.25",
"@sd/assets": "workspace:*",
"@sd/client": "workspace:*",
"@shopify/flash-list": "1.6.4",
"@tanstack/react-query": "^4.36.1",
"@tanstack/react-query": "^5.59",
"babel-preset-solid": "^1.9.0",
"class-variance-authority": "^0.7.0",
"dayjs": "^1.11.10",
@@ -75,8 +74,8 @@
"twrnc": "^4.1.0",
"use-count-up": "^3.0.1",
"use-debounce": "^9.0.4",
"valtio": "^1.11.2",
"zod": "~3.22.4"
"valtio": "^2.0",
"zod": "^3.23"
},
"devDependencies": {
"@babel/core": "^7.24.0",

View File

@@ -1,4 +1,5 @@
import { useNavigation } from '@react-navigation/native';
import { keepPreviousData } from '@tanstack/react-query';
import { Plus } from 'phosphor-react-native';
import { useRef, useState } from 'react';
import { FlatList, Text, View } from 'react-native';
@@ -22,7 +23,7 @@ const BrowseLocations = () => {
const modalRef = useRef<ModalRef>(null);
const [showAll, setShowAll] = useState(false);
const result = useLibraryQuery(['locations.list'], { keepPreviousData: true });
const result = useLibraryQuery(['locations.list'], { placeholderData: keepPreviousData });
const locations = result.data;
return (

View File

@@ -1,5 +1,6 @@
import { DrawerNavigationHelpers } from '@react-navigation/drawer/lib/typescript/src/types';
import { useNavigation } from '@react-navigation/native';
import { keepPreviousData } from '@tanstack/react-query';
import { useRef } from 'react';
import { Pressable, Text, View } from 'react-native';
import {
@@ -73,7 +74,7 @@ const DrawerLocations = () => {
const modalRef = useRef<ModalRef>(null);
const result = useLibraryQuery(['locations.list'], { keepPreviousData: true });
const result = useLibraryQuery(['locations.list'], { placeholderData: keepPreviousData });
const locations = result.data || [];
return (

View File

@@ -1,6 +1,6 @@
import { useNavigation } from '@react-navigation/native';
import { FlashList } from '@shopify/flash-list';
import { UseInfiniteQueryResult } from '@tanstack/react-query';
import { InfiniteData, UseInfiniteQueryResult } from '@tanstack/react-query';
import * as Haptics from 'expo-haptics';
import { useRef } from 'react';
import { ActivityIndicator } from 'react-native';
@@ -32,7 +32,7 @@ type ExplorerProps = {
items: ExplorerItem[] | null;
/** Function to fetch next page of items. */
loadMore: () => void;
query: UseInfiniteQueryResult<SearchData<ExplorerItem>>;
query: UseInfiniteQueryResult<InfiniteData<SearchData<ExplorerItem>>>;
count?: number;
empty?: never;
isEmpty?: never;

View File

@@ -12,7 +12,7 @@ type Props = {
const FavoriteButton = (props: Props) => {
const [favorite, setFavorite] = useState(props.data.favorite);
const { mutate: toggleFavorite, isLoading } = useLibraryMutation('files.setFavorite', {
const { mutate: toggleFavorite, isPending } = useLibraryMutation('files.setFavorite', {
onSuccess: () => {
// TODO: Invalidate search queries
setFavorite(!favorite);
@@ -22,7 +22,7 @@ const FavoriteButton = (props: Props) => {
return (
<Pressable
disabled={isLoading}
disabled={isPending}
onPress={() => toggleFavorite({ id: props.data.id, favorite: !favorite })}
style={props.style}
>

View File

@@ -191,7 +191,7 @@ function Options({ activeJob, group, setShowChildJobs, showChildJobs }: OptionsP
const clearJob = useLibraryMutation(['jobs.clear'], {
onSuccess: () => {
rspc.queryClient.invalidateQueries(['jobs.reports']);
rspc.queryClient.invalidateQueries({ queryKey: ['jobs.reports'] });
}
});

View File

@@ -35,8 +35,8 @@ const AddTagModal = forwardRef<ModalRef, unknown>((_, ref) => {
const mutation = useLibraryMutation(['tags.assign'], {
onSuccess: () => {
// this makes sure that the tags are updated in the UI
rspc.queryClient.invalidateQueries(['tags.getForObject']);
rspc.queryClient.invalidateQueries(['search.paths']);
rspc.queryClient.invalidateQueries({ queryKey: ['tags.getForObject'] });
rspc.queryClient.invalidateQueries({ queryKey: ['search.paths'] });
modalRef.current?.dismiss();
}
});

View File

@@ -17,7 +17,7 @@ const CreateLibraryModal = forwardRef<ModalRef, unknown>((_, ref) => {
const submitPlausibleEvent = usePlausibleEvent();
const { mutate: createLibrary, isLoading: createLibLoading } = useBridgeMutation(
const { mutate: createLibrary, isPending: createLibLoading } = useBridgeMutation(
'library.create',
{
onSuccess: (lib) => {

View File

@@ -57,11 +57,11 @@ const ImportModalLibrary = forwardRef<ModalRef, unknown>((_, ref) => {
description="No cloud libraries available to join"
/>
}
keyExtractor={(item) => item.uuid}
keyExtractor={(item) => item.pub_id}
showsVerticalScrollIndicator={false}
renderItem={({ item }) => (
<CloudLibraryCard
data={item}
// data={item}
navigation={navigation}
modalRef={modalRef}
/>
@@ -93,7 +93,7 @@ const CloudLibraryCard = ({ modalRef, navigation }: Props) => {
<Button
size="sm"
variant="accent"
// disabled={joinLibrary.isLoading}
// disabled={joinLibrary.isPending}
onPress={async () => {
// const library = await joinLibrary.mutateAsync(data.uuid);
@@ -121,7 +121,7 @@ const CloudLibraryCard = ({ modalRef, navigation }: Props) => {
}}
>
<Text style={tw`text-sm font-medium text-white`}>
{/* {joinLibrary.isLoading && joinLibrary.variables === data.uuid
{/* {joinLibrary.isPending && joinLibrary.variables === data.uuid
? 'Joining...'
: 'Join'} */}
THIS FILE NEEDS TO BE UPDATED TO USE THE NEW LIBRARY SYSTEM IN THE FUTURE

View File

@@ -47,7 +47,7 @@ const ImportModal = forwardRef<ModalRef, unknown>((_, ref) => {
toast.success('Location added successfully');
},
onSettled: () => {
rspc.queryClient.invalidateQueries(['locations.list']);
rspc.queryClient.invalidateQueries({ queryKey: ['locations.list'] });
modalRef.current?.close();
}
});

View File

@@ -15,14 +15,14 @@ const DeleteLibraryModal = ({ trigger, onSubmit, libraryUuid }: Props) => {
const submitPlausibleEvent = usePlausibleEvent();
const { mutate: deleteLibrary, isLoading: deleteLibLoading } = useBridgeMutation(
const { mutate: deleteLibrary, isPending: deleteLibLoading } = useBridgeMutation(
'library.delete',
{
onMutate: () => {
console.log('Deleting library');
},
onSuccess: () => {
queryClient.invalidateQueries(['library.list']);
queryClient.invalidateQueries({ queryKey: ['library.list'] });
onSubmit?.();
submitPlausibleEvent({ event: { type: 'libraryDelete' } });
},

View File

@@ -15,7 +15,7 @@ const DeleteLocationModal = ({ trigger, onSubmit, locationId, triggerStyle }: Pr
const rspc = useRspcLibraryContext();
const submitPlausibleEvent = usePlausibleEvent();
const { mutate: deleteLoc, isLoading: deleteLocLoading } = useLibraryMutation(
const { mutate: deleteLoc, isPending: deleteLocLoading } = useLibraryMutation(
'locations.delete',
{
onSuccess: () => {
@@ -30,7 +30,7 @@ const DeleteLocationModal = ({ trigger, onSubmit, locationId, triggerStyle }: Pr
},
onSettled: () => {
modalRef.current?.close();
rspc.queryClient.invalidateQueries(['locations.list']);
rspc.queryClient.invalidateQueries({ queryKey: ['locations.list'] });
}
}
);

View File

@@ -15,11 +15,11 @@ const DeleteTagModal = ({ trigger, onSubmit, tagId, triggerStyle }: Props) => {
const rspc = useRspcLibraryContext();
const submitPlausibleEvent = usePlausibleEvent();
const { mutate: deleteTag, isLoading: deleteTagLoading } = useLibraryMutation('tags.delete', {
const { mutate: deleteTag, isPending: deleteTagLoading } = useLibraryMutation('tags.delete', {
onSuccess: () => {
submitPlausibleEvent({ event: { type: 'tagDelete' } });
onSubmit?.();
rspc.queryClient.invalidateQueries(['tags.list']);
rspc.queryClient.invalidateQueries({ queryKey: ['tags.list'] });
toast.success('Tag deleted successfully');
},
onSettled: () => {

View File

@@ -79,7 +79,7 @@ export const ActionsModal = () => {
// Open
const updateAccessTime = useLibraryMutation('files.updateAccessTime', {
onSuccess: () => {
rspc.queryClient.invalidateQueries(['search.paths']);
rspc.queryClient.invalidateQueries({ queryKey: ['search.paths'] });
}
});
const queriedFullPath = useLibraryQuery(['files.getPath', filePath?.id ?? -1], {
@@ -88,7 +88,7 @@ export const ActionsModal = () => {
const deleteFile = useLibraryMutation('files.deleteFiles', {
onSuccess: () => {
rspc.queryClient.invalidateQueries(['search.paths']);
rspc.queryClient.invalidateQueries({ queryKey: ['search.paths'] });
modalRef.current?.dismiss();
}
});

View File

@@ -25,7 +25,7 @@ const RenameModal = forwardRef<ModalRef>((_, ref) => {
const renameFile = useLibraryMutation(['files.renameFile'], {
onSuccess: () => {
modalRef.current?.dismiss();
rspc.queryClient.invalidateQueries(['search.paths']);
rspc.queryClient.invalidateQueries({ queryKey: ['search.paths'] });
},
onError: () => {
toast.error('Failed to rename object');

View File

@@ -35,7 +35,7 @@ const CreateTagModal = forwardRef<ModalRef, unknown>((_, ref) => {
setTagColor(ToastDefautlColor);
setShowPicker(false);
rspc.queryClient.invalidateQueries(['tags.list']);
rspc.queryClient.invalidateQueries({ queryKey: ['tags.list'] });
toast.success('Tag created successfully');
submitPlausibleEvent({ event: { type: 'tagCreate' } });

View File

@@ -23,7 +23,7 @@ const UpdateTagModal = forwardRef<ModalRef, Props>((props, ref) => {
const [tagColor, setTagColor] = useState(props.tag.color!);
const [showPicker, setShowPicker] = useState(false);
const { mutate: updateTag, isLoading } = useLibraryMutation('tags.update', {
const { mutate: updateTag, isPending } = useLibraryMutation('tags.update', {
onMutate: () => {
console.log('Updating tag');
},
@@ -31,7 +31,7 @@ const UpdateTagModal = forwardRef<ModalRef, Props>((props, ref) => {
// Reset form
setShowPicker(false);
queryClient.invalidateQueries(['tags.list']);
queryClient.invalidateQueries({ queryKey: ['tags.list'] });
props.onSubmit?.();
},
@@ -85,7 +85,7 @@ const UpdateTagModal = forwardRef<ModalRef, Props>((props, ref) => {
variant="accent"
onPress={() => updateTag({ id: props.tag.id, color: tagColor, name: tagName })}
style={tw`mt-6`}
disabled={tagName.length === 0 || tagColor.length === 0 || isLoading}
disabled={tagName.length === 0 || tagColor.length === 0 || isPending}
>
<Text style={tw`text-sm font-medium text-white`}>Save</Text>
</Button>

View File

@@ -1,5 +1,5 @@
import * as RNFS from '@dr.pogodin/react-native-fs';
import { AlphaRSPCError } from '@oscartbeaumont-sd/rspc-client/src/v2';
import { RSPCError } from '@spacedrive/rspc-client';
import { UseQueryResult } from '@tanstack/react-query';
import React, { useEffect, useState } from 'react';
import { Platform, Text, View } from 'react-native';
@@ -17,7 +17,7 @@ import StatCard from './StatCard';
interface Props {
node: NodeState | undefined;
stats: UseQueryResult<StatisticsResponse, AlphaRSPCError>;
stats: UseQueryResult<StatisticsResponse, RSPCError>;
}
export function hardwareModelToIcon(hardwareModel: HardwareModel) {

View File

@@ -1,5 +1,5 @@
import * as RNFS from '@dr.pogodin/react-native-fs';
import { AlphaRSPCError } from '@oscartbeaumont-sd/rspc-client/src/v2';
import { RSPCError } from '@spacedrive/rspc-client';
import { UseQueryResult } from '@tanstack/react-query';
import { useEffect, useState } from 'react';
import { Platform, Text, View } from 'react-native';
@@ -47,7 +47,7 @@ const StatItem = ({ title, bytes, isLoading, style }: StatItemProps) => {
};
interface Props {
stats: UseQueryResult<StatisticsResponse, AlphaRSPCError>;
stats: UseQueryResult<StatisticsResponse, RSPCError>;
}
const OverviewStats = ({ stats }: Props) => {

View File

@@ -71,7 +71,7 @@ const SavedSearch = ({ search }: Props) => {
const dataForSearch = useSavedSearch(search);
const rspc = useRspcLibraryContext();
const deleteSearch = useLibraryMutation('search.saved.delete', {
onSuccess: () => rspc.queryClient.invalidateQueries(['search.saved.list'])
onSuccess: () => rspc.queryClient.invalidateQueries({ queryKey: ['search.saved.list'] })
});
return (
<MotiPressable

View File

@@ -1,3 +1,4 @@
import { keepPreviousData } from '@tanstack/react-query';
import { useEffect, useMemo } from 'react';
import { SearchFilterArgs, useLibraryQuery } from '@sd/client';
import { Filters, getSearchStore, SearchFilters, useSearchStore } from '~/stores/searchStore';
@@ -14,7 +15,7 @@ export function useFiltersSearch(search: string) {
const searchStore = useSearchStore();
const locations = useLibraryQuery(['locations.list'], {
keepPreviousData: true
placeholderData: keepPreviousData
});
const filterFactory = (key: SearchFilters, value: Filters[keyof Filters]) => {

View File

@@ -1,4 +1,5 @@
import { IconTypes } from '@sd/assets/util';
import { keepPreviousData } from '@tanstack/react-query';
import { useCallback, useMemo } from 'react';
import { SavedSearch, SearchFilterArgs, Tag, useLibraryQuery } from '@sd/client';
import { kinds } from '~/components/search/filters/Kind';
@@ -44,11 +45,11 @@ export function useSavedSearch(search: SavedSearch) {
};
const locations = useLibraryQuery(['locations.list'], {
keepPreviousData: true,
placeholderData: keepPreviousData,
enabled: filterKeys.includes('locations')
});
const tags = useLibraryQuery(['tags.list'], {
keepPreviousData: true,
placeholderData: keepPreviousData,
enabled: filterKeys.includes('tags')
});

View File

@@ -14,12 +14,10 @@ import PrivacySettingsScreen from '~/screens/settings/client/PrivacySettings';
import AboutScreen from '~/screens/settings/info/About';
import DebugScreen from '~/screens/settings/info/Debug';
import SupportScreen from '~/screens/settings/info/Support';
import CloudSettings from '~/screens/settings/library/CloudSettings/CloudSettings';
import EditLocationSettingsScreen from '~/screens/settings/library/EditLocationSettings';
import LibraryGeneralSettingsScreen from '~/screens/settings/library/LibraryGeneralSettings';
import LocationSettingsScreen from '~/screens/settings/library/LocationSettings';
import NodesSettingsScreen from '~/screens/settings/library/NodesSettings';
import SyncSettingsScreen from '~/screens/settings/library/SyncSettings';
import TagsSettingsScreen from '~/screens/settings/library/TagsSettings';
import SettingsScreen from '~/screens/settings/Settings';
@@ -106,16 +104,6 @@ export default function SettingsStack() {
component={TagsSettingsScreen}
options={{ header: () => <Header navBack title="Tags" /> }}
/>
<Stack.Screen
name="SyncSettings"
component={SyncSettingsScreen}
options={{ header: () => <Header navBack title="Sync" /> }}
/>
<Stack.Screen
name="CloudSettings"
component={CloudSettings}
options={{ header: () => <Header navBack title="Cloud" /> }}
/>
{/* <Stack.Screen
name="KeysSettings"
component={KeysSettingsScreen}

View File

@@ -52,10 +52,8 @@ const BackfillWaiting = () => {
const syncEnabled = useLibraryQuery(['sync.enabled']);
useEffect(() => {
(async () => {
await enableSync.mutateAsync(null);
})();
}, []);
enableSync.mutate(null);
}, [enableSync]);
return (
<View style={tw`flex-1 items-center justify-center bg-black`}>

View File

@@ -62,10 +62,13 @@ export default function LocationScreen({ navigation, route }: BrowseStackScreenP
filters: [...defaultFilters, ...layoutFilter].filter(Boolean),
take: 30
},
order,
onSuccess: () => getExplorerStore().resetNewThumbnails()
order
});
useEffect(() => {
getExplorerStore().resetNewThumbnails();
}, [path]);
useEffect(() => {
// Set screen title to location.
if (path && path !== '') {

View File

@@ -1,6 +1,6 @@
import { useIsFocused } from '@react-navigation/native';
import { ArrowLeft, DotsThree, FunnelSimple } from 'phosphor-react-native';
import { Suspense, useDeferredValue, useMemo, useState } from 'react';
import { Suspense, useDeferredValue, useEffect, useMemo, useState } from 'react';
import { ActivityIndicator, Platform, Pressable, TextInput, View } from 'react-native';
import { useSafeAreaInsets } from 'react-native-safe-area-context';
import { ObjectKindEnum, useLibraryQuery, usePathsExplorerQuery } from '@sd/client';
@@ -41,10 +41,11 @@ const SearchScreen = ({ navigation }: SearchStackScreenProps<'Search'>) => {
filters: [...layoutSearchFilter, ...searchStore.mergedFilters]
},
enabled: isFocused && searchStore.mergedFilters.length >= 1, // only fetch when screen is focused & filters are applied
suspense: true,
onSuccess: () => getExplorerStore().resetNewThumbnails()
suspense: true
});
useEffect(() => getExplorerStore().resetNewThumbnails(), [objects]);
useFiltersSearch(deferredSearch);
const appliedFiltersLength = Object.keys(searchStore.appliedFilters).length;

View File

@@ -1,6 +1,6 @@
import { AlphaRSPCError } from '@oscartbeaumont-sd/rspc-client/src/v2';
import AsyncStorage from '@react-native-async-storage/async-storage';
import { useNavigation } from '@react-navigation/native';
import { RSPCError } from '@spacedrive/rspc-client';
import { UseMutationResult } from '@tanstack/react-query';
import { useState } from 'react';
import { Controller } from 'react-hook-form';
@@ -116,7 +116,7 @@ async function signInClicked(
email: string,
password: string,
navigator: SettingsStackScreenProps<'AccountProfile'>['navigation'],
cloudBootstrap: UseMutationResult<null, AlphaRSPCError, [string, string], unknown>, // Cloud bootstrap mutation
cloudBootstrap: UseMutationResult<null, RSPCError, [string, string], unknown>, // Cloud bootstrap mutation
updateUserStore: ReturnType<typeof getUserStore>
) {
try {

View File

@@ -1,130 +0,0 @@
import { useMemo } from 'react';
import { ActivityIndicator, FlatList, Text, View } from 'react-native';
import { useLibraryContext, useLibraryMutation, useLibraryQuery } from '@sd/client';
import { Icon } from '~/components/icons/Icon';
import Card from '~/components/layout/Card';
import Empty from '~/components/layout/Empty';
import ScreenContainer from '~/components/layout/ScreenContainer';
import VirtualizedListWrapper from '~/components/layout/VirtualizedListWrapper';
import { Button } from '~/components/primitive/Button';
import { Divider } from '~/components/primitive/Divider';
import { styled, tw, twStyle } from '~/lib/tailwind';
import { useAuthStateSnapshot } from '~/stores/auth';
import Instance from './Instance';
import Library from './Library';
import Login from './Login';
import ThisInstance from './ThisInstance';
export const InfoBox = styled(View, 'rounded-md border gap-1 border-app bg-transparent p-2');
const CloudSettings = () => {
return (
<ScreenContainer scrollview={false} style={tw`gap-0 px-6 py-0`}>
<AuthSensitiveChild />
</ScreenContainer>
);
};
const AuthSensitiveChild = () => {
const authState = useAuthStateSnapshot();
if (authState.status === 'loggedIn') return <Authenticated />;
if (authState.status === 'notLoggedIn' || authState.status === 'loggingIn') return <Login />;
return null;
};
const Authenticated = () => {
const { library } = useLibraryContext();
const cloudLibrary = useLibraryQuery(['cloud.library.get'], { retry: false });
const createLibrary = useLibraryMutation(['cloud.library.create']);
const cloudInstances = useMemo(
() =>
cloudLibrary.data?.instances.filter(
(instance) => instance.uuid !== library.instance_id
),
[cloudLibrary.data, library.instance_id]
);
if (cloudLibrary.isLoading) {
return (
<View style={tw`flex-1 items-center justify-center`}>
<ActivityIndicator size="small" />
</View>
);
}
return (
<ScreenContainer
scrollview={Boolean(cloudLibrary.data)}
style={tw`gap-0`}
tabHeight={false}
>
{cloudLibrary.data ? (
<View style={tw`flex-col items-start gap-5`}>
<Library cloudLibrary={cloudLibrary.data} />
<ThisInstance cloudLibrary={cloudLibrary.data} />
<Card style={tw`w-full`}>
<View style={tw`flex-row items-center gap-2`}>
<View
style={tw`self-start rounded border border-app-lightborder bg-app-highlight px-1.5 py-[2px]`}
>
<Text style={tw`text-xs font-semibold text-ink`}>
{cloudInstances?.length}
</Text>
</View>
<Text style={tw`font-semibold text-ink`}>Instances</Text>
</View>
<Divider style={tw`mb-4 mt-2`} />
<VirtualizedListWrapper
scrollEnabled={false}
contentContainerStyle={tw`flex-1`}
horizontal
>
<FlatList
data={cloudInstances}
scrollEnabled={false}
ListEmptyComponent={
<Empty textStyle={tw`my-0`} description="No instances found" />
}
contentContainerStyle={twStyle(
cloudInstances?.length === 0 && 'flex-row'
)}
showsHorizontalScrollIndicator={false}
ItemSeparatorComponent={() => <View style={tw`h-2`} />}
renderItem={({ item }) => <Instance data={item} />}
keyExtractor={(item) => item.id}
numColumns={1}
/>
</VirtualizedListWrapper>
</Card>
</View>
) : (
<View style={tw`flex-1 justify-center`}>
<Card style={tw`relative p-6`}>
<Icon style={tw`mx-auto mb-2`} name="CloudSync" size={64} />
<Text style={tw`mx-auto text-center text-sm text-ink`}>
Uploading your library to the cloud will allow you to access your
library from other devices using your account & importing.
</Text>
<Button
variant={'accent'}
style={tw`mx-auto mt-4 max-w-[82%]`}
disabled={createLibrary.isLoading}
onPress={async () => await createLibrary.mutateAsync(null)}
>
{createLibrary.isLoading ? (
<Text style={tw`text-ink`}>Connecting library...</Text>
) : (
<Text style={tw`font-medium text-ink`}>Connect library</Text>
)}
</Button>
</Card>
</View>
)}
</ScreenContainer>
);
};
export default CloudSettings;

View File

@@ -1,64 +0,0 @@
import { Text, View } from 'react-native';
import { CloudInstance, HardwareModel } from '@sd/client';
import { Icon } from '~/components/icons/Icon';
import { hardwareModelToIcon } from '~/components/overview/Devices';
import { tw } from '~/lib/tailwind';
import { InfoBox } from './CloudSettings';
interface Props {
data: CloudInstance;
}
const Instance = ({ data }: Props) => {
return (
<InfoBox style={tw`w-full gap-2`}>
<View>
<View style={tw`mx-auto my-2`}>
<Icon
name={
hardwareModelToIcon(data.metadata.device_model as HardwareModel) as any
}
size={60}
/>
</View>
<Text
numberOfLines={1}
style={tw`mb-3 px-1 text-center text-sm font-medium text-ink`}
>
{data.metadata.name}
</Text>
<InfoBox>
<View style={tw`flex-row items-center gap-1`}>
<Text style={tw`text-sm font-medium text-ink`}>Id:</Text>
<Text numberOfLines={1} style={tw`max-w-[250px] text-ink-dull`}>
{data.id}
</Text>
</View>
</InfoBox>
</View>
<View>
<InfoBox>
<View style={tw`flex-row items-center gap-1`}>
<Text style={tw`text-sm font-medium text-ink`}>UUID:</Text>
<Text numberOfLines={1} style={tw`max-w-[85%] text-ink-dull`}>
{data.uuid}
</Text>
</View>
</InfoBox>
</View>
<View>
<InfoBox>
<View style={tw`flex-row items-center gap-1`}>
<Text style={tw`text-sm font-medium text-ink`}>Public key:</Text>
<Text numberOfLines={1} style={tw`max-w-3/4 text-ink-dull`}>
{data.identity}
</Text>
</View>
</InfoBox>
</View>
</InfoBox>
);
};
export default Instance;

View File

@@ -1,66 +0,0 @@
import { CheckCircle, XCircle } from 'phosphor-react-native';
import { useMemo } from 'react';
import { Text, View } from 'react-native';
import { CloudLibrary, useLibraryContext, useLibraryMutation } from '@sd/client';
import Card from '~/components/layout/Card';
import { Button } from '~/components/primitive/Button';
import { Divider } from '~/components/primitive/Divider';
import { SettingsTitle } from '~/components/settings/SettingsContainer';
import { tw } from '~/lib/tailwind';
import { logout, useAuthStateSnapshot } from '~/stores/auth';
import { InfoBox } from './CloudSettings';
interface LibraryProps {
cloudLibrary?: CloudLibrary;
}
const Library = ({ cloudLibrary }: LibraryProps) => {
const authState = useAuthStateSnapshot();
const { library } = useLibraryContext();
const syncLibrary = useLibraryMutation(['cloud.library.sync']);
const thisInstance = useMemo(
() => cloudLibrary?.instances.find((instance) => instance.uuid === library.instance_id),
[cloudLibrary, library.instance_id]
);
return (
<Card style={tw`w-full`}>
<View style={tw`flex-row items-center justify-between`}>
<Text style={tw`font-medium text-ink`}>Library</Text>
{authState.status === 'loggedIn' && (
<Button variant="gray" size="sm" onPress={logout}>
<Text style={tw`text-xs font-semibold text-ink`}>Logout</Text>
</Button>
)}
</View>
<Divider style={tw`mb-4 mt-2`} />
<SettingsTitle style={tw`mb-2`}>Name</SettingsTitle>
<InfoBox>
<Text style={tw`text-ink`}>{cloudLibrary?.name}</Text>
</InfoBox>
<Button
disabled={syncLibrary.isLoading || thisInstance !== undefined}
variant="gray"
onPress={() => syncLibrary.mutate(null)}
style={tw`mt-2 flex-row gap-1 py-2`}
>
{thisInstance ? (
<CheckCircle size={16} weight="fill" color={tw.color('green-400')} />
) : (
<XCircle
style={tw`rounded-full`}
size={16}
weight="fill"
color={tw.color('red-500')}
/>
)}
<Text style={tw`text-sm font-semibold text-ink`}>
{thisInstance !== undefined ? 'Library synced' : 'Library not synced'}
</Text>
</Button>
</Card>
);
};
export default Library;

View File

@@ -1,45 +0,0 @@
import { Text, View } from 'react-native';
import { Icon } from '~/components/icons/Icon';
import Card from '~/components/layout/Card';
import { Button } from '~/components/primitive/Button';
import { tw } from '~/lib/tailwind';
import { cancel, login, useAuthStateSnapshot } from '~/stores/auth';
const Login = () => {
const authState = useAuthStateSnapshot();
const buttonText = {
notLoggedIn: 'Login',
loggingIn: 'Cancel'
};
return (
<View style={tw`flex-1 flex-col items-center justify-center gap-2`}>
<Card style={tw`w-full items-center justify-center gap-2 p-6`}>
<View style={tw`flex-col items-center gap-2`}>
<Icon name="CloudSync" size={64} />
<Text style={tw`text-center text-sm text-ink`}>
Cloud Sync will upload your library to the cloud so you can access your
library from other devices by importing it from the cloud.
</Text>
</View>
{(authState.status === 'notLoggedIn' || authState.status === 'loggingIn') && (
<Button
variant="accent"
style={tw`mx-auto mt-4 max-w-[50%]`}
onPress={async (e) => {
e.preventDefault();
if (authState.status === 'loggingIn') {
await cancel();
} else {
await login();
}
}}
>
<Text style={tw`font-medium text-ink`}>{buttonText[authState.status]}</Text>
</Button>
)}
</Card>
</View>
);
};
export default Login;

View File

@@ -1,76 +0,0 @@
import { useMemo } from 'react';
import { Text, View } from 'react-native';
import { CloudLibrary, HardwareModel, useLibraryContext } from '@sd/client';
import { Icon } from '~/components/icons/Icon';
import Card from '~/components/layout/Card';
import { hardwareModelToIcon } from '~/components/overview/Devices';
import { Divider } from '~/components/primitive/Divider';
import { tw } from '~/lib/tailwind';
import { InfoBox } from './CloudSettings';
interface ThisInstanceProps {
cloudLibrary?: CloudLibrary;
}
const ThisInstance = ({ cloudLibrary }: ThisInstanceProps) => {
const { library } = useLibraryContext();
const thisInstance = useMemo(
() => cloudLibrary?.instances.find((instance) => instance.uuid === library.instance_id),
[cloudLibrary, library.instance_id]
);
if (!thisInstance) return null;
return (
<Card style={tw`w-full gap-2`}>
<View>
<Text style={tw`mb-1 font-semibold text-ink`}>This Instance</Text>
<Divider />
</View>
<View style={tw`mx-auto my-2 items-center`}>
<Icon
name={
hardwareModelToIcon(
thisInstance.metadata.device_model as HardwareModel
) as any
}
size={60}
/>
<Text numberOfLines={1} style={tw`px-1 font-semibold text-ink`}>
{thisInstance.metadata.name}
</Text>
</View>
<View>
<InfoBox>
<View style={tw`flex-row items-center gap-1`}>
<Text style={tw`text-sm font-medium text-ink`}>Id:</Text>
<Text style={tw`max-w-[250px] text-ink-dull`}>{thisInstance.id}</Text>
</View>
</InfoBox>
</View>
<View>
<InfoBox>
<View style={tw`flex-row items-center gap-1`}>
<Text style={tw`text-sm font-medium text-ink`}>UUID:</Text>
<Text numberOfLines={1} style={tw`max-w-[85%] text-ink-dull`}>
{thisInstance.uuid}
</Text>
</View>
</InfoBox>
</View>
<View>
<InfoBox>
<View style={tw`flex-row items-center gap-1`}>
<Text style={tw`text-sm font-medium text-ink`}>Public Key:</Text>
<Text numberOfLines={1} style={tw`max-w-3/4 text-ink-dull`}>
{thisInstance.identity}
</Text>
</View>
</InfoBox>
</View>
</Card>
);
};
export default ThisInstance;

View File

@@ -40,7 +40,7 @@ const EditLocationSettingsScreen = ({
onError: (e) => console.log({ e }),
onSuccess: () => {
form.reset(form.getValues());
queryClient.invalidateQueries(['locations.list']);
queryClient.invalidateQueries({ queryKey: ['locations.list'] });
toast.success('Location updated!');
// TODO: navigate back & reset input focus!
}
@@ -90,19 +90,19 @@ const EditLocationSettingsScreen = ({
});
}, [form, navigation, onSubmit]);
useLibraryQuery(['locations.getWithRules', id], {
onSuccess: (data) => {
if (data && !form.formState.isDirty)
form.reset({
displayName: data.name,
localPath: data.path,
indexer_rules_ids: data.indexer_rules.map((i) => i.id.toString()),
generatePreviewMedia: data.generate_preview_media,
syncPreviewMedia: data.sync_preview_media,
hidden: data.hidden
});
}
});
const query = useLibraryQuery(['locations.getWithRules', id]);
useEffect(() => {
const data = query.data;
if (data && !form.formState.isDirty)
form.reset({
displayName: data.name,
localPath: data.path,
indexer_rules_ids: data.indexer_rules.map((i) => i.id.toString()),
generatePreviewMedia: data.generate_preview_media,
syncPreviewMedia: data.sync_preview_media,
hidden: data.hidden
});
}, [form, query.data]);
const fullRescan = useLibraryMutation('locations.fullRescan');

View File

@@ -1,158 +0,0 @@
import { inferSubscriptionResult } from '@oscartbeaumont-sd/rspc-client';
import { useIsFocused } from '@react-navigation/native';
import { MotiView } from 'moti';
import { Circle } from 'phosphor-react-native';
import React, { useEffect, useRef, useState } from 'react';
import { Text, View } from 'react-native';
import {
Procedures,
useLibraryMutation,
useLibraryQuery,
useLibrarySubscription
} from '@sd/client';
import { Icon } from '~/components/icons/Icon';
import Card from '~/components/layout/Card';
import { ModalRef } from '~/components/layout/Modal';
import ScreenContainer from '~/components/layout/ScreenContainer';
import CloudModal from '~/components/modal/cloud/CloudModal';
import { Button } from '~/components/primitive/Button';
import { tw } from '~/lib/tailwind';
import { SettingsStackScreenProps } from '~/navigation/tabs/SettingsStack';
const SyncSettingsScreen = ({ navigation }: SettingsStackScreenProps<'SyncSettings'>) => {
const syncEnabled = useLibraryQuery(['sync.enabled']);
const [data, setData] = useState<inferSubscriptionResult<Procedures, 'library.actors'>>({});
const modalRef = useRef<ModalRef>(null);
const [startBackfill, setStart] = useState(false);
const pageFocused = useIsFocused();
const [showCloudModal, setShowCloudModal] = useState(false);
useLibrarySubscription(['library.actors'], { onData: setData });
useEffect(() => {
if (startBackfill === true) {
navigation.navigate('BackfillWaitingStack', {
screen: 'BackfillWaiting'
});
setTimeout(() => setShowCloudModal(true), 1000);
}
}, [startBackfill, navigation]);
useEffect(() => {
if (pageFocused && showCloudModal) modalRef.current?.present();
return () => {
if (showCloudModal) setShowCloudModal(false);
};
}, [pageFocused, showCloudModal]);
return (
<ScreenContainer scrollview={false} style={tw`gap-0 px-6`}>
{syncEnabled.data === false ? (
<View style={tw`flex-1 justify-center`}>
<Card style={tw`relative flex-col items-center gap-5 p-6`}>
<View style={tw`flex-col items-center gap-2`}>
<Icon name="Sync" size={72} style={tw`mb-2`} />
<Text style={tw`text-center leading-5 text-ink`}>
With Sync, you can share your library with other devices using P2P
technology.
</Text>
<Text style={tw`text-center leading-5 text-ink`}>
Additionally, allowing you to enable Cloud services to upload your
library to the cloud, making it accessible on any of your devices.
</Text>
</View>
<Button
variant={'accent'}
style={tw`mx-auto max-w-[82%]`}
onPress={() => setStart(true)}
>
<Text style={tw`font-medium text-white`}>Start</Text>
</Button>
</Card>
</View>
) : (
<View style={tw`flex-row flex-wrap gap-2`}>
{Object.keys(data).map((key) => {
return (
<Card style={tw`w-[48%]`} key={key}>
<OnlineIndicator online={data[key] ?? false} />
<Text
key={key}
style={tw`mb-3 mt-1 flex-col items-center justify-center text-left text-xs text-white`}
>
{key}
</Text>
{data[key] ? <StopButton name={key} /> : <StartButton name={key} />}
</Card>
);
})}
</View>
)}
<CloudModal ref={modalRef} />
</ScreenContainer>
);
};
export default SyncSettingsScreen;
function OnlineIndicator({ online }: { online: boolean }) {
const size = 6;
return (
<View
style={tw`mb-1 h-6 w-6 items-center justify-center rounded-full border border-app-inputborder bg-app-input p-2`}
>
{online ? (
<View style={tw`relative items-center justify-center`}>
<MotiView
from={{ scale: 0, opacity: 1 }}
animate={{ scale: 3, opacity: 0 }}
transition={{
type: 'timing',
duration: 1500,
loop: true,
repeatReverse: false,
delay: 1000
}}
style={tw`absolute z-10 h-2 w-2 items-center justify-center rounded-full bg-green-500`}
/>
<View style={tw`h-2 w-2 rounded-full bg-green-500`} />
</View>
) : (
<Circle size={size} color={tw.color('red-400')} weight="fill" />
)}
</View>
);
}
function StartButton({ name }: { name: string }) {
const startActor = useLibraryMutation(['library.startActor']);
return (
<Button
variant="accent"
size="sm"
disabled={startActor.isLoading}
onPress={() => startActor.mutate(name)}
>
<Text style={tw`text-xs font-medium text-ink`}>
{startActor.isLoading ? 'Starting' : 'Start'}
</Text>
</Button>
);
}
function StopButton({ name }: { name: string }) {
const stopActor = useLibraryMutation(['library.stopActor']);
return (
<Button
variant="accent"
size="sm"
disabled={stopActor.isLoading}
onPress={() => stopActor.mutate(name)}
>
<Text style={tw`text-xs font-medium text-ink`}>
{stopActor.isLoading ? 'Stopping' : 'Stop'}
</Text>
</Button>
);
}

View File

@@ -1,4 +1,4 @@
import { RSPCError } from '@oscartbeaumont-sd/rspc-client';
import { RSPCError } from '@spacedrive/rspc-client';
import { Linking } from 'react-native';
import { createMutable } from 'solid-js/store';
import { nonLibraryClient, useSolidStore } from '@sd/client';

View File

@@ -16,12 +16,13 @@ default = []
sd-core = { path = "../../core", features = ["ffmpeg", "heif"] }
# Workspace dependencies
axum = { workspace = true, features = ["headers"] }
http = { workspace = true }
rspc = { workspace = true, features = ["axum"] }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["rt-multi-thread", "signal", "sync"] }
tracing = { workspace = true }
axum = { workspace = true }
axum-extra = { workspace = true, features = ["typed-header"] }
http = { workspace = true }
rspc = { workspace = true, features = ["axum"] }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["rt-multi-thread", "signal", "sync"] }
tracing = { workspace = true }
# Specific Desktop dependencies
include_dir = "0.7.3"

View File

@@ -1,12 +1,15 @@
use std::{collections::HashMap, env, net::SocketAddr, path::Path};
use axum::{
body::Body,
extract::{FromRequestParts, State},
headers::{authorization::Basic, Authorization},
http::Request,
middleware::Next,
response::{IntoResponse, Response},
routing::get,
};
use axum_extra::{
headers::{authorization::Basic, Authorization},
TypedHeader,
};
use sd_core::{custom_uri, Node};
@@ -24,11 +27,7 @@ pub struct AppState {
auth: HashMap<String, SecStr>,
}
async fn basic_auth<B>(
State(state): State<AppState>,
request: Request<B>,
next: Next<B>,
) -> Response {
async fn basic_auth(State(state): State<AppState>, request: Request<Body>, next: Next) -> Response {
let request = if !state.auth.is_empty() {
let (mut parts, body) = request.into_parts();
@@ -163,10 +162,7 @@ async fn main() {
.route(
"/",
get(|| async move {
use axum::{
body::{self, Full},
response::Response,
};
use axum::{body::Body, response::Response};
use http::{header, HeaderValue, StatusCode};
match ASSETS_DIR.get_file("index.html") {
@@ -176,11 +172,11 @@ async fn main() {
header::CONTENT_TYPE,
HeaderValue::from_str("text/html").unwrap(),
)
.body(body::boxed(Full::from(file.contents())))
.body(Body::from(file.contents()))
.unwrap(),
None => Response::builder()
.status(StatusCode::NOT_FOUND)
.body(body::boxed(axum::body::Empty::new()))
.body(Body::empty())
.unwrap(),
}
}),
@@ -189,10 +185,7 @@ async fn main() {
"/*id",
get(
|axum::extract::Path(path): axum::extract::Path<String>| async move {
use axum::{
body::{self, Empty, Full},
response::Response,
};
use axum::{body::Body, response::Response};
use http::{header, HeaderValue, StatusCode};
let path = path.trim_start_matches('/');
@@ -206,7 +199,7 @@ async fn main() {
)
.unwrap(),
)
.body(body::boxed(Full::from(file.contents())))
.body(Body::from(file.contents()))
.unwrap(),
None => match ASSETS_DIR.get_file("index.html") {
Some(file) => Response::builder()
@@ -215,11 +208,11 @@ async fn main() {
header::CONTENT_TYPE,
HeaderValue::from_str("text/html").unwrap(),
)
.body(body::boxed(Full::from(file.contents())))
.body(Body::from(file.contents()))
.unwrap(),
None => Response::builder()
.status(StatusCode::NOT_FOUND)
.body(body::boxed(Empty::new()))
.body(Body::empty())
.unwrap(),
},
}
@@ -242,8 +235,7 @@ async fn main() {
let mut addr = "[::]:8080".parse::<SocketAddr>().unwrap(); // This listens on IPv6 and IPv4
addr.set_port(port);
info!("Listening on http://localhost:{}", port);
axum::Server::bind(&addr)
.serve(app.into_make_service())
axum::serve(tokio::net::TcpListener::bind(addr).await.unwrap(), app)
.with_graceful_shutdown(signal)
.await
.expect("Error with HTTP server!");
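
The headless server gets the same treatment: hyper 0.14's `Full`/`Empty` combinators and `body::boxed` are replaced by the unified `axum::body::Body`, and `axum::Server::bind(&addr).serve(...)` becomes `axum::serve` over a Tokio `TcpListener`, with graceful shutdown attached to the serve future. A condensed, self-contained sketch (the route body and shutdown trigger are illustrative):

```rust
use axum::{
    body::Body,
    http::{header, StatusCode},
    response::Response,
    routing::get,
    Router,
};
use tokio::{net::TcpListener, signal};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let app = Router::new().route(
        "/",
        get(|| async {
            // With hyper 1.x there is one body type: `Body::from` replaces
            // `body::boxed(Full::from(..))`, `Body::empty()` replaces `Empty`.
            Response::builder()
                .status(StatusCode::OK)
                .header(header::CONTENT_TYPE, "text/html")
                .body(Body::from("<h1>hello</h1>"))
                .unwrap()
        }),
    );

    let listener = TcpListener::bind("[::]:8080").await?;
    axum::serve(listener, app)
        .with_graceful_shutdown(async {
            // Stand-in for the diff's `signal` future.
            signal::ctrl_c().await.ok();
        })
        .await
}
```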

View File

@@ -30,6 +30,6 @@
"storybook": "^8.0.1",
"tailwindcss": "^3.4.10",
"typescript": "^5.6.2",
"vite": "^5.2.0"
"vite": "^5.4.9"
}
}

View File

@@ -17,10 +17,10 @@
"lint": "eslint src --cache"
},
"dependencies": {
"@oscartbeaumont-sd/rspc-client": "github:spacedriveapp/rspc#path:packages/client&bc882f4724",
"@spacedrive/rspc-client": "github:spacedriveapp/rspc#path:packages/client&6a77167495",
"@sd/client": "workspace:*",
"@sd/interface": "workspace:*",
"@tanstack/react-query": "^4.36.1",
"@tanstack/react-query": "^5.59",
"html-to-image": "^1.11.11",
"html2canvas": "^1.4.1",
"react": "^18.2.0",
@@ -41,7 +41,7 @@
"rollup-plugin-visualizer": "^5.12.0",
"start-server-and-test": "^2.0.3",
"typescript": "^5.6.2",
"vite": "^5.2.0",
"vite-tsconfig-paths": "^4.3.2"
"vite": "^5.4.9",
"vite-tsconfig-paths": "^5.0.1"
}
}

View File

@@ -1,4 +1,4 @@
import { wsBatchLink } from '@oscartbeaumont-sd/rspc-client/src/v2';
import { wsBatchLink } from '@spacedrive/rspc-client';
globalThis.isDev = import.meta.env.DEV;
globalThis.rspcLinks = [

View File

@@ -68,7 +68,7 @@ rmp-serde = { workspace = true }
rmpv = { workspace = true }
rspc = { workspace = true, features = ["alpha", "axum", "chrono", "unstable", "uuid"] }
sd-cloud-schema = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde = { workspace = true, features = ["derive", "rc"] }
serde_json = { workspace = true }
specta = { workspace = true }
strum = { workspace = true, features = ["derive"] }
@@ -88,15 +88,16 @@ ctor = "0.2.8"
directories = "5.0"
flate2 = "1.0"
hostname = "0.4.0"
http-body = "0.4.6" # Update blocked by http
http-body = "1.0"
http-range = "0.1.5"
int-enum = "0.5" # Update blocked due to API breaking changes
hyper-util = { version = "0.1.9", features = ["tokio"] }
int-enum = "0.5" # Update blocked due to API breaking changes
mini-moka = "0.10.3"
serde-hashkey = "0.4.5"
serde_repr = "0.1.19"
serde_with = "3.8"
slotmap = "1.0"
sysinfo = "0.29.11" # Update blocked due to API breaking changes
sysinfo = "0.29.11" # Update blocked due to API breaking changes
tar = "0.4.41"
tower-service = "0.3.2"
tracing-appender = "0.2.3"

View File

@@ -39,7 +39,7 @@ zeroize = { workspace = true }
# External dependencies
anyhow = "1.0.86"
dashmap = "6.1.0"
iroh-net = { version = "0.26", features = ["discovery-local-network", "iroh-relay"] }
iroh-net = { version = "0.27", features = ["discovery-local-network", "iroh-relay"] }
paste = "=1.0.15"
quic-rpc = { version = "0.12.1", features = ["quinn-transport"] }
quinn = { package = "iroh-quinn", version = "0.11" }
@@ -47,7 +47,7 @@ quinn = { package = "iroh-quinn", version = "0.11" }
reqwest = { version = "0.12", features = ["json", "native-tls-vendored", "stream"] }
reqwest-middleware = { version = "0.3", features = ["json"] }
reqwest-retry = "0.6"
rustls = { version = "=0.23.13", default-features = false, features = ["brotli", "ring", "std"] }
rustls = { version = "=0.23.15", default-features = false, features = ["brotli", "ring", "std"] }
rustls-platform-verifier = "0.3.3"

View File

@@ -2,11 +2,7 @@ use crate::p2p::{NotifyUser, UserResponse};
use sd_cloud_schema::{Client, Service, ServicesALPN};
use std::{
net::SocketAddr,
sync::{atomic::AtomicBool, Arc},
time::Duration,
};
use std::{net::SocketAddr, sync::Arc, time::Duration};
use futures::Stream;
use iroh_net::relay::RelayUrl;
@@ -15,7 +11,7 @@ use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Endpoint};
use reqwest::{IntoUrl, Url};
use reqwest_middleware::{reqwest, ClientBuilder, ClientWithMiddleware};
use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware};
use tokio::sync::RwLock;
use tokio::sync::{Mutex, RwLock};
use tracing::warn;
use super::{
@@ -53,7 +49,7 @@ pub struct CloudServices {
notify_user_rx: flume::Receiver<NotifyUser>,
user_response_tx: flume::Sender<UserResponse>,
pub(crate) user_response_rx: flume::Receiver<UserResponse>,
pub has_bootstrapped: Arc<AtomicBool>,
pub has_bootstrapped: Arc<Mutex<bool>>,
}
impl CloudServices {
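
One subtler change in this hunk: `has_bootstrapped` switches from `Arc<AtomicBool>` to `Arc<tokio::sync::Mutex<bool>>`. The diff doesn't say why, but a plausible reading — stated here as an assumption — is that an async mutex guard can be held across `.await` points, serializing concurrent bootstrap attempts in a way a relaxed atomic load/store pair cannot:

```rust
use std::sync::Arc;
use tokio::sync::Mutex;

// Hypothetical sketch of the check-then-act pattern an async Mutex enables.
// The guard is held across the await, so concurrent callers queue up and
// the bootstrap body runs at most once.
async fn ensure_bootstrapped(has_bootstrapped: &Arc<Mutex<bool>>) {
    let mut done = has_bootstrapped.lock().await;
    if !*done {
        bootstrap().await; // stand-in for the real cloud bootstrap routine
        *done = true;
    }
    // Guard drops here; later callers observe `true` and skip the work.
}

async fn bootstrap() { /* elided */ }
```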

View File

@@ -123,6 +123,8 @@ pub enum Error {
EndUpdatePushSyncMessages(io::Error),
#[error("Unexpected end of stream while encrypting sync messages")]
UnexpectedEndOfStream,
#[error("Failed to create directory to store timestamp keeper files")]
FailedToCreateTimestampKeepersDirectory(io::Error),
#[error("Failed to read last timestamp keeper for pulling sync messages: {0}")]
FailedToReadLastTimestampKeeper(io::Error),
#[error("Failed to handle last timestamp keeper serialization: {0}")]

View File

@@ -9,20 +9,20 @@ use sd_cloud_schema::{
};
use sd_crypto::{CryptoRng, SeedableRng};
use std::sync::Arc;
use std::{sync::Arc, time::Duration};
use iroh_net::{
discovery::{
dns::DnsDiscovery, local_swarm_discovery::LocalSwarmDiscovery, pkarr::dht::DhtDiscovery,
ConcurrentDiscovery,
ConcurrentDiscovery, Discovery,
},
relay::{RelayMap, RelayMode, RelayUrl},
Endpoint, NodeId,
};
use reqwest::Url;
use serde::{Deserialize, Serialize};
use tokio::{spawn, sync::oneshot};
use tracing::error;
use tokio::{spawn, sync::oneshot, time::sleep};
use tracing::{debug, error, warn};
mod new_sync_messages_notifier;
mod runner;
@@ -110,6 +110,12 @@ impl CloudP2P {
dns_pkarr_url: Url,
relay_url: RelayUrl,
) -> Result<Self, Error> {
let dht_discovery = DhtDiscovery::builder()
.secret_key(iroh_secret_key.clone())
.pkarr_relay(dns_pkarr_url)
.build()
.map_err(Error::DhtDiscoveryInit)?;
let endpoint = Endpoint::builder()
.alpns(vec![CloudP2PALPN::LATEST.to_vec()])
.discovery(Box::new(ConcurrentDiscovery::from_services(vec![
@@ -118,13 +124,7 @@ impl CloudP2P {
LocalSwarmDiscovery::new(iroh_secret_key.public())
.map_err(Error::LocalSwarmDiscoveryInit)?,
),
Box::new(
DhtDiscovery::builder()
.secret_key(iroh_secret_key.clone())
.pkarr_relay(dns_pkarr_url)
.build()
.map_err(Error::DhtDiscoveryInit)?,
),
Box::new(dht_discovery.clone()),
])))
.secret_key(iroh_secret_key)
.relay_mode(RelayMode::Custom(RelayMap::from_url(relay_url)))
@@ -132,6 +132,23 @@ impl CloudP2P {
.await
.map_err(Error::CreateCloudP2PEndpoint)?;
spawn({
let endpoint = endpoint.clone();
async move {
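// Keep retrying every 5 seconds until the endpoint knows its own address, then publish it to the DHT once.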
loop {
let Ok(node_addr) = endpoint.node_addr().await.map_err(|e| {
warn!(?e, "Failed to get direct addresses to force publish on DHT");
}) else {
sleep(Duration::from_secs(5)).await;
continue;
};
debug!("Force publishing peer on DHT");
return dht_discovery.publish(&node_addr.info);
}
}
});
let (msgs_tx, msgs_rx) = flume::bounded(16);
spawn({

View File

@@ -132,7 +132,7 @@ async fn connect_and_send_notification(
) -> Result<(), Error> {
let client = Client::new(RpcClient::new(QuinnConnection::<Service>::from_connection(
endpoint
.connect_by_node_id(*connection_id, CloudP2PALPN::LATEST)
.connect(*connection_id, CloudP2PALPN::LATEST)
.await
.map_err(Error::ConnectToCloudP2PNode)?,
)));

View File

@@ -601,7 +601,7 @@ async fn connect_to_first_available_client(
) -> Result<Client<QuinnConnection<Service>, Service>, CloudP2PError> {
for (device_pub_id, device_connection_id) in devices_in_group {
if let Ok(connection) = endpoint
.connect_by_node_id(*device_connection_id, CloudP2PALPN::LATEST)
.connect(*device_connection_id, CloudP2PALPN::LATEST)
.await
.map_err(
|e| error!(?e, %device_pub_id, "Failed to connect to authorizer device candidate"),

View File

@@ -1,10 +1,8 @@
use crate::Error;
use sd_core_sync::{from_cloud_crdt_ops, CompressedCRDTOperationsPerModelPerDevice, SyncManager};
use sd_core_sync::SyncManager;
use sd_actors::{Actor, Stopper};
use sd_prisma::prisma::{cloud_crdt_operation, SortOrder};
use sd_utils::timestamp_to_datetime;
use std::{
future::IntoFuture,
@@ -12,18 +10,18 @@ use std::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::SystemTime,
};
use futures::FutureExt;
use futures_concurrency::future::Race;
use tokio::{sync::Notify, time::sleep};
use tokio::{
sync::Notify,
time::{sleep, Instant},
};
use tracing::{debug, error};
use super::{ReceiveAndIngestNotifiers, SyncActors, ONE_MINUTE};
const BATCH_SIZE: i64 = 1000;
/// Responsible for taking sync operations received from the cloud,
/// and applying them to the local database via the sync system's ingest actor.
@@ -43,20 +41,14 @@ impl Actor<SyncActors> for Ingester {
Stopped,
}
'outer: loop {
loop {
self.active.store(true, Ordering::Relaxed);
self.active_notify.notify_waiters();
loop {
match self.run_loop_iteration().await {
Ok(IngestStatus::Completed) => break,
Ok(IngestStatus::InProgress) => {}
Err(e) => {
error!(?e, "Error during cloud sync ingester actor iteration");
sleep(ONE_MINUTE).await;
continue 'outer;
}
}
if let Err(e) = self.run_loop_iteration().await {
error!(?e, "Error during cloud sync ingester actor iteration");
sleep(ONE_MINUTE).await;
continue;
}
self.active.store(false, Ordering::Relaxed);
@@ -79,11 +71,6 @@ impl Actor<SyncActors> for Ingester {
}
}
enum IngestStatus {
Completed,
InProgress,
}
impl Ingester {
pub const fn new(
sync: SyncManager,
@@ -99,48 +86,36 @@ impl Ingester {
}
}
async fn run_loop_iteration(&self) -> Result<IngestStatus, Error> {
let (ops_ids, ops) = self
async fn run_loop_iteration(&self) -> Result<(), Error> {
let start = Instant::now();
let operations_to_ingest_count = self
.sync
.db
.cloud_crdt_operation()
.find_many(vec![])
.take(BATCH_SIZE)
.order_by(cloud_crdt_operation::timestamp::order(SortOrder::Asc))
.exec()
.await
.map_err(sd_core_sync::Error::from)?
.into_iter()
.map(from_cloud_crdt_ops)
.collect::<Result<(Vec<_>, Vec<_>), _>>()?;
if ops_ids.is_empty() {
return Ok(IngestStatus::Completed);
}
debug!(
messages_count = ops.len(),
first_message = ?ops
.first()
.map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)),
last_message = ?ops
.last()
.map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)),
"Messages to ingest",
);
self.sync
.ingest_ops(CompressedCRDTOperationsPerModelPerDevice::new(ops))
.await?;
self.sync
.db
.cloud_crdt_operation()
.delete_many(vec![cloud_crdt_operation::id::in_vec(ops_ids)])
.count(vec![])
.exec()
.await
.map_err(sd_core_sync::Error::from)?;
Ok(IngestStatus::InProgress)
if operations_to_ingest_count == 0 {
debug!("Nothing to ingest, early finishing ingester loop");
return Ok(());
}
debug!(
operations_to_ingest_count,
"Starting sync messages cloud ingestion loop"
);
let ingested_count = self.sync.ingest_ops().await?;
debug!(
ingested_count,
"Finished sync messages cloud ingestion loop in {:?}",
start.elapsed()
);
Ok(())
}
}

View File

@@ -15,7 +15,7 @@ use sd_core_sync::{
use sd_actors::{Actor, Stopper};
use sd_crypto::{
cloud::{OneShotDecryption, SecretKey, StreamDecryption},
primitives::{EncryptedBlock, OneShotNonce, StreamNonce},
primitives::{EncryptedBlock, StreamNonce},
};
use sd_prisma::prisma::PrismaClient;
@@ -23,34 +23,24 @@ use std::{
collections::{hash_map::Entry, HashMap},
future::IntoFuture,
path::Path,
pin::Pin,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
task::{Context, Poll},
};
use chrono::{DateTime, Utc};
use futures::{FutureExt, StreamExt, TryStreamExt};
use futures::{FutureExt, StreamExt};
use futures_concurrency::future::{Race, TryJoin};
use quic_rpc::transport::quinn::QuinnConnection;
use reqwest::Response;
use reqwest_middleware::ClientWithMiddleware;
use serde::{Deserialize, Serialize};
use tokio::{
fs,
io::{self, AsyncRead, AsyncReadExt, ReadBuf},
sync::Notify,
time::sleep,
};
use tokio_util::io::StreamReader;
use tokio::{fs, io, sync::Notify, time::sleep};
use tracing::{debug, error, instrument, warn};
use uuid::Uuid;
use super::{ReceiveAndIngestNotifiers, SyncActors, ONE_MINUTE};
const CLOUD_SYNC_DATA_KEEPER_FILE: &str = "cloud_sync_data_keeper.bin";
const CLOUD_SYNC_DATA_KEEPER_DIRECTORY: &str = "cloud_sync_data_keeper";
/// Responsible for downloading sync operations from the cloud to be processed by the ingester
@@ -121,7 +111,7 @@ impl Receiver {
active_notify: Arc<Notify>,
) -> Result<Self, Error> {
let (keeper, cloud_client, key_manager) = (
LastTimestampKeeper::load(data_dir.as_ref()),
LastTimestampKeeper::load(data_dir.as_ref(), sync_group_pub_id),
cloud_services.client(),
cloud_services.key_manager(),
)
@@ -209,7 +199,6 @@ impl Receiver {
message,
&self.key_manager,
&self.sync,
self.cloud_services.http_client(),
)
.await?;
@@ -246,33 +235,23 @@ async fn handle_single_message(
end_time,
operations_count,
key_hash,
signed_download_link,
encrypted_messages,
..
}: MessagesCollection,
key_manager: &KeyManager,
sync: &SyncManager,
http_client: &ClientWithMiddleware,
) -> Result<(devices::PubId, DateTime<Utc>), Error> {
// FIXME(@fogodev): If we don't have the key hash, we need to fetch it from another device in the group if possible
let Some(secret_key) = key_manager.get_key(sync_group_pub_id, &key_hash).await else {
return Err(Error::MissingKeyHash);
};
let response = http_client
.get(signed_download_link)
.send()
.await
.map_err(Error::DownloadSyncMessages)?
.error_for_status()
.map_err(Error::ErrorResponseDownloadSyncMessages)?;
debug!(
size = encrypted_messages.len(),
"Received encrypted sync messages collection"
);
let crdt_ops = if let Some(size) = response.content_length() {
debug!(size, "Received encrypted sync messages collection");
extract_messages_known_size(response, size, secret_key, original_device_pub_id).await
} else {
debug!("Received encrypted sync messages collection of unknown size");
extract_messages_unknown_size(response, secret_key, original_device_pub_id).await
}?;
let crdt_ops = decrypt_messages(encrypted_messages, secret_key, original_device_pub_id).await?;
assert_eq!(
crdt_ops.len(),
@@ -285,44 +264,28 @@ async fn handle_single_message(
Ok((original_device_pub_id, end_time))
}
#[instrument(skip(response, size, secret_key), err)]
async fn extract_messages_known_size(
response: Response,
size: u64,
#[instrument(skip(encrypted_messages, secret_key), fields(messages_size = %encrypted_messages.len()), err)]
async fn decrypt_messages(
encrypted_messages: Vec<u8>,
secret_key: SecretKey,
devices::PubId(device_pub_id): devices::PubId,
) -> Result<Vec<CRDTOperation>, Error> {
let plain_text = if size <= EncryptedBlock::CIPHER_TEXT_SIZE as u64 {
OneShotDecryption::decrypt(
&secret_key,
response
.bytes()
.await
.map_err(Error::ErrorResponseDownloadReadBytesSyncMessages)?
.as_ref()
.into(),
)
.map_err(Error::Decrypt)?
let plain_text = if encrypted_messages.len() <= EncryptedBlock::CIPHER_TEXT_SIZE {
OneShotDecryption::decrypt(&secret_key, encrypted_messages.as_slice().into())
.map_err(Error::Decrypt)?
} else {
let mut reader = StreamReader::new(response.bytes_stream().map_err(|e| {
error!(?e, "Failed to read sync messages bytes stream");
io::Error::new(io::ErrorKind::Other, e)
}));
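// Stream-encrypted payload: the stream nonce is prepended to the cipher text.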
let (nonce, cipher_text) = encrypted_messages.split_at(size_of::<StreamNonce>());
let mut nonce = StreamNonce::default();
let mut plain_text = Vec::with_capacity(cipher_text.len());
reader
.read_exact(&mut nonce)
.await
.map_err(Error::ReadNonceStreamDecryption)?;
// TODO: Reimplement using async streaming with serde if it ever gets implemented
let mut plain_text = vec![];
StreamDecryption::decrypt(&secret_key, &nonce, reader, &mut plain_text)
.await
.map_err(Error::Decrypt)?;
StreamDecryption::decrypt(
&secret_key,
nonce.try_into().expect("we split the correct amount"),
cipher_text,
&mut plain_text,
)
.await
.map_err(Error::Decrypt)?;
plain_text
};
@@ -332,34 +295,6 @@ async fn extract_messages_known_size(
.map_err(Error::DeserializationFailureToPullSyncMessages)
}
#[instrument(skip_all, err)]
async fn extract_messages_unknown_size(
response: Response,
secret_key: SecretKey,
devices::PubId(device_pub_id): devices::PubId,
) -> Result<Vec<CRDTOperation>, Error> {
let plain_text = match UnknownDownloadKind::new(response).await? {
UnknownDownloadKind::OneShot(buffer) => {
OneShotDecryption::decrypt(&secret_key, buffer.as_slice().into())
.map_err(Error::Decrypt)?
}
UnknownDownloadKind::Stream((nonce, reader)) => {
let mut plain_text = vec![];
StreamDecryption::decrypt(&secret_key, &nonce, reader, &mut plain_text)
.await
.map_err(Error::Decrypt)?;
plain_text
}
};
rmp_serde::from_slice::<CompressedCRDTOperationsPerModel>(&plain_text)
.map(|compressed_ops| compressed_ops.into_ops(device_pub_id))
.map_err(Error::DeserializationFailureToPullSyncMessages)
}
#[instrument(skip_all, err)]
pub async fn write_cloud_ops_to_db(
ops: Vec<CRDTOperation>,
@@ -382,8 +317,16 @@ struct LastTimestampKeeper {
}
impl LastTimestampKeeper {
async fn load(data_dir: &Path) -> Result<Self, Error> {
let file_path = data_dir.join(CLOUD_SYNC_DATA_KEEPER_FILE).into_boxed_path();
async fn load(data_dir: &Path, sync_group_pub_id: groups::PubId) -> Result<Self, Error> {
let cloud_sync_data_directory = data_dir.join(CLOUD_SYNC_DATA_KEEPER_DIRECTORY);
fs::create_dir_all(&cloud_sync_data_directory)
.await
.map_err(Error::FailedToCreateTimestampKeepersDirectory)?;
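// One keeper file per sync group, named by the group's pub id, so groups don't clobber each other's timestamps.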
let file_path = cloud_sync_data_directory
.join(format!("{sync_group_pub_id}.bin"))
.into_boxed_path();
match fs::read(&file_path).await {
Ok(bytes) => Ok(Self {
@@ -411,73 +354,3 @@ impl LastTimestampKeeper {
.map_err(Error::FailedToWriteLastTimestampKeeper)
}
}
struct UnknownDownloadSizeStreamer {
stream_reader: Box<dyn AsyncRead + Send + Unpin + 'static>,
buffer: Vec<u8>,
was_read: usize,
}
enum UnknownDownloadKind {
OneShot(Vec<u8>),
Stream((StreamNonce, UnknownDownloadSizeStreamer)),
}
impl UnknownDownloadKind {
async fn new(response: Response) -> Result<Self, Error> {
let mut buffer = Vec::with_capacity(EncryptedBlock::CIPHER_TEXT_SIZE * 2);
let mut stream = response.bytes_stream();
while let Some(res) = stream.next().await {
buffer.extend(res.map_err(Error::ErrorResponseDownloadReadBytesSyncMessages)?);
if buffer.len() > EncryptedBlock::CIPHER_TEXT_SIZE {
break;
}
}
if buffer.len() < size_of::<OneShotNonce>() {
return Err(Error::IncompleteDownloadBytesSyncMessages);
}
if buffer.len() <= EncryptedBlock::CIPHER_TEXT_SIZE {
Ok(Self::OneShot(buffer))
} else {
let nonce_size = size_of::<StreamNonce>();
Ok(Self::Stream((
StreamNonce::try_from(&buffer[..nonce_size]).expect("passing the right nonce size"),
UnknownDownloadSizeStreamer {
stream_reader: Box::new(StreamReader::new(stream.map_err(|e| {
error!(?e, "Failed to read sync messages bytes stream");
io::Error::new(io::ErrorKind::Other, e)
}))),
buffer,
was_read: nonce_size,
},
)))
}
}
}
impl AsyncRead for UnknownDownloadSizeStreamer {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
if buf.remaining() == 0 {
return Poll::Ready(Ok(()));
}
if self.was_read == self.buffer.len() {
Pin::new(&mut self.stream_reader).poll_read(cx, buf)
} else {
let len = std::cmp::min(self.buffer.len() - self.was_read, buf.remaining());
buf.put_slice(&self.buffer[self.was_read..(self.was_read + len)]);
self.was_read += len;
Poll::Ready(Ok(()))
}
}
}

View File

@@ -6,7 +6,7 @@ use sd_actors::{Actor, Stopper};
use sd_cloud_schema::{
devices,
error::{ClientSideError, NotFoundError},
sync::{self, groups, messages},
sync::{groups, messages},
Client, Service,
};
use sd_crypto::{
@@ -18,8 +18,7 @@ use sd_utils::{datetime_to_timestamp, timestamp_to_datetime};
use std::{
future::IntoFuture,
num::NonZero,
pin::{pin, Pin},
pin::pin,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
@@ -27,15 +26,12 @@ use std::{
time::{Duration, UNIX_EPOCH},
};
use async_stream::try_stream;
use chrono::{DateTime, Utc};
use futures::{FutureExt, SinkExt, Stream, StreamExt, TryStream, TryStreamExt};
use futures::{FutureExt, StreamExt, TryStreamExt};
use futures_concurrency::future::{Race, TryJoin};
use quic_rpc::{client::UpdateSink, pattern::bidi_streaming, transport::quinn::QuinnConnection};
use reqwest_middleware::reqwest::{header, Body};
use quic_rpc::transport::quinn::QuinnConnection;
use tokio::{
spawn,
sync::{broadcast, oneshot, Notify, Semaphore},
sync::{broadcast, Notify},
time::sleep,
};
use tracing::{debug, error};
@@ -44,9 +40,8 @@ use uuid::Uuid;
use super::{SyncActors, ONE_MINUTE};
const TEN_SECONDS: Duration = Duration::from_secs(10);
const THIRTY_SECONDS: Duration = Duration::from_secs(30);
const MESSAGES_COLLECTION_SIZE: u32 = 100_000;
const MESSAGES_COLLECTION_SIZE: u32 = 10_000;
enum RaceNotifiedOrStopped {
Notified,
@@ -60,18 +55,6 @@ enum LoopStatus {
type LatestTimestamp = NTP64;
type PushResponsesStream = Pin<
Box<
dyn Stream<
Item = Result<
Result<messages::push::Response, sd_cloud_schema::Error>,
bidi_streaming::ItemError<QuinnConnection<Service>>,
>,
> + Send
+ Sync,
>,
>;
#[derive(Debug)]
pub struct Sender {
sync_group_pub_id: groups::PubId,
@@ -164,6 +147,8 @@ impl Sender {
}
async fn run_loop_iteration(&mut self) -> Result<LoopStatus, Error> {
debug!("Starting cloud sender actor loop iteration");
let current_device_pub_id = devices::PubId(Uuid::from(&self.sync.device_pub_id));
let (key_hash, secret_key) = self
@@ -183,6 +168,11 @@ impl Sender {
let mut status = LoopStatus::Idle;
let mut new_latest_timestamp = current_latest_timestamp;
debug!(
chunk_size = MESSAGES_COLLECTION_SIZE,
"Trying to fetch chunk of sync messages from the database"
);
while let Some(ops_res) = crdt_ops_stream.next().await {
let ops = ops_res?;
@@ -190,9 +180,13 @@ impl Sender {
break;
};
debug!("Got first and last sync messages");
#[allow(clippy::cast_possible_truncation)]
let operations_count = ops.len() as u32;
debug!(operations_count, "Got chunk of sync messages");
new_latest_timestamp = last.timestamp;
let start_time = timestamp_to_datetime(first.timestamp);
@@ -205,17 +199,17 @@ impl Sender {
let messages_bytes = rmp_serde::to_vec_named(&compressed_ops)
.map_err(Error::SerializationFailureToPushSyncMessages)?;
let plain_text_size = messages_bytes.len();
let expected_blob_size = if plain_text_size <= EncryptedBlock::PLAIN_TEXT_SIZE {
OneShotEncryption::cipher_text_size(&secret_key, plain_text_size)
} else {
StreamEncryption::cipher_text_size(&secret_key, plain_text_size)
} as u64;
let encrypted_messages =
encrypt_messages(&secret_key, &mut self.rng, messages_bytes).await?;
debug!(?expected_blob_size, ?key_hash, "Preparing sync message");
let encrypted_messages_size = encrypted_messages.len();
let (mut push_updates, mut push_responses) = self
.cloud_client
debug!(
operations_count,
encrypted_messages_size, "Sending sync messages to cloud",
);
self.cloud_client
.sync()
.messages()
.push(messages::push::Request {
@@ -228,60 +222,23 @@ impl Sender {
device_pub_id: current_device_pub_id,
key_hash: key_hash.clone(),
operations_count,
start_time,
end_time,
expected_blob_size,
time_range: (start_time, end_time),
encrypted_messages,
})
.await?;
.await??;
let Some(response) = push_responses.next().await else {
return Err(Error::EmptyResponse("push initial response"));
};
let messages::push::Response(response_kind) = response??;
match response_kind {
messages::push::ResponseKind::SinglePresignedUrl(url) => {
upload_to_single_url(
url,
secret_key.clone(),
self.cloud_services.http_client(),
messages_bytes,
&mut self.rng,
)
.await?;
}
messages::push::ResponseKind::ManyPresignedUrls(urls) => {
upload_to_many_urls(
urls,
secret_key.clone(),
self.cloud_services.http_client().clone(),
messages_bytes,
&mut self.rng,
&mut push_updates,
&mut push_responses,
)
.await?;
}
messages::push::ResponseKind::Pong => {
return Err(Error::UnexpectedResponse(
"Pong on first messages push request",
))
}
messages::push::ResponseKind::End => {
return Err(Error::UnexpectedResponse(
"End on first messages push request",
))
}
}
finalize_protocol(&mut push_updates, &mut push_responses).await?;
debug!(
operations_count,
encrypted_messages_size, "Sent sync messages to cloud",
);
status = LoopStatus::SentMessages;
}
self.maybe_latest_timestamp = Some(new_latest_timestamp);
debug!("Finished cloud sender actor loop iteration");
Ok(status)
}
@@ -303,8 +260,7 @@ impl Sender {
.get_access_token()
.await?,
group_pub_id: self.sync_group_pub_id,
current_device_pub_id,
kind: messages::get_latest_time::Kind::ForCurrentDevice,
kind: messages::get_latest_time::Kind::ForCurrentDevice(current_device_pub_id),
})
.await?
{
@@ -328,320 +284,44 @@ impl Sender {
}
}
async fn finalize_protocol(
push_updates: &mut UpdateSink<
Service,
QuinnConnection<Service>,
messages::push::RequestUpdate,
sync::Service,
>,
push_responses: &mut PushResponsesStream,
) -> Result<(), Error> {
push_updates
.send(messages::push::RequestUpdate(
messages::push::UpdateKind::End,
))
.await
.map_err(Error::EndUpdatePushSyncMessages)?;
let Some(response) = push_responses.next().await else {
return Err(Error::EmptyResponse("push initial response"));
};
let messages::push::Response(response_kind) = response??;
match response_kind {
messages::push::ResponseKind::SinglePresignedUrl(_)
| messages::push::ResponseKind::ManyPresignedUrls(_) => {
return Err(Error::UnexpectedResponse(
"Urls responses on final messages push response",
))
}
messages::push::ResponseKind::Pong => {
return Err(Error::UnexpectedResponse(
"Pong on final message push response",
))
}
messages::push::ResponseKind::End => {
/*
Everything is awesome!
*/
}
}
Ok(())
}
async fn upload_to_many_urls(
urls: Vec<reqwest::Url>,
secret_key: SecretKey,
http_client: reqwest_middleware::ClientWithMiddleware,
messages_bytes: Vec<u8>,
async fn encrypt_messages(
secret_key: &SecretKey,
rng: &mut CryptoRng,
push_updates: &mut UpdateSink<
Service,
QuinnConnection<Service>,
messages::push::RequestUpdate,
sync::Service,
>,
push_responses: &mut PushResponsesStream,
) -> Result<(), Error> {
let stop_ping_pong = Arc::new(AtomicBool::new(false));
let (out_tx, mut out_rx) = oneshot::channel();
let rng = CryptoRng::from_seed(rng.generate_fixed());
let handle = spawn(handle_multipart_upload(
urls,
secret_key,
http_client,
messages_bytes,
rng,
Arc::clone(&stop_ping_pong),
out_tx,
));
loop {
if stop_ping_pong.load(Ordering::Acquire) {
break;
}
if let Err(e) = push_updates
.send(messages::push::RequestUpdate(
messages::push::UpdateKind::Ping,
))
.await
{
error!(?e, "Failed to send push ping update");
sleep(TEN_SECONDS).await;
continue;
}
let Some(response) = push_responses.next().await else {
error!("Empty response from push ping response");
continue;
};
match response {
Ok(Ok(messages::push::Response(
messages::push::ResponseKind::SinglePresignedUrl(_)
| messages::push::ResponseKind::ManyPresignedUrls(_),
))) => {
unreachable!("can't receive url if we didn't send an initial request")
}
Ok(Ok(messages::push::Response(messages::push::ResponseKind::Pong))) => {
/*
Everything is awesome!
*/
}
Ok(Ok(messages::push::Response(messages::push::ResponseKind::End))) => {
unreachable!("Can't receive an End if we didn't send an End first");
}
Ok(Err(e)) => {
error!(?e, "Error from push ping response");
sleep(TEN_SECONDS).await;
continue;
}
Err(e) => {
error!(?e, "Error from push ping response");
sleep(TEN_SECONDS).await;
continue;
}
}
if stop_ping_pong.load(Ordering::Acquire) {
break;
}
sleep(THIRTY_SECONDS).await;
}
let Ok(out) = out_rx.try_recv() else {
// SAFETY: This try_recv error can only happen if the upload task panicked
// so we're good to unwrap the error.
let e = handle.await.expect_err("upload task panicked");
error!(?e, "Critical error while uploading sync messages");
return Err(Error::CriticalErrorWhileUploadingSyncMessages);
};
out
}
async fn handle_multipart_upload(
urls: Vec<reqwest::Url>,
secret_key: SecretKey,
http_client: reqwest_middleware::ClientWithMiddleware,
messages_bytes: Vec<u8>,
rng: CryptoRng,
stop_ping_pong: Arc<AtomicBool>,
out_tx: oneshot::Sender<Result<(), Error>>,
) {
async fn inner(
urls: Vec<reqwest::Url>,
secret_key: SecretKey,
http_client: reqwest_middleware::ClientWithMiddleware,
messages_bytes: Vec<u8>,
mut rng: CryptoRng,
) -> Result<(), Error> {
let urls_count = urls.len();
let message_size = messages_bytes.len();
let blocks_per_url = message_size / urls_count / EncryptedBlock::PLAIN_TEXT_SIZE;
let cipher_text_size = StreamEncryption::cipher_text_size(&secret_key, message_size);
let parallel_upload_semaphore = Arc::new(Semaphore::new(
std::thread::available_parallelism()
.map(NonZero::get)
.unwrap_or(1),
) -> Result<Vec<u8>, Error> {
if messages_bytes.len() <= EncryptedBlock::PLAIN_TEXT_SIZE {
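// Payload fits in a single encrypted block: use one-shot encryption, with the nonce prepended.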
let mut nonce_and_cipher_text = Vec::with_capacity(OneShotEncryption::cipher_text_size(
secret_key,
messages_bytes.len(),
));
// If we're uploading to many URLs, it implies that the message size is bigger than a single
// encryption block, so we always use stream encryption.
let mut buffers = vec![Vec::with_capacity(cipher_text_size / urls_count); urls_count];
let (nonce, cipher_stream) =
StreamEncryption::encrypt(&secret_key, messages_bytes.as_slice(), &mut rng);
buffers[0].extend_from_slice(&nonce);
let mut cipher_stream = pin!(cipher_stream);
let mut handles = Vec::with_capacity(urls_count);
for (idx, (mut buffer, url)) in buffers.into_iter().zip(urls).enumerate() {
for _ in 0..blocks_per_url {
if let Some(cipher_res) = cipher_stream.next().await {
buffer.extend(cipher_res.map_err(Error::Encrypt)?);
} else {
return Err(Error::UnexpectedEndOfStream);
}
}
handles.push(spawn(upload_part(
idx,
url,
http_client.clone(),
buffer,
Arc::clone(&parallel_upload_semaphore),
)));
}
assert!(
cipher_stream.next().await.is_none(),
"Unexpected ciphered bytes still on stream"
);
handles.try_join().await.map_err(|e| {
error!(?e, "Error while uploading sync messages");
Error::CriticalErrorWhileUploadingSyncMessages
})?;
Ok(())
}
let res = inner(urls, secret_key, http_client, messages_bytes, rng).await;
stop_ping_pong.store(true, Ordering::Release);
out_tx
.send(res)
.expect("upload output channel never closes");
}
async fn upload_part(
idx: usize,
url: reqwest::Url,
http_client: reqwest_middleware::ClientWithMiddleware,
buffer: Vec<u8>,
parallel_upload_semaphore: Arc<Semaphore>,
) -> Result<(), Error> {
let _permit = parallel_upload_semaphore
.acquire()
.await
.expect("Semaphore never closes");
let response = http_client
.put(url)
.header(header::CONTENT_LENGTH, buffer.len())
.body(buffer)
.send()
.await
.map_err(Error::UploadSyncMessages)?
.error_for_status()
.map_err(Error::ErrorResponseUploadSyncMessages)?;
debug!(?response, idx, "Uploaded sync messages part");
Ok(())
}
async fn upload_to_single_url(
url: reqwest::Url,
secret_key: SecretKey,
http_client: &reqwest_middleware::ClientWithMiddleware,
messages_bytes: Vec<u8>,
rng: &mut CryptoRng,
) -> Result<(), Error> {
let (cipher_text_size, body) = if messages_bytes.len() <= EncryptedBlock::PLAIN_TEXT_SIZE {
let EncryptedBlock { nonce, cipher_text } =
OneShotEncryption::encrypt(&secret_key, messages_bytes.as_slice(), rng)
OneShotEncryption::encrypt(secret_key, messages_bytes.as_slice(), rng)
.map_err(Error::Encrypt)?;
let cipher_text_size = nonce.len() + cipher_text.len();
nonce_and_cipher_text.extend_from_slice(nonce.as_slice());
nonce_and_cipher_text.extend(&cipher_text);
let mut body_bytes = Vec::with_capacity(cipher_text_size);
body_bytes.extend_from_slice(nonce.as_slice());
body_bytes.extend(&cipher_text);
(cipher_text_size, Body::from(body_bytes))
Ok(nonce_and_cipher_text)
} else {
let mut rng = CryptoRng::from_seed(rng.generate_fixed());
let cipher_text_size =
StreamEncryption::cipher_text_size(&secret_key, messages_bytes.len());
let mut nonce_and_cipher_text = Vec::with_capacity(StreamEncryption::cipher_text_size(
secret_key,
messages_bytes.len(),
));
let body_bytes = stream_encryption(secret_key, messages_bytes, &mut rng)
.try_fold(
Vec::with_capacity(cipher_text_size),
|mut body_bytes, ciphered_chunk| async move {
body_bytes.extend(ciphered_chunk);
Ok(body_bytes)
},
)
.await?;
(cipher_text_size, Body::from(body_bytes))
};
http_client
.put(url)
.header(header::CONTENT_LENGTH, cipher_text_size)
.body(body)
.send()
.await
.map_err(Error::UploadSyncMessages)?
.error_for_status()
.map_err(Error::ErrorResponseUploadSyncMessages)?;
Ok(())
}
fn stream_encryption(
secret_key: SecretKey,
messages_bytes: Vec<u8>,
rng: &mut CryptoRng,
) -> impl TryStream<Ok = Vec<u8>, Error = Error> + Send + 'static {
let mut rng = CryptoRng::from_seed(rng.generate_fixed());
try_stream! {
let (nonce, cipher_stream) =
StreamEncryption::encrypt(&secret_key, messages_bytes.as_slice(), &mut rng);
StreamEncryption::encrypt(secret_key, messages_bytes.as_slice(), &mut rng);
nonce_and_cipher_text.extend_from_slice(nonce.as_slice());
let mut cipher_stream = pin!(cipher_stream);
yield nonce.to_vec();
while let Some(res) = cipher_stream.next().await {
yield res.map_err(Error::Encrypt)?;
while let Some(ciphered_chunk) = cipher_stream.try_next().await.map_err(Error::Encrypt)? {
nonce_and_cipher_text.extend(ciphered_chunk);
}
Ok(nonce_and_cipher_text)
}
}

View File

@@ -2,7 +2,7 @@ use sd_core_prisma_helpers::{
file_path_for_file_identifier, file_path_for_media_processor, file_path_for_object_validator,
file_path_to_full_path, file_path_to_handle_custom_uri, file_path_to_handle_p2p_serve_file,
file_path_to_isolate, file_path_to_isolate_with_id, file_path_to_isolate_with_pub_id,
file_path_walker, file_path_with_object,
file_path_walker, file_path_watcher_remove, file_path_with_object,
};
use sd_prisma::prisma::{file_path, location};
@@ -506,7 +506,8 @@ impl_from_db!(
file_path_to_isolate_with_pub_id,
file_path_walker,
file_path_to_isolate_with_id,
file_path_with_object
file_path_with_object,
file_path_watcher_remove
);
impl_from_db_without_location_id!(

View File

@@ -44,7 +44,7 @@ prisma-client-rust = { workspace = true }
rmp-serde = { workspace = true }
rmpv = { workspace = true }
rspc = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde = { workspace = true, features = ["derive", "rc"] }
serde_json = { workspace = true }
specta = { workspace = true }
strum = { workspace = true, features = ["derive", "phf"] }

View File

@@ -14,7 +14,11 @@ use crate::{
use sd_core_file_path_helper::IsolatedFilePathData;
use sd_core_prisma_helpers::{file_path_for_file_identifier, CasId};
use sd_prisma::prisma::{device, file_path, location, SortOrder};
use sd_prisma::{
prisma::{device, file_path, location, SortOrder},
prisma_sync,
};
use sd_sync::{sync_db_not_null_entry, OperationFactory};
use sd_task_system::{
AnyTaskOutput, IntoTask, SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId,
TaskOutput, TaskStatus,
@@ -267,15 +271,25 @@ impl Job for FileIdentifier {
..
} = self;
ctx.db()
.location()
.update(
location::id::equals(location.id),
vec![location::scan_state::set(
LocationScanState::FilesIdentified as i32,
)],
let (sync_param, db_param) = sync_db_not_null_entry!(
LocationScanState::FilesIdentified as i32,
location::scan_state
);
ctx.sync()
.write_op(
ctx.db(),
ctx.sync().shared_update(
prisma_sync::location::SyncId {
pub_id: location.pub_id.clone(),
},
[sync_param],
),
ctx.db()
.location()
.update(location::id::equals(location.id), vec![db_param])
.select(location::select!({ id })),
)
.exec()
.await
.map_err(file_identifier::Error::from)?;
@@ -360,7 +374,7 @@ impl FileIdentifier {
self.last_orphan_file_path_id = None;
self.dispatch_deep_identifier_tasks(
&maybe_sub_iso_file_path,
maybe_sub_iso_file_path.as_ref(),
ctx,
device_id,
dispatcher,
@@ -405,7 +419,7 @@ impl FileIdentifier {
self.last_orphan_file_path_id = None;
self.dispatch_deep_identifier_tasks(
&maybe_sub_iso_file_path,
maybe_sub_iso_file_path.as_ref(),
ctx,
device_id,
dispatcher,
@@ -419,7 +433,7 @@ impl FileIdentifier {
Phase::SearchingOrphans => {
self.dispatch_deep_identifier_tasks(
&maybe_sub_iso_file_path,
maybe_sub_iso_file_path.as_ref(),
ctx,
device_id,
dispatcher,
@@ -738,7 +752,7 @@ impl FileIdentifier {
async fn dispatch_deep_identifier_tasks<OuterCtx: OuterContext>(
&mut self,
maybe_sub_iso_file_path: &Option<IsolatedFilePathData<'static>>,
maybe_sub_iso_file_path: Option<&IsolatedFilePathData<'static>>,
ctx: &impl JobContext<OuterCtx>,
device_id: device::id::Type,
dispatcher: &JobTaskDispatcher,

View File

@@ -176,7 +176,7 @@ fn orphan_path_filters_shallow(
fn orphan_path_filters_deep(
location_id: location::id::Type,
file_path_id: Option<file_path::id::Type>,
maybe_sub_iso_file_path: &Option<IsolatedFilePathData<'_>>,
maybe_sub_iso_file_path: Option<&IsolatedFilePathData<'_>>,
) -> Vec<file_path::WhereParam> {
sd_utils::chain_optional_iter(
[

View File

@@ -12,11 +12,11 @@ use sd_prisma::{
prisma::{device, file_path, location, PrismaClient},
prisma_sync,
};
use sd_sync::OperationFactory;
use sd_sync::{sync_db_entry, OperationFactory};
use sd_task_system::{
ExecStatus, Interrupter, InterruptionKind, IntoAnyTaskOutput, SerializableTask, Task, TaskId,
};
use sd_utils::{error::FileIOError, msgpack};
use sd_utils::error::FileIOError;
use std::{
collections::HashMap, convert::identity, future::IntoFuture, mem, path::PathBuf, pin::pin,
@@ -403,19 +403,17 @@ async fn assign_cas_id_to_file_paths(
let (ops, queries) = identified_files
.iter()
.map(|(pub_id, IdentifiedFile { cas_id, .. })| {
let (sync_param, db_param) = sync_db_entry!(cas_id, file_path::cas_id);
(
sync.shared_update(
prisma_sync::file_path::SyncId {
pub_id: pub_id.to_db(),
},
file_path::cas_id::NAME,
msgpack!(cas_id),
[sync_param],
),
db.file_path()
.update(
file_path::pub_id::equals(pub_id.to_db()),
vec![file_path::cas_id::set(cas_id.into())],
)
.update(file_path::pub_id::equals(pub_id.to_db()), vec![db_param])
// We don't need any data here; selecting just the id avoids receiving the entire object,
// as we can't pass an empty select macro call
.select(file_path::select!({ id })),

View File

@@ -9,7 +9,7 @@ use sd_prisma::{
prisma_sync,
};
use sd_sync::{option_sync_db_entry, sync_db_entry, sync_entry, CRDTOperation, OperationFactory};
use sd_utils::{chain_optional_iter, msgpack};
use sd_utils::chain_optional_iter;
use std::collections::{HashMap, HashSet};
@@ -47,10 +47,12 @@ fn connect_file_path_to_object<'db>(
prisma_sync::file_path::SyncId {
pub_id: file_path_pub_id.to_db(),
},
file_path::object::NAME,
msgpack!(prisma_sync::object::SyncId {
pub_id: object_pub_id.to_db(),
}),
[sync_entry!(
prisma_sync::object::SyncId {
pub_id: object_pub_id.to_db(),
},
file_path::object
)],
),
db.file_path()
.update(

View File

@@ -16,7 +16,11 @@ use sd_core_file_path_helper::IsolatedFilePathData;
use sd_core_indexer_rules::{IndexerRule, IndexerRuler};
use sd_core_prisma_helpers::location_with_indexer_rules;
use sd_prisma::prisma::{device, location};
use sd_prisma::{
prisma::{device, location},
prisma_sync,
};
use sd_sync::{sync_db_not_null_entry, OperationFactory};
use sd_task_system::{
AnyTaskOutput, IntoTask, SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId,
TaskOutput, TaskStatus,
@@ -269,7 +273,7 @@ impl Job for Indexer {
.await?;
}
update_location_size(location.id, ctx.db(), &ctx).await?;
update_location_size(location.id, location.pub_id.clone(), &ctx).await?;
metadata.mean_db_write_time += start_size_update_time.elapsed();
}
@@ -287,13 +291,23 @@ impl Job for Indexer {
"all tasks must be completed here"
);
ctx.db()
.location()
.update(
location::id::equals(location.id),
vec![location::scan_state::set(LocationScanState::Indexed as i32)],
let (sync_param, db_param) =
sync_db_not_null_entry!(LocationScanState::Indexed as i32, location::scan_state);
ctx.sync()
.write_op(
ctx.db(),
ctx.sync().shared_update(
prisma_sync::location::SyncId {
pub_id: location.pub_id.clone(),
},
[sync_param],
),
ctx.db()
.location()
.update(location::id::equals(location.id), vec![db_param])
.select(location::select!({ id })),
)
.exec()
.await
.map_err(indexer::Error::from)?;

View File

@@ -10,11 +10,11 @@ use sd_prisma::{
prisma::{file_path, indexer_rule, location, PrismaClient, SortOrder},
prisma_sync,
};
use sd_sync::OperationFactory;
use sd_sync::{sync_db_entry, OperationFactory};
use sd_utils::{
db::{size_in_bytes_from_db, size_in_bytes_to_db, MissingFieldError},
error::{FileIOError, NonUtf8PathError},
from_bytes_to_uuid, msgpack,
from_bytes_to_uuid,
};
use std::{
@@ -146,22 +146,20 @@ async fn update_directory_sizes(
.map(|file_path| {
let size_bytes = iso_paths_and_sizes
.get(&IsolatedFilePathData::try_from(&file_path)?)
.map(|size| size.to_be_bytes().to_vec())
.map(|size| size_in_bytes_to_db(*size))
.expect("must be here");
let (sync_param, db_param) = sync_db_entry!(size_bytes, file_path::size_in_bytes_bytes);
Ok((
sync.shared_update(
prisma_sync::file_path::SyncId {
pub_id: file_path.pub_id.clone(),
},
file_path::size_in_bytes_bytes::NAME,
msgpack!(size_bytes),
[sync_param],
),
db.file_path()
.update(
file_path::pub_id::equals(file_path.pub_id),
vec![file_path::size_in_bytes_bytes::set(Some(size_bytes))],
)
.update(file_path::pub_id::equals(file_path.pub_id), vec![db_param])
.select(file_path::select!({ id })),
))
})
@@ -178,35 +176,45 @@ async fn update_directory_sizes(
async fn update_location_size(
location_id: location::id::Type,
db: &PrismaClient,
location_pub_id: location::pub_id::Type,
ctx: &impl OuterContext,
) -> Result<(), Error> {
let total_size = db
.file_path()
.find_many(vec![
file_path::location_id::equals(Some(location_id)),
file_path::materialized_path::equals(Some("/".to_string())),
])
.select(file_path::select!({ size_in_bytes_bytes }))
.exec()
.await?
.into_iter()
.filter_map(|file_path| {
file_path
.size_in_bytes_bytes
.map(|size_in_bytes_bytes| size_in_bytes_from_db(&size_in_bytes_bytes))
})
.sum::<u64>();
let db = ctx.db();
let sync = ctx.sync();
db.location()
.update(
location::id::equals(location_id),
vec![location::size_in_bytes::set(Some(
total_size.to_be_bytes().to_vec(),
))],
)
.exec()
.await?;
let total_size = size_in_bytes_to_db(
db.file_path()
.find_many(vec![
file_path::location_id::equals(Some(location_id)),
file_path::materialized_path::equals(Some("/".to_string())),
])
.select(file_path::select!({ size_in_bytes_bytes }))
.exec()
.await?
.into_iter()
.filter_map(|file_path| {
file_path
.size_in_bytes_bytes
.map(|size_in_bytes_bytes| size_in_bytes_from_db(&size_in_bytes_bytes))
})
.sum::<u64>(),
);
let (sync_param, db_param) = sync_db_entry!(total_size, location::size_in_bytes);
sync.write_op(
db,
sync.shared_update(
prisma_sync::location::SyncId {
pub_id: location_pub_id,
},
[sync_param],
),
db.location()
.update(location::id::equals(location_id), vec![db_param])
.select(location::select!({ id })),
)
.await?;
ctx.invalidate_query("locations.list");
ctx.invalidate_query("locations.get");
@@ -334,18 +342,19 @@ pub async fn reverse_update_directories_sizes(
{
let size_bytes = size_in_bytes_to_db(size);
let (sync_param, db_param) =
sync_db_entry!(size_bytes, file_path::size_in_bytes_bytes);
Some((
sync.shared_update(
prisma_sync::file_path::SyncId {
pub_id: pub_id.clone(),
},
file_path::size_in_bytes_bytes::NAME,
msgpack!(size_bytes),
),
db.file_path().update(
file_path::pub_id::equals(pub_id),
vec![file_path::size_in_bytes_bytes::set(Some(size_bytes))],
[sync_param],
),
db.file_path()
.update(file_path::pub_id::equals(pub_id), vec![db_param])
.select(file_path::select!({ id })),
))
} else {
warn!("Got a missing ancestor for a file_path in the database, ignoring...");

View File

@@ -136,7 +136,7 @@ pub async fn shallow(
.await?;
}
update_location_size(location.id, db, ctx).await?;
update_location_size(location.id, location.pub_id, ctx).await?;
}
if indexed_count > 0 || removed_count > 0 {

View File

@@ -9,10 +9,7 @@ use sd_prisma::{
};
use sd_sync::{sync_db_entry, sync_entry, OperationFactory};
use sd_task_system::{ExecStatus, Interrupter, IntoAnyTaskOutput, SerializableTask, Task, TaskId};
use sd_utils::{
db::{inode_to_db, size_in_bytes_to_db},
msgpack,
};
use sd_utils::db::{inode_to_db, size_in_bytes_to_db};
use std::{sync::Arc, time::Duration};
@@ -121,13 +118,13 @@ impl Task<Error> for Saver {
new file_paths and they were not identified yet"
);
let (sync_params, db_params): (Vec<_>, Vec<_>) = [
let (sync_params, db_params) = [
(
(
location::NAME,
msgpack!(prisma_sync::location::SyncId {
sync_entry!(
prisma_sync::location::SyncId {
pub_id: location_pub_id.clone()
}),
},
location
),
location_id::set(Some(*location_id)),
),
@@ -152,7 +149,7 @@ impl Task<Error> for Saver {
),
]
.into_iter()
.unzip();
.unzip::<_, _, Vec<_>, Vec<_>>();
(
sync.shared_create(

View File

@@ -93,7 +93,7 @@ impl Task<Error> for Updater {
check_interruption!(interrupter);
let (sync_stuff, paths_to_update) = walked_entries
let (crdt_ops, paths_to_update) = walked_entries
.drain(..)
.map(
|WalkedEntry {
@@ -138,18 +138,12 @@ impl Task<Error> for Updater {
.unzip::<_, _, Vec<_>, Vec<_>>();
(
sync_params
.into_iter()
.map(|(field, value)| {
sync.shared_update(
prisma_sync::file_path::SyncId {
pub_id: pub_id.to_db(),
},
field,
value,
)
})
.collect::<Vec<_>>(),
sync.shared_update(
prisma_sync::file_path::SyncId {
pub_id: pub_id.to_db(),
},
sync_params,
),
db.file_path()
.update(file_path::pub_id::equals(pub_id.into()), db_params)
// selecting id to avoid fetching whole object from database
@@ -159,9 +153,7 @@ impl Task<Error> for Updater {
)
.unzip::<_, _, Vec<_>, Vec<_>>();
let ops = sync_stuff.into_iter().flatten().collect::<Vec<_>>();
if ops.is_empty() && paths_to_update.is_empty() {
if crdt_ops.is_empty() && paths_to_update.is_empty() {
return Ok(ExecStatus::Done(
Output {
updated_count: 0,
@@ -172,7 +164,7 @@ impl Task<Error> for Updater {
}
let updated = sync
.write_ops(db, (ops, paths_to_update))
.write_ops(db, (crdt_ops, paths_to_update))
.await
.map_err(indexer::Error::from)?;

View File

@@ -290,6 +290,7 @@ impl Report {
.map(|id| job::parent::connect(job::id::equals(id.as_bytes().to_vec())))],
),
)
.select(job::select!({ id }))
.exec()
.await
.map_err(ReportError::Create)?;
@@ -318,6 +319,7 @@ impl Report {
job::date_completed::set(self.completed_at.map(Into::into)),
],
)
.select(job::select!({ id }))
.exec()
.await
.map_err(ReportError::Update)?;

View File

@@ -25,7 +25,8 @@ use image::{imageops, DynamicImage, GenericImageView};
use serde::{Deserialize, Serialize};
use specta::Type;
use tokio::{
fs, io,
fs::{self, File},
io::{self, AsyncWriteExt},
sync::{oneshot, Mutex},
task::spawn_blocking,
time::{sleep, Instant},
@@ -450,15 +451,29 @@ async fn generate_image_thumbnail(
trace!("Created shard directory and writing it to disk");
let res = fs::write(output_path, &webp).await.map_err(|e| {
let mut file = File::create(output_path).await.map_err(|e| {
thumbnailer::NonCriticalThumbnailerError::SaveThumbnail(
file_path.clone(),
FileIOError::from((output_path, e)).to_string(),
)
})?;
file.write_all(&webp).await.map_err(|e| {
thumbnailer::NonCriticalThumbnailerError::SaveThumbnail(
file_path.clone(),
FileIOError::from((output_path, e)).to_string(),
)
})?;
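// fsync so the thumbnail is actually on disk before we report success.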
file.sync_all().await.map_err(|e| {
thumbnailer::NonCriticalThumbnailerError::SaveThumbnail(
file_path,
FileIOError::from((output_path, e)).to_string(),
)
});
})?;
trace!("Wrote thumbnail to disk");
res
return Ok(());
}
#[instrument(

View File

@@ -14,7 +14,11 @@ use sd_core_file_path_helper::IsolatedFilePathData;
use sd_core_prisma_helpers::file_path_for_media_processor;
use sd_file_ext::extensions::Extension;
use sd_prisma::prisma::{location, PrismaClient};
use sd_prisma::{
prisma::{location, PrismaClient},
prisma_sync,
};
use sd_sync::{sync_db_not_null_entry, OperationFactory};
use sd_task_system::{
AnyTaskOutput, IntoTask, SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId,
TaskOutput, TaskStatus, TaskSystemError,
@@ -214,15 +218,23 @@ impl Job for MediaProcessor {
..
} = self;
ctx.db()
.location()
.update(
location::id::equals(location.id),
vec![location::scan_state::set(
LocationScanState::Completed as i32,
)],
let (sync_param, db_param) =
sync_db_not_null_entry!(LocationScanState::Completed as i32, location::scan_state);
ctx.sync()
.write_op(
ctx.db(),
ctx.sync().shared_update(
prisma_sync::location::SyncId {
pub_id: location.pub_id.clone(),
},
[sync_param],
),
ctx.db()
.location()
.update(location::id::equals(location.id), vec![db_param])
.select(location::select!({ id })),
)
.exec()
.await
.map_err(media_processor::Error::from)?;

View File

@@ -220,7 +220,7 @@ async fn dispatch_media_data_extractor_tasks(
async fn dispatch_thumbnailer_tasks(
parent_iso_file_path: &IsolatedFilePathData<'_>,
should_regenerate: bool,
location_path: &PathBuf,
location_path: &Path,
dispatcher: &BaseTaskDispatcher<Error>,
ctx: &impl OuterContext,
) -> Result<Vec<TaskHandle<Error>>, Error> {

View File

@@ -379,21 +379,20 @@ fn process_thumbnail_generation_output(
match status {
GenerationStatus::Generated => {
*generated += 1;
// This if is REALLY needed, due to the sheer performance of the thumbnailer,
// I restricted to only send events notifying for thumbnails in the current
// opened directory, sending events for the entire location turns into a
// humongous bottleneck in the frontend lol, since it doesn't even knows
// what to do with thumbnails for inner directories lol
// - fogodev
if with_priority {
reporter.new_thumbnail(thumb_key);
}
}
GenerationStatus::Skipped => {
*skipped += 1;
}
}
// This if is REALLY needed: due to the sheer performance of the thumbnailer,
// I restricted it to only send events for thumbnails in the currently
// opened directory, as sending events for the entire location turns into a
// humongous bottleneck in the frontend lol, since it doesn't even know
// what to do with thumbnails for inner directories lol
// - fogodev
if with_priority {
reporter.new_thumbnail(thumb_key);
}
}
Err(e) => {
errors.push(media_processor::NonCriticalMediaProcessorError::from(e).into());

View File

@@ -19,7 +19,7 @@ globset = { workspace = true, features = ["serde1"] }
prisma-client-rust = { workspace = true }
rmp-serde = { workspace = true }
rspc = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde = { workspace = true, features = ["derive", "rc"] }
specta = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["fs"] }

View File

@@ -60,7 +60,7 @@ impl<'de> Deserialize<'de> for RulePerKind {
struct FieldsVisitor;
impl<'de> de::Visitor<'de> for FieldsVisitor {
impl de::Visitor<'_> for FieldsVisitor {
type Value = Fields;
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {

View File

@@ -74,6 +74,20 @@ file_path::select!(file_path_for_media_processor {
pub_id
}
});
file_path::select!(file_path_watcher_remove {
id
pub_id
location_id
materialized_path
is_dir
name
extension
object: select {
id
pub_id
}
});
file_path::select!(file_path_to_isolate {
location_id
materialized_path
@@ -324,7 +338,7 @@ impl Clone for CasId<'_> {
}
}
impl<'cas_id> CasId<'cas_id> {
impl CasId<'_> {
#[must_use]
pub fn as_str(&self) -> &str {
self.0.as_ref()

View File

@@ -22,6 +22,7 @@ async-stream = { workspace = true }
chrono = { workspace = true }
futures = { workspace = true }
futures-concurrency = { workspace = true }
itertools = { workspace = true }
prisma-client-rust = { workspace = true, features = ["rspc"] }
rmp-serde = { workspace = true }
rmpv = { workspace = true }

View File

@@ -1,47 +1,41 @@
use sd_core_prisma_helpers::DevicePubId;
use sd_prisma::{
prisma::{crdt_operation, PrismaClient, SortOrder},
prisma::{crdt_operation, PrismaClient},
prisma_sync::ModelSyncData,
};
use sd_sync::{
CRDTOperation, CRDTOperationData, CompressedCRDTOperation, ModelId, OperationKind, RecordId,
};
use std::{collections::BTreeMap, num::NonZeroU128};
use std::{collections::BTreeMap, num::NonZeroU128, sync::Arc};
use futures_concurrency::future::TryJoin;
use tokio::sync::Mutex;
use tracing::{debug, instrument, trace, warn};
use uhlc::{Timestamp, HLC, NTP64};
use uuid::Uuid;
use super::{db_operation::write_crdt_op_to_db, Error, TimestampPerDevice};
crdt_operation::select!(crdt_operation_id { id });
// Where the magic happens: apply a batch of CRDT operations received for a single record.
#[instrument(skip(clock, ops), fields(operations_count = %ops.len()), err)]
pub async fn process_crdt_operations(
clock: &HLC,
timestamp_per_device: &TimestampPerDevice,
sync_lock: Arc<Mutex<()>>,
db: &PrismaClient,
device_pub_id: DevicePubId,
model: ModelId,
record_id: RecordId,
mut ops: Vec<CompressedCRDTOperation>,
model_id: ModelId,
(record_id, mut ops): (RecordId, Vec<CompressedCRDTOperation>),
) -> Result<(), Error> {
ops.sort_by_key(|op| op.timestamp);
let new_timestamp = ops.last().expect("Empty ops array").timestamp;
// first, we update the HLC's timestamp with the incoming one.
// this involves a drift check + sets the last time of the clock
clock
.update_with_timestamp(&Timestamp::new(
new_timestamp,
uhlc::ID::from(
NonZeroU128::new(Uuid::from(&device_pub_id).to_u128_le()).expect("Non zero id"),
),
))
.expect("timestamp has too much drift!");
update_clock(clock, new_timestamp, &device_pub_id);
// Delete - ignores all other messages
if let Some(delete_op) = ops
@@ -50,7 +44,15 @@ pub async fn process_crdt_operations(
.find(|op| matches!(op.data, CRDTOperationData::Delete))
{
trace!("Deleting operation");
handle_crdt_deletion(db, &device_pub_id, model, record_id, delete_op).await?;
handle_crdt_deletion(
db,
&sync_lock,
&device_pub_id,
model_id,
record_id,
delete_op,
)
.await?;
}
// Create + > 0 Update - overwrites the create's data with the updates
else if let Some(timestamp) = ops
@@ -61,24 +63,31 @@ pub async fn process_crdt_operations(
trace!("Create + Updates operations");
// conflict resolution
let delete = db
let delete_count = db
.crdt_operation()
.find_first(vec![
crdt_operation::model::equals(i32::from(model)),
.count(vec![
crdt_operation::model::equals(i32::from(model_id)),
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?),
crdt_operation::kind::equals(OperationKind::Delete.to_string()),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc))
.exec()
.await?;
if delete.is_some() {
if delete_count > 0 {
debug!("Found a previous delete operation with the same SyncId, will ignore these operations");
return Ok(());
}
handle_crdt_create_and_updates(db, &device_pub_id, model, record_id, ops, timestamp)
.await?;
handle_crdt_create_and_updates(
db,
&sync_lock,
&device_pub_id,
model_id,
record_id,
ops,
timestamp,
)
.await?;
}
// > 0 Update - batches updates with a fake Create op
else {
@@ -87,94 +96,222 @@ pub async fn process_crdt_operations(
let mut data = BTreeMap::new();
for op in ops.into_iter().rev() {
let CRDTOperationData::Update { field, value } = op.data else {
let CRDTOperationData::Update(fields_and_values) = op.data else {
unreachable!("Create + Delete should be filtered out!");
};
data.insert(field, (value, op.timestamp));
for (field, value) in fields_and_values {
data.insert(field, (value, op.timestamp));
}
}
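// Find the earliest timestamp among the incoming field updates; any newer local update may supersede them.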
let earlier_time = data.values().fold(
NTP64(u64::from(u32::MAX)),
|earlier_time, (_, timestamp)| {
if timestamp.0 < earlier_time.0 {
*timestamp
} else {
earlier_time
}
},
);
// conflict resolution
let (create, updates) = db
let (create, possible_newer_updates) = db
._batch((
db.crdt_operation().count(vec![
crdt_operation::model::equals(i32::from(model_id)),
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?),
crdt_operation::kind::equals(OperationKind::Create.to_string()),
]),
// Fetching all update operations newer than our current earlier timestamp
db.crdt_operation()
.find_first(vec![
crdt_operation::model::equals(i32::from(model)),
.find_many(vec![
crdt_operation::timestamp::gt({
#[allow(clippy::cast_possible_wrap)]
// SAFETY: we had to store using i64 due to SQLite limitations
{
earlier_time.as_u64() as i64
}
}),
crdt_operation::model::equals(i32::from(model_id)),
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?),
crdt_operation::kind::equals(OperationKind::Create.to_string()),
crdt_operation::kind::starts_with("u".to_string()),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc)),
data.iter()
.map(|(k, (_, timestamp))| {
Ok(db
.crdt_operation()
.find_first(vec![
crdt_operation::timestamp::gt({
#[allow(clippy::cast_possible_wrap)]
// SAFETY: we had to store using i64 due to SQLite limitations
{
timestamp.as_u64() as i64
}
}),
crdt_operation::model::equals(i32::from(model)),
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?),
crdt_operation::kind::equals(OperationKind::Update(k).to_string()),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc)))
})
.collect::<Result<Vec<_>, Error>>()?,
.select(crdt_operation::select!({ kind timestamp })),
))
.await?;
if create.is_none() {
if create == 0 {
warn!("Failed to find a previous create operation with the same SyncId");
return Ok(());
}
handle_crdt_updates(db, &device_pub_id, model, record_id, data, updates).await?;
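// Walk the newer local updates and drop any incoming fields they already supersede.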
for candidate in possible_newer_updates {
// The first element is "u" meaning that this is an update, so we skip it
for key in candidate
.kind
.split(':')
.filter(|field| !field.is_empty())
.skip(1)
{
// remove entries if we possess locally more recent updates for this field
if data.get(key).is_some_and(|(_, new_timestamp)| {
#[allow(clippy::cast_sign_loss)]
{
// we need to store as i64 due to SQLite limitations
*new_timestamp < NTP64(candidate.timestamp as u64)
}
}) {
data.remove(key);
}
}
if data.is_empty() {
break;
}
}
handle_crdt_updates(db, &sync_lock, &device_pub_id, model_id, record_id, data).await?;
}
// read the timestamp for the operation's device, or insert one if it doesn't exist
let current_last_timestamp = timestamp_per_device
.read()
.await
.get(&device_pub_id)
.copied();
// update the stored timestamp for this device - will be derived from the crdt operations table on restart
let new_ts = NTP64::max(current_last_timestamp.unwrap_or_default(), new_timestamp);
timestamp_per_device
.write()
.await
.insert(device_pub_id, new_ts);
update_timestamp_per_device(timestamp_per_device, device_pub_id, new_timestamp).await;
Ok(())
}
pub async fn bulk_ingest_create_only_ops(
clock: &HLC,
timestamp_per_device: &TimestampPerDevice,
db: &PrismaClient,
device_pub_id: DevicePubId,
model_id: ModelId,
ops: Vec<(RecordId, CompressedCRDTOperation)>,
sync_lock: Arc<Mutex<()>>,
) -> Result<(), Error> {
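// The newest timestamp among the incoming creates is used to advance the local clock.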
let latest_timestamp = ops.iter().fold(NTP64(0), |latest, (_, op)| {
if latest < op.timestamp {
op.timestamp
} else {
latest
}
});
update_clock(clock, latest_timestamp, &device_pub_id);
let ops = ops
.into_iter()
.map(|(record_id, op)| {
rmp_serde::to_vec(&record_id)
.map(|serialized_record_id| (record_id, serialized_record_id, op))
})
.collect::<Result<Vec<_>, _>>()?;
// conflict resolution
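// Count prior delete operations per record; creates for already-deleted records are dropped below.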
let delete_counts = db
._batch(
ops.iter()
.map(|(_, serialized_record_id, _)| {
db.crdt_operation().count(vec![
crdt_operation::model::equals(i32::from(model_id)),
crdt_operation::record_id::equals(serialized_record_id.clone()),
crdt_operation::kind::equals(OperationKind::Delete.to_string()),
])
})
.collect::<Vec<_>>(),
)
.await?;
let lock_guard = sync_lock.lock().await;
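// Hold the sync lock while the transaction runs so other sync writers can't interleave.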
db._transaction()
.with_timeout(30 * 10000)
.with_max_wait(30 * 10000)
.run(|db| {
let device_pub_id = device_pub_id.clone();
async move {
// complying with borrowck
let device_pub_id = &device_pub_id;
let (crdt_creates, model_sync_data) = ops
.into_iter()
.zip(delete_counts)
.filter_map(|(data, delete_count)| (delete_count == 0).then_some(data))
.map(
|(
record_id,
serialized_record_id,
CompressedCRDTOperation { timestamp, data },
)| {
let crdt_create = crdt_operation::CreateUnchecked {
timestamp: {
#[allow(clippy::cast_possible_wrap)]
// SAFETY: we have to store using i64 due to SQLite limitations
{
timestamp.0 as i64
}
},
model: i32::from(model_id),
record_id: serialized_record_id,
kind: "c".to_string(),
data: rmp_serde::to_vec(&data)?,
device_pub_id: device_pub_id.to_db(),
_params: vec![],
};
// NOTE(@fogodev): I wish I could do a create many here instead of creating each
// entry separately, but it's not supported by PCR
let model_sync_data = ModelSyncData::from_op(CRDTOperation {
device_pub_id: Uuid::from(device_pub_id),
model_id,
record_id,
timestamp,
data,
})?
.exec(&db);
Ok::<_, Error>((crdt_create, model_sync_data))
},
)
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.unzip::<_, _, Vec<_>, Vec<_>>();
model_sync_data.try_join().await?;
db.crdt_operation().create_many(crdt_creates).exec().await?;
Ok::<_, Error>(())
}
})
.await?;
drop(lock_guard);
update_timestamp_per_device(timestamp_per_device, device_pub_id, latest_timestamp).await;
Ok(())
}
#[instrument(skip_all, err)]
async fn handle_crdt_updates(
db: &PrismaClient,
sync_lock: &Mutex<()>,
device_pub_id: &DevicePubId,
model_id: ModelId,
record_id: rmpv::Value,
mut data: BTreeMap<String, (rmpv::Value, NTP64)>,
updates: Vec<Option<crdt_operation::Data>>,
data: BTreeMap<String, (rmpv::Value, NTP64)>,
) -> Result<(), Error> {
let keys = data.keys().cloned().collect::<Vec<_>>();
let device_pub_id = sd_sync::DevicePubId::from(device_pub_id);
// does the same thing as processing ops one-by-one and returning early if a newer op was found
for (update, key) in updates.into_iter().zip(keys) {
if update.is_some() {
data.remove(&key);
}
}
let _lock_guard = sync_lock.lock().await;
db._transaction()
.with_timeout(30 * 10000)
.with_max_wait(30 * 10000)
.run(|db| async move {
// fake operation to batch them all at once
// fake operation to batch them all at once, inserting the latest data on the appropriate table
ModelSyncData::from_op(CRDTOperation {
device_pub_id,
model_id,
@@ -185,41 +322,40 @@ async fn handle_crdt_updates(
.map(|(k, (data, _))| (k.clone(), data.clone()))
.collect(),
),
})
.ok_or(Error::InvalidModelId(model_id))?
})?
.exec(&db)
.await?;
// need to only apply ops that haven't been filtered out
data.into_iter()
.map(|(field, (value, timestamp))| {
let record_id = record_id.clone();
let db = &db;
async move {
write_crdt_op_to_db(
&CRDTOperation {
device_pub_id,
model_id,
record_id,
timestamp,
data: CRDTOperationData::Update { field, value },
},
db,
)
.await
let (fields_and_values, latest_timestamp) = data.into_iter().fold(
(BTreeMap::new(), NTP64::default()),
|(mut fields_and_values, mut latest_time_stamp), (field, (value, timestamp))| {
fields_and_values.insert(field, value);
if timestamp > latest_time_stamp {
latest_time_stamp = timestamp;
}
})
.collect::<Vec<_>>()
.try_join()
.await
.map(|_| ())
(fields_and_values, latest_time_stamp)
},
);
write_crdt_op_to_db(
&CRDTOperation {
device_pub_id,
model_id,
record_id,
timestamp: latest_timestamp,
data: CRDTOperationData::Update(fields_and_values),
},
&db,
)
.await
})
.await
}
#[instrument(skip_all, err)]
async fn handle_crdt_create_and_updates(
db: &PrismaClient,
sync_lock: &Mutex<()>,
device_pub_id: &DevicePubId,
model_id: ModelId,
record_id: rmpv::Value,
@@ -244,13 +380,18 @@ async fn handle_crdt_create_and_updates(
break;
}
CRDTOperationData::Update { field, value } => {
data.insert(field.clone(), value.clone());
CRDTOperationData::Update(fields_and_values) => {
for (field, value) in fields_and_values {
data.insert(field.clone(), value.clone());
}
applied_ops.push(op);
}
}
}
let _lock_guard = sync_lock.lock().await;
db._transaction()
.with_timeout(30 * 10000)
.with_max_wait(30 * 10000)
@@ -262,8 +403,7 @@ async fn handle_crdt_create_and_updates(
record_id: record_id.clone(),
timestamp,
data: CRDTOperationData::Create(data),
})
.ok_or(Error::InvalidModelId(model_id))?
})?
.exec(&db)
.await?;
@@ -294,14 +434,33 @@ async fn handle_crdt_create_and_updates(
.await
}
#[instrument(skip_all, err)]
async fn handle_crdt_deletion(
db: &PrismaClient,
sync_lock: &Mutex<()>,
device_pub_id: &DevicePubId,
model: u16,
record_id: rmpv::Value,
delete_op: &CompressedCRDTOperation,
) -> Result<(), Error> {
// deletes are the be all and end all, no need to check anything
// deletes are the be-all and end-all, unless we never created the object to begin with;
// in that case we don't need to delete anything
if db
.crdt_operation()
.count(vec![
crdt_operation::model::equals(i32::from(model)),
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?),
])
.exec()
.await?
== 0
{
// This means that on the other device this entry was created and deleted before this
// device could even take notice of it. So we don't need to do anything here.
return Ok(());
}
let op = CRDTOperation {
device_pub_id: device_pub_id.into(),
model_id: model,
@@ -310,16 +469,49 @@ async fn handle_crdt_deletion(
data: CRDTOperationData::Delete,
};
let _lock_guard = sync_lock.lock().await;
db._transaction()
.with_timeout(30 * 10000)
.with_max_wait(30 * 10000)
.run(|db| async move {
ModelSyncData::from_op(op.clone())
.ok_or(Error::InvalidModelId(model))?
.exec(&db)
.await?;
ModelSyncData::from_op(op.clone())?.exec(&db).await?;
write_crdt_op_to_db(&op, &db).await
})
.await
}
fn update_clock(clock: &HLC, latest_timestamp: NTP64, device_pub_id: &DevicePubId) {
// first, we update the HLC's timestamp with the incoming one.
// this involves a drift check and sets the last time of the clock
clock
.update_with_timestamp(&Timestamp::new(
latest_timestamp,
uhlc::ID::from(
NonZeroU128::new(Uuid::from(device_pub_id).to_u128_le()).expect("Non zero id"),
),
))
.expect("timestamp has too much drift!");
}
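A minimal sketch of the `uhlc` calls involved, with the id built the same way `update_clock` builds its own; the ids 1 and 2 are placeholders for real device pub ids:

```rust
use std::num::NonZeroU128;
use uhlc::{HLCBuilder, ID};

fn main() {
    // Two clocks standing in for two devices.
    let local = HLCBuilder::new()
        .with_id(ID::from(NonZeroU128::new(1).expect("non zero id")))
        .build();
    let remote = HLCBuilder::new()
        .with_id(ID::from(NonZeroU128::new(2).expect("non zero id")))
        .build();

    let incoming = remote.new_timestamp();

    // Folding the remote timestamp into the local clock checks drift and
    // advances the clock's last time, so anything stamped on this device
    // afterwards sorts after the ingested operation.
    local
        .update_with_timestamp(&incoming)
        .expect("timestamp has too much drift!");

    assert!(local.new_timestamp().get_time() > incoming.get_time());
}
```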
async fn update_timestamp_per_device(
timestamp_per_device: &TimestampPerDevice,
device_pub_id: DevicePubId,
latest_timestamp: NTP64,
) {
// read the last known timestamp for the operation's device, if any
let current_last_timestamp = timestamp_per_device
.read()
.await
.get(&device_pub_id)
.copied();
// update the stored timestamp for this device; it will be re-derived from the crdt_operation table on restart
let new_ts = NTP64::max(current_last_timestamp.unwrap_or_default(), latest_timestamp);
timestamp_per_device
.write()
.await
.insert(device_pub_id, new_ts);
}
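One caveat worth noting: the read-then-write sequence above has a small window between the two lock acquisitions, so two concurrent callers for the same device can race and the smaller maximum can win. A sketch of the same max-merge done under a single write lock (`u128` and `u64` as stand-ins for the real id and timestamp types):

```rust
use std::collections::HashMap;
use tokio::sync::RwLock;

type DevicePubId = u128; // stand-in for the real pub id type
type Ntp64 = u64;        // stand-in for uhlc::NTP64

// Max-merge under one write lock, so concurrent callers cannot interleave
// between reading the current value and writing the new one.
async fn update_timestamp_per_device(
    timestamps: &RwLock<HashMap<DevicePubId, Ntp64>>,
    device: DevicePubId,
    latest: Ntp64,
) {
    let mut map = timestamps.write().await;
    let entry = map.entry(device).or_default();
    *entry = (*entry).max(latest);
}

#[tokio::main]
async fn main() {
    let timestamps = RwLock::new(HashMap::new());
    update_timestamp_per_device(&timestamps, 1, 10).await;
    update_timestamp_per_device(&timestamps, 1, 7).await; // older, ignored
    assert_eq!(timestamps.read().await[&1], 10);
}
```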

View File

@@ -27,12 +27,15 @@
#![forbid(deprecated_in_future)]
#![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)]
use sd_prisma::prisma::{cloud_crdt_operation, crdt_operation};
use sd_prisma::{
prisma::{cloud_crdt_operation, crdt_operation},
prisma_sync,
};
use sd_utils::uuid_to_bytes;
use std::{collections::HashMap, sync::Arc};
use tokio::sync::RwLock;
use tokio::{sync::RwLock, task::JoinError};
pub mod backfill;
mod db_operation;
@@ -66,12 +69,16 @@ pub enum Error {
Deserialization(#[from] rmp_serde::decode::Error),
#[error("database error: {0}")]
Database(#[from] prisma_client_rust::QueryError),
#[error("PrismaSync error: {0}")]
PrismaSync(#[from] prisma_sync::Error),
#[error("invalid model id: {0}")]
InvalidModelId(ModelId),
#[error("tried to write an empty operations list")]
EmptyOperations,
#[error("device not found: {0}")]
DeviceNotFound(DevicePubId),
#[error("processes crdt task panicked")]
ProcessCrdtPanic(JoinError),
}
impl From<Error> for rspc::Error {

View File

@@ -1,38 +1,47 @@
use sd_core_prisma_helpers::DevicePubId;
use sd_prisma::{
prisma::{crdt_operation, device, PrismaClient, SortOrder},
prisma::{cloud_crdt_operation, crdt_operation, device, PrismaClient, SortOrder},
prisma_sync,
};
use sd_sync::{
CRDTOperation, CompressedCRDTOperationsPerModel, CompressedCRDTOperationsPerModelPerDevice,
ModelId, OperationFactory,
CRDTOperation, CRDTOperationData, CompressedCRDTOperation, ModelId, OperationFactory, RecordId,
};
use sd_utils::timestamp_to_datetime;
use std::{
collections::BTreeMap,
fmt,
collections::{hash_map::Entry, BTreeMap, HashMap},
fmt, mem,
num::NonZeroU128,
sync::{
atomic::{self, AtomicBool},
Arc,
},
time::{Duration, SystemTime},
};
use async_stream::stream;
use futures::Stream;
use futures::{stream::FuturesUnordered, Stream, TryStreamExt};
use futures_concurrency::future::TryJoin;
use tokio::sync::{broadcast, Mutex, Notify, RwLock};
use tracing::{debug, warn};
use itertools::Itertools;
use tokio::{
spawn,
sync::{broadcast, Mutex, Notify, RwLock},
time::Instant,
};
use tracing::{debug, instrument, warn};
use uhlc::{HLCBuilder, HLC};
use uuid::Uuid;
use super::{
crdt_op_db, db_operation::from_crdt_ops, ingest_utils::process_crdt_operations, Error,
SyncEvent, TimestampPerDevice, NTP64,
crdt_op_db,
db_operation::{from_cloud_crdt_ops, from_crdt_ops},
ingest_utils::{bulk_ingest_create_only_ops, process_crdt_operations},
Error, SyncEvent, TimestampPerDevice, NTP64,
};
const INGESTION_BATCH_SIZE: i64 = 10_000;
/// Wrapper that spawns the ingest actor and provides utilities for reading and writing sync operations.
#[derive(Clone)]
pub struct Manager {
@@ -44,7 +53,8 @@ pub struct Manager {
pub clock: Arc<HLC>,
pub active: Arc<AtomicBool>,
pub active_notify: Arc<Notify>,
pub sync_lock: Arc<Mutex<()>>,
pub(crate) sync_lock: Arc<Mutex<()>>,
pub(crate) available_parallelism: usize,
}
impl fmt::Debug for Manager {
@@ -131,67 +141,264 @@ impl Manager {
active: Arc::default(),
active_notify: Arc::default(),
sync_lock: Arc::new(Mutex::default()),
available_parallelism: std::thread::available_parallelism()
.map_or(1, std::num::NonZero::get),
},
rx,
))
}
pub async fn ingest_ops(
async fn fetch_cloud_crdt_ops(
&self,
CompressedCRDTOperationsPerModelPerDevice(compressed_ops): CompressedCRDTOperationsPerModelPerDevice,
) -> Result<(), Error> {
// WARN: this order here exists because sync messages MUST be processed in this exact order
// due to relationship dependencies between these tables.
const INGEST_ORDER: &[ModelId] = &[
prisma_sync::device::MODEL_ID,
prisma_sync::storage_statistics::MODEL_ID,
prisma_sync::tag::MODEL_ID,
prisma_sync::location::MODEL_ID,
prisma_sync::object::MODEL_ID,
prisma_sync::exif_data::MODEL_ID,
prisma_sync::file_path::MODEL_ID,
prisma_sync::label::MODEL_ID,
prisma_sync::tag_on_object::MODEL_ID,
prisma_sync::label_on_object::MODEL_ID,
];
model_id: ModelId,
batch_size: i64,
) -> Result<(Vec<cloud_crdt_operation::id::Type>, Vec<CRDTOperation>), Error> {
self.db
.cloud_crdt_operation()
.find_many(vec![cloud_crdt_operation::model::equals(i32::from(
model_id,
))])
.take(batch_size)
.order_by(cloud_crdt_operation::timestamp::order(SortOrder::Asc))
.exec()
.await?
.into_iter()
.map(from_cloud_crdt_ops)
.collect::<Result<(Vec<_>, Vec<_>), _>>()
}
let _lock_guard = self.sync_lock.lock().await;
#[instrument(skip(self))]
async fn ingest_by_model(&self, model_id: ModelId) -> Result<usize, Error> {
let mut total_count = 0;
let mut ops_fut_by_model = INGEST_ORDER
.iter()
.map(|&model_id| (model_id, vec![]))
.collect::<BTreeMap<_, _>>();
let mut buckets = (0..self.available_parallelism)
.map(|_| FuturesUnordered::new())
.collect::<Vec<_>>();
for (device_pub_id, CompressedCRDTOperationsPerModel(ops_per_model)) in compressed_ops {
for (model_id, ops_per_record) in ops_per_model {
for (record_id, ops) in ops_per_record {
ops_fut_by_model
.get_mut(&model_id)
.ok_or(Error::InvalidModelId(model_id))?
.push(process_crdt_operations(
&self.clock,
&self.timestamp_per_device,
&self.db,
device_pub_id.into(),
model_id,
record_id,
ops,
));
let mut total_fetch_time = Duration::ZERO;
let mut total_compression_time = Duration::ZERO;
let mut total_work_distribution_time = Duration::ZERO;
let mut total_process_time = Duration::ZERO;
loop {
let fetching_start = Instant::now();
let (ops_ids, ops) = self
.fetch_cloud_crdt_ops(model_id, INGESTION_BATCH_SIZE)
.await?;
if ops_ids.is_empty() {
break;
}
total_fetch_time += fetching_start.elapsed();
let messages_count = ops.len();
debug!(
messages_count,
first_message = ?ops
.first()
.map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)),
last_message = ?ops
.last()
.map_or_else(|| SystemTime::UNIX_EPOCH.into(), |op| timestamp_to_datetime(op.timestamp)),
"Messages by model to ingest",
);
let compression_start = Instant::now();
let mut compressed_map =
BTreeMap::<Uuid, HashMap<Vec<u8>, (RecordId, Vec<CompressedCRDTOperation>)>>::new();
for CRDTOperation {
device_pub_id,
timestamp,
model_id: _, // Ignoring model_id as we know it already
record_id,
data,
} in ops
{
let records = compressed_map.entry(device_pub_id).or_default();
// Can't use RecordId as a key because rmpv::Value doesn't implement Hash + Eq.
// So we use its serialized bytes as a key.
let record_id_bytes =
rmp_serde::to_vec_named(&record_id).expect("already serialized to Value");
match records.entry(record_id_bytes) {
Entry::Occupied(mut entry) => {
entry
.get_mut()
.1
.push(CompressedCRDTOperation { timestamp, data });
}
Entry::Vacant(entry) => {
entry
.insert((record_id, vec![CompressedCRDTOperation { timestamp, data }]));
}
}
}
// Now that we've separated all operations by their record_ids, we can apply an optimization:
// records that possess only a single create operation get batched together
let mut create_only_ops: BTreeMap<Uuid, Vec<(RecordId, CompressedCRDTOperation)>> =
BTreeMap::new();
for (device_pub_id, records) in &mut compressed_map {
for (record_id, ops) in records.values_mut() {
if ops.len() == 1 && matches!(ops[0].data, CRDTOperationData::Create(_)) {
create_only_ops
.entry(*device_pub_id)
.or_default()
.push((mem::replace(record_id, rmpv::Value::Nil), ops.remove(0)));
}
}
}
total_count += bulk_process_of_create_only_ops(
self.available_parallelism,
Arc::clone(&self.clock),
Arc::clone(&self.timestamp_per_device),
Arc::clone(&self.db),
Arc::clone(&self.sync_lock),
model_id,
create_only_ops,
)
.await?;
total_compression_time += compression_start.elapsed();
let work_distribution_start = Instant::now();
compressed_map
.into_iter()
.flat_map(|(device_pub_id, records)| {
records.into_values().filter_map(move |(record_id, ops)| {
if record_id.is_nil() {
return None;
}
// We can process each record in parallel as they are independent
let clock = Arc::clone(&self.clock);
let timestamp_per_device = Arc::clone(&self.timestamp_per_device);
let db = Arc::clone(&self.db);
let device_pub_id = device_pub_id.into();
let sync_lock = Arc::clone(&self.sync_lock);
Some(async move {
let count = ops.len();
process_crdt_operations(
&clock,
&timestamp_per_device,
sync_lock,
&db,
device_pub_id,
model_id,
(record_id, ops),
)
.await
.map(|()| count)
})
})
})
.enumerate()
.for_each(|(idx, fut)| buckets[idx % self.available_parallelism].push(fut));
total_work_distribution_time += work_distribution_start.elapsed();
let processing_start = Instant::now();
let handles = buckets
.iter_mut()
.enumerate()
.filter(|(_idx, bucket)| !bucket.is_empty())
.map(|(idx, bucket)| {
let mut bucket = mem::take(bucket);
spawn(async move {
let mut ops_count = 0;
let processing_start = Instant::now();
while let Some(count) = bucket.try_next().await? {
ops_count += count;
}
debug!(
"Ingested {ops_count} operations in {:?}",
processing_start.elapsed()
);
Ok::<_, Error>((ops_count, idx, bucket))
})
})
.collect::<Vec<_>>();
let results = handles.try_join().await.map_err(Error::ProcessCrdtPanic)?;
total_process_time += processing_start.elapsed();
for res in results {
let (count, idx, bucket) = res?;
buckets[idx] = bucket;
total_count += count;
}
self.db
.cloud_crdt_operation()
.delete_many(vec![cloud_crdt_operation::id::in_vec(ops_ids)])
.exec()
.await?;
}
for model_id in INGEST_ORDER {
if let Some(futs) = ops_fut_by_model.remove(model_id) {
futs.try_join().await?;
}
}
debug!(
total_count,
?total_fetch_time,
?total_compression_time,
?total_work_distribution_time,
?total_process_time,
"Ingested all operations of this model"
);
Ok(total_count)
}
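The bucket scheme above caps the fan-out at `available_parallelism` spawned tasks, each draining its own `FuturesUnordered` of record-level futures. A minimal sketch of the round-robin distribution, with a doubling closure standing in for `process_crdt_operations`:

```rust
use futures::stream::{FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
    let parallelism = std::thread::available_parallelism().map_or(1, usize::from);
    let mut buckets: Vec<FuturesUnordered<_>> =
        (0..parallelism).map(|_| FuturesUnordered::new()).collect();

    // Round-robin the independent jobs across the buckets.
    for (idx, record) in (0..1_000u64).enumerate() {
        buckets[idx % parallelism].push(async move {
            record * 2 // stand-in for processing one record's ops
        });
    }

    // One spawned task per bucket drains its futures concurrently.
    let handles = buckets
        .into_iter()
        .map(|mut bucket| {
            tokio::spawn(async move {
                let mut count = 0usize;
                while let Some(_res) = bucket.next().await {
                    count += 1;
                }
                count
            })
        })
        .collect::<Vec<_>>();

    let mut total = 0;
    for handle in handles {
        total += handle.await.expect("bucket task panicked");
    }
    assert_eq!(total, 1_000);
}
```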
pub async fn ingest_ops(&self) -> Result<usize, Error> {
let mut total_count = 0;
// WARN: this order exists because sync messages MUST be processed in this exact order
// due to relationship dependencies between these tables.
total_count += self.ingest_by_model(prisma_sync::device::MODEL_ID).await?;
total_count += [
self.ingest_by_model(prisma_sync::storage_statistics::MODEL_ID),
self.ingest_by_model(prisma_sync::tag::MODEL_ID),
self.ingest_by_model(prisma_sync::location::MODEL_ID),
self.ingest_by_model(prisma_sync::object::MODEL_ID),
self.ingest_by_model(prisma_sync::label::MODEL_ID),
]
.try_join()
.await?
.into_iter()
.sum::<usize>();
total_count += [
self.ingest_by_model(prisma_sync::exif_data::MODEL_ID),
self.ingest_by_model(prisma_sync::file_path::MODEL_ID),
self.ingest_by_model(prisma_sync::tag_on_object::MODEL_ID),
self.ingest_by_model(prisma_sync::label_on_object::MODEL_ID),
]
.try_join()
.await?
.into_iter()
.sum::<usize>();
if self.tx.send(SyncEvent::Ingested).is_err() {
warn!("failed to send ingested message on `ingest_ops`");
}
Ok(())
Ok(total_count)
}
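The awaits above encode the dependency order as waves: `device` rows first on their own, then the models that reference only devices in one `try_join`, then the models that reference the previous wave. A sketch of the shape, with a stub in place of `ingest_by_model` and placeholder model ids:

```rust
use futures_concurrency::future::TryJoin;

// Stub standing in for Manager::ingest_by_model.
async fn ingest_model(model_id: u16) -> Result<usize, String> {
    Ok(usize::from(model_id))
}

#[tokio::main]
async fn main() -> Result<(), String> {
    // Wave 0: devices, which everything else references.
    let mut total = ingest_model(0).await?;

    // Wave 1: models that only depend on device rows, safely concurrent.
    total += [ingest_model(1), ingest_model(2), ingest_model(3)]
        .try_join()
        .await?
        .into_iter()
        .sum::<usize>();

    // Wave 2: models that reference rows created by wave 1.
    total += [ingest_model(4), ingest_model(5)]
        .try_join()
        .await?
        .into_iter()
        .sum::<usize>();

    println!("ingested {total} operations");
    Ok(())
}
```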
#[must_use]
@@ -450,6 +657,88 @@ impl Manager {
// }
}
async fn bulk_process_of_create_only_ops(
available_parallelism: usize,
clock: Arc<HLC>,
timestamp_per_device: TimestampPerDevice,
db: Arc<PrismaClient>,
sync_lock: Arc<Mutex<()>>,
model_id: ModelId,
create_only_ops: BTreeMap<Uuid, Vec<(RecordId, CompressedCRDTOperation)>>,
) -> Result<usize, Error> {
let buckets = (0..available_parallelism)
.map(|_| FuturesUnordered::new())
.collect::<Vec<_>>();
let mut bucket_idx = 0;
for (device_pub_id, records) in create_only_ops {
records
.into_iter()
.chunks(100)
.into_iter()
.for_each(|chunk| {
let ops = chunk.collect::<Vec<_>>();
buckets[bucket_idx % available_parallelism].push({
let clock = Arc::clone(&clock);
let timestamp_per_device = Arc::clone(&timestamp_per_device);
let db = Arc::clone(&db);
let device_pub_id = device_pub_id.into();
let sync_lock = Arc::clone(&sync_lock);
async move {
let count = ops.len();
bulk_ingest_create_only_ops(
&clock,
&timestamp_per_device,
&db,
device_pub_id,
model_id,
ops,
sync_lock,
)
.await
.map(|()| count)
}
});
bucket_idx += 1;
});
}
let handles = buckets
.into_iter()
.map(|mut bucket| {
spawn(async move {
let mut total_count = 0;
let process_creates_batch_start = Instant::now();
while let Some(count) = bucket.try_next().await? {
total_count += count;
}
debug!(
"Processed {total_count} creates in {:?}",
process_creates_batch_start.elapsed()
);
Ok::<_, Error>(total_count)
})
})
.collect::<Vec<_>>();
Ok(handles
.try_join()
.await
.map_err(Error::ProcessCrdtPanic)?
.into_iter()
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.sum())
}
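`chunks(100)` above batches each device's create-only ops before a batch is pushed into a bucket; the sub-iterators borrow from the `IntoChunks` adapter, which is why each chunk gets collected into its own `Vec`. A small sketch of the batching on plain integers:

```rust
use itertools::Itertools;

fn main() {
    let records: Vec<u32> = (0..250).collect();

    // Each chunk is collected into an owned Vec before being handed off,
    // mirroring `chunk.collect::<Vec<_>>()` in bulk_process_of_create_only_ops.
    let batches: Vec<Vec<u32>> = records
        .into_iter()
        .chunks(100)
        .into_iter()
        .map(|chunk| chunk.collect())
        .collect();

    assert_eq!(batches.len(), 3);
    assert_eq!(batches[2].len(), 50);
}
```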
impl OperationFactory for Manager {
fn get_clock(&self) -> &HLC {
&self.clock

View File

@@ -1,234 +0,0 @@
// mod mock_instance;
// use sd_core_sync::*;
// use sd_prisma::{prisma::location, prisma_sync};
// use sd_sync::*;
// use sd_utils::{msgpack, uuid_to_bytes};
// use mock_instance::Device;
// use tracing::info;
// use tracing_test::traced_test;
// use uuid::Uuid;
// const MOCK_LOCATION_NAME: &str = "Location 0";
// const MOCK_LOCATION_PATH: &str = "/User/Anon/Documents";
// async fn write_test_location(instance: &Device) -> location::Data {
// let location_pub_id = Uuid::new_v4();
// let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [
// sync_db_entry!(MOCK_LOCATION_NAME, location::name),
// sync_db_entry!(MOCK_LOCATION_PATH, location::path),
// ]
// .into_iter()
// .unzip();
// let location = instance
// .sync
// .write_op(
// &instance.db,
// instance.sync.shared_create(
// prisma_sync::location::SyncId {
// pub_id: uuid_to_bytes(&location_pub_id),
// },
// sync_ops,
// ),
// instance
// .db
// .location()
// .create(uuid_to_bytes(&location_pub_id), db_ops),
// )
// .await
// .expect("failed to create mock location");
// instance
// .sync
// .write_ops(&instance.db, {
// let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [
// sync_db_entry!(1024, location::total_capacity),
// sync_db_entry!(512, location::available_capacity),
// ]
// .into_iter()
// .unzip();
// (
// sync_ops
// .into_iter()
// .map(|(k, v)| {
// instance.sync.shared_update(
// prisma_sync::location::SyncId {
// pub_id: uuid_to_bytes(&location_pub_id),
// },
// k,
// v,
// )
// })
// .collect::<Vec<_>>(),
// instance
// .db
// .location()
// .update(location::id::equals(location.id), db_ops),
// )
// })
// .await
// .expect("failed to create mock location");
// location
// }
// #[tokio::test]
// #[traced_test]
// async fn writes_operations_and_rows_together() -> Result<(), Box<dyn std::error::Error>> {
// let instance = Device::new(Uuid::new_v4()).await;
// write_test_location(&instance).await;
// let operations = instance
// .db
// .crdt_operation()
// .find_many(vec![])
// .exec()
// .await?;
// // 1 create, 2 update
// assert_eq!(operations.len(), 3);
// assert_eq!(operations[0].model, prisma_sync::location::MODEL_ID as i32);
// let out = instance.sync.get_ops(100, vec![]).await?;
// assert_eq!(out.len(), 3);
// let locations = instance.db.location().find_many(vec![]).exec().await?;
// assert_eq!(locations.len(), 1);
// let location = locations.first().unwrap();
// assert_eq!(location.name.as_deref(), Some(MOCK_LOCATION_NAME));
// assert_eq!(location.path.as_deref(), Some(MOCK_LOCATION_PATH));
// Ok(())
// }
// #[tokio::test]
// #[traced_test]
// async fn operations_send_and_ingest() -> Result<(), Box<dyn std::error::Error>> {
// let instance1 = Device::new(Uuid::new_v4()).await;
// let instance2 = Device::new(Uuid::new_v4()).await;
// let mut instance2_sync_rx = instance2.sync_rx.resubscribe();
// info!("Created instances!");
// Device::pair(&instance1, &instance2).await;
// info!("Paired instances!");
// write_test_location(&instance1).await;
// info!("Created mock location!");
// assert!(matches!(
// instance2_sync_rx.recv().await?,
// SyncEvent::Ingested
// ));
// let out = instance2.sync.get_ops(100, vec![]).await?;
// assert_locations_equality(
// &instance1.db.location().find_many(vec![]).exec().await?[0],
// &instance2.db.location().find_many(vec![]).exec().await?[0],
// );
// assert_eq!(out.len(), 3);
// instance1.teardown().await;
// instance2.teardown().await;
// Ok(())
// }
// #[tokio::test]
// async fn no_update_after_delete() -> Result<(), Box<dyn std::error::Error>> {
// let instance1 = Device::new(Uuid::new_v4()).await;
// let instance2 = Device::new(Uuid::new_v4()).await;
// let mut instance2_sync_rx = instance2.sync_rx.resubscribe();
// Device::pair(&instance1, &instance2).await;
// let location = write_test_location(&instance1).await;
// assert!(matches!(
// instance2_sync_rx.recv().await?,
// SyncEvent::Ingested
// ));
// instance2
// .sync
// .write_op(
// &instance2.db,
// instance2.sync.shared_delete(prisma_sync::location::SyncId {
// pub_id: location.pub_id.clone(),
// }),
// instance2.db.location().delete_many(vec![]),
// )
// .await?;
// assert!(matches!(
// instance1.sync_rx.resubscribe().recv().await?,
// SyncEvent::Ingested
// ));
// instance1
// .sync
// .write_op(
// &instance1.db,
// instance1.sync.shared_update(
// prisma_sync::location::SyncId {
// pub_id: location.pub_id.clone(),
// },
// "name",
// msgpack!("New Location"),
// ),
// instance1.db.location().find_many(vec![]),
// )
// .await?;
// // one spare update operation that actually gets ignored by instance 2
// assert_eq!(instance1.db.crdt_operation().count(vec![]).exec().await?, 5);
// assert_eq!(instance2.db.crdt_operation().count(vec![]).exec().await?, 4);
// assert_eq!(instance1.db.location().count(vec![]).exec().await?, 0);
// // the whole point of the test - the update (which is ingested as an upsert) should be ignored
// assert_eq!(instance2.db.location().count(vec![]).exec().await?, 0);
// instance1.teardown().await;
// instance2.teardown().await;
// Ok(())
// }
// fn assert_locations_equality(l1: &location::Data, l2: &location::Data) {
// assert_eq!(l1.pub_id, l2.pub_id, "pub id");
// assert_eq!(l1.name, l2.name, "name");
// assert_eq!(l1.path, l2.path, "path");
// assert_eq!(l1.total_capacity, l2.total_capacity, "total capacity");
// assert_eq!(
// l1.available_capacity, l2.available_capacity,
// "available capacity"
// );
// assert_eq!(l1.size_in_bytes, l2.size_in_bytes, "size in bytes");
// assert_eq!(l1.is_archived, l2.is_archived, "is archived");
// assert_eq!(
// l1.generate_preview_media, l2.generate_preview_media,
// "generate preview media"
// );
// assert_eq!(
// l1.sync_preview_media, l2.sync_preview_media,
// "sync preview media"
// );
// assert_eq!(l1.hidden, l2.hidden, "hidden");
// assert_eq!(l1.date_created, l2.date_created, "date created");
// assert_eq!(l1.scan_state, l2.scan_state, "scan state");
// assert_eq!(l1.instance_id, l2.instance_id, "instance id");
// }

View File

@@ -1,143 +0,0 @@
// use sd_core_sync::*;
// use sd_prisma::prisma;
// use sd_sync::CompressedCRDTOperationsPerModelPerDevice;
// use std::sync::{atomic::AtomicBool, Arc};
// use tokio::{fs, spawn, sync::broadcast};
// use tracing::{info, instrument, warn, Instrument};
// use uuid::Uuid;
// fn db_path(id: Uuid) -> String {
// format!("/tmp/test-{id}.db")
// }
// #[derive(Clone)]
// pub struct Device {
// pub pub_id: DevicePubId,
// pub db: Arc<prisma::PrismaClient>,
// pub sync: Arc<sd_core_sync::SyncManager>,
// pub sync_rx: Arc<broadcast::Receiver<SyncEvent>>,
// }
// impl Device {
// pub async fn new(id: Uuid) -> Arc<Self> {
// let url = format!("file:{}", db_path(id));
// let device_pub_id = DevicePubId::from(id);
// let db = Arc::new(
// prisma::PrismaClient::_builder()
// .with_url(url.to_string())
// .build()
// .await
// .unwrap(),
// );
// db._db_push().await.unwrap();
// db.device()
// .create(device_pub_id.to_db(), vec![])
// .exec()
// .await
// .unwrap();
// // let (sync, sync_rx) = sd_core_sync::SyncManager::new(
// // Arc::clone(&db),
// // &device_pub_id,
// // Arc::new(AtomicBool::new(true)),
// // Default::default(),
// // )
// // .await
// // .expect("failed to create sync manager");
// // Arc::new(Self {
// // pub_id: device_pub_id,
// // db,
// // sync: Arc::new(sync),
// // sync_rx: Arc::new(sync_rx),
// // })
// }
// pub async fn teardown(&self) {
// fs::remove_file(db_path(Uuid::from(&self.pub_id)))
// .await
// .unwrap();
// }
// pub async fn pair(instance1: &Arc<Self>, instance2: &Arc<Self>) {
// #[instrument(skip(left, right))]
// async fn half(left: &Arc<Device>, right: &Arc<Device>, context: &'static str) {
// left.db
// .device()
// .create(right.pub_id.to_db(), vec![])
// .exec()
// .await
// .unwrap();
// spawn({
// let mut sync_rx_left = left.sync_rx.resubscribe();
// let right = Arc::clone(right);
// async move {
// while let Ok(msg) = sync_rx_left.recv().await {
// info!(?msg, "sync_rx_left received message");
// if matches!(msg, SyncEvent::Created) {
// right
// .sync
// .ingest
// .event_tx
// .send(ingest::Event::Notification)
// .await
// .unwrap();
// info!("sent notification to instance 2");
// }
// }
// }
// .in_current_span()
// });
// spawn({
// let left = Arc::clone(left);
// let right = Arc::clone(right);
// async move {
// while let Ok(msg) = right.sync.ingest.req_rx.recv().await {
// info!(?msg, "right instance received request");
// match msg {
// ingest::Request::Messages { timestamps, tx } => {
// let messages = left.sync.get_ops(100, timestamps).await.unwrap();
// let ingest = &right.sync.ingest;
// ingest
// .event_tx
// .send(ingest::Event::Messages(ingest::MessagesEvent {
// messages: CompressedCRDTOperationsPerModelPerDevice::new(
// messages,
// ),
// has_more: false,
// device_pub_id: left.pub_id.clone(),
// wait_tx: None,
// }))
// .await
// .unwrap();
// if tx.send(()).is_err() {
// warn!("failed to send ack to instance 1");
// }
// }
// ingest::Request::FinishedIngesting => {
// right.sync.tx.send(SyncEvent::Ingested).unwrap();
// }
// }
// }
// }
// .in_current_span()
// });
// }
// half(instance1, instance2, "instance1 -> instance2").await;
// half(instance2, instance1, "instance2 -> instance1").await;
// }
// }

View File

@@ -16,7 +16,7 @@ use sd_cloud_schema::{
use sd_crypto::{CryptoRng, SeedableRng};
use sd_utils::error::report_error;
use std::{pin::pin, sync::atomic::Ordering};
use std::pin::pin;
use async_stream::stream;
use futures::{FutureExt, StreamExt};
@@ -51,7 +51,19 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
|node, (access_token, refresh_token): (auth::AccessToken, auth::RefreshToken)| async move {
use sd_cloud_schema::devices;
if node.cloud_services.has_bootstrapped.load(Ordering::Acquire) {
// Only allow a single bootstrap request in flight at a time
let mut has_bootstrapped_lock = node
.cloud_services
.has_bootstrapped
.try_lock()
.map_err(|_| {
rspc::Error::new(
rspc::ErrorCode::Conflict,
String::from("Bootstrap in progress"),
)
})?;
if *has_bootstrapped_lock {
return Err(rspc::Error::new(
rspc::ErrorCode::Conflict,
String::from("Already bootstrapped"),
@@ -204,16 +216,22 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
}| {
let node = &node;
async move { initialize_cloud_sync(pub_id, library, node).await }
async move {
match initialize_cloud_sync(pub_id, library, node).await {
// If we don't have this library locally, we haven't joined this group yet
Ok(()) | Err(LibraryManagerError::LibraryNotFound) => {
Ok(())
}
Err(e) => Err(e),
}
}
},
)
.collect::<Vec<_>>()
.try_join()
.await?;
node.cloud_services
.has_bootstrapped
.store(true, Ordering::Release);
*has_bootstrapped_lock = true;
Ok(())
},
@@ -243,7 +261,14 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
.procedure(
"hasBootstrapped",
R.query(|node, _: ()| async move {
Ok(node.cloud_services.has_bootstrapped.load(Ordering::Relaxed))
// If we can't lock immediately, it means that there is a bootstrap in progress
// so we haven't bootstrapped yet
Ok(node
.cloud_services
.has_bootstrapped
.try_lock()
.map(|lock| *lock)
.unwrap_or(false))
}),
)
}
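The `AtomicBool` became a `tokio::sync::Mutex<bool>` so that an in-flight bootstrap is observable: a failed `try_lock` means one is running. A sketch of the guard pattern in isolation (the error strings and bootstrap body are placeholders):

```rust
use std::sync::Arc;
use tokio::sync::Mutex;

async fn bootstrap(has_bootstrapped: Arc<Mutex<bool>>) -> Result<(), String> {
    // Holding the guard for the whole bootstrap makes a second caller
    // fail fast with "in progress" instead of racing us.
    let mut guard = has_bootstrapped
        .try_lock()
        .map_err(|_| String::from("Bootstrap in progress"))?;

    if *guard {
        return Err(String::from("Already bootstrapped"));
    }

    // ... device registration, cloud sync initialization, etc. ...

    *guard = true; // only flipped once everything above succeeded
    Ok(())
}

fn has_bootstrapped_query(has_bootstrapped: &Mutex<bool>) -> bool {
    // A held lock means a bootstrap is still in flight, so report false.
    has_bootstrapped.try_lock().map(|guard| *guard).unwrap_or(false)
}

#[tokio::main]
async fn main() {
    let flag = Arc::new(Mutex::new(false));
    bootstrap(Arc::clone(&flag)).await.expect("first bootstrap succeeds");
    assert!(has_bootstrapped_query(&flag));
    assert!(bootstrap(flag).await.is_err()); // already bootstrapped
}
```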

View File

@@ -28,8 +28,8 @@ use sd_prisma::{
prisma::{file_path, location, object},
prisma_sync,
};
use sd_sync::OperationFactory;
use sd_utils::{db::maybe_missing, error::FileIOError, msgpack};
use sd_sync::{sync_db_entry, sync_db_nullable_entry, sync_entry, OperationFactory};
use sd_utils::{db::maybe_missing, error::FileIOError};
use std::{
ffi::OsString,
@@ -195,19 +195,19 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
)
})?;
let (sync_param, db_param) = sync_db_nullable_entry!(args.note, object::note);
sync.write_op(
db,
sync.shared_update(
prisma_sync::object::SyncId {
pub_id: object.pub_id,
},
object::note::NAME,
msgpack!(&args.note),
),
db.object().update(
object::id::equals(args.id),
vec![object::note::set(args.note)],
[sync_param],
),
db.object()
.update(object::id::equals(args.id), vec![db_param])
.select(object::select!({ id })),
)
.await?;
@@ -241,19 +241,19 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
)
})?;
let (sync_param, db_param) = sync_db_entry!(args.favorite, object::favorite);
sync.write_op(
db,
sync.shared_update(
prisma_sync::object::SyncId {
pub_id: object.pub_id,
},
object::favorite::NAME,
msgpack!(&args.favorite),
),
db.object().update(
object::id::equals(args.id),
vec![object::favorite::set(Some(args.favorite))],
[sync_param],
),
db.object()
.update(object::id::equals(args.id), vec![db_param])
.select(object::select!({ id })),
)
.await?;
@@ -346,19 +346,20 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
let date_accessed = Utc::now().into();
let (ops, object_ids): (Vec<_>, Vec<_>) = objects
let (ops, object_ids) = objects
.into_iter()
.map(|d| {
.map(|object| {
(
sync.shared_update(
prisma_sync::object::SyncId { pub_id: d.pub_id },
object::date_accessed::NAME,
msgpack!(date_accessed),
prisma_sync::object::SyncId {
pub_id: object.pub_id,
},
[sync_entry!(date_accessed, object::date_accessed)],
),
d.id,
object.id,
)
})
.unzip();
.unzip::<_, _, Vec<_>, Vec<_>>();
if !ops.is_empty() && !object_ids.is_empty() {
sync.write_ops(
@@ -392,19 +393,20 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
.exec()
.await?;
let (ops, object_ids): (Vec<_>, Vec<_>) = objects
let (ops, object_ids) = objects
.into_iter()
.map(|d| {
.map(|object| {
(
sync.shared_update(
prisma_sync::object::SyncId { pub_id: d.pub_id },
object::date_accessed::NAME,
msgpack!(nil),
prisma_sync::object::SyncId {
pub_id: object.pub_id,
},
[sync_entry!(nil, object::date_accessed)],
),
d.id,
object.id,
)
})
.unzip();
.unzip::<_, _, Vec<_>, Vec<_>>();
if !ops.is_empty() && !object_ids.is_empty() {
sync.write_ops(
@@ -487,11 +489,32 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
path = %full_path.display(),
"File not found in the file system, will remove from database;",
);
library
let file_path_pub_id = library
.db
.file_path()
.delete(file_path::id::equals(args.file_path_ids[0]))
.find_unique(file_path::id::equals(args.file_path_ids[0]))
.select(file_path::select!({ pub_id }))
.exec()
.await?
.ok_or(LocationError::FilePath(FilePathError::IdNotFound(
args.file_path_ids[0],
)))?
.pub_id;
library
.sync
.write_op(
&library.db,
library.sync.shared_delete(
prisma_sync::file_path::SyncId {
pub_id: file_path_pub_id,
},
),
library.db.file_path().delete(file_path::id::equals(
args.file_path_ids[0],
)),
)
.await
.map_err(LocationError::from)?;

View File

@@ -116,7 +116,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
.procedure(
"delete",
R.with2(library())
.mutation(|(_, library), label_id: i32| async move {
.mutation(|(_, library), label_id: label::id::Type| async move {
let Library { db, sync, .. } = library.as_ref();
let label = db
@@ -131,6 +131,35 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
)
})?;
let delete_ops = db
.label_on_object()
.find_many(vec![label_on_object::label_id::equals(label_id)])
.select(label_on_object::select!({ object: select { pub_id } }))
.exec()
.await?
.into_iter()
.map(|label_on_object| {
sync.relation_delete(prisma_sync::label_on_object::SyncId {
label: prisma_sync::label::SyncId {
name: label.name.clone(),
},
object: prisma_sync::object::SyncId {
pub_id: label_on_object.object.pub_id,
},
})
})
.collect::<Vec<_>>();
sync.write_ops(
db,
(
delete_ops,
db.label_on_object()
.delete_many(vec![label_on_object::label_id::equals(label_id)]),
),
)
.await?;
sync.write_op(
db,
sync.shared_delete(prisma_sync::label::SyncId { name: label.name }),

View File

@@ -69,7 +69,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
let pub_id = Uuid::now_v7().as_bytes().to_vec();
let date_created: DateTime<FixedOffset> = Utc::now().into();
let (sync_params, db_params): (Vec<_>, Vec<_>) = chain_optional_iter(
let (sync_params, db_params) = chain_optional_iter(
[
sync_db_entry!(date_created, saved_search::date_created),
sync_db_entry!(args.name, saved_search::name),
@@ -96,7 +96,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
],
)
.into_iter()
.unzip();
.unzip::<_, _, Vec<_>, Vec<_>>();
sync.write_op(
db,
@@ -106,7 +106,9 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
},
sync_params,
),
db.saved_search().create(pub_id, db_params),
db.saved_search()
.create(pub_id, db_params)
.select(saved_search::select!({ id })),
)
.await?;
@@ -162,7 +164,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
rspc::Error::new(rspc::ErrorCode::NotFound, "search not found".into())
})?;
let (ops, db_params): (Vec<_>, Vec<_>) = chain_optional_iter(
let (sync_params, db_params) = chain_optional_iter(
[sync_db_entry!(updated_at, saved_search::date_modified)],
[
option_sync_db_entry!(args.name.flatten(), saved_search::name),
@@ -173,34 +175,23 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
],
)
.into_iter()
.map(|((k, v), p)| {
(
sync.shared_update(
prisma_sync::saved_search::SyncId {
pub_id: search.pub_id.clone(),
},
k,
v,
),
p,
)
})
.unzip();
.unzip::<_, _, Vec<_>, Vec<_>>();
if !ops.is_empty() && !db_params.is_empty() {
sync.write_ops(
db,
(
ops,
db.saved_search()
.update_unchecked(saved_search::id::equals(id), db_params),
),
)
.await?;
sync.write_op(
db,
sync.shared_update(
prisma_sync::saved_search::SyncId {
pub_id: search.pub_id.clone(),
},
sync_params,
),
db.saved_search()
.update_unchecked(saved_search::id::equals(id), db_params),
)
.await?;
invalidate_query!(library, "search.saved.list");
invalidate_query!(library, "search.saved.get");
}
invalidate_query!(library, "search.saved.list");
invalidate_query!(library, "search.saved.get");
Ok(())
}

View File

@@ -4,8 +4,7 @@ use sd_prisma::{
prisma::{device, file_path, object, tag, tag_on_object},
prisma_sync,
};
use sd_sync::{option_sync_db_entry, sync_entry, OperationFactory};
use sd_utils::{msgpack, uuid_to_bytes};
use sd_sync::{option_sync_db_entry, sync_db_entry, sync_entry, OperationFactory};
use std::collections::BTreeMap;
@@ -14,7 +13,6 @@ use itertools::{Either, Itertools};
use rspc::{alpha::AlphaRouter, ErrorCode};
use serde::{Deserialize, Serialize};
use specta::Type;
use uuid::Uuid;
use super::{utils::library, Ctx, R};
@@ -185,17 +183,6 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
})
.await?;
macro_rules! sync_id {
($pub_id:expr) => {
prisma_sync::tag_on_object::SyncId {
tag: prisma_sync::tag::SyncId {
pub_id: tag.pub_id.clone(),
},
object: prisma_sync::object::SyncId { pub_id: $pub_id },
}
};
}
if args.unassign {
let query = db.tag_on_object().delete_many(vec![
tag_on_object::tag_id::equals(args.tag_id),
@@ -220,63 +207,20 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
.into_iter()
.filter_map(|fp| fp.object.map(|o| o.pub_id)),
)
.map(|pub_id| sync.relation_delete(sync_id!(pub_id)))
.map(|pub_id| {
sync.relation_delete(prisma_sync::tag_on_object::SyncId {
tag: prisma_sync::tag::SyncId {
pub_id: tag.pub_id.clone(),
},
object: prisma_sync::object::SyncId { pub_id },
})
})
.collect::<Vec<_>>();
if !ops.is_empty() {
sync.write_ops(db, (ops, query)).await?;
}
} else {
let mut ops = vec![];
let db_params: (Vec<_>, Vec<_>) = file_paths
.iter()
.filter(|fp| fp.is_dir.unwrap_or_default() && fp.object.is_none())
.map(|fp| {
let id = uuid_to_bytes(&Uuid::now_v7());
let device_pub_id = sync.device_pub_id.to_db();
ops.push(sync.shared_create(
prisma_sync::object::SyncId { pub_id: id.clone() },
[sync_entry!(
prisma_sync::device::SyncId {
pub_id: device_pub_id.clone(),
},
object::device
)],
));
ops.push(sync.shared_update(
prisma_sync::file_path::SyncId {
pub_id: fp.pub_id.clone(),
},
file_path::object::NAME,
msgpack!(id),
));
(
db.object().create(
id.clone(),
vec![object::device::connect(device::pub_id::equals(
device_pub_id,
))],
),
db.file_path().update(
file_path::id::equals(fp.id),
vec![file_path::object::connect(object::pub_id::equals(
id,
))],
),
)
})
.unzip();
if ops.is_empty() {
return Ok(());
}
let (new_objects, _) = sync.write_ops(db, (ops, db_params)).await?;
let (sync_ops, db_creates) = objects
.into_iter()
.map(|o| (o.id, o.pub_id))
@@ -285,22 +229,23 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
.into_iter()
.filter_map(|fp| fp.object.map(|o| (o.id, o.pub_id))),
)
.chain(new_objects.into_iter().map(|o| (o.id, o.pub_id)))
.fold(
(vec![], vec![]),
|(mut sync_ops, mut db_creates), (id, pub_id)| {
let device_pub_id = sync.device_pub_id.to_db();
sync_ops.push(sync.relation_create(
sync_id!(pub_id),
.map(|(id, pub_id)| {
(
sync.relation_create(
prisma_sync::tag_on_object::SyncId {
tag: prisma_sync::tag::SyncId {
pub_id: tag.pub_id.clone(),
},
object: prisma_sync::object::SyncId { pub_id },
},
[sync_entry!(
prisma_sync::device::SyncId {
pub_id: device_pub_id.clone(),
pub_id: sync.device_pub_id.to_db(),
},
tag_on_object::device
)],
));
db_creates.push(tag_on_object::CreateUnchecked {
),
tag_on_object::CreateUnchecked {
tag_id: args.tag_id,
object_id: id,
_params: vec![
@@ -309,24 +254,21 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
)),
tag_on_object::device_id::set(Some(device_id)),
],
});
},
)
})
.unzip::<_, _, Vec<_>, Vec<_>>();
(sync_ops, db_creates)
},
);
if sync_ops.is_empty() && db_creates.is_empty() {
return Ok(());
if !sync_ops.is_empty() && !db_creates.is_empty() {
sync.write_ops(
db,
(
sync_ops,
db.tag_on_object().create_many(db_creates).skip_duplicates(),
),
)
.await?;
}
sync.write_ops(
db,
(
sync_ops,
db.tag_on_object().create_many(db_creates).skip_duplicates(),
),
)
.await?;
}
invalidate_query!(library, "tags.getForObject");
@@ -344,13 +286,17 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
pub color: Option<String>,
}
R.with2(library())
.mutation(|(_, library), args: TagUpdateArgs| async move {
R.with2(library()).mutation(
|(_, library), TagUpdateArgs { id, name, color }: TagUpdateArgs| async move {
if name.is_none() && color.is_none() {
return Ok(());
}
let Library { sync, db, .. } = library.as_ref();
let tag = db
.tag()
.find_unique(tag::id::equals(args.id))
.find_unique(tag::id::equals(id))
.select(tag::select!({ pub_id }))
.exec()
.await?
@@ -359,68 +305,88 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
"Error finding tag in db".into(),
))?;
db.tag()
.update(
tag::id::equals(args.id),
vec![tag::date_modified::set(Some(Utc::now().into()))],
)
.exec()
.await?;
let (sync_params, db_params): (Vec<_>, Vec<_>) = [
option_sync_db_entry!(args.name, tag::name),
option_sync_db_entry!(args.color, tag::color),
let (sync_params, db_params) = [
option_sync_db_entry!(name, tag::name),
option_sync_db_entry!(color, tag::color),
Some(sync_db_entry!(Utc::now(), tag::date_modified)),
]
.into_iter()
.flatten()
.unzip();
.unzip::<_, _, Vec<_>, Vec<_>>();
if sync_params.is_empty() && db_params.is_empty() {
return Ok(());
}
sync.write_ops(
sync.write_op(
db,
(
sync_params
.into_iter()
.map(|(k, v)| {
sync.shared_update(
prisma_sync::tag::SyncId {
pub_id: tag.pub_id.clone(),
},
k,
v,
)
})
.collect(),
db.tag().update(tag::id::equals(args.id), db_params),
sync.shared_update(
prisma_sync::tag::SyncId {
pub_id: tag.pub_id.clone(),
},
sync_params,
),
db.tag()
.update(tag::id::equals(id), db_params)
.select(tag::select!({ id })),
)
.await?;
invalidate_query!(library, "tags.list");
Ok(())
})
},
)
})
.procedure(
"delete",
R.with2(library())
.mutation(|(_, library), tag_id: i32| async move {
library
.db
.tag_on_object()
.delete_many(vec![tag_on_object::tag_id::equals(tag_id)])
.exec()
.await?;
.mutation(|(_, library), tag_id: tag::id::Type| async move {
let Library { sync, db, .. } = &*library;
library
.db
let tag_pub_id = db
.tag()
.delete(tag::id::equals(tag_id))
.find_unique(tag::id::equals(tag_id))
.select(tag::select!({ pub_id }))
.exec()
.await?;
.await?
.ok_or(rspc::Error::new(
rspc::ErrorCode::NotFound,
"Tag not found".to_string(),
))?
.pub_id;
let delete_ops = db
.tag_on_object()
.find_many(vec![tag_on_object::tag_id::equals(tag_id)])
.select(tag_on_object::select!({ object: select { pub_id } }))
.exec()
.await?
.into_iter()
.map(|tag_on_object| {
sync.relation_delete(prisma_sync::tag_on_object::SyncId {
tag: prisma_sync::tag::SyncId {
pub_id: tag_pub_id.clone(),
},
object: prisma_sync::object::SyncId {
pub_id: tag_on_object.object.pub_id,
},
})
})
.collect::<Vec<_>>();
sync.write_ops(
db,
(
delete_ops,
db.tag_on_object()
.delete_many(vec![tag_on_object::tag_id::equals(tag_id)]),
),
)
.await?;
sync.write_op(
db,
sync.shared_delete(prisma_sync::tag::SyncId { pub_id: tag_pub_id }),
db.tag().delete(tag::id::equals(tag_id)),
)
.await?;
invalidate_query!(library, "tags.list");

View File

@@ -121,6 +121,7 @@ impl InvalidRequests {
}
/// `invalidate_query` is a macro which stores a list of all of its invocations so it can ensure all of the queries match the queries attached to the router.
///
/// This allows invalidation to remain type-safe even though the router keys are stringly typed.
/// ```ignore
/// invalidate_query!(

View File

@@ -22,7 +22,10 @@ pub(crate) struct LibraryArgs<T> {
pub(crate) struct LibraryArgsLike;
impl MwArgMapper for LibraryArgsLike {
type Input<T> = LibraryArgs<T> where T: Type + DeserializeOwned + 'static;
type Input<T>
= LibraryArgs<T>
where
T: Type + DeserializeOwned + 'static;
type State = Uuid;
fn map<T: Serialize + DeserializeOwned + Type + 'static>(

View File

@@ -1,61 +0,0 @@
use std::{
io,
pin::Pin,
task::{Context, Poll},
};
use axum::http::HeaderMap;
use bytes::Bytes;
use futures::Stream;
use http_body::Body;
use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncReadExt, Take};
use tokio_util::io::ReaderStream;
// This code was taken from: https://github.com/tower-rs/tower-http/blob/e8eb54966604ea7fa574a2a25e55232f5cfe675b/tower-http/src/services/fs/mod.rs#L30
pin_project! {
// NOTE: This could potentially be upstreamed to `http-body`.
/// Adapter that turns an [`impl AsyncRead`][tokio::io::AsyncRead] to an [`impl Body`][http_body::Body].
#[derive(Debug)]
pub struct AsyncReadBody<T> {
#[pin]
reader: ReaderStream<T>,
}
}
impl<T> AsyncReadBody<T>
where
T: AsyncRead,
{
pub(crate) fn with_capacity_limited(
read: T,
capacity: usize,
max_read_bytes: u64,
) -> AsyncReadBody<Take<T>> {
AsyncReadBody {
reader: ReaderStream::with_capacity(read.take(max_read_bytes), capacity),
}
}
}
impl<T> Body for AsyncReadBody<T>
where
T: AsyncRead,
{
type Data = Bytes;
type Error = io::Error;
fn poll_data(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Self::Data, Self::Error>>> {
self.project().reader.poll_next(cx)
}
fn poll_trailers(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Result<Option<HeaderMap>, Self::Error>> {
Poll::Ready(Ok(None))
}
}

View File

@@ -29,7 +29,7 @@ use std::{
use async_stream::stream;
use axum::{
body::{self, Body, BoxBody, Full, StreamBody},
body::Body,
extract::{self, State},
http::{HeaderMap, HeaderValue, Request, Response, StatusCode},
middleware,
@@ -38,8 +38,8 @@ use axum::{
Router,
};
use bytes::Bytes;
use http_body::combinators::UnsyncBoxBody;
use hyper::{header, upgrade::OnUpgrade};
use hyper_util::rt::TokioIo;
use mini_moka::sync::Cache;
use tokio::{
fs::{self, File},
@@ -50,7 +50,6 @@ use uuid::Uuid;
use self::{serve_file::serve_file, utils::*};
mod async_read_body;
mod mpsc_to_async_write;
mod serve_file;
mod utils;
@@ -97,7 +96,7 @@ async fn request_to_remote_node(
p2p: Arc<P2P>,
identity: RemoteIdentity,
mut request: Request<Body>,
) -> Response<UnsyncBoxBody<bytes::Bytes, axum::Error>> {
) -> Response<Body> {
let request_upgrade_header = request.headers().get(header::UPGRADE).cloned();
let maybe_client_upgrade = request.extensions_mut().remove::<OnUpgrade>();
@@ -121,17 +120,20 @@ async fn request_to_remote_node(
};
tokio::spawn(async move {
let Ok(mut request_upgraded) = request_upgraded.await.map_err(|e| {
let Ok(request_upgraded) = request_upgraded.await.map_err(|e| {
warn!(?e, "Error upgrading websocket request;");
}) else {
return;
};
let Ok(mut response_upgraded) = response_upgraded.await.map_err(|e| {
let Ok(response_upgraded) = response_upgraded.await.map_err(|e| {
warn!(?e, "Error upgrading websocket response;");
}) else {
return;
};
let mut request_upgraded = TokioIo::new(request_upgraded);
let mut response_upgraded = TokioIo::new(response_upgraded);
copy_bidirectional(&mut request_upgraded, &mut response_upgraded)
.await
.map_err(|e| {
@@ -147,7 +149,7 @@ async fn request_to_remote_node(
async fn get_or_init_lru_entry(
state: &LocalState,
extract::Path((lib_id, loc_id, path_id)): ExtractedPath,
) -> Result<(CacheValue, Arc<Library>), Response<BoxBody>> {
) -> Result<(CacheValue, Arc<Library>), Response<Body>> {
let library_id = Uuid::from_str(&lib_id).map_err(bad_request)?;
let location_id = loc_id.parse::<location::id::Type>().map_err(bad_request)?;
let file_path_id = path_id
@@ -245,7 +247,7 @@ pub fn base_router() -> Router<LocalState> {
} else {
StatusCode::INTERNAL_SERVER_ERROR
})
.body(body::boxed(Full::from("")))
.body(Body::from(""))
})?;
let metadata = file.metadata().await;
serve_file(
@@ -290,7 +292,7 @@ pub fn base_router() -> Router<LocalState> {
} else {
StatusCode::INTERNAL_SERVER_ERROR
})
.body(body::boxed(Full::from("")))
.body(Body::from(""))
})?;
let resp = InfallibleResponse::builder().header(
@@ -335,11 +337,11 @@ pub fn base_router() -> Router<LocalState> {
// TODO: Content Type
Ok(InfallibleResponse::builder().status(StatusCode::OK).body(
body::boxed(StreamBody::new(stream! {
Body::from_stream(stream! {
while let Some(item) = rx.recv().await {
yield item;
}
})),
}),
))
}
}
@@ -364,7 +366,7 @@ pub fn base_router() -> Router<LocalState> {
} else {
StatusCode::INTERNAL_SERVER_ERROR
})
.body(body::boxed(Full::from("")))
.body(Body::from(""))
})?;
let resp = InfallibleResponse::builder().header(
@@ -453,7 +455,7 @@ async fn infer_the_mime_type(
ext: &str,
file: &mut File,
metadata: &Metadata,
) -> Result<String, Response<BoxBody>> {
) -> Result<String, Response<Body>> {
let ext = ext.to_lowercase();
let mime_type = match ext.as_str() {
// AAC audio

View File

@@ -3,18 +3,18 @@ use crate::util::InfallibleResponse;
use std::{fs::Metadata, time::UNIX_EPOCH};
use axum::{
body::{self, BoxBody, Full, StreamBody},
body::Body,
http::{header, request, HeaderValue, Method, Response, StatusCode},
};
use http_range::HttpRange;
use tokio::{
fs::File,
io::{self, AsyncSeekExt, SeekFrom},
io::{self, AsyncReadExt, AsyncSeekExt, SeekFrom},
};
use tokio_util::io::ReaderStream;
use tracing::error;
use super::{async_read_body::AsyncReadBody, utils::*};
use super::utils::*;
// default capacity 64KiB
const DEFAULT_CAPACITY: usize = 65536;
@@ -31,7 +31,7 @@ pub(crate) async fn serve_file(
metadata: io::Result<Metadata>,
req: request::Parts,
mut resp: InfallibleResponse,
) -> Result<Response<BoxBody>, Response<BoxBody>> {
) -> Result<Response<Body>, Response<Body>> {
if let Ok(metadata) = metadata {
// We only accept range queries if `files.metadata() == Ok(_)`
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Ranges
@@ -48,7 +48,7 @@ pub(crate) async fn serve_file(
return Ok(resp
.status(StatusCode::OK)
.header("Content-Length", HeaderValue::from_static("0"))
.body(body::boxed(Full::from(""))));
.body(Body::from("")));
}
// ETag
@@ -73,9 +73,7 @@ pub(crate) async fn serve_file(
// Used for normal requests
if let Some(etag) = req.headers.get("If-None-Match") {
if etag.as_bytes() == etag_header.as_bytes() {
return Ok(resp
.status(StatusCode::NOT_MODIFIED)
.body(body::boxed(Full::from(""))));
return Ok(resp.status(StatusCode::NOT_MODIFIED).body(Body::from("")));
}
}
@@ -104,7 +102,7 @@ pub(crate) async fn serve_file(
.map_err(internal_server_error)?,
)
.status(StatusCode::RANGE_NOT_SATISFIABLE)
.body(body::boxed(Full::from(""))));
.body(Body::from("")));
}
let range = ranges.first().expect("checked above");
@@ -116,7 +114,7 @@ pub(crate) async fn serve_file(
.map_err(internal_server_error)?,
)
.status(StatusCode::RANGE_NOT_SATISFIABLE)
.body(body::boxed(Full::from(""))));
.body(Body::from("")));
}
file.seek(SeekFrom::Start(range.start))
@@ -140,14 +138,13 @@ pub(crate) async fn serve_file(
HeaderValue::from_str(&range.length.to_string())
.map_err(internal_server_error)?,
)
.body(body::boxed(AsyncReadBody::with_capacity_limited(
file,
.body(Body::from_stream(ReaderStream::with_capacity(
file.take(range.length),
DEFAULT_CAPACITY,
range.length,
))));
}
}
}
Ok(resp.body(body::boxed(StreamBody::new(ReaderStream::new(file)))))
Ok(resp.body(Body::from_stream(ReaderStream::new(file))))
}
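The `ReaderStream::with_capacity(file.take(...))` pairing above replaces the deleted `AsyncReadBody` adapter: tokio-util's stream feeds axum 0.7's `Body::from_stream` directly. A sketch of the range path in isolation, assuming `start` and `length` were already validated against the file size:

```rust
use axum::{
    body::Body,
    http::{Response, StatusCode},
};
use tokio::{
    fs::File,
    io::{AsyncReadExt, AsyncSeekExt, SeekFrom},
};
use tokio_util::io::ReaderStream;

const DEFAULT_CAPACITY: usize = 65536; // 64KiB, as above

async fn range_body(mut file: File, start: u64, length: u64) -> std::io::Result<Response<Body>> {
    file.seek(SeekFrom::Start(start)).await?;

    // `take` bounds the read at the range length; `ReaderStream` turns the
    // reader into a byte stream axum can use as a response body.
    let stream = ReaderStream::with_capacity(file.take(length), DEFAULT_CAPACITY);

    Ok(Response::builder()
        .status(StatusCode::PARTIAL_CONTENT)
        .header("Content-Length", length.to_string())
        .body(Body::from_stream(stream))
        .expect("static response parts are valid"))
}
```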

View File

@@ -3,50 +3,49 @@ use crate::util::InfallibleResponse;
use std::{fmt::Debug, panic::Location};
use axum::{
body::{self, BoxBody},
body::Body,
http::{self, HeaderValue, Method, Request, Response, StatusCode},
middleware::Next,
};
use http_body::Full;
use tracing::debug;
#[track_caller]
pub(crate) fn bad_request(e: impl Debug) -> http::Response<BoxBody> {
pub(crate) fn bad_request(e: impl Debug) -> http::Response<Body> {
debug!(caller = %Location::caller(), ?e, "400: Bad Request;");
InfallibleResponse::builder()
.status(StatusCode::BAD_REQUEST)
.body(body::boxed(Full::from("")))
.body(Body::from(""))
}
#[track_caller]
pub(crate) fn not_found(e: impl Debug) -> http::Response<BoxBody> {
pub(crate) fn not_found(e: impl Debug) -> http::Response<Body> {
debug!(caller = %Location::caller(), ?e, "404: Not Found;");
InfallibleResponse::builder()
.status(StatusCode::NOT_FOUND)
.body(body::boxed(Full::from("")))
.body(Body::from(""))
}
#[track_caller]
pub(crate) fn internal_server_error(e: impl Debug) -> http::Response<BoxBody> {
pub(crate) fn internal_server_error(e: impl Debug) -> http::Response<Body> {
debug!(caller = %Location::caller(), ?e, "500: Internal Server Error;");
InfallibleResponse::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(body::boxed(Full::from("")))
.body(Body::from(""))
}
#[track_caller]
pub(crate) fn not_implemented(e: impl Debug) -> http::Response<BoxBody> {
pub(crate) fn not_implemented(e: impl Debug) -> http::Response<Body> {
debug!(caller = %Location::caller(), ?e, "501: Not Implemented;");
InfallibleResponse::builder()
.status(StatusCode::NOT_IMPLEMENTED)
.body(body::boxed(Full::from("")))
.body(Body::from(""))
}
pub(crate) async fn cors_middleware<B>(req: Request<B>, next: Next<B>) -> Response<BoxBody> {
pub(crate) async fn cors_middleware(req: Request<Body>, next: Next) -> Response<Body> {
if req.method() == Method::OPTIONS {
return Response::builder()
.header("Access-Control-Allow-Methods", "GET, HEAD, POST, OPTIONS")
@@ -54,7 +53,7 @@ pub(crate) async fn cors_middleware<B>(req: Request<B>, next: Next<B>) -> Respon
.header("Access-Control-Allow-Headers", "*")
.header("Access-Control-Max-Age", "86400")
.status(StatusCode::OK)
.body(body::boxed(Full::from("")))
.body(Body::from(""))
.expect("Invalid static response!");
}
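axum 0.7 middleware is no longer generic over the request body: `Request<B>` and `Next<B>` become `Request<Body>` and a plain `Next`. A trimmed sketch of a complete middleware with the new signature (the header set here is reduced, not the exact one used above):

```rust
use axum::{
    body::Body,
    http::{Method, Request, Response, StatusCode},
    middleware::Next,
};

pub(crate) async fn cors_middleware(req: Request<Body>, next: Next) -> Response<Body> {
    // Preflight requests are answered directly, without hitting the router.
    if req.method() == Method::OPTIONS {
        return Response::builder()
            .header("Access-Control-Allow-Methods", "GET, HEAD, POST, OPTIONS")
            .header("Access-Control-Allow-Origin", "*")
            .status(StatusCode::OK)
            .body(Body::from(""))
            .expect("static response parts are valid");
    }

    // Everything else runs through the rest of the stack, then gets the
    // CORS header attached on the way out.
    let mut response = next.run(req).await;
    response.headers_mut().insert(
        "Access-Control-Allow-Origin",
        "*".parse().expect("valid header value"),
    );
    response
}
```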

View File

@@ -10,8 +10,13 @@ use crate::{
};
use sd_core_sync::{SyncEvent, SyncManager};
use sd_p2p::{Identity, RemoteIdentity};
use sd_prisma::prisma::{device, instance, location};
use sd_prisma::{
prisma::{self, device, instance, location, PrismaClient},
prisma_sync,
};
use sd_sync::ModelId;
use sd_utils::{
db,
error::{FileIOError, NonUtf8PathError},
@@ -29,6 +34,7 @@ use std::{
use chrono::Utc;
use futures_concurrency::future::{Join, TryJoin};
use prisma_client_rust::Raw;
use tokio::{
fs, io, spawn,
sync::{broadcast, RwLock},
@@ -462,6 +468,10 @@ impl Libraries {
);
let db = Arc::new(db::load_and_migrate(&db_url).await?);
// Configure database
configure_pragmas(&db).await?;
special_sync_indexes(&db).await?;
if let Some(create) = maybe_create_device {
create.to_query(&db).exec().await?;
}
@@ -541,6 +551,7 @@ impl Libraries {
)),
],
)
.select(instance::select!({ id }))
.exec()
.await?;
}
@@ -555,9 +566,6 @@ impl Libraries {
)
.await?;
// Configure database
configure_pragmas(&db).await?;
let library = Library::new(id, config, instance_id, identity, db, node, sync).await;
// This is an exception. Generally subscribe to this by `self.tx.subscribe`.
@@ -638,3 +646,54 @@ async fn sync_rx_actor(
}
}
}
async fn special_sync_indexes(db: &PrismaClient) -> Result<(), LibraryManagerError> {
async fn create_index(
db: &PrismaClient,
model_id: ModelId,
model_name: &str,
) -> Result<(), LibraryManagerError> {
db._execute_raw(Raw::new(
&format!(
"CREATE INDEX IF NOT EXISTS partial_index_model_{model_name} \
ON crdt_operation(model,record_id,kind,timestamp) \
WHERE model = {model_id}
"
),
vec![],
))
.exec()
.await?;
debug!(model_name, "Created sync partial index");
Ok(())
}
for (model_id, model_name) in [
(prisma_sync::device::MODEL_ID, prisma::device::NAME),
(
prisma_sync::storage_statistics::MODEL_ID,
prisma::storage_statistics::NAME,
),
(prisma_sync::tag::MODEL_ID, prisma::tag::NAME),
(prisma_sync::location::MODEL_ID, prisma::location::NAME),
(prisma_sync::object::MODEL_ID, prisma::object::NAME),
(prisma_sync::label::MODEL_ID, prisma::label::NAME),
(prisma_sync::exif_data::MODEL_ID, prisma::exif_data::NAME),
(prisma_sync::file_path::MODEL_ID, prisma::file_path::NAME),
(
prisma_sync::tag_on_object::MODEL_ID,
prisma::tag_on_object::NAME,
),
(
prisma_sync::label_on_object::MODEL_ID,
prisma::label_on_object::NAME,
),
] {
// Creating indexes sequentially just in case
create_index(db, model_id, model_name).await?;
}
Ok(())
}
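For reference, the statement `special_sync_indexes` emits for one model, sketched with a hypothetical model id of 8 (the real value comes from the matching `prisma_sync::*::MODEL_ID` constant). SQLite only indexes the rows matching the `WHERE` clause, so each per-model lookup stays small:

```rust
// Mirrors the Raw::new(...) statement built in special_sync_indexes.
fn partial_index_sql(model_name: &str, model_id: u16) -> String {
    format!(
        "CREATE INDEX IF NOT EXISTS partial_index_model_{model_name} \
         ON crdt_operation(model, record_id, kind, timestamp) \
         WHERE model = {model_id}"
    )
}

fn main() {
    // Hypothetical id; look up the real one in prisma_sync.
    println!("{}", partial_index_sql("file_path", 8));
}
```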

View File

@@ -27,6 +27,7 @@ use super::{
#[derive(Debug)]
pub(super) struct EventHandler {
location_id: location::id::Type,
location_pub_id: location::pub_id::Type,
library: Arc<Library>,
node: Arc<Node>,
last_events_eviction_check: Instant,
@@ -40,9 +41,18 @@ pub(super) struct EventHandler {
}
impl super::EventHandler for EventHandler {
fn new(location_id: location::id::Type, library: Arc<Library>, node: Arc<Node>) -> Self {
fn new(
location_id: location::id::Type,
location_pub_id: location::pub_id::Type,
library: Arc<Library>,
node: Arc<Node>,
) -> Self
where
Self: Sized,
{
Self {
location_id,
location_pub_id,
library,
node,
last_events_eviction_check: Instant::now(),
@@ -182,6 +192,7 @@ impl super::EventHandler for EventHandler {
&mut self.to_recalculate_size,
&mut self.path_and_instant_buffer,
self.location_id,
self.location_pub_id.clone(),
&self.library,
)
.await

Some files were not shown because too many files have changed in this diff