This commit is contained in:
Jamie Pine
2022-05-10 09:03:20 -07:00
parent 99938128bc
commit 81fa5f8a2f
32 changed files with 622 additions and 344 deletions

View File

@@ -2,6 +2,7 @@
"cSpell.words": [
"actix",
"bpfrpt",
"consts",
"creationdate",
"ipfs",
"Keepsafe",

View File

@@ -1,7 +1,7 @@
use std::time::{Duration, Instant};
use std::env::consts;
use std::time::{Duration, Instant};
use sdcore::{ClientCommand, ClientQuery, Core, CoreController, CoreEvent, CoreResponse};
use sdcore::{ClientCommand, ClientQuery, CoreController, CoreEvent, CoreResponse, Node};
use tauri::api::path;
use tauri::Manager;
mod menu;
@@ -38,14 +38,14 @@ async fn client_command_transport(
async fn main() {
let data_dir = path::data_dir().unwrap_or(std::path::PathBuf::from("./"));
// create an instance of the core
let (mut core, mut event_receiver) = Core::new(data_dir).await;
let (mut node, mut event_receiver) = Node::new(data_dir).await;
// run startup tasks
core.initializer().await;
// extract the core controller
let controller = core.get_controller();
// throw the core into a dedicated thread
node.initializer().await;
// extract the node controller
let controller = node.get_controller();
// throw the node into a dedicated thread
tokio::spawn(async move {
core.start().await;
node.start().await;
});
// create tauri app
tauri::Builder::default()
@@ -60,16 +60,14 @@ async fn main() {
window_shadows::set_shadow(&window, true).unwrap_or(());
if consts::OS == "windows" {
window.set_decorations(true);
println!("Hello World!");
window.set_decorations(true).unwrap_or(());
println!("Hello World!");
}
window.start_dragging().unwrap_or(());
});
}
// core event transport
tokio::spawn(async move {
let mut last = Instant::now();

View File

@@ -1,4 +1,4 @@
use sdcore::{ClientCommand, ClientQuery, Core, CoreController, CoreEvent, CoreResponse};
use sdcore::{ClientCommand, ClientQuery, CoreController, CoreEvent, CoreResponse, Node};
use std::{env, path::Path};
use actix::{
@@ -196,14 +196,14 @@ async fn setup() -> (
},
};
let (mut core, event_receiver) = Core::new(data_dir_path).await;
let (mut node, event_receiver) = Node::new(data_dir_path).await;
core.initializer().await;
node.initializer().await;
let controller = core.get_controller();
let controller = node.get_controller();
tokio::spawn(async move {
core.start().await;
node.start().await;
});
(web::Data::new(event_receiver), web::Data::new(controller))

View File

@@ -0,0 +1,177 @@
/*
Warnings:
- You are about to drop the `clients` table. If the table is not empty, all the data it contains will be lost.
- You are about to drop the column `client_id` on the `sync_events` table. All the data in the column will be lost.
- You are about to drop the column `client_id` on the `locations` table. All the data in the column will be lost.
- You are about to drop the column `client_id` on the `jobs` table. All the data in the column will be lost.
- You are about to drop the column `encryption` on the `tags` table. All the data in the column will be lost.
- You are about to drop the column `client_id` on the `volumes` table. All the data in the column will be lost.
- Added the required column `node_id` to the `sync_events` table without a default value. This is not possible if the table is not empty.
- Added the required column `node_id` to the `jobs` table without a default value. This is not possible if the table is not empty.
- Added the required column `node_id` to the `volumes` table without a default value. This is not possible if the table is not empty.
*/
-- Renames the "clients" concept to "nodes". SQLite cannot drop or alter
-- columns in place, so every affected table is rebuilt with the
-- create-"new_<table>" / copy / drop / rename pattern below.
-- DropIndex
DROP INDEX "clients_pub_id_key";
-- DropTable
-- FK enforcement is disabled while the old table is removed.
PRAGMA foreign_keys=off;
DROP TABLE "clients";
PRAGMA foreign_keys=on;
-- CreateTable
-- "nodes" replaces the dropped "clients" table.
CREATE TABLE "nodes" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"pub_id" TEXT NOT NULL,
"name" TEXT NOT NULL,
"platform" INTEGER NOT NULL DEFAULT 0,
"version" TEXT,
"online" BOOLEAN DEFAULT true,
"last_seen" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"timezone" TEXT,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- CreateTable
-- New "keys" table; referenced by files.key_id and file_paths.key_id below.
CREATE TABLE "keys" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"checksum" TEXT NOT NULL,
"name" TEXT,
"date_created" DATETIME DEFAULT CURRENT_TIMESTAMP,
"algorithm" INTEGER DEFAULT 0
);
-- RedefineTables
PRAGMA foreign_keys=OFF;
-- files: adds nullable key_id with FK to keys.
CREATE TABLE "new_files" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"cas_id" TEXT NOT NULL,
"integrity_checksum" TEXT,
"kind" INTEGER NOT NULL DEFAULT 0,
"size_in_bytes" TEXT NOT NULL,
"encryption" INTEGER NOT NULL DEFAULT 0,
"key_id" INTEGER,
"hidden" BOOLEAN NOT NULL DEFAULT false,
"favorite" BOOLEAN NOT NULL DEFAULT false,
"important" BOOLEAN NOT NULL DEFAULT false,
"has_thumbnail" BOOLEAN NOT NULL DEFAULT false,
"has_thumbstrip" BOOLEAN NOT NULL DEFAULT false,
"has_video_preview" BOOLEAN NOT NULL DEFAULT false,
"ipfs_id" TEXT,
"comment" TEXT,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_indexed" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "files_key_id_fkey" FOREIGN KEY ("key_id") REFERENCES "keys" ("id") ON DELETE SET NULL ON UPDATE CASCADE
);
INSERT INTO "new_files" ("cas_id", "comment", "date_created", "date_indexed", "date_modified", "encryption", "favorite", "has_thumbnail", "has_thumbstrip", "has_video_preview", "hidden", "id", "important", "integrity_checksum", "ipfs_id", "kind", "size_in_bytes") SELECT "cas_id", "comment", "date_created", "date_indexed", "date_modified", "encryption", "favorite", "has_thumbnail", "has_thumbstrip", "has_video_preview", "hidden", "id", "important", "integrity_checksum", "ipfs_id", "kind", "size_in_bytes" FROM "files";
DROP TABLE "files";
ALTER TABLE "new_files" RENAME TO "files";
CREATE UNIQUE INDEX "files_cas_id_key" ON "files"("cas_id");
CREATE UNIQUE INDEX "files_integrity_checksum_key" ON "files"("integrity_checksum");
-- sync_events: client_id -> node_id (FK to nodes).
-- NOTE(review): node_id is NOT NULL but the copy INSERT below does not
-- populate it, so this migration fails if sync_events has rows — this is
-- exactly the warning stated in the header block above. Same applies to
-- new_jobs and new_volumes further down.
CREATE TABLE "new_sync_events" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"node_id" INTEGER NOT NULL,
"timestamp" TEXT NOT NULL,
"data" TEXT NOT NULL,
CONSTRAINT "sync_events_node_id_fkey" FOREIGN KEY ("node_id") REFERENCES "nodes" ("id") ON DELETE RESTRICT ON UPDATE CASCADE
);
INSERT INTO "new_sync_events" ("data", "id", "timestamp") SELECT "data", "id", "timestamp" FROM "sync_events";
DROP TABLE "sync_events";
ALTER TABLE "new_sync_events" RENAME TO "sync_events";
-- locations: drops client_id; node_id is nullable here so existing rows survive.
CREATE TABLE "new_locations" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"pub_id" TEXT NOT NULL,
"node_id" INTEGER,
"name" TEXT,
"local_path" TEXT,
"total_capacity" INTEGER,
"available_capacity" INTEGER,
"filesystem" TEXT,
"disk_type" INTEGER,
"is_removable" BOOLEAN,
"is_online" BOOLEAN NOT NULL DEFAULT true,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
INSERT INTO "new_locations" ("available_capacity", "date_created", "disk_type", "filesystem", "id", "is_online", "is_removable", "local_path", "name", "pub_id", "total_capacity") SELECT "available_capacity", "date_created", "disk_type", "filesystem", "id", "is_online", "is_removable", "local_path", "name", "pub_id", "total_capacity" FROM "locations";
DROP TABLE "locations";
ALTER TABLE "new_locations" RENAME TO "locations";
CREATE UNIQUE INDEX "locations_pub_id_key" ON "locations"("pub_id");
-- jobs: client_id -> node_id (required, see NOTE above).
CREATE TABLE "new_jobs" (
"id" TEXT NOT NULL PRIMARY KEY,
"node_id" INTEGER NOT NULL,
"action" INTEGER NOT NULL,
"status" INTEGER NOT NULL DEFAULT 0,
"task_count" INTEGER NOT NULL DEFAULT 1,
"completed_task_count" INTEGER NOT NULL DEFAULT 0,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"seconds_elapsed" INTEGER NOT NULL DEFAULT 0,
CONSTRAINT "jobs_node_id_fkey" FOREIGN KEY ("node_id") REFERENCES "nodes" ("id") ON DELETE CASCADE ON UPDATE CASCADE
);
INSERT INTO "new_jobs" ("action", "completed_task_count", "date_created", "date_modified", "id", "seconds_elapsed", "status", "task_count") SELECT "action", "completed_task_count", "date_created", "date_modified", "id", "seconds_elapsed", "status", "task_count" FROM "jobs";
DROP TABLE "jobs";
ALTER TABLE "new_jobs" RENAME TO "jobs";
-- tags: drops the encryption column.
CREATE TABLE "new_tags" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"pub_id" TEXT NOT NULL,
"name" TEXT,
"total_files" INTEGER DEFAULT 0,
"redundancy_goal" INTEGER DEFAULT 1,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
INSERT INTO "new_tags" ("date_created", "date_modified", "id", "name", "pub_id", "redundancy_goal", "total_files") SELECT "date_created", "date_modified", "id", "name", "pub_id", "redundancy_goal", "total_files" FROM "tags";
DROP TABLE "tags";
ALTER TABLE "new_tags" RENAME TO "tags";
CREATE UNIQUE INDEX "tags_pub_id_key" ON "tags"("pub_id");
-- file_paths: adds nullable key_id with FK to keys.
CREATE TABLE "new_file_paths" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"is_dir" BOOLEAN NOT NULL DEFAULT false,
"location_id" INTEGER NOT NULL,
"materialized_path" TEXT NOT NULL,
"name" TEXT NOT NULL,
"extension" TEXT,
"file_id" INTEGER,
"parent_id" INTEGER,
"encryption" INTEGER NOT NULL DEFAULT 0,
"key_id" INTEGER,
"permissions" TEXT,
"temp_cas_id" TEXT,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_indexed" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "file_paths_location_id_fkey" FOREIGN KEY ("location_id") REFERENCES "locations" ("id") ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT "file_paths_file_id_fkey" FOREIGN KEY ("file_id") REFERENCES "files" ("id") ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT "file_paths_parent_id_fkey" FOREIGN KEY ("parent_id") REFERENCES "file_paths" ("id") ON DELETE SET NULL ON UPDATE CASCADE,
CONSTRAINT "file_paths_key_id_fkey" FOREIGN KEY ("key_id") REFERENCES "keys" ("id") ON DELETE SET NULL ON UPDATE CASCADE
);
INSERT INTO "new_file_paths" ("date_created", "date_indexed", "date_modified", "encryption", "extension", "file_id", "id", "is_dir", "location_id", "materialized_path", "name", "parent_id", "permissions", "temp_cas_id") SELECT "date_created", "date_indexed", "date_modified", "encryption", "extension", "file_id", "id", "is_dir", "location_id", "materialized_path", "name", "parent_id", "permissions", "temp_cas_id" FROM "file_paths";
DROP TABLE "file_paths";
ALTER TABLE "new_file_paths" RENAME TO "file_paths";
CREATE UNIQUE INDEX "file_paths_location_id_materialized_path_name_extension_key" ON "file_paths"("location_id", "materialized_path", "name", "extension");
-- volumes: client_id -> node_id (required, see NOTE above).
CREATE TABLE "new_volumes" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"node_id" INTEGER NOT NULL,
"name" TEXT NOT NULL,
"mount_point" TEXT NOT NULL,
"total_bytes_capacity" TEXT NOT NULL DEFAULT '0',
"total_bytes_available" TEXT NOT NULL DEFAULT '0',
"disk_type" TEXT,
"filesystem" TEXT,
"is_system" BOOLEAN NOT NULL DEFAULT false,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
INSERT INTO "new_volumes" ("date_modified", "disk_type", "filesystem", "id", "is_system", "mount_point", "name", "total_bytes_available", "total_bytes_capacity") SELECT "date_modified", "disk_type", "filesystem", "id", "is_system", "mount_point", "name", "total_bytes_available", "total_bytes_capacity" FROM "volumes";
DROP TABLE "volumes";
ALTER TABLE "new_volumes" RENAME TO "volumes";
CREATE UNIQUE INDEX "volumes_node_id_mount_point_name_key" ON "volumes"("node_id", "mount_point", "name");
-- Verify no FK violations were introduced before re-enabling enforcement.
PRAGMA foreign_key_check;
PRAGMA foreign_keys=ON;
-- CreateIndex
CREATE UNIQUE INDEX "nodes_pub_id_key" ON "nodes"("pub_id");
-- CreateIndex
CREATE UNIQUE INDEX "keys_checksum_key" ON "keys"("checksum");

View File

@@ -0,0 +1,83 @@
/*
Warnings:
- You are about to drop the `spaces` table. If the table is not empty, all the data it contains will be lost.
- You are about to drop the column `encryption` on the `libraries` table. All the data in the column will be lost.
- You are about to drop the column `encryption` on the `files` table. All the data in the column will be lost.
- You are about to drop the column `encryption` on the `file_paths` table. All the data in the column will be lost.
- You are about to drop the column `permissions` on the `file_paths` table. All the data in the column will be lost.
- You are about to drop the column `temp_cas_id` on the `file_paths` table. All the data in the column will be lost.
*/
-- Removes the "spaces" table and the legacy encryption/permissions/temp_cas_id
-- columns. SQLite cannot drop columns in place, so affected tables are rebuilt
-- with the create-"new_<table>" / copy / drop / rename pattern.
-- DropIndex
DROP INDEX "spaces_pub_id_key";
-- DropTable
-- FK enforcement is disabled while the old table is removed.
PRAGMA foreign_keys=off;
DROP TABLE "spaces";
PRAGMA foreign_keys=on;
-- RedefineTables
PRAGMA foreign_keys=OFF;
-- libraries: drops the encryption column.
CREATE TABLE "new_libraries" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"pub_id" TEXT NOT NULL,
"name" TEXT NOT NULL,
"remote_id" TEXT,
"is_primary" BOOLEAN NOT NULL DEFAULT true,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"timezone" TEXT
);
INSERT INTO "new_libraries" ("date_created", "id", "is_primary", "name", "pub_id", "remote_id", "timezone") SELECT "date_created", "id", "is_primary", "name", "pub_id", "remote_id", "timezone" FROM "libraries";
DROP TABLE "libraries";
ALTER TABLE "new_libraries" RENAME TO "libraries";
CREATE UNIQUE INDEX "libraries_pub_id_key" ON "libraries"("pub_id");
-- files: drops encryption; key_id (FK to keys) remains the replacement.
CREATE TABLE "new_files" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"cas_id" TEXT NOT NULL,
"integrity_checksum" TEXT,
"kind" INTEGER NOT NULL DEFAULT 0,
"size_in_bytes" TEXT NOT NULL,
"key_id" INTEGER,
"hidden" BOOLEAN NOT NULL DEFAULT false,
"favorite" BOOLEAN NOT NULL DEFAULT false,
"important" BOOLEAN NOT NULL DEFAULT false,
"has_thumbnail" BOOLEAN NOT NULL DEFAULT false,
"has_thumbstrip" BOOLEAN NOT NULL DEFAULT false,
"has_video_preview" BOOLEAN NOT NULL DEFAULT false,
"ipfs_id" TEXT,
"comment" TEXT,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_indexed" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "files_key_id_fkey" FOREIGN KEY ("key_id") REFERENCES "keys" ("id") ON DELETE SET NULL ON UPDATE CASCADE
);
INSERT INTO "new_files" ("cas_id", "comment", "date_created", "date_indexed", "date_modified", "favorite", "has_thumbnail", "has_thumbstrip", "has_video_preview", "hidden", "id", "important", "integrity_checksum", "ipfs_id", "key_id", "kind", "size_in_bytes") SELECT "cas_id", "comment", "date_created", "date_indexed", "date_modified", "favorite", "has_thumbnail", "has_thumbstrip", "has_video_preview", "hidden", "id", "important", "integrity_checksum", "ipfs_id", "key_id", "kind", "size_in_bytes" FROM "files";
DROP TABLE "files";
ALTER TABLE "new_files" RENAME TO "files";
CREATE UNIQUE INDEX "files_cas_id_key" ON "files"("cas_id");
CREATE UNIQUE INDEX "files_integrity_checksum_key" ON "files"("integrity_checksum");
-- file_paths: drops encryption, permissions and temp_cas_id.
CREATE TABLE "new_file_paths" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"is_dir" BOOLEAN NOT NULL DEFAULT false,
"location_id" INTEGER NOT NULL,
"materialized_path" TEXT NOT NULL,
"name" TEXT NOT NULL,
"extension" TEXT,
"file_id" INTEGER,
"parent_id" INTEGER,
"key_id" INTEGER,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_indexed" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "file_paths_location_id_fkey" FOREIGN KEY ("location_id") REFERENCES "locations" ("id") ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT "file_paths_file_id_fkey" FOREIGN KEY ("file_id") REFERENCES "files" ("id") ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT "file_paths_parent_id_fkey" FOREIGN KEY ("parent_id") REFERENCES "file_paths" ("id") ON DELETE SET NULL ON UPDATE CASCADE,
CONSTRAINT "file_paths_key_id_fkey" FOREIGN KEY ("key_id") REFERENCES "keys" ("id") ON DELETE SET NULL ON UPDATE CASCADE
);
INSERT INTO "new_file_paths" ("date_created", "date_indexed", "date_modified", "extension", "file_id", "id", "is_dir", "key_id", "location_id", "materialized_path", "name", "parent_id") SELECT "date_created", "date_indexed", "date_modified", "extension", "file_id", "id", "is_dir", "key_id", "location_id", "materialized_path", "name", "parent_id" FROM "file_paths";
DROP TABLE "file_paths";
ALTER TABLE "new_file_paths" RENAME TO "file_paths";
CREATE UNIQUE INDEX "file_paths_location_id_materialized_path_name_extension_key" ON "file_paths"("location_id", "materialized_path", "name", "extension");
-- Verify no FK violations were introduced before re-enabling enforcement.
PRAGMA foreign_key_check;
PRAGMA foreign_keys=ON;

View File

@@ -20,10 +20,10 @@ model Migration {
model SyncEvent {
id Int @id @default(autoincrement())
client_id Int
node_id Int
timestamp String
data String
client Client @relation(fields: [client_id], references: [id])
node Node @relation(fields: [node_id], references: [id])
@@map("sync_events")
}
@@ -34,10 +34,8 @@ model Library {
name String
remote_id String?
is_primary Boolean @default(true)
encryption Int @default(0)
date_created DateTime @default(now())
timezone String?
spaces Space[]
@@map("libraries")
}
@@ -57,7 +55,7 @@ model LibraryStatistics {
@@map("library_statistics")
}
model Client {
model Node {
id Int @id @default(autoincrement())
pub_id String @unique
name String
@@ -71,12 +69,12 @@ model Client {
sync_events SyncEvent[]
jobs Job[]
@@map("clients")
@@map("nodes")
}
model Volume {
id Int @id() @default(autoincrement())
client_id Int
node_id Int
name String
mount_point String
total_bytes_capacity String @default("0")
@@ -86,14 +84,14 @@ model Volume {
is_system Boolean @default(false)
date_modified DateTime @default(now())
@@unique([client_id, mount_point, name])
@@unique([node_id, mount_point, name])
@@map("volumes")
}
model Location {
id Int @id @default(autoincrement())
pub_id String @unique
client_id Int?
node_id Int?
name String?
local_path String?
total_capacity Int?
@@ -118,8 +116,7 @@ model File {
// basic metadata
kind Int @default(0)
size_in_bytes String
// mark uniqely as encrypted, will lead to all file paths being encrypted
encryption Int @default(0)
key_id Int?
// handy ways to mark a file
hidden Boolean @default(false)
favorite Boolean @default(false)
@@ -146,6 +143,8 @@ model File {
comments Comment[]
media_data MediaData?
key Key? @relation(fields: [key_id], references: [id])
@@map("files")
}
@@ -161,11 +160,11 @@ model FilePath {
extension String?
// the unique File for this file path
file_id Int?
//
// the parent in the file tree
parent_id Int?
encryption Int @default(0)
permissions String?
temp_cas_id String? // so a filepath can be created without its File, as they're created lazily
key_id Int? // replacement for encryption
// permissions String?
// temp_cas_id String? // so a filepath can be created without its File, as they're created lazily
date_created DateTime @default(now())
date_modified DateTime @default(now())
@@ -176,6 +175,8 @@ model FilePath {
parent FilePath? @relation("directory_file_paths", fields: [parent_id], references: [id])
children FilePath[] @relation("directory_file_paths")
key Key? @relation(fields: [key_id], references: [id])
@@unique([location_id, materialized_path, name, extension])
@@map("file_paths")
}
@@ -188,6 +189,23 @@ model FileConflict {
@@map("file_conflicts")
}
// keys allow us to know exactly which files can be decrypted with a given key
// they can be "mounted" to a client, and then used to decrypt files automatically
model Key {
id Int @id @default(autoincrement())
// used to identify the key when it is entered by user
checksum String @unique
name String?
// nullable if concealed for security
date_created DateTime? @default(now())
// so we know which algorithm to use, can be null if user must select
algorithm Int? @default(0)
files File[]
file_paths FilePath[]
@@map("keys")
}
model MediaData {
id Int @id
pixel_width Int?
@@ -212,7 +230,6 @@ model Tag {
id Int @id @default(autoincrement())
pub_id String @unique
name String?
encryption Int? @default(0)
total_files Int? @default(0)
redundancy_goal Int? @default(1)
date_created DateTime @default(now())
@@ -260,10 +277,10 @@ model LabelOnFile {
}
model Job {
id String @id
client_id Int
action Int
status Int @default(0)
id String @id
node_id Int
action Int
status Int @default(0)
task_count Int @default(1)
completed_task_count Int @default(0)
@@ -271,23 +288,10 @@ model Job {
date_modified DateTime @default(now())
seconds_elapsed Int @default(0)
clients Client @relation(fields: [client_id], references: [id], onDelete: Cascade, onUpdate: Cascade)
nodes Node @relation(fields: [node_id], references: [id], onDelete: Cascade, onUpdate: Cascade)
@@map("jobs")
}
model Space {
id Int @id @default(autoincrement())
pub_id String @unique
name String
encryption Int? @default(0) // remove
date_created DateTime @default(now())
date_modified DateTime @default(now())
Library Library? @relation(fields: [libraryId], references: [id])
libraryId Int?
@@map("spaces")
}
model Album {
id Int @id @default(autoincrement())
pub_id String @unique

View File

@@ -1,113 +0,0 @@
use crate::{
prisma::{self, client},
state, Core, CoreContext,
};
use chrono::{DateTime, Utc};
use int_enum::IntEnum;
use serde::{Deserialize, Serialize};
use std::env;
use thiserror::Error;
use ts_rs::TS;
/// API-facing representation of a client (device) attached to the library.
/// Exported to a TypeScript type via `ts-rs` for the frontend.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
pub struct Client {
// stable public identifier (maps to the database `pub_id` per the
// commented-out conversion below)
pub uuid: String,
pub name: String,
pub platform: Platform,
pub tcp_address: String,
// serialized as an ISO-8601 string on the TypeScript side
#[ts(type = "string")]
pub last_seen: DateTime<Utc>,
#[ts(type = "string")]
pub last_synchronized: DateTime<Utc>,
}
/// Operating system a client runs on. `#[repr(i32)]` plus `IntEnum` let the
/// variant value be stored as an integer column (see `platform as i32` in
/// `create` below); variant values must therefore stay stable.
#[repr(i32)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize, TS, Eq, PartialEq, IntEnum)]
#[ts(export)]
pub enum Platform {
Unknown = 0,
Windows = 1,
MacOS = 2,
Linux = 3,
IOS = 4,
Android = 5,
}
// impl Into<Client> for client::Data {
// fn into(self) -> Client {
// Client {
// uuid: self.pub_id,
// name: self.name,
// platform: ,
// tcp_address: self.tcp_address,
// last_seen: self.last_seen,
// last_synchronized: self.last_synchronized,
// }
// }
// }
/// Registers the current machine as a client in the database.
///
/// Looks up a client row by the locally-configured `client_uuid`; if none
/// exists, inserts one with the machine's hostname and detected platform.
/// On success the local client state (`client_name`, `client_id`) is updated
/// and persisted via `config.save()`.
///
/// # Errors
/// Returns [`ClientError::DatabaseError`] if any Prisma query fails.
pub async fn create(core: &Core) -> Result<(), ClientError> {
println!("Creating client...");
let mut config = state::client::get();
let db = &core.database;
// hostname is best-effort: fall back to "unknown" rather than failing
let hostname = match hostname::get() {
Ok(hostname) => hostname.to_str().unwrap_or_default().to_owned(),
Err(_) => "unknown".to_owned(),
};
// map the compile-time OS constant onto the db-storable Platform enum
let platform = match env::consts::OS {
"windows" => Platform::Windows,
"macos" => Platform::MacOS,
"linux" => Platform::Linux,
_ => Platform::Unknown,
};
// find-or-create the client row keyed by the configured uuid
let client = match db
.client()
.find_unique(client::pub_id::equals(config.client_uuid.clone()))
.exec()
.await?
{
Some(client) => client,
None => {
db.client()
.create(
client::pub_id::set(config.client_uuid.clone()),
client::name::set(hostname.clone()),
vec![
client::platform::set(platform as i32),
client::online::set(Some(true)),
],
)
.exec()
.await?
}
};
// persist the resolved identity into local node state
config.client_name = hostname;
config.client_id = client.id;
config.save();
println!("Client: {:?}", &client);
Ok(())
}
/// Fetches every client row from the database.
///
/// # Errors
/// Returns [`ClientError::DatabaseError`] if the query fails.
pub async fn get_clients(ctx: &CoreContext) -> Result<Vec<client::Data>, ClientError> {
let db = &ctx.database;
let client = db.client().find_many(vec![]).exec().await?;
Ok(client)
}
/// Errors produced by client registration and lookup.
#[derive(Error, Debug)]
pub enum ClientError {
// wraps any Prisma query failure via `#[from]`
#[error("Database error")]
DatabaseError(#[from] prisma::QueryError),
#[error("Client not found error")]
ClientNotFound,
}

View File

@@ -1,4 +1,3 @@
use crate::state;
use crate::{prisma, prisma::PrismaClient};
use thiserror::Error;
pub mod migrate;

View File

@@ -1,5 +1,5 @@
use crate::job::jobs::JobReportUpdate;
use crate::state::client;
use crate::node::state;
use crate::{
job::{jobs::Job, worker::WorkerContext},
prisma::file_path,
@@ -27,7 +27,7 @@ pub static THUMBNAIL_CACHE_DIR_NAME: &str = "thumbnails";
#[async_trait::async_trait]
impl Job for ThumbnailJob {
async fn run(&self, ctx: WorkerContext) -> Result<()> {
let config = client::get();
let config = state::get();
let core_ctx = ctx.core_ctx.clone();
let location = sys::locations::get_location(&core_ctx, self.location_id).await?;

View File

@@ -1,13 +1,12 @@
use std::path::Path;
use crate::{
encode::thumb::THUMBNAIL_CACHE_DIR_NAME,
file::{DirectoryWithContents, FileError, FilePath},
file::{DirectoryWithContents, File, FileError},
node::state,
prisma::file_path,
state::client,
sys::locations::get_location,
CoreContext,
};
use std::path::Path;
pub async fn open_dir(
ctx: &CoreContext,
@@ -15,7 +14,7 @@ pub async fn open_dir(
path: &str,
) -> Result<DirectoryWithContents, FileError> {
let db = &ctx.database;
let config = client::get();
let config = state::get();
// get location
let location = get_location(ctx, location_id.clone()).await?;
@@ -34,24 +33,30 @@ pub async fn open_dir(
let files = db
.file_path()
.find_many(vec![file_path::parent_id::equals(Some(directory.id))])
.with(file_path::file::fetch())
.exec()
.await?;
let files: Vec<FilePath> = files.into_iter().map(|l| l.into()).collect();
// convert database structs into a File
let files: Vec<File> = files
.into_iter()
.map(|l| {
let mut file: File = l.file().unwrap_or_default().unwrap().clone().into();
file.paths.push(l.into());
file
})
.collect();
let mut contents: Vec<FilePath> = vec![];
let mut contents: Vec<File> = vec![];
for mut file in files {
if file.temp_cas_id.is_some() {
let path = Path::new(&config.data_path)
.join(THUMBNAIL_CACHE_DIR_NAME)
.join(format!("{}", location.id))
.join(file.temp_cas_id.as_ref().unwrap())
.with_extension("webp");
let thumb_path = Path::new(&config.data_path)
.join(THUMBNAIL_CACHE_DIR_NAME)
.join(format!("{}", location.id))
.join(file.cas_id.clone())
.with_extension("webp");
let exists = path.exists();
file.has_local_thumbnail = exists;
}
file.has_thumbnail = thumb_path.exists();
contents.push(file);
}

View File

@@ -15,6 +15,8 @@ pub enum ScanProgress {
Message(String),
}
static BATCH_SIZE: usize = 100;
// creates a vector of valid path buffers from a directory
pub async fn scan_path(
ctx: &CoreContext,
@@ -49,7 +51,7 @@ pub async fn scan_path(
// spawn a dedicated thread to scan the directory for performance
let (paths, scan_start, on_progress) = tokio::task::spawn_blocking(move || {
// store every valid path discovered
let mut paths: Vec<(PathBuf, i32, Option<i32>)> = Vec::new();
let mut paths: Vec<(PathBuf, i32, Option<i32>, bool)> = Vec::new();
// store a hashmap of directories to their file ids for fast lookup
let mut dirs: HashMap<String, i32> = HashMap::new();
// begin timer for logging purposes
@@ -76,6 +78,8 @@ pub async fn scan_path(
};
let path = entry.path();
println!("found: {:?}", path);
let parent_path = path
.parent()
.unwrap_or(Path::new(""))
@@ -93,16 +97,18 @@ pub async fn scan_path(
on_progress(vec![
ScanProgress::Message(format!("{}", str)),
ScanProgress::ChunkCount(paths.len() / 100),
ScanProgress::ChunkCount(paths.len() / BATCH_SIZE),
]);
let file_id = get_id();
let file_type = entry.file_type();
let is_dir = file_type.is_dir();
if entry.file_type().is_dir() || entry.file_type().is_file() {
paths.push((path.to_owned(), file_id, parent_dir_id.cloned()));
if is_dir || file_type.is_file() {
paths.push((path.to_owned(), file_id, parent_dir_id.cloned(), is_dir));
}
if entry.file_type().is_dir() {
if is_dir {
let _path = match path.to_str() {
Some(path) => path.to_owned(),
None => continue,
@@ -118,11 +124,11 @@ pub async fn scan_path(
let db_write_start = Instant::now();
let scan_read_time = scan_start.elapsed();
for (i, chunk) in paths.chunks(100).enumerate() {
for (i, chunk) in paths.chunks(BATCH_SIZE).enumerate() {
on_progress(vec![
ScanProgress::SavedChunks(i as usize),
ScanProgress::Message(format!(
"Writing {} of {} to db",
"Writing {} of {} to library",
i * chunk.len(),
paths.len(),
)),
@@ -130,9 +136,9 @@ pub async fn scan_path(
// vector to store active models
let mut files: Vec<String> = Vec::new();
for (file_path, file_id, parent_dir_id) in chunk {
for (file_path, file_id, parent_dir_id, is_dir) in chunk {
files.push(
match prepare_values(&file_path, *file_id, &location, parent_dir_id) {
match prepare_values(&file_path, *file_id, &location, parent_dir_id, *is_dir) {
Ok(file) => file,
Err(e) => {
println!("Error creating file model from path {:?}: {}", file_path, e);
@@ -143,7 +149,7 @@ pub async fn scan_path(
}
let raw_sql = format!(
r#"
INSERT INTO file_paths (id, is_dir, location_id, materialized_path, name, extension, parent_id, date_created, temp_cas_id)
INSERT INTO file_paths (id, is_dir, location_id, materialized_path, name, extension, parent_id)
VALUES {}
"#,
files.join(", ")
@@ -168,8 +174,9 @@ fn prepare_values(
id: i32,
location: &LocationResource,
parent_id: &Option<i32>,
is_dir: bool,
) -> Result<String> {
let metadata = fs::metadata(&file_path)?;
// let metadata = fs::metadata(&file_path)?;
let location_path = location.path.as_ref().unwrap().as_str();
// let size = metadata.len();
let name;
@@ -179,7 +186,7 @@ fn prepare_values(
// if 'file_path' is a directory, set extension to an empty string to avoid periods in folder names
// - being interpreted as file extensions
if file_path.is_dir() {
if is_dir {
extension = "".to_string();
name = extract_name(file_path.file_name());
} else {
@@ -196,24 +203,24 @@ fn prepare_values(
None => return Err(anyhow!("{}", file_path.to_str().unwrap_or_default())),
};
let cas_id = {
if !metadata.is_dir() {
// TODO: remove unwrap, skip and make sure to continue loop
let mut x = generate_cas_id(&file_path.to_str().unwrap(), metadata.len()).unwrap();
x.truncate(16);
x
} else {
"".to_string()
}
};
// let cas_id = {
// if !metadata.is_dir() {
// // TODO: remove unwrap, skip and make sure to continue loop
// let mut x = generate_cas_id(&file_path.to_str().unwrap(), metadata.len()).unwrap();
// x.truncate(16);
// x
// } else {
// "".to_string()
// }
// };
let date_created: DateTime<Utc> = metadata.created().unwrap().into();
let parsed_date_created = date_created.to_rfc3339_opts(SecondsFormat::Millis, true);
// let date_created: DateTime<Utc> = metadata.created().unwrap().into();
// let parsed_date_created = date_created.to_rfc3339_opts(SecondsFormat::Millis, true);
let values = format!(
"({}, {}, {}, \"{}\", \"{}\", \"{}\", {},\"{}\", \"{}\")",
"({}, {}, {}, \"{}\", \"{}\", \"{}\", {})",
id,
metadata.is_dir(),
is_dir,
location.id,
materialized_path,
name,
@@ -222,8 +229,8 @@ fn prepare_values(
.clone()
.map(|id| format!("\"{}\"", &id))
.unwrap_or("NULL".to_string()),
parsed_date_created,
cas_id
// parsed_date_created,
// cas_id
);
println!("{}", values);

View File

@@ -29,7 +29,7 @@ pub struct File {
pub has_thumbnail: bool,
pub has_thumbstrip: bool,
pub has_video_preview: bool,
pub encryption: EncryptionAlgorithm,
// pub encryption: EncryptionAlgorithm,
pub ipfs_id: Option<String>,
pub comment: Option<String>,
@@ -58,7 +58,7 @@ pub struct FilePath {
pub extension: Option<String>,
pub file_id: Option<i32>,
pub parent_id: Option<i32>,
pub temp_cas_id: Option<String>,
// pub temp_cas_id: Option<String>,
pub has_local_thumbnail: bool,
#[ts(type = "string")]
pub date_created: chrono::DateTime<chrono::Utc>,
@@ -66,7 +66,6 @@ pub struct FilePath {
pub date_modified: chrono::DateTime<chrono::Utc>,
#[ts(type = "string")]
pub date_indexed: chrono::DateTime<chrono::Utc>,
pub permissions: Option<String>,
}
#[repr(i32)]
@@ -92,7 +91,7 @@ impl Into<File> for file::Data {
integrity_checksum: self.integrity_checksum,
kind: IntEnum::from_int(self.kind).unwrap(),
size_in_bytes: self.size_in_bytes.to_string(),
encryption: EncryptionAlgorithm::from_int(self.encryption).unwrap(),
// encryption: EncryptionAlgorithm::from_int(self.encryption).unwrap(),
ipfs_id: self.ipfs_id,
hidden: self.hidden,
favorite: self.favorite,
@@ -119,11 +118,11 @@ impl Into<FilePath> for file_path::Data {
parent_id: self.parent_id,
location_id: self.location_id,
date_indexed: self.date_indexed,
permissions: self.permissions,
// permissions: self.permissions,
has_local_thumbnail: false,
name: self.name,
extension: self.extension,
temp_cas_id: self.temp_cas_id,
// temp_cas_id: self.temp_cas_id,
date_created: self.date_created,
date_modified: self.date_modified,
}
@@ -134,7 +133,7 @@ impl Into<FilePath> for file_path::Data {
#[ts(export)]
pub struct DirectoryWithContents {
pub directory: FilePath,
pub contents: Vec<FilePath>,
pub contents: Vec<File>,
}
#[derive(Error, Debug)]

View File

@@ -3,8 +3,8 @@ use super::{
JobError,
};
use crate::{
prisma::{client, job},
state,
node::state,
prisma::{job, node},
sync::{crdt::Replicate, engine::SyncContext},
CoreContext,
};
@@ -134,14 +134,14 @@ impl JobReport {
}
}
pub async fn create(&self, ctx: &CoreContext) -> Result<(), JobError> {
let config = state::client::get();
let config = state::get();
ctx
.database
.job()
.create(
job::id::set(self.id.clone()),
job::action::set(1),
job::clients::link(client::id::equals(config.client_id)),
job::nodes::link(node::id::equals(config.node_id)),
vec![],
)
.exec()

View File

@@ -1,9 +1,11 @@
use crate::{file::cas::identifier::FileIdentifierJob, library::loader::get_library_path};
use crate::{
file::cas::identifier::FileIdentifierJob, library::loader::get_library_path,
node::state::NodeState,
};
use job::jobs::{Job, JobReport, Jobs};
use prisma::PrismaClient;
use serde::{Deserialize, Serialize};
use state::client::ClientState;
use std::{fs, path::Path, sync::Arc};
use std::{fs, sync::Arc};
use thiserror::Error;
use tokio::sync::{
mpsc::{self, unbounded_channel, UnboundedReceiver, UnboundedSender},
@@ -14,17 +16,16 @@ use ts_rs::TS;
use crate::encode::thumb::ThumbnailJob;
// init modules
pub mod client;
pub mod crypto;
pub mod db;
pub mod encode;
pub mod file;
pub mod job;
pub mod library;
pub mod node;
#[cfg(target_os = "p2p")]
pub mod p2p;
pub mod prisma;
pub mod state;
pub mod sync;
pub mod sys;
pub mod util;
@@ -101,8 +102,8 @@ impl CoreContext {
}
}
pub struct Core {
state: ClientState,
pub struct Node {
state: NodeState,
jobs: job::jobs::Jobs,
database: Arc<PrismaClient>,
// filetype_registry: library::TypeRegistry,
@@ -126,9 +127,9 @@ pub struct Core {
),
}
impl Core {
// create new instance of core, run startup tasks
pub async fn new(mut data_dir: std::path::PathBuf) -> (Core, mpsc::Receiver<CoreEvent>) {
impl Node {
// create new instance of node, run startup tasks
pub async fn new(mut data_dir: std::path::PathBuf) -> (Node, mpsc::Receiver<CoreEvent>) {
let (event_sender, event_recv) = mpsc::channel(100);
data_dir = data_dir.join("spacedrive");
@@ -136,15 +137,15 @@ impl Core {
// create data directory if it doesn't exist
fs::create_dir_all(&data_dir).unwrap();
// prepare basic client state
let mut state = ClientState::new(data_dir, "diamond-mastering-space-dragon").unwrap();
let mut state = NodeState::new(data_dir, "diamond-mastering-space-dragon").unwrap();
// load from disk
state.read_disk().unwrap_or(println!(
"Error: No client state found, creating new one..."
));
state
.read_disk()
.unwrap_or(println!("Error: No node state found, creating new one..."));
state.save();
println!("Client State: {:?}", state);
println!("Node State: {:?}", state);
// connect to default library
let database = Arc::new(
@@ -155,7 +156,7 @@ impl Core {
let internal_channel = unbounded_channel::<InternalEvent>();
let core = Core {
let node = Node {
state,
query_channel: unbounded_channel(),
command_channel: unbounded_channel(),
@@ -170,7 +171,7 @@ impl Core {
p2p::listener::listen(None).await.unwrap_or(());
});
(core, event_recv)
(node, event_recv)
}
pub fn get_context(&self) -> CoreContext {
@@ -233,10 +234,10 @@ impl Core {
}
}
}
// init client
match client::create(&self).await {
// init node data within library
match node::LibraryNode::create(&self).await {
Ok(_) => println!("Spacedrive online"),
Err(e) => println!("Error initializing client: {:?}", e),
Err(e) => println!("Error initializing node: {:?}", e),
};
}
@@ -314,11 +315,11 @@ impl Core {
),
ClientQuery::LibGetTags => todo!(),
ClientQuery::JobGetRunning => CoreResponse::JobGetRunning(self.jobs.get_running().await),
// TODO: FIX THIS
ClientQuery::JobGetHistory => CoreResponse::JobGetHistory(Jobs::get_history(&ctx).await?),
ClientQuery::GetLibraryStatistics => {
CoreResponse::GetLibraryStatistics(library::statistics::Statistics::calculate(&ctx).await?)
}
ClientQuery::GetNodes => todo!(),
})
}
}
@@ -370,6 +371,7 @@ pub enum ClientQuery {
limit: i32,
},
GetLibraryStatistics,
GetNodes,
}
// represents an event this library can emit
@@ -395,7 +397,7 @@ pub enum CoreResponse {
SysGetLocation(sys::locations::LocationResource),
SysGetLocations(Vec<sys::locations::LocationResource>),
LibGetExplorerDir(file::DirectoryWithContents),
ClientGetState(ClientState),
ClientGetState(NodeState),
LocCreate(sys::locations::LocationResource),
JobGetRunning(Vec<JobReport>),
JobGetHistory(Vec<JobReport>),

View File

@@ -1,9 +1,9 @@
use anyhow::Result;
use uuid::Uuid;
use crate::state::client::LibraryState;
use crate::{db::migrate, prisma::library, state};
use crate::{Core, CoreContext};
use crate::node::state::LibraryState;
use crate::{db::migrate, node::state, prisma::library};
use crate::{CoreContext, Node};
use super::LibraryError;
@@ -15,8 +15,8 @@ pub fn get_library_path(data_path: &str) -> String {
format!("{}/{}", path, LIBRARY_DB_NAME)
}
pub async fn get(core: &Core) -> Result<library::Data, LibraryError> {
let config = state::client::get();
pub async fn get(core: &Node) -> Result<library::Data, LibraryError> {
let config = state::get();
let db = &core.database;
let library_state = config.get_current_library();
@@ -43,7 +43,7 @@ pub async fn get(core: &Core) -> Result<library::Data, LibraryError> {
}
pub async fn load(ctx: &CoreContext, library_path: &str, library_id: &str) -> Result<()> {
let mut config = state::client::get();
let mut config = state::get();
println!("Initializing library: {} {}", &library_id, library_path);
@@ -58,7 +58,7 @@ pub async fn load(ctx: &CoreContext, library_path: &str, library_id: &str) -> Re
}
pub async fn create(ctx: &CoreContext, name: Option<String>) -> Result<()> {
let mut config = state::client::get();
let mut config = state::get();
let uuid = Uuid::new_v4().to_string();

View File

@@ -1,6 +1,6 @@
use crate::{
node::state,
prisma::{library, library_statistics::*},
state::client,
sys::{self, volumes::Volume},
CoreContext,
};
@@ -53,7 +53,7 @@ impl Default for Statistics {
impl Statistics {
pub async fn retrieve(ctx: &CoreContext) -> Result<Statistics, LibraryError> {
let config = client::get();
let config = state::get();
let db = &ctx.database;
let library_data = config.get_current_library();
@@ -70,7 +70,7 @@ impl Statistics {
Ok(library_statistics_db.into())
}
pub async fn calculate(ctx: &CoreContext) -> Result<Statistics, LibraryError> {
let config = client::get();
let config = state::get();
let db = &ctx.database;
// get library from client state
let library_data = config.get_current_library();

104
core/src/node/mod.rs Normal file
View File

@@ -0,0 +1,104 @@
use crate::{
prisma::{self, node},
CoreContext, Node,
};
use chrono::{DateTime, Utc};
use int_enum::IntEnum;
use serde::{Deserialize, Serialize};
use std::env;
use thiserror::Error;
use ts_rs::TS;
pub mod state;
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
pub struct LibraryNode {
pub uuid: String,
pub name: String,
pub platform: Platform,
pub tcp_address: String,
#[ts(type = "string")]
pub last_seen: DateTime<Utc>,
#[ts(type = "string")]
pub last_synchronized: DateTime<Utc>,
}
#[repr(i32)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize, TS, Eq, PartialEq, IntEnum)]
#[ts(export)]
pub enum Platform {
Unknown = 0,
Windows = 1,
MacOS = 2,
Linux = 3,
IOS = 4,
Android = 5,
}
impl LibraryNode {
pub async fn create(node: &Node) -> Result<(), NodeError> {
println!("Creating node...");
let mut config = state::get();
let db = &node.database;
let hostname = match hostname::get() {
Ok(hostname) => hostname.to_str().unwrap_or_default().to_owned(),
Err(_) => "unknown".to_owned(),
};
let platform = match env::consts::OS {
"windows" => Platform::Windows,
"macos" => Platform::MacOS,
"linux" => Platform::Linux,
_ => Platform::Unknown,
};
let _node = match db
.node()
.find_unique(node::pub_id::equals(config.node_pub_id.clone()))
.exec()
.await?
{
Some(node) => node,
None => {
db.node()
.create(
node::pub_id::set(config.node_pub_id.clone()),
node::name::set(hostname.clone()),
vec![
node::platform::set(platform as i32),
node::online::set(Some(true)),
],
)
.exec()
.await?
}
};
config.node_name = hostname;
config.node_id = _node.id;
config.save();
println!("node: {:?}", &_node);
Ok(())
}
pub async fn get_nodes(ctx: &CoreContext) -> Result<Vec<node::Data>, NodeError> {
let db = &ctx.database;
let _node = db.node().find_many(vec![]).exec().await?;
Ok(_node)
}
}
#[derive(Error, Debug)]
pub enum NodeError {
#[error("Database error")]
DatabaseError(#[from] prisma::QueryError),
#[error("Client not found error")]
ClientNotFound,
}

View File

@@ -9,23 +9,21 @@ use uuid::Uuid;
#[derive(Debug, Serialize, Deserialize, Clone, Default, TS)]
#[ts(export)]
pub struct ClientState {
// client id is a uniquely generated UUID
pub client_uuid: String,
pub client_id: i32,
// client_name is the name of the device running the client
pub client_name: String,
pub struct NodeState {
pub node_pub_id: String,
pub node_id: i32,
pub node_name: String,
// config path is stored as struct can exist only in memory during startup and be written to disk later without supplying path
pub data_path: String,
// the port this client uses to listen for incoming connections
// the port this node uses to listen for incoming connections
pub tcp_port: u32,
// all the libraries loaded by this client
// all the libraries loaded by this node
pub libraries: Vec<LibraryState>,
// used to quickly find the default library
pub current_library_uuid: String,
}
pub static CLIENT_STATE_CONFIG_NAME: &str = "client_state.json";
pub static NODE_STATE_CONFIG_NAME: &str = "node_state.json";
#[derive(Debug, Serialize, Deserialize, Clone, Default, TS)]
#[ts(export)]
@@ -36,26 +34,26 @@ pub struct LibraryState {
pub offline: bool,
}
// global, thread-safe storage for client state
// global, thread-safe storage for node state
lazy_static! {
static ref CONFIG: RwLock<Option<ClientState>> = RwLock::new(None);
static ref CONFIG: RwLock<Option<NodeState>> = RwLock::new(None);
}
pub fn get() -> ClientState {
pub fn get() -> NodeState {
match CONFIG.read() {
Ok(guard) => guard.clone().unwrap_or(ClientState::default()),
Err(_) => return ClientState::default(),
Ok(guard) => guard.clone().unwrap_or(NodeState::default()),
Err(_) => return NodeState::default(),
}
}
impl ClientState {
pub fn new(data_path: &str, client_name: &str) -> Result<Self> {
impl NodeState {
pub fn new(data_path: &str, node_name: &str) -> Result<Self> {
let uuid = Uuid::new_v4().to_string();
// create struct and assign defaults
let config = Self {
client_uuid: uuid,
node_pub_id: uuid,
data_path: data_path.to_string(),
client_name: client_name.to_string(),
node_name: node_name.to_string(),
..Default::default()
};
Ok(config)
@@ -65,7 +63,7 @@ impl ClientState {
self.write_memory();
// only write to disk if config path is set
if !&self.data_path.is_empty() {
let config_path = format!("{}/{}", &self.data_path, CLIENT_STATE_CONFIG_NAME);
let config_path = format!("{}/{}", &self.data_path, NODE_STATE_CONFIG_NAME);
let mut file = fs::File::create(config_path).unwrap();
let json = serde_json::to_string(&self).unwrap();
file.write_all(json.as_bytes()).unwrap();
@@ -73,7 +71,7 @@ impl ClientState {
}
pub fn read_disk(&mut self) -> Result<()> {
let config_path = format!("{}/{}", &self.data_path, CLIENT_STATE_CONFIG_NAME);
let config_path = format!("{}/{}", &self.data_path, NODE_STATE_CONFIG_NAME);
// open the file and parse json
let file = fs::File::open(config_path)?;
let reader = BufReader::new(file);

View File

@@ -1 +0,0 @@
pub mod client;

View File

@@ -1,5 +1,5 @@
use crate::{
file::indexer::IndexerJob, prisma::location, state::client, ClientQuery, CoreContext, CoreEvent,
file::indexer::IndexerJob, node::state, prisma::location, ClientQuery, CoreContext, CoreEvent,
};
use anyhow::Result;
use serde::{Deserialize, Serialize};
@@ -109,7 +109,7 @@ pub async fn get_locations(ctx: &CoreContext) -> Result<Vec<LocationResource>, S
pub async fn create_location(ctx: &CoreContext, path: &str) -> Result<LocationResource, SysError> {
let db = &ctx.database;
let config = client::get();
let config = state::get();
// check if we have access to this location
if !Path::new(path).exists() {

View File

@@ -1,5 +1,5 @@
// use crate::native;
use crate::{prisma::volume::*, state::client};
use crate::{node::state, prisma::volume::*};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
// #[cfg(not(target_os = "macos"))]
@@ -28,20 +28,20 @@ pub struct Volume {
impl Volume {
pub async fn save(ctx: &CoreContext) -> Result<(), SysError> {
let db = &ctx.database;
let config = client::get();
let config = state::get();
let volumes = Self::get_volumes()?;
// enter all volumes associate with this client add to db
for volume in volumes {
db.volume()
.upsert(client_id_mount_point_name(
config.client_id.clone(),
.upsert(node_id_mount_point_name(
config.node_id.clone(),
volume.mount_point.to_string(),
volume.name.to_string(),
))
.create(
client_id::set(config.client_id),
node_id::set(config.node_id),
name::set(volume.name),
mount_point::set(volume.mount_point),
vec![

View File

@@ -34,6 +34,7 @@
"phosphor-react": "^1.4.1",
"pretty-bytes": "^6.0.0",
"react": "^18.0.0",
"react-countup": "^6.2.0",
"react-dom": "^18.0.0",
"react-dropzone": "^12.0.4",
"react-error-boundary": "^3.1.4",

View File

@@ -36,7 +36,6 @@ import ExperimentalSettings from './screens/settings/ExperimentalSettings';
import { TagScreen } from './screens/Tag';
const queryClient = new QueryClient();
export const AppPropsContext = React.createContext<AppProps | null>(null);

View File

@@ -39,7 +39,7 @@ export function Device(props: DeviceProps) {
{props.type === 'laptop' && <Laptop weight="fill" size={20} className="mr-2" />}
{props.type === 'desktop' && <Desktop weight="fill" size={20} className="mr-2" />}
{props.type === 'server' && <Cloud weight="fill" size={20} className="mr-2" />}
<h3 className="font-semibold text-md">{props.name}</h3>
<h3 className="font-semibold text-md">{props.name || 'Unnamed Device'}</h3>
<div className="flex flex-row space-x-1.5 mt-0.5">
<span className="font-semibold flex flex-row h-[19px] -mt-0.5 ml-3 py-0.5 px-1.5 text-[10px] rounded bg-gray-500 text-gray-400">
<LockClosedIcon className="w-3 h-3 mr-1 -ml-0.5 m-[1px]" />

View File

View File

@@ -1,7 +1,7 @@
import { LockClosedIcon } from '@heroicons/react/outline';
import { CameraIcon, LockClosedIcon, PhotographIcon } from '@heroicons/react/outline';
import { CogIcon, EyeOffIcon, PlusIcon, ServerIcon } from '@heroicons/react/solid';
import clsx from 'clsx';
import { CirclesFour, Code, EjectSimple, MonitorPlay, Planet } from 'phosphor-react';
import { Camera, CirclesFour, Code, EjectSimple, MonitorPlay, Planet } from 'phosphor-react';
import React, { useContext, useEffect, useState } from 'react';
import { NavLink, NavLinkProps } from 'react-router-dom';
import { TrafficLights } from '../os/TrafficLights';
@@ -77,7 +77,7 @@ export const Sidebar: React.FC<SidebarProps> = (props) => {
return (
<div className="flex flex-col flex-grow-0 flex-shrink-0 w-48 min-h-full px-3 overflow-x-hidden overflow-y-scroll border-r border-gray-100 no-scrollbar bg-gray-50 dark:bg-gray-850 dark:border-gray-600">
{appPropsContext?.platform === 'macOS' ? (
{appPropsContext?.platform === 'macOS' || appPropsContext?.demoMode ? (
<>
<MacOSTrafficLights />
</>
@@ -120,6 +120,10 @@ export const Sidebar: React.FC<SidebarProps> = (props) => {
<Icon component={CirclesFour} />
Content
</SidebarLink>
<SidebarLink to="photos">
<Icon component={PhotographIcon} />
Photos
</SidebarLink>
{experimental ? (
<SidebarLink to="debug">

View File

@@ -44,6 +44,7 @@ const Light: React.FC<LightProps> = (props) => {
})}
>
{(() => {
if (!props.focused) return <></>;
switch (props.mode) {
case 'close':
return (

View File

@@ -1,15 +1,18 @@
import { CloudIcon } from '@heroicons/react/outline';
import { CogIcon, MenuIcon, PlusIcon } from '@heroicons/react/solid';
import { useBridgeQuery } from '@sd/client';
import { Statistics } from '@sd/core';
import { Button } from '@sd/ui';
import byteSize from 'byte-size';
import { DotsSixVertical, Laptop, LineSegments, Plus } from 'phosphor-react';
import React, { useState } from 'react';
import React, { useContext, useEffect, useState } from 'react';
import { AppPropsContext } from '../App';
import { Device } from '../components/device/Device';
import FileItem from '../components/file/FileItem';
import Dialog from '../components/layout/Dialog';
import { Input } from '../components/primitive';
import { InputContainer } from '../components/primitive/InputContainer';
import { useCountUp } from 'react-countup';
interface StatItemProps {
name: string;
@@ -18,12 +21,34 @@ interface StatItemProps {
}
const StatItem: React.FC<StatItemProps> = (props) => {
const countUpRef = React.useRef(null);
let size = byteSize(Number(props.value) || 0);
let amount = parseFloat(size.value);
const [hasRun, setHasRun] = useState(false);
useCountUp({
startOnMount: !hasRun,
ref: countUpRef,
start: amount / 2,
end: amount,
delay: 0.1,
decimals: 1,
duration: 2,
enableScrollSpy: true,
useEasing: true,
onEnd: () => {
setHasRun(true);
}
});
return (
<div className="flex flex-col px-4 py-3 duration-75 transform rounded-md cursor-default hover:bg-gray-50 hover:dark:bg-gray-600">
<div className="flex flex-col flex-shrink-0 w-32 px-4 py-3 duration-75 transform rounded-md cursor-default hover:bg-gray-50 hover:dark:bg-gray-600">
<span className="text-sm text-gray-400">{props.name}</span>
<span className="text-2xl font-bold">
{size.value}
<span ref={countUpRef} />
<span className="ml-1 text-[16px] text-gray-400">{size.unit}</span>
</span>
</div>
@@ -34,39 +59,63 @@ export const OverviewScreen: React.FC<{}> = (props) => {
const { data: libraryStatistics } = useBridgeQuery('GetLibraryStatistics');
const { data: clientState } = useBridgeQuery('ClientGetState');
const [stats, setStats] = useState<Statistics>(libraryStatistics || ({} as Statistics));
// get app props context
const appPropsContext = useContext(AppPropsContext);
useEffect(() => {
if (appPropsContext?.demoMode == true && !libraryStatistics?.library_db_size) {
setStats({
total_bytes_capacity: '8093333345230',
preview_media_bytes: '2304387532',
library_db_size: '83345230',
total_file_count: 20342345,
total_bytes_free: '89734502034',
total_bytes_used: '8093333345230',
total_unique_bytes: '9347397'
});
}
}, [appPropsContext, libraryStatistics]);
return (
<div className="flex flex-col w-full h-screen overflow-x-hidden custom-scroll page-scroll">
<div data-tauri-drag-region className="flex flex-shrink-0 w-full h-7" />
<div data-tauri-drag-region className="flex flex-shrink-0 w-full h-5" />
{/* PAGE */}
<div className="flex flex-col w-full h-screen px-3">
{/* STAT HEADER */}
<div className="flex w-full">
<div className="flex flex-wrap flex-grow pb-4 space-x-6">
{/* STAT CONTAINER */}
<div className="flex pb-4 overflow-hidden">
<StatItem
name="Total capacity"
value={libraryStatistics?.total_bytes_capacity}
unit={libraryStatistics?.total_bytes_capacity}
value={stats?.total_bytes_capacity}
unit={stats?.total_bytes_capacity}
/>
<StatItem
name="Index size"
value={libraryStatistics?.library_db_size}
unit={libraryStatistics?.library_db_size}
value={stats?.library_db_size}
unit={stats?.library_db_size}
/>
<StatItem
name="Preview media"
value={libraryStatistics?.preview_media_bytes}
unit={libraryStatistics?.preview_media_bytes}
value={stats?.preview_media_bytes}
unit={stats?.preview_media_bytes}
/>
<StatItem
name="Free space"
value={libraryStatistics?.total_bytes_free}
unit={libraryStatistics?.total_bytes_free}
value={stats?.total_bytes_free}
unit={stats?.total_bytes_free}
/>
<StatItem name="Total at-risk" value={'0'} unit={stats?.preview_media_bytes} />
{/* <StatItem
name="Total at-risk"
value={'0'}
unit={libraryStatistics?.preview_media_bytes}
unit={stats?.preview_media_bytes}
/>
<StatItem name="Total backed up" value={'0'} unit={''} /> */}
</div>
<div className="flex-grow" />
<div className="space-x-2">
<Dialog
title="Add Device"
@@ -78,6 +127,7 @@ export const OverviewScreen: React.FC<{}> = (props) => {
size="sm"
icon={<PlusIcon className="inline w-4 h-4 -mt-0.5 mr-1" />}
variant="gray"
className="hidden sm:visible"
>
Add Device
</Button>

View File

@@ -21,7 +21,7 @@ export default function ExperimentalSettings() {
title="Debug Menu"
description="Shows data about Spacedrive such as Jobs, Job History and Client State."
>
<div className="flex items-center h-full">
<div className="flex items-center h-full pl-10">
<Toggle initialState={experimental} size={'sm'} type="experimental" />
</div>
</InputContainer>

View File

@@ -22,8 +22,8 @@ export default function LibrarySettings() {
title="Encrypt on cloud"
description="Enable if library contains sensitive data and should not be synced to the cloud without full encryption."
>
<div className="flex items-center h-full">
<Toggle initialState={true} size={'sm'} />
<div className="flex items-center h-full pl-10">
<Toggle initialState={true} size={'sm'} type={''} />
</div>
</InputContainer>
</div>

View File

@@ -17,43 +17,3 @@
left: 50%;
transform: translate(-50%, -50%);
}
.landing-img {
background-image: url('/app.png');
background-size: contain;
background-repeat: no-repeat;
background-position: center;
}
.fade-in-image {
animation: fadeIn 1s;
-webkit-animation: fadeIn 1s;
-moz-animation: fadeIn 1s;
-o-animation: fadeIn 1s;
-ms-animation: fadeIn 1s;
}
@keyframes fadeIn {
0% {opacity:0;}
100% {opacity:1;}
}
@-moz-keyframes fadeIn {
0% {opacity:0;}
100% {opacity:1;}
}
@-webkit-keyframes fadeIn {
0% {opacity:0;}
100% {opacity:1;}
}
@-o-keyframes fadeIn {
0% {opacity:0;}
100% {opacity:1;}
}
@-ms-keyframes fadeIn {
0% {opacity:0;}
100% {opacity:1;}
}

BIN
pnpm-lock.yaml generated
View File

Binary file not shown.