wow, SEXY CORE

- Corrected naming structure of Core/Client related enums
- Added CoreResource
- Created planned ClientCommands
- Moved migrations out of mod.rs
- Created useBridgeQuery
This commit is contained in:
Jamie Pine
2022-03-13 03:50:40 -07:00
parent f43f12e011
commit ffaa9cd67d
9 changed files with 295 additions and 223 deletions

View File

@@ -4,16 +4,24 @@ use sdcorelib::{
state::{client, client::ClientState},
sys,
sys::{volumes, volumes::Volume},
ClientQuery, ClientResponse, Core,
ClientCommand, ClientQuery, Core, CoreResponse,
};
/// Tauri IPC handler: forwards a `ClientQuery` from the frontend to the core.
///
/// The core's error is rendered as a `String` so it can be serialized across
/// the Tauri IPC boundary.
#[tauri::command(async)]
pub async fn client_query_transport(data: ClientQuery) -> Result<ClientResponse, String> {
    Core::query(data).await.map_err(|err| err.to_string())
}
// #[tauri::command(async)]
// pub async fn client_query_transport(data: ClientQuery) -> Result<CoreResponse, String> {
// match Core::query(data).await {
// Ok(response) => Ok(response),
// Err(err) => Err(err.to_string()),
// }
// }
// #[tauri::command(async)]
// pub async fn client_command_transport(data: ClientCommand) -> Result<CoreResponse, String> {
// match Core::command(data).await {
// Ok(response) => Ok(response),
// Err(err) => Err(err.to_string()),
// }
// }
#[tauri::command(async)]
pub async fn scan_dir(path: String) -> Result<(), String> {

View File

@@ -1,4 +1,5 @@
use sdcorelib::Core;
use once_cell::sync::OnceCell;
use sdcorelib::{ClientCommand, ClientQuery, Core, CoreResponse};
use tauri::api::path;
use tauri::Manager;
// use tauri_plugin_shadows::Shadows;
@@ -6,6 +7,24 @@ use tauri::Manager;
mod commands;
mod menu;
pub static CORE: OnceCell<Core> = OnceCell::new();
/// Tauri IPC bridge: routes a `ClientQuery` from the webview to the core.
///
/// Errors are stringified so they can cross the IPC boundary. Panics if
/// invoked before `CORE` has been initialized.
#[tauri::command(async)]
async fn client_query_transport(data: ClientQuery) -> Result<CoreResponse, String> {
    let core = CORE.get().unwrap();
    core.query(data).await.map_err(|err| err.to_string())
}
/// Tauri IPC bridge: routes a `ClientCommand` from the webview to the core.
///
/// Errors are stringified so they can cross the IPC boundary. Panics if
/// invoked before `CORE` has been initialized.
#[tauri::command(async)]
async fn client_command_transport(data: ClientCommand) -> Result<CoreResponse, String> {
    let core = CORE.get().unwrap();
    core.command(data).await.map_err(|err| err.to_string())
}
#[tokio::main]
async fn main() {
let data_dir = path::data_dir().unwrap_or(std::path::PathBuf::from("./"));
@@ -26,7 +45,8 @@ async fn main() {
})
.on_menu_event(|event| menu::handle_menu_event(event))
.invoke_handler(tauri::generate_handler![
commands::client_query_transport,
client_query_transport,
client_command_transport,
commands::scan_dir,
commands::create_location,
commands::get_files,

View File

@@ -10,7 +10,7 @@ import { TrafficLights } from '../os/TrafficLights';
import { Button } from '../primitive';
import { Dropdown } from '../primitive/Dropdown';
import { DefaultProps } from '../primitive/types';
import { useVolumes } from '@sd/state';
import { useBridgeQuery } from '@sd/state';
interface SidebarProps extends DefaultProps {}
export const SidebarLink = (props: NavLinkProps) => (
@@ -51,7 +51,7 @@ export function MacOSTrafficLights() {
}
export const Sidebar: React.FC<SidebarProps> = (props) => {
const { data: volumes } = useVolumes();
const { data: volumes } = useBridgeQuery('SysGetVolumes');
const tags = [
{ id: 1, name: 'Keepsafe', color: '#FF6788' },

View File

@@ -0,0 +1,131 @@
use crate::file::checksum::sha256_digest;
use crate::{prisma, prisma::Migration};
use anyhow::Result;
use data_encoding::HEXLOWER;
use include_dir::{include_dir, Dir};
use std::ffi::OsStr;
use std::io::BufReader;
const INIT_MIGRATION: &str = include_str!("../../prisma/migrations/migration_table/migration.sql");
static MIGRATIONS_DIR: Dir = include_dir!("$CARGO_MANIFEST_DIR/prisma/migrations");
/// Runs all embedded SQL migrations against the SQLite database at `db_url`.
///
/// Migration files are compiled into the binary from `prisma/migrations`
/// via `include_dir!`. Each migration subdirectory contains a
/// `migration.sql` whose SHA-256 checksum identifies it in the
/// `_migrations` bookkeeping table; migrations whose checksum is already
/// recorded are skipped.
///
/// # Errors
/// Returns an error only if hashing a migration file fails. Panics if the
/// `_migrations` existence check itself fails, or if a migration directory
/// name cannot be parsed (see the sort below).
pub async fn run_migrations(db_url: &str) -> Result<()> {
    let client = prisma::new_client_with_url(&format!("file:{}", &db_url)).await;

    // Check whether the `_migrations` bookkeeping table exists yet.
    match client
        ._query_raw::<serde_json::Value>(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='_migrations'",
        )
        .await
    {
        Ok(data) => {
            if data.len() == 0 {
                println!("Migration table does not exist");
                // Bootstrap the bookkeeping table itself; failure is only
                // logged here (the re-query below will then fail loudly).
                match client._execute_raw(INIT_MIGRATION).await {
                    Ok(_) => {}
                    Err(e) => {
                        println!("Failed to create migration table: {}", e);
                    }
                };
                // Re-query purely to confirm (and log) that the table now exists.
                let value: Vec<serde_json::Value> = client
                    ._query_raw(
                        "SELECT name FROM sqlite_master WHERE type='table' AND name='_migrations'",
                    )
                    .await
                    .unwrap();
                println!("Migration table created: {:?}", value);
            } else {
                println!("Migration table exists: {:?}", data);
            }

            // Collect every migration directory except the bootstrap
            // `migration_table` one (already handled above).
            let mut migration_subdirs = MIGRATIONS_DIR
                .dirs()
                .filter(|subdir| {
                    subdir
                        .path()
                        .file_name()
                        .map(|name| name != OsStr::new("migration_table"))
                        .unwrap_or(false)
                })
                .collect::<Vec<_>>();

            // Apply migrations in chronological order: directory names are
            // assumed to start with a 14-character numeric timestamp
            // (panics via `unwrap` if a name is shorter or non-numeric).
            migration_subdirs.sort_by(|a, b| {
                let a_name = a.path().file_name().unwrap().to_str().unwrap();
                let b_name = b.path().file_name().unwrap().to_str().unwrap();
                let a_time = a_name[..14].parse::<i64>().unwrap();
                let b_time = b_name[..14].parse::<i64>().unwrap();
                a_time.cmp(&b_time)
            });

            for subdir in migration_subdirs {
                println!("{:?}", subdir.path());
                let migration_file = subdir
                    .get_file(subdir.path().join("./migration.sql"))
                    .unwrap();
                let migration_sql = migration_file.contents_utf8().unwrap();

                // Create a lowercase hex SHA-256 checksum of the migration
                // file; it acts as the migration's identity in the table.
                let digest = sha256_digest(BufReader::new(migration_file.contents()))?;
                let checksum = HEXLOWER.encode(digest.as_ref());
                let name = subdir.path().file_name().unwrap().to_str().unwrap();

                // Get existing migration by checksum; if it doesn't exist, run the migration.
                let existing_migration = client
                    .migration()
                    .find_unique(Migration::checksum().equals(checksum.clone()))
                    .exec()
                    .await;

                if existing_migration.is_none() {
                    println!("Running migration: {}", name);
                    // Split the SQL into individual statements; the empty
                    // trailing element after the final `;` is dropped.
                    let steps = migration_sql.split(";").collect::<Vec<&str>>();
                    let steps = &steps[0..steps.len() - 1];

                    // Record the migration before executing it so per-step
                    // progress (`steps_applied`) can be tracked below.
                    client
                        .migration()
                        .create_one(
                            Migration::name().set(name.to_string()),
                            Migration::checksum().set(checksum.clone()),
                            vec![],
                        )
                        .exec()
                        .await;

                    for (i, step) in steps.iter().enumerate() {
                        match client._execute_raw(&format!("{};", step)).await {
                            Ok(_) => {
                                println!("Step {} ran successfully", i);
                                // Bump the per-migration progress counter.
                                client
                                    .migration()
                                    .find_unique(Migration::checksum().equals(checksum.clone()))
                                    .update(vec![Migration::steps_applied().set(i as i64 + 1)])
                                    .exec()
                                    .await;
                            }
                            Err(e) => {
                                // Abort the remaining steps of this migration,
                                // but continue with the next subdirectory.
                                println!("Error running migration: {}", name);
                                println!("{}", e);
                                break;
                            }
                        }
                    }
                    println!("Migration {} recorded successfully", name);
                } else {
                    println!("Migration {} already exists", name);
                }
            }
        }
        Err(err) => {
            panic!("Failed to check migration table existence: {:?}", err);
        }
    }
    Ok(())
}

View File

@@ -1,15 +1,8 @@
use crate::file::checksum::sha256_digest;
use crate::{
prisma,
prisma::{Migration, PrismaClient},
};
use crate::{state, CoreError};
use crate::state;
use crate::{prisma, prisma::PrismaClient};
use anyhow::Result;
use data_encoding::HEXLOWER;
use include_dir::{include_dir, Dir};
use once_cell::sync::OnceCell;
use std::ffi::OsStr;
use std::io::BufReader;
pub mod migrate;
pub static DB: OnceCell<PrismaClient> = OnceCell::new();
@@ -34,127 +27,3 @@ pub async fn get() -> Result<&'static PrismaClient, String> {
Ok(DB.get().unwrap())
}
}
const INIT_MIGRATION: &str = include_str!("../../prisma/migrations/migration_table/migration.sql");
static MIGRATIONS_DIR: Dir = include_dir!("$CARGO_MANIFEST_DIR/prisma/migrations");
/// Connects to the library database at `db_url` and applies any embedded
/// SQL migrations not yet recorded in the `_migrations` table.
///
/// NOTE(review): this commit removes `init` in favor of
/// `migrate::run_migrations`, which carries the identical logic.
///
/// # Errors
/// Returns an error only if hashing a migration file fails; panics if the
/// `_migrations` existence check fails.
pub async fn init(db_url: &str) -> Result<()> {
    let client = prisma::new_client_with_url(&format!("file:{}", &db_url)).await;

    // Check whether the `_migrations` bookkeeping table exists yet.
    match client
        ._query_raw::<serde_json::Value>(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='_migrations'",
        )
        .await
    {
        Ok(data) => {
            if data.len() == 0 {
                println!("Migration table does not exist");
                // Bootstrap the bookkeeping table; failure is only logged.
                match client._execute_raw(INIT_MIGRATION).await {
                    Ok(_) => {}
                    Err(e) => {
                        println!("Failed to create migration table: {}", e);
                    }
                };
                // Re-query purely to confirm (and log) the table now exists.
                let value: Vec<serde_json::Value> = client
                    ._query_raw(
                        "SELECT name FROM sqlite_master WHERE type='table' AND name='_migrations'",
                    )
                    .await
                    .unwrap();
                println!("Migration table created: {:?}", value);
            } else {
                println!("Migration table exists: {:?}", data);
            }

            // Collect every migration directory except the bootstrap
            // `migration_table` one (already handled above).
            let mut migration_subdirs = MIGRATIONS_DIR
                .dirs()
                .filter(|subdir| {
                    subdir
                        .path()
                        .file_name()
                        .map(|name| name != OsStr::new("migration_table"))
                        .unwrap_or(false)
                })
                .collect::<Vec<_>>();

            // Apply in chronological order: names are assumed to start with
            // a 14-character numeric timestamp (unwrap panics otherwise).
            migration_subdirs.sort_by(|a, b| {
                let a_name = a.path().file_name().unwrap().to_str().unwrap();
                let b_name = b.path().file_name().unwrap().to_str().unwrap();
                let a_time = a_name[..14].parse::<i64>().unwrap();
                let b_time = b_name[..14].parse::<i64>().unwrap();
                a_time.cmp(&b_time)
            });

            for subdir in migration_subdirs {
                println!("{:?}", subdir.path());
                let migration_file = subdir
                    .get_file(subdir.path().join("./migration.sql"))
                    .unwrap();
                let migration_sql = migration_file.contents_utf8().unwrap();

                // Create a lowercase hex SHA-256 checksum of the migration
                // file; it acts as the migration's identity in the table.
                let digest = sha256_digest(BufReader::new(migration_file.contents()))?;
                let checksum = HEXLOWER.encode(digest.as_ref());
                let name = subdir.path().file_name().unwrap().to_str().unwrap();

                // Get existing migration by checksum; if absent, run it.
                let existing_migration = client
                    .migration()
                    .find_unique(Migration::checksum().equals(checksum.clone()))
                    .exec()
                    .await;

                if existing_migration.is_none() {
                    println!("Running migration: {}", name);
                    // Split into statements; the empty trailing element after
                    // the final `;` is dropped.
                    let steps = migration_sql.split(";").collect::<Vec<&str>>();
                    let steps = &steps[0..steps.len() - 1];

                    // Record the migration up front so per-step progress
                    // (`steps_applied`) can be tracked.
                    client
                        .migration()
                        .create_one(
                            Migration::name().set(name.to_string()),
                            Migration::checksum().set(checksum.clone()),
                            vec![],
                        )
                        .exec()
                        .await;

                    for (i, step) in steps.iter().enumerate() {
                        match client._execute_raw(&format!("{};", step)).await {
                            Ok(_) => {
                                println!("Step {} ran successfully", i);
                                // Bump the per-migration progress counter.
                                client
                                    .migration()
                                    .find_unique(Migration::checksum().equals(checksum.clone()))
                                    .update(vec![Migration::steps_applied().set(i as i64 + 1)])
                                    .exec()
                                    .await;
                            }
                            Err(e) => {
                                // Abort remaining steps of this migration,
                                // but continue with the next subdirectory.
                                println!("Error running migration: {}", name);
                                println!("{}", e);
                                break;
                            }
                        }
                    }
                    println!("Migration {} recorded successfully", name);
                } else {
                    println!("Migration {} already exists", name);
                }
            }
        }
        Err(err) => {
            panic!("Failed to check migration table existence: {:?}", err);
        }
    }
    Ok(())
}

View File

@@ -21,91 +21,48 @@ pub mod util;
// pub mod native;
pub struct Core {
pub event_sender: mpsc::Sender<ClientEvent>,
pub event_receiver: mpsc::Receiver<ClientEvent>,
}
#[derive(Error, Debug)]
pub enum CoreError {
#[error("System error")]
SysError(#[from] sys::SysError),
}
// represents an event this library can emit
#[derive(Serialize, Deserialize, Debug, TS)]
#[serde(rename_all = "snake_case", tag = "key", content = "payload")]
#[ts(export)]
pub enum ClientEvent {
NewFileTypeThumb { file_id: u32, icon_created: bool },
NewJobCreated { job_id: u32, progress: u8 },
ResourceChange { key: String, id: String },
DatabaseDisconnected { reason: Option<String> },
}
// represents an event this library can emit
#[derive(Serialize, Deserialize, Debug, TS)]
#[serde(tag = "key", content = "params")]
#[ts(export)]
pub enum ClientQuery {
SysGetVolumes,
ClientGetCurrent,
SysGetLocations { id: String },
LibGetExplorerDir { path: String, limit: u32 },
}
#[derive(Serialize, Deserialize, Debug, TS)]
#[serde(tag = "key", content = "data")]
#[ts(export)]
pub enum ClientResponse {
SysGetVolumes(Vec<sys::volumes::Volume>),
pub event_sender: mpsc::Sender<CoreEvent>,
pub event_receiver: mpsc::Receiver<CoreEvent>,
pub state: ClientState,
}
impl Core {
pub async fn query(query: ClientQuery) -> Result<ClientResponse, CoreError> {
println!("Core Query: {:?}", query);
let response = match query {
ClientQuery::SysGetVolumes => ClientResponse::SysGetVolumes(sys::volumes::get()?),
ClientQuery::SysGetLocations { id: _ } => todo!(),
ClientQuery::LibGetExplorerDir { path: _, limit: _ } => todo!(),
ClientQuery::ClientGetCurrent => todo!(),
};
Ok(response)
}
pub async fn send(&self, event: ClientEvent) {
self.event_sender.send(event).await.unwrap();
}
// create new instance of core, run startup tasks
pub async fn new(mut data_dir: std::path::PathBuf) -> Core {
let (event_sender, event_receiver) = mpsc::channel(100);
let core = Core {
event_sender,
event_receiver,
};
data_dir = data_dir.join("spacedrive");
let data_dir = data_dir.to_str().unwrap();
// create data directory if it doesn't exist
fs::create_dir_all(&data_dir).unwrap();
// prepare basic client state
let mut client_config =
ClientState::new(data_dir, "diamond-mastering-space-dragon").unwrap();
let mut state = ClientState::new(data_dir, "diamond-mastering-space-dragon").unwrap();
// load from disk
client_config
state
.read_disk()
.unwrap_or(error!("No client state found, creating new one..."));
client_config.save();
state.save();
// begin asynchronous startup routines
info!("Starting up... {:?}", client_config);
if client_config.libraries.len() == 0 {
let core = Core {
event_sender,
event_receiver,
state,
};
core.initializer().await;
core
// activate p2p listeners
// p2p::listener::listen(None);
}
// load library database + initialize client with db
pub async fn initializer(&self) {
if self.state.libraries.len() == 0 {
match library::loader::create(None).await {
Ok(library) => info!("Created new library: {:?}", library),
Err(e) => info!("Error creating library: {:?}", e),
}
} else {
for library in client_config.libraries.iter() {
for library in self.state.libraries.iter() {
// init database for library
match library::loader::load(&library.library_path, &library.library_id).await {
Ok(library) => info!("Loaded library: {:?}", library),
@@ -118,8 +75,90 @@ impl Core {
Ok(_) => info!("Spacedrive online"),
Err(e) => info!("Error initializing client: {:?}", e),
};
// activate p2p listeners
// p2p::listener::listen(None);
core
}
pub async fn command(&self, cmd: ClientCommand) -> Result<CoreResponse, CoreError> {
info!("Core command: {:?}", cmd);
Ok(CoreResponse::Success)
}
// query sources of data
pub async fn query(&self, query: ClientQuery) -> Result<CoreResponse, CoreError> {
info!("Core query: {:?}", query);
let response = match query {
ClientQuery::SysGetVolumes => CoreResponse::SysGetVolumes(sys::volumes::get()?),
ClientQuery::SysGetLocations { id: _ } => todo!(),
ClientQuery::LibGetExplorerDir { path: _, limit: _ } => todo!(),
ClientQuery::ClientGetState => todo!(),
};
Ok(response)
}
// send an event to the client
pub async fn send(&self, event: CoreEvent) {
self.event_sender.send(event).await.unwrap();
}
}
/// Mutating operations a client can ask the core to perform.
/// (The original comment said "event" — these are commands, handled by
/// `Core::command`.) Serialized over IPC as `{ key, params }`; `TS`
/// exports a matching TypeScript definition for the frontend.
#[derive(Serialize, Deserialize, Debug, TS)]
#[serde(tag = "key", content = "params")]
#[ts(export)]
pub enum ClientCommand {
    LocScanFull { location_id: u32 },
    FileScanQuick { file_id: u32 },
    FileScanFull { file_id: u32 },
    FileDelete { file_id: u32 },
    TagCreate { name: String, color: String },
    TagAssign { file_id: u32, tag_id: u32 },
    TagDelete { tag_id: u32 },
    LocDelete { location_id: u32 },
    LibDelete { library_id: u32 },
    SysVolumeUnmount { volume_id: u32 },
}
/// Read-only queries a client can send to the core (handled by
/// `Core::query`). Serialized over IPC as `{ key, params }`; `TS` exports
/// a matching TypeScript definition for the frontend.
#[derive(Serialize, Deserialize, Debug, TS)]
#[serde(tag = "key", content = "params")]
#[ts(export)]
pub enum ClientQuery {
    ClientGetState,
    SysGetVolumes,
    SysGetLocations { id: String },
    LibGetExplorerDir { path: String, limit: u32 },
}
/// Events the core can emit to connected clients, serialized as
/// `{ key, payload }`; `TS` exports a matching TypeScript definition.
#[derive(Serialize, Deserialize, Debug, TS)]
#[serde(tag = "key", content = "payload")]
#[ts(export)]
pub enum CoreEvent {
    // almost all events should be one of these two invalidation variants
    InvalidateQuery(ClientQuery),
    InvalidateResource(CoreResource),
    Log { message: String },
    DatabaseDisconnected { reason: Option<String> },
}
/// Responses the core returns from `query`/`command`, serialized as
/// `{ key, data }`; `TS` exports a matching TypeScript definition.
#[derive(Serialize, Deserialize, Debug, TS)]
#[serde(tag = "key", content = "data")]
#[ts(export)]
pub enum CoreResponse {
    /// Generic acknowledgement for operations with no payload.
    Success,
    SysGetVolumes(Vec<sys::volumes::Volume>),
}
/// Errors surfaced by core operations (`thiserror`-derived).
#[derive(Error, Debug)]
pub enum CoreError {
    #[error("System error")]
    SysError(#[from] sys::SysError),
}
/// Resource categories referenced by `CoreEvent::InvalidateResource`,
/// letting clients know which kind of cached data changed.
#[derive(Serialize, Deserialize, Debug, TS)]
#[ts(export)]
pub enum CoreResource {
    Client,
    Library,
    Location,
    File,
    Job,
    Tag,
}

View File

@@ -3,7 +3,7 @@ use uuid::Uuid;
use crate::state::client::LibraryState;
use crate::{
db::{self, init},
db::{self, migrate},
prisma::{Library, LibraryData},
state,
};
@@ -48,7 +48,7 @@ pub async fn load(library_path: &str, library_id: &str) -> Result<()> {
config.save();
}
// create connection with library database & run migrations
init(&library_path).await?;
migrate::run_migrations(&library_path).await?;
// if doesn't exist, mark as offline
Ok(())
}
@@ -66,7 +66,7 @@ pub async fn create(name: Option<String>) -> Result<()> {
..LibraryState::default()
};
init(&library_state.library_path).await?;
migrate::run_migrations(&library_state.library_path).await?;
config.libraries.push(library_state);

View File

@@ -1,5 +1,6 @@
import { ClientQuery, ClientResponse } from '@sd/core';
import { EventEmitter } from 'eventemitter3';
import { useQuery } from 'react-query';
export let transport: BaseTransport | null = null;
@@ -19,3 +20,11 @@ export async function bridge<
export function setTransport(_transport: BaseTransport) {
transport = _transport;
}
/**
 * React hook that runs a core bridge query through react-query.
 *
 * The cache key combines the bridge key with its params, so calls with
 * different params are cached independently. Extra react-query options may
 * be passed through via `options`.
 */
export function useBridgeQuery(
	key: Parameters<typeof bridge>[0],
	params?: Parameters<typeof bridge>[1],
	options: Parameters<typeof useQuery>[2] = {}
) {
	const queryKey = [key, params];
	const queryFn = () => bridge(key, params);
	return useQuery(queryKey, queryFn, options);
}

View File

@@ -3,10 +3,6 @@ import { useState } from 'react';
import { useFileExplorerState } from './state';
import { bridge } from '../bridge';
// export function useBridgeQuery(key: ) {
// return useQuery(['sys_get_volumes'], () => bridge('sys_get_volumes'));
// }
// this hook initializes the explorer state and queries the core
export function useFileExplorer(initialPath = '/', initialLocation: number | null = null) {
const fileState = useFileExplorerState();
@@ -28,6 +24,6 @@ export function useFileExplorer(initialPath = '/', initialLocation: number | nul
return { location, files, setPath, setLocationId };
}
// Fetches the system's mounted volumes from the core via the bridge,
// cached by react-query under the 'SysGetVolumes' key.
export function useVolumes() {
	return useQuery(['SysGetVolumes'], () => bridge('SysGetVolumes'));
}
// export function useVolumes() {
// return useQuery(['SysGetVolumes'], () => bridge('SysGetVolumes'));
// }