mirror of https://github.com/spacedriveapp/spacedrive.git
synced 2026-05-14 18:24:27 -04:00

(chore): update tasks

.github/workflows/release.yml (vendored): 21 changed lines
@@ -25,10 +25,14 @@ jobs:
         - host: macos-13
           target: x86_64-apple-darwin
           platform: macos-x86_64
-        # Linux and Windows builds (uncomment when needed)
-        # - host: ubuntu-22.04
-        #   target: x86_64-unknown-linux-gnu
-        #   platform: linux-x86_64
+        # Linux builds
+        - host: ubuntu-22.04
+          target: x86_64-unknown-linux-gnu
+          platform: linux-x86_64
+        - host: ubuntu-22.04
+          target: aarch64-unknown-linux-gnu
+          platform: linux-aarch64
+        # Windows builds (uncomment when needed)
         # - host: windows-latest
         #   target: x86_64-pc-windows-msvc
         #   platform: windows-x86_64
@@ -43,9 +47,18 @@ jobs:
         with:
           targets: ${{ matrix.target }}
 
+      - name: Install cross-compilation tools (Linux ARM)
+        if: matrix.target == 'aarch64-unknown-linux-gnu'
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y gcc-aarch64-linux-gnu
+
       - name: Build CLI binaries
         run: |
           cargo build --release --bin sd-cli --bin sd-daemon --target ${{ matrix.target }}
+        env:
+          # Set linker for cross-compilation
+          CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc
 
       - name: Prepare binaries (Unix)
         if: runner.os != 'Windows'
@@ -2,7 +2,7 @@
 id: AI-000
 title: "Epic: AI & Intelligence"
 status: To Do
-assignee: unassigned
+assignee: james
 priority: High
 tags: [epic, ai, agent]
 whitepaper: Section 4.6
@@ -2,7 +2,7 @@
 id: AI-001
 title: Develop AI Agent for Proactive Assistance
 status: To Do
-assignee: unassigned
+assignee: james
 parent: AI-000
 priority: High
 tags: [ai, agent, core]
@@ -21,6 +21,7 @@ Implement the core AI agent that observes user behavior and proactively suggests
 4. Integrate with local models via `Ollama` for privacy-first analysis.
 
 ## Acceptance Criteria
+
 - [ ] The agent can detect when a user repeatedly performs the same organizational task.
 - [ ] The agent can propose a valid, pre-visualized `Action` to automate that task.
 - [ ] The user can approve or deny the suggestion.
@@ -2,7 +2,7 @@
 id: CLI-000
 title: "Epic: Command-Line Interface"
 status: In Progress
-assignee: unassigned
+assignee: james
 priority: High
 tags: [epic, cli]
 whitepaper: "N/A"
@@ -2,7 +2,7 @@
 id: CLOUD-000
 title: "Epic: Cloud as a Peer"
 status: To Do
-assignee: unassigned
+assignee: james
 priority: High
 tags: [epic, cloud, networking, infrastructure]
 whitepaper: Section 5
@@ -2,7 +2,7 @@
 id: CLOUD-001
 title: Design Managed Cloud Core Infrastructure
 status: To Do
-assignee: unassigned
+assignee: james
 parent: CLOUD-000
 priority: High
 tags: [cloud, infrastructure, design, kubernetes]
@@ -2,7 +2,7 @@
 id: CLOUD-002
 title: Asynchronous Relay Server
 status: To Do
-assignee: unassigned
+assignee: james
 parent: CLOUD-000
 priority: High
 tags: [cloud, networking, relay, sharing]
@@ -21,6 +21,7 @@ Implement the relay server functionality that enables asynchronous communication
 4. Implement the logic for clients to connect to and use the relay server when direct P2P connection is not possible.
 
 ## Acceptance Criteria
+
 - [ ] The relay server can be deployed and run as a standalone service.
 - [ ] Two peers can communicate asynchronously through the relay server.
 - [ ] The system gracefully falls back to using the relay when direct connection fails.
@@ -2,7 +2,7 @@
 id: CLOUD-003
 title: Cloud Storage Provider as a Volume
 status: In Progress
-assignee: unassigned
+assignee: james
 parent: CLOUD-000
 priority: High
 tags: [cloud, storage, volume, s3]
@@ -41,28 +41,34 @@ Implement support for a cloud storage provider (e.g., S3-compatible service) as
 - Content phase uses backend for content hashing
 
 ## Acceptance Criteria
+
 - [x] A user can add an S3 bucket as a new location in their library.
 - [ ] Files can be copied to and from the cloud volume.
 - [x] The cloud volume can be indexed like any other location.
 
 ## Implementation Files
 
 **Core Backend:**
+
 - `core/src/volume/backend/mod.rs` - VolumeBackend trait
 - `core/src/volume/backend/local.rs` - LocalBackend implementation
 - `core/src/volume/backend/cloud.rs` - CloudBackend with OpenDAL
 
 **Credential Management:**
+
 - `core/src/crypto/cloud_credentials.rs` - CloudCredentialManager
 
 **Actions:**
+
 - `core/src/ops/volumes/add_cloud/` - VolumeAddCloudAction
 - `core/src/ops/volumes/remove_cloud/` - VolumeRemoveCloudAction
 
 **CLI:**
+
 - `apps/cli/src/domains/volume/` - CLI commands
 
 **Query System:**
+
 - `core/src/domain/entry.rs` - Cloud path support
 - `core/src/ops/files/query/directory_listing.rs` - Cloud directory browsing
 - `core/src/ops/files/query/file_by_path.rs` - Cloud file lookup
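Reviewer note: the `VolumeBackend` trait listed above is the seam that lets local and cloud volumes share one code path. A minimal sketch of that shape, assuming a simplified trait and OpenDAL's `Operator` as the cloud I/O layer; the real Spacedrive trait, methods, and error types likely differ:

```rust
// Hypothetical sketch only: trait shape and method names are assumptions.
use std::path::Path;

#[async_trait::async_trait]
pub trait VolumeBackend: Send + Sync {
    async fn read(&self, path: &Path) -> anyhow::Result<Vec<u8>>;
    async fn write(&self, path: &Path, data: &[u8]) -> anyhow::Result<()>;
}

pub struct CloudBackend {
    // An OpenDAL operator configured for S3/R2/etc. at volume-add time.
    op: opendal::Operator,
}

#[async_trait::async_trait]
impl VolumeBackend for CloudBackend {
    async fn read(&self, path: &Path) -> anyhow::Result<Vec<u8>> {
        // OpenDAL addresses objects by string key rather than OS path.
        let buf = self.op.read(&path.to_string_lossy()).await?;
        Ok(buf.to_vec())
    }

    async fn write(&self, path: &Path, data: &[u8]) -> anyhow::Result<()> {
        self.op.write(&path.to_string_lossy(), data.to_vec()).await?;
        Ok(())
    }
}
```

With this split, the indexer and copy jobs can hold a `dyn VolumeBackend` and stay agnostic about whether bytes come from disk or a bucket.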
@@ -2,7 +2,7 @@
 id: CORE-006
 title: Semantic Tagging Architecture
 status: Done
-assignee: unassigned
+assignee: james
 parent: CORE-000
 priority: Medium
 tags: [core, vdfs, tagging, metadata]
@@ -21,6 +21,7 @@ Implement the graph-based semantic tagging architecture. This will allow users t
 4. Develop a query system for finding entries based on their tags.
 
 ## Acceptance Criteria
+
 - [ ] A user can create and manage a hierarchy of tags.
 - [ ] A user can assign multiple tags to a file or directory.
 - [ ] A user can search for files based on their tags.
@@ -2,7 +2,7 @@
 id: CORE-009
 title: User-Managed Collections
 status: To Do
-assignee: unassigned
+assignee: james
 parent: CORE-000
 priority: Medium
 tags: [core, vdfs, collections, organization]
@@ -21,7 +21,8 @@ Implement the ability for users to save selections of files into persistent coll
 4. Develop a UI/CLI for managing collections.
 
 ## Acceptance Criteria
+
 - [ ] A user can create a new collection.
 - [ ] A user can add files and directories to a collection.
 - [ ] A user can view the contents of a collection.
 - [ ] A user can remove items from a collection.
@@ -2,7 +2,7 @@
 id: CORE-010
 title: File Ingestion Workflow
 status: To Do
-assignee: unassigned
+assignee: james
 parent: CORE-000
 priority: High
 tags: [core, vdfs, ingestion, workflow]
@@ -21,6 +21,7 @@ Implement the "Ingest Location" workflow, which provides a quarantine zone for n
 4. Allow users to configure the processing steps for each Ingest Location.
 
 ## Acceptance Criteria
+
 - [ ] A user can configure an Ingest Location.
 - [ ] New files uploaded to the Ingest Location are processed according to the configured workflow.
 - [ ] Processed files are moved to their final destination in the library.
@@ -2,7 +2,7 @@
 id: CORE-011
 title: Unified Resource Event System
 status: To Do
-assignee: unassigned
+assignee: james
 priority: High
 tags: [core, events, architecture, refactor]
 ---
@@ -2,7 +2,7 @@
 id: CORE-012
 title: Resource Type Registry (Swift)
 status: To Do
-assignee: unassigned
+assignee: james
 parent: CORE-011
 priority: High
 tags: [client, swift, codegen, cache]
@@ -2,7 +2,7 @@
 id: CORE-013
 title: Resource Type Registry (TypeScript)
 status: To Do
-assignee: unassigned
+assignee: james
 parent: CORE-011
 priority: High
 tags: [client, typescript, codegen, cache]
@@ -34,32 +34,35 @@ Create the TypeScript ResourceTypeRegistry for web/desktop clients. Enables gene
 
 ```typescript
 class ResourceTypeRegistry {
   private static validators = new Map<string, (data: unknown) => any>();
 
   static register<T>(resourceType: string, validator: (data: unknown) => T) {
     this.validators.set(resourceType, validator);
   }
 
   static decode(resourceType: string, data: unknown): any {
     const validator = this.validators.get(resourceType);
     if (!validator) {
       throw new Error(`Unknown resource type: ${resourceType}`);
     }
     return validator(data);
   }
 }
 
 // Auto-generated from specta
 export const resourceTypeMap = {
-  'file': File,
-  'album': Album,
-  'tag': Tag,
-  'location': Location,
+  file: File,
+  album: Album,
+  tag: Tag,
+  location: Location,
 } as const;
 
 // Auto-registration
 Object.entries(resourceTypeMap).forEach(([type, TypeClass]) => {
-  ResourceTypeRegistry.register(type, (data) => data as InstanceType<typeof TypeClass>);
+  ResourceTypeRegistry.register(
+    type,
+    (data) => data as InstanceType<typeof TypeClass>,
+  );
});
 ```
@@ -2,7 +2,7 @@
 id: CORE-014
 title: Specta Codegen for Resource Events
 status: To Do
-assignee: unassigned
+assignee: james
 parent: CORE-011
 priority: High
 tags: [codegen, specta, typescript, swift]
@@ -25,21 +25,23 @@ Extend the existing specta codegen system to auto-generate resource type registr
 ## Generated Output
 
 ### TypeScript
+
 ```typescript
 // packages/client/src/bindings/resourceRegistry.ts
 export const resourceTypeMap = {
-  'file': File,
-  'album': Album,
-  'tag': Tag,
-  'location': Location,
-  'device': Device,
-  'volume': Volume,
-  'content_identity': ContentIdentity,
-  // ... all Identifiable types
+  file: File,
+  album: Album,
+  tag: Tag,
+  location: Location,
+  device: Device,
+  volume: Volume,
+  content_identity: ContentIdentity,
+  // ... all Identifiable types
 } as const;
 ```
 
 ### Swift (Future)
+
 ```swift
 // SpacedriveCore/Generated/ResourceTypeRegistry+Generated.swift
 extension ResourceTypeRegistry {
@@ -2,7 +2,7 @@
 id: CORE-015
 title: Normalized Client Cache (Swift)
 status: To Do
-assignee: unassigned
+assignee: james
 priority: High
 tags: [client, swift, cache, performance]
 depends_on: [CORE-012]
@@ -2,7 +2,7 @@
 id: CORE-016
 title: Normalized Client Cache (TypeScript)
 status: To Do
-assignee: unassigned
+assignee: james
 priority: High
 tags: [client, typescript, react, cache, performance]
 depends_on: [CORE-013]
@@ -27,28 +27,28 @@ Implement the normalized client cache for web/desktop (Electron) apps. Same arch
 
 ```typescript
 function useCachedQuery<T>(
   method: string,
   input: any,
 ): { data: T[] | null; loading: boolean; error: Error | null } {
   const cache = useContext(CacheContext);
   const [data, setData] = useState<T[] | null>(null);
 
   useEffect(() => {
     const queryKey = cache.generateQueryKey(method, input);
 
     // Subscribe to cache changes
     const unsubscribe = cache.subscribe(queryKey, () => {
       const result = cache.getQueryResult<T>(queryKey);
       setData(result);
     });
 
     // Initial fetch
     cache.query<T>(method, input).then(setData);
 
     return unsubscribe;
   }, [method, JSON.stringify(input)]);
 
   return { data, loading: data === null, error: null };
 }
 ```
@@ -2,7 +2,7 @@
 id: CORE-017
 title: Optimistic Updates for Client Cache
 status: To Do
-assignee: unassigned
+assignee: james
 parent: CORE-015
 priority: Medium
 tags: [client, cache, ux, optimistic]
@@ -28,21 +28,24 @@ Implement optimistic updates in the normalized cache, allowing instant UI feedba
 // 1. Optimistic update (instant UI)
 const pendingId = uuid();
 await cache.updateOptimistically(pendingId, {
   id: albumId,
   name: newName,
-  ...optimisticAlbum
+  ...optimisticAlbum,
 });
 
 try {
   // 2. Send action to server
-  const confirmed = await client.action('albums.rename.v1', { id: albumId, name: newName });
+  const confirmed = await client.action("albums.rename.v1", {
+    id: albumId,
+    name: newName,
+  });
 
   // 3. Commit (replace optimistic with confirmed)
   await cache.commitOptimisticUpdate(pendingId, confirmed);
 } catch (error) {
   // 4. Rollback on error
   await cache.rollbackOptimisticUpdate(pendingId);
   throw error;
 }
 ```
@@ -112,9 +112,6 @@ This checks that your YAML front matter matches the schema.
 # Show only "In Progress" tasks
 cargo run --bin task-validator -- list --status "In Progress"
 
-# Show tasks by assignee
-cargo run --bin task-validator -- list --assignee james
-
 # Show tasks by priority
 cargo run --bin task-validator -- list --priority High
 ```
@@ -184,7 +181,7 @@ Required fields in YAML front matter:
 - `id` - Unique identifier (e.g., CORE-001)
 - `title` - Human-readable title
 - `status` - One of: To Do, In Progress, Done, Completed
-- `assignee` - Who's working on it (or "unassigned")
+- `assignee` - Who's working on it (or "james")
 - `priority` - Critical, High, Medium, or Low
 - `tags` - Array of relevant tags
@@ -2,7 +2,7 @@
 id: FILE-003
 title: Cloud Volume File Operations
 status: To Do
-assignee: unassigned
+assignee: james
 parent: FILE-000
 priority: High
 tags: [core, file-ops, cloud, jobs]
@@ -56,15 +56,19 @@ Extend the file copy job system to support cloud volumes, enabling users to copy
 ## Implementation Files
 
 **Strategy Implementation:**
+
 - `core/src/ops/files/copy/strategy.rs` - Add `CloudCopyStrategy`
 
 **Routing Logic:**
+
 - `core/src/ops/files/copy/routing.rs` - Update `CopyStrategyRouter::select_strategy()`
 
 **Volume Backend:**
+
 - `core/src/volume/backend/cloud.rs` - Already provides `read()`, `write()`, `read_range()`
 
 **Testing:**
+
 - `core/tests/test_cloud_file_ops.rs` - Integration tests for cloud file operations
 
 ## Technical Notes
@@ -2,7 +2,7 @@
 id: FSYNC-000
 title: File Sync System (Epic)
 status: To Do
-assignee: unassigned
+assignee: james
 parent: null
 priority: High
 tags: [sync, service, epic, index-driven]
@@ -29,6 +29,7 @@ Implement File Sync system - an index-driven service that orchestrates content s
 ## Architecture Decision: Service vs Job
 
 **Why FileSyncService (not FileSyncJob):**
+
 - Jobs cannot spawn child jobs in Spacedrive's architecture
 - FileSyncJob would duplicate FileCopyJob's complex routing logic
 - Bidirectional sync needs persistent state management beyond job lifecycle
@@ -38,12 +39,15 @@ Implement File Sync system - an index-driven service that orchestrates content s
 ## Sync Modes
 
 ### Mirror Mode (MVP)
+
 One-way sync: source → target. Creates exact copy with automatic cleanup.
 
 ### Bidirectional Mode
+
 Two-way sync with conflict detection and resolution. Changes flow both directions.
 
 ### Selective Mode (Future)
+
 Intelligent local storage management with access pattern tracking.
 
 **Note:** Archive mode removed from design - users can achieve this with FileCopyJob + delete.
@@ -2,7 +2,7 @@
 id: FSYNC-001
 title: DeleteJob Strategy Pattern & Remote Deletion
 status: To Do
-assignee: unassigned
+assignee: james
 parent: FSYNC-000
 priority: High
 tags: [delete, strategy, remote, networking]
@@ -20,6 +20,7 @@ Bring DeleteJob up to parity with FileCopyJob's architecture by implementing the
 ## Problem
+
 File Sync needs to delete files on remote devices as part of Mirror and Bidirectional sync modes. The current DeleteJob lacks:
 
 - Strategy pattern for routing (FileCopyJob has this)
 - Cross-device deletion capability
 - Consistent architecture with other file operations
@@ -52,6 +53,7 @@ pub struct DeleteResult {
 ### 2. Implement LocalDeleteStrategy
 
 Move existing DeleteJob logic into LocalDeleteStrategy:
+
 - `move_to_trash()` for DeleteMode::Trash
 - `permanent_delete()` for DeleteMode::Permanent
 - `secure_delete()` for DeleteMode::Secure
@@ -73,6 +75,7 @@ impl DeleteStrategy for RemoteDeleteStrategy {
 ```
 
 **Network Protocol:**
+
 ```rust
 pub enum FileDeleteMessage {
     Request {
@@ -113,6 +116,7 @@ impl DeleteStrategyRouter {
 ### 5. Update DeleteJob to Use Strategies
 
 Refactor DeleteJob::run() to:
+
 1. Select strategy via DeleteStrategyRouter
 2. Execute deletion using selected strategy
 3. Aggregate results and return DeleteOutput
@@ -120,14 +124,17 @@ Refactor DeleteJob::run() to:
 ## Files to Create/Modify
 
 **New Files:**
+
 - `core/src/ops/files/delete/strategy.rs` - Strategy trait and implementations
 - `core/src/ops/files/delete/routing.rs` - Strategy router
 
 **Modified Files:**
+
 - `core/src/ops/files/delete/job.rs` - Refactor to use strategies
 - `core/src/ops/files/delete/mod.rs` - Export new modules
 
 **Networking:**
+
 - `core/src/service/networking/handlers.rs` - Add file_delete handler
 
 ## Acceptance Criteria
@@ -145,6 +152,7 @@ Refactor DeleteJob::run() to:
 ## Technical Notes
 
 **Why Strategy Pattern?**
+
 - Consistent with FileCopyJob architecture (CopyStrategy pattern)
 - Separates concerns: routing logic vs. deletion logic
 - Easy to add new strategies (CloudDeleteStrategy for S3/R2)
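Reviewer note: as a concrete illustration of the strategy pattern argued for in this task, here is a hedged sketch. `DeleteMode`, `DeleteResult`, and the trait signature are assumptions modeled on the snippets above, not the actual Spacedrive API:

```rust
// Hypothetical sketch: names and shapes are assumptions for illustration.
use std::path::PathBuf;

pub enum DeleteMode {
    Trash,
    Permanent,
    Secure,
}

pub struct DeleteResult {
    pub deleted: Vec<PathBuf>,
    pub failed: Vec<(PathBuf, String)>,
}

#[async_trait::async_trait]
pub trait DeleteStrategy: Send + Sync {
    async fn delete(&self, targets: Vec<PathBuf>, mode: DeleteMode) -> DeleteResult;
}

/// Local deletion: wraps the existing filesystem logic.
/// Trash and Secure handling are elided; permanent delete shown for brevity.
pub struct LocalDeleteStrategy;

#[async_trait::async_trait]
impl DeleteStrategy for LocalDeleteStrategy {
    async fn delete(&self, targets: Vec<PathBuf>, _mode: DeleteMode) -> DeleteResult {
        let mut result = DeleteResult { deleted: Vec::new(), failed: Vec::new() };
        for path in targets {
            match tokio::fs::remove_file(&path).await {
                Ok(()) => result.deleted.push(path),
                Err(e) => result.failed.push((path, e.to_string())),
            }
        }
        result
    }
}
```

A `RemoteDeleteStrategy` implementing the same trait would send a `FileDeleteMessage::Request` over the P2P transport instead of touching the local filesystem, which is exactly why the router can stay ignorant of where the files live.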
@@ -152,6 +160,7 @@ Refactor DeleteJob::run() to:
 
 **Networking Integration:**
 Reuses existing P2P infrastructure:
+
 - QUIC transport for reliability
 - Compression for small message payloads
 - Request/response pattern with timeout handling
@@ -2,7 +2,7 @@
 id: FSYNC-002
 title: Database Schema & Entities
 status: To Do
-assignee: unassigned
+assignee: james
 parent: FSYNC-000
 priority: High
 tags: [database, schema, migration, entities]
@@ -93,12 +93,14 @@ pub struct Model {
 ### 1. Create Entity Files
 
 **SyncConduit Entity:**
+
 - `core/src/entities/sync_conduit.rs`
 - Define Model struct with all fields
 - Implement Relation enum (foreign keys to Entry)
 - Add SyncMode enum with as_str() and from_str()
 
 **SyncGeneration Entity:**
+
 - `core/src/entities/sync_generation.rs`
 - Define Model struct
 - Implement Relation enum (foreign key to SyncConduit)
@@ -206,6 +208,7 @@ Both source and target entries must be directories (kind=1). The conduit creates
 ## Technical Notes
 
 **Verification Status Values:**
+
 - `unverified` - Sync completed, not yet verified
 - `waiting_watcher` - Waiting for filesystem watcher to update index
 - `waiting_library_sync` - Waiting for library sync to propagate changes
@@ -214,6 +217,7 @@ Both source and target entries must be directories (kind=1). The conduit creates
 
 **Why Trust Watcher?**
 Option A (Trust Watcher) chosen over Option B (Eager Update) because:
+
 - Single source of truth: Watcher already maintains index consistency
 - No duplication: Sync service doesn't need filesystem semantics
 - Eventual consistency: System naturally converges to consistent state
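Reviewer note: the entity steps call for a `SyncMode` enum with `as_str()` and `from_str()`. A sketch of what that could look like; the variants follow the modes listed in FSYNC-000, and the string forms are assumptions:

```rust
// Hypothetical sketch: string forms are assumed, not taken from the schema.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SyncMode {
    Mirror,
    Bidirectional,
    Selective,
}

impl SyncMode {
    /// Stable string form stored in the sync_conduit row.
    pub fn as_str(&self) -> &'static str {
        match self {
            SyncMode::Mirror => "mirror",
            SyncMode::Bidirectional => "bidirectional",
            SyncMode::Selective => "selective",
        }
    }

    /// Parse the stored string back; unknown values are rejected.
    pub fn from_str(s: &str) -> Option<Self> {
        match s {
            "mirror" => Some(SyncMode::Mirror),
            "bidirectional" => Some(SyncMode::Bidirectional),
            "selective" => Some(SyncMode::Selective),
            _ => None,
        }
    }
}
```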
@@ -2,7 +2,7 @@
 id: FSYNC-003
 title: FileSyncService Core Implementation
 status: To Do
-assignee: unassigned
+assignee: james
 parent: FSYNC-000
 priority: High
 tags: [service, core, orchestration, resolver]
@@ -33,6 +33,7 @@ pub struct FileSyncService {
 ```
 
 **Key Components:**
+
 - **ConduitManager**: CRUD operations for sync conduits
 - **SyncResolver**: Calculates operations from index queries
 - **Active syncs tracker**: Prevents duplicate syncs, enables progress monitoring
@@ -70,6 +71,7 @@ impl ConduitManager {
 ```
 
 **Responsibilities:**
+
 - Validate entries are directories before creating conduit
 - Check for duplicate conduits
 - Manage generation records
@@ -104,6 +106,7 @@ pub struct DirectionalOps {
 ```
 
 **Index Query Logic:**
+
 1. Load entries recursively for both source and target
 2. Build path maps (relative path → entry)
 3. Apply mode-specific resolution:
@@ -112,6 +115,7 @@ pub struct DirectionalOps {
 - **Selective**: (future) access pattern filtering
 
 **Key Method:**
+
 ```rust
 fn resolve_mirror(
     source_map: &HashMap<PathBuf, entry::Model>,
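Reviewer note: the hunk cuts `resolve_mirror` off mid-signature. Here is a self-contained sketch of the mirror resolution it describes (compare path maps, copy differences, delete orphans), with `entry::Model` simplified to a metadata digest for illustration; the real types differ:

```rust
// Hypothetical sketch: the digest stands in for entry::Model comparison.
use std::collections::HashMap;
use std::path::PathBuf;

pub struct MirrorOps {
    pub copies: Vec<PathBuf>,  // relative paths to copy source -> target
    pub deletes: Vec<PathBuf>, // target-only paths to remove
}

fn resolve_mirror(
    source_map: &HashMap<PathBuf, u64>, // relative path -> size/mtime digest
    target_map: &HashMap<PathBuf, u64>,
) -> MirrorOps {
    let mut ops = MirrorOps { copies: Vec::new(), deletes: Vec::new() };

    // Copy anything missing on the target or differing from the source.
    for (path, digest) in source_map {
        match target_map.get(path) {
            Some(t) if t == digest => {} // already in sync
            _ => ops.copies.push(path.clone()),
        }
    }

    // Mirror mode also removes target entries with no source counterpart.
    for path in target_map.keys() {
        if !source_map.contains_key(path) {
            ops.deletes.push(path.clone());
        }
    }

    ops
}
```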
@@ -186,6 +190,7 @@ pub enum ConflictStrategy {
 ## Files to Create
 
 **Core Service:**
+
 - `core/src/service/file_sync/mod.rs` - Main service implementation
 - `core/src/service/file_sync/conduit.rs` - ConduitManager
 - `core/src/service/file_sync/resolver.rs` - SyncResolver
@@ -225,6 +230,7 @@ async fn complete_sync_with_verification(...) -> Result<()> {
 ```
 
 **Why Trust Watcher?**
+
 - Single source of truth: Watcher maintains index consistency
 - No duplication: Sync doesn't need filesystem semantics
 - Handles concurrent changes: User modifications during sync detected naturally
@@ -233,11 +239,13 @@ async fn complete_sync_with_verification(...) -> Result<()> {
 ## Performance Considerations
 
 **Index Queries:**
+
 - Use location_id filtering for efficient entry queries
 - Implement pagination for very large directories
 - Cache path maps during resolution
 
 **Job Batching:**
+
 - Parallel copy jobs (configurable via parallel_transfers setting)
 - Sequential deletes after all copies complete
 - Progress aggregation from job manager
@@ -2,7 +2,7 @@
 id: FSYNC-004
 title: Service Integration & API
 status: To Do
-assignee: unassigned
+assignee: james
 parent: FSYNC-000
 priority: Medium
 tags: [api, integration, routes, events]
@@ -74,6 +74,7 @@ pub fn mount() -> Router {
 ```
 
 **API Types:**
+
 ```rust
 // Request/Response types
 
@@ -150,6 +151,7 @@ impl FileSyncService {
 ```
 
 **Event Emission Points:**
+
 - sync_now() → SyncStarted
 - monitor_sync() progress loop → SyncProgress
 - monitor_sync() completion → SyncCompleted
@@ -172,12 +174,15 @@ pub fn mount() -> Router {
 ## Files to Create/Modify
 
 **API Implementation:**
+
 - `core/src/api/sync.rs` - API routes and types
 
 **Event Integration:**
+
 - `core/src/service/file_sync/events.rs` - Event types and emission
 
 **Service Registration:**
+
 - `core/src/service/mod.rs` - Add file_sync to Services struct
 - `core/src/api/mod.rs` - Register sync router
@@ -203,10 +208,10 @@ pub fn mount() -> Router {
 
 ```typescript
 const conduit = await core.sync.createConduit({
   sourceEntryId: 123,
   targetEntryId: 456,
   syncMode: "mirror",
   schedule: "manual",
 });
 ```
@@ -217,11 +222,11 @@ const handle = await core.sync.syncNow(conduit.id);
 
 // Subscribe to progress
 core.events.on("file_sync", (event) => {
-  if (event.type === "SyncProgress" && event.conduit_id === conduit.id) {
-    console.log(
-      `Progress: ${event.progress.completed_files}/${event.progress.total_files}`
-    );
-  }
+  if (event.type === "SyncProgress" && event.conduit_id === conduit.id) {
+    console.log(
+      `Progress: ${event.progress.completed_files}/${event.progress.total_files}`,
+    );
+  }
 });
 ```
@@ -238,15 +243,18 @@ console.log(progress.phase, progress.completed_bytes, progress.total_bytes);
 ## UI Integration Points
 
 **Location Context Menu:**
+
 - "Sync to..." option on directory right-click
 - Opens modal to select target location and configure sync mode
 
 **Sync Status Panel:**
+
 - List of all conduits with status indicators
 - Per-conduit progress bars during active sync
 - History view showing past generations
 
 **Settings:**
+
 - Configure schedule, bandwidth limits, conflict resolution
 - Enable/disable conduits
 - View and resolve conflicts
@@ -2,7 +2,7 @@
 id: FSYNC-005
 title: Advanced Features (Scheduling, Progress, Conflicts)
 status: To Do
-assignee: unassigned
+assignee: james
 parent: FSYNC-000
 priority: Medium
 tags: [scheduler, progress, conflicts, polish]
@@ -66,6 +66,7 @@ impl FileSyncService {
 ```
 
 **Schedule Formats:**
+
 - `"manual"` - Only triggered via API
 - `"instant"` - Triggers on filesystem change (requires watcher integration)
 - `"interval:5m"` - Every 5 minutes
@@ -73,6 +74,7 @@ impl FileSyncService {
 - `"interval:1d"` - Daily
 
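Reviewer note: a sketch of how those schedule strings could be parsed; the `Schedule` enum and unit handling are illustrative, not the shipped parser:

```rust
// Hypothetical sketch: assumes units m/h/d as in the formats listed above.
use std::time::Duration;

#[derive(Debug, PartialEq)]
pub enum Schedule {
    Manual,
    Instant,
    Interval(Duration),
}

pub fn parse_schedule(s: &str) -> Option<Schedule> {
    match s {
        "manual" => Some(Schedule::Manual),
        "instant" => Some(Schedule::Instant),
        _ => {
            let spec = s.strip_prefix("interval:")?;
            if spec.len() < 2 {
                return None; // need at least one digit plus a unit
            }
            let (num, unit) = spec.split_at(spec.len() - 1);
            let n: u64 = num.parse().ok()?;
            let secs = match unit {
                "m" => n * 60,
                "h" => n * 3600,
                "d" => n * 86_400,
                _ => return None,
            };
            Some(Schedule::Interval(Duration::from_secs(secs)))
        }
    }
}

// parse_schedule("interval:5m") == Some(Schedule::Interval(Duration::from_secs(300)))
```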
**Watcher Integration (Instant Mode):**
 
 ```rust
 // Subscribe to location watcher events
 // When files change in source or target directory:
@@ -141,6 +143,7 @@ impl FileSyncService {
 ```
 
 **Progress Tracking:**
+
 - Query job manager for individual job progress
 - Aggregate totals across all active jobs
 - Calculate transfer speed from job metrics
@@ -209,6 +212,7 @@ impl ConflictResolver {
 ```
 
 **Conflict Filename Format:**
+
 ```
 original.txt
 → original (conflict 2025-10-14 Device-Name).txt
 ```
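Reviewer note: producing that filename is a small path manipulation; a hedged sketch, with the date and device name passed in by the caller (the real resolver presumably derives them from the sync context):

```rust
// Hypothetical sketch of the conflict rename shown above.
use std::path::{Path, PathBuf};

fn conflict_name(path: &Path, date: &str, device: &str) -> PathBuf {
    let stem = path.file_stem().and_then(|s| s.to_str()).unwrap_or("file");
    let renamed = match path.extension().and_then(|e| e.to_str()) {
        // Keep the original extension so the file still opens normally.
        Some(ext) => format!("{stem} (conflict {date} {device}).{ext}"),
        None => format!("{stem} (conflict {date} {device})"),
    };
    path.with_file_name(renamed)
}

// conflict_name(Path::new("original.txt"), "2025-10-14", "Device-Name")
// yields "original (conflict 2025-10-14 Device-Name).txt".
```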
@@ -239,6 +243,7 @@ impl FileSyncService {
 ```
 
 **Metrics to Track:**
+
 - Total conduits created
 - Active sync count
 - Syncs completed (24h, 7d, 30d)
@@ -250,15 +255,19 @@ impl FileSyncService {
 ## Files to Create
 
 **Scheduler:**
+
 - `core/src/service/file_sync/scheduler.rs` - Background scheduler
 
 **Progress:**
+
 - `core/src/service/file_sync/progress.rs` - Progress aggregation
 
 **Conflicts:**
+
 - `core/src/service/file_sync/conflict.rs` - Conflict resolution strategies (enhanced)
 
 **Monitoring:**
+
 - `core/src/service/file_sync/telemetry.rs` - Telemetry and metrics
 
 ## Acceptance Criteria
@@ -280,18 +289,21 @@ impl FileSyncService {
 ## User Experience Improvements
 
 **Real-Time Progress:**
+
 - Show current file being copied
 - Display transfer speed (MB/s)
 - Show ETA for completion
 - Indicate phase (copying/deleting/verifying)
 
 **Conflict Management:**
+
 - Highlight conflicts in sync status
 - Preview both versions before resolution
 - Batch resolution for multiple conflicts
 - Remember user's preferred strategy
 
 **Scheduling UI:**
+
 - Visual schedule picker
 - Next sync time indicator
 - Manual sync button always available
@@ -2,7 +2,7 @@
 id: INDEX-002
 title: Stale File Detection Algorithm
 status: To Do
-assignee: unassigned
+assignee: james
 parent: INDEX-000
 priority: High
 tags: [indexing, stale-detection, offline-recovery]
@@ -21,6 +21,7 @@ Implement the algorithm for detecting stale files after the application has been
 4. The algorithm should be efficient and not significantly slow down the application's startup time.
 
 ## Acceptance Criteria
+
 - [ ] The system can correctly detect files that were modified or deleted while the application was offline.
 - [ ] The system can correctly detect files that were moved or renamed while the application was offline.
 - [ ] The stale file detection process is efficient and does not block the application for an unreasonable amount of time.
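Reviewer note: the core of this detection pass is a comparison of indexed metadata against the live filesystem at startup. A sketch under assumed types; move/rename detection would need a second pass matching content hashes of "deleted" and newly found files, which this omits:

```rust
// Hypothetical sketch: the (size, mtime) pair stands in for indexed metadata.
use std::collections::HashMap;
use std::path::PathBuf;

#[derive(Debug)]
pub enum StaleChange {
    Modified(PathBuf),
    Deleted(PathBuf),
}

/// `indexed` maps path -> (size, mtime as unix seconds) from the database.
pub fn detect_stale(indexed: &HashMap<PathBuf, (u64, i64)>) -> Vec<StaleChange> {
    let mut changes = Vec::new();
    for (path, (size, mtime)) in indexed {
        match std::fs::metadata(path) {
            // Missing on disk: deleted (or moved; a hash pass disambiguates).
            Err(_) => changes.push(StaleChange::Deleted(path.clone())),
            Ok(meta) => {
                let fs_mtime = meta
                    .modified()
                    .ok()
                    .and_then(|t| t.duration_since(std::time::UNIX_EPOCH).ok())
                    .map(|d| d.as_secs() as i64)
                    .unwrap_or_default();
                if meta.len() != *size || fs_mtime != *mtime {
                    changes.push(StaleChange::Modified(path.clone()));
                }
            }
        }
    }
    changes
}
```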
@@ -2,7 +2,7 @@
 id: JOB-003
 title: Parallel Task Execution from Jobs
 status: To Do
-assignee: unassigned
+assignee: james
 parent: JOB-000
 priority: High
 tags: [jobs, task-system, performance, parallelism]
@@ -39,6 +39,7 @@ Tasks execute on multi-threaded worker pool
 ```
 
 **Why via Context, not Job storage?**
+
 - Jobs are serialized to database (dispatcher is not serializable)
 - JobExecutor already has task system access
 - Consistent with existing patterns (library, volume_manager, etc.)
@@ -47,9 +48,11 @@ Tasks execute on multi-threaded worker pool
 ## Implementation Phases
 
 ### Phase 1: Core Integration (JOB-003a)
+
 Enable jobs to access task dispatcher via context.
 
 **Changes:**
+
 1. Add `task_dispatcher` field to `JobExecutorState`
 2. Update `JobExecutor::new()` to accept dispatcher parameter
 3. Add `task_dispatcher` field to `JobContext`
@@ -58,19 +61,23 @@ Enable jobs to access task dispatcher via context.
 6. Update `#[derive(Job)]` macro if needed
 
 **Files:**
+
 - core/src/infra/job/executor.rs
 - core/src/infra/job/context.rs
 - core/src/infra/job/manager.rs
 
 **Acceptance Criteria:**
+
 - [ ] Jobs can call `ctx.task_dispatcher()` and get valid dispatcher
 - [ ] Integration test shows job spawning parallel tasks
 - [ ] No breaking changes to existing jobs
 
 ### Phase 2: FileCopy Proof of Concept (JOB-003b)
+
 Migrate FileCopyJob to use parallel execution.
 
 **Changes:**
+
 1. Create `CopyFileTask` implementing `Task<JobError>`
 2. Update `FileCopyJob::run()` to use `dispatcher.dispatch_many()`
 3. Implement progress aggregation from parallel tasks
@@ -78,10 +85,12 @@ Migrate FileCopyJob to use parallel execution.
 5. Handle partial failures gracefully
 
 **Files:**
+
 - core/src/ops/files/copy/job.rs
 - core/src/ops/files/copy/task.rs (new)
 
 **Acceptance Criteria:**
+
 - [ ] FileCopyJob spawns parallel copy tasks
 - [ ] Performance improvement: 4-8x faster for 100+ files
 - [ ] Job remains resumable after interruption
@@ -89,25 +98,31 @@ Migrate FileCopyJob to use parallel execution.
 - [ ] Progress reporting works correctly
 
 ### Phase 3: Documentation & Patterns
+
 Document the pattern for other developers.
 
 **Deliverables:**
+
 - [ ] Add parallel execution guide to job system docs
 - [ ] Update job implementation template
 - [ ] Code examples in developer documentation
 - [ ] Integration test demonstrating pattern
 
 ### Phase 4: Expand to Other Operations (Future)
+
 Apply pattern to other I/O-bound jobs:
+
 - [ ] Thumbnail generation (highly parallel)
 - [ ] Media metadata extraction
 - [ ] File deletion (batch operations)
 - [ ] Hash calculation (CPU-bound parallelism)
 
 ### Phase 5: Resource Management (Future)
+
 Add centralized resource limits to prevent system overload.
 
 **Features:**
+
 - Global resource pools (I/O, CPU, Network, DB)
 - `LimitedTaskDispatcher` wrapper with semaphores
 - Priority-aware resource allocation
@@ -206,11 +221,13 @@ impl Task<JobError> for CopyFileTask {
 ## Performance Expectations
 
 **File Copy (100 files, 1MB each, SSD):**
+
 - Sequential: 100 files × 20ms = 2000ms
 - Parallel (10 concurrent): 10 batches × 20ms = 200ms
 - **10x faster!**
 
 **Real-world (Mixed sizes, 10GB total):**
+
 - Sequential: ~102s
 - Parallel: ~12s
 - **8.5x faster!**
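Reviewer note: those numbers follow from bounding concurrency rather than copying one file at a time; 100 files at 10 concurrent copies is roughly 10 sequential batches. A sketch with tokio primitives standing in for the task system (the real dispatcher is Spacedrive's, not shown here):

```rust
// Hypothetical sketch: tokio semaphore in place of the LimitedTaskDispatcher.
use std::sync::Arc;
use tokio::sync::Semaphore;

async fn copy_all(
    pairs: Vec<(std::path::PathBuf, std::path::PathBuf)>,
) -> std::io::Result<()> {
    let limit = Arc::new(Semaphore::new(10)); // at most 10 copies in flight
    let mut handles = Vec::new();
    for (src, dst) in pairs {
        let permit = limit.clone().acquire_owned().await.expect("semaphore closed");
        handles.push(tokio::spawn(async move {
            let _permit = permit; // released when this copy finishes
            tokio::fs::copy(&src, &dst).await.map(|_| ())
        }));
    }
    // Surface the first failure; a real job would aggregate partial failures.
    for h in handles {
        h.await.expect("task panicked")?;
    }
    Ok(())
}
```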
@@ -1,12 +1,12 @@
 ---
 id: LOC-005
-title: 'Virtual Locations via Pure Hierarchical Model'
+title: "Virtual Locations via Pure Hierarchical Model"
 status: To Do
-assignee: unassigned
+assignee: james
 parent: LOC-000
 priority: High
 tags: [core, vdfs, database, refactor]
-whitepaper: 'Section 4.1.2, 4.3'
+whitepaper: "Section 4.1.2, 4.3"
 ---
 
 ## Description
@@ -2,7 +2,7 @@
 id: LSYNC-002
 title: Shared Metadata Sync (Albums, Tags) with HLC
 status: To Do
-assignee: unassigned
+assignee: james
 parent: LSYNC-000
 priority: High
 tags: [sync, metadata, albums, tags, hlc, shared-resources]
@@ -19,11 +19,13 @@ Implement synchronization for truly shared resources (Albums, Tags) using the HL
 ## Data Classification
 
 **Shared Resources** (this task):
+
 - Tags: Global tag definitions (no device owner)
 - Albums: Collections referencing entries from multiple devices
 - UserMetadata: When scoped to ContentIdentity (content-universal)
 
 **Device-Owned** (separate - state-based):
+
 - Locations: Owned by specific device
 - Entries: Owned via location's device
 - (Handled by state-based sync, not this task)
@@ -54,6 +56,7 @@ Implement synchronization for truly shared resources (Albums, Tags) using the HL
 ## Conflict Examples
 
 ### Tag Name Collision
+
 ```
 Device A: Creates tag "Vacation" → HLC(1000,A)
 Device B: Creates tag "Vacation" → HLC(1001,B)
@@ -64,6 +67,7 @@ Resolution: Deterministic UUID from name
 ```
 
 ### Album Concurrent Edits
+
 ```
 Device A: Adds entry-1 to "Summer" → HLC(1000,A)
 Device B: Adds entry-2 to "Summer" → HLC(1001,B)
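Reviewer note: the tag resolution above works because both devices can derive the same tag ID from the name alone, so concurrent creates converge without coordination. A sketch using UUIDv5; the namespace choice is illustrative:

```rust
// Hypothetical sketch: any fixed namespace works, as long as every device
// uses the same one. Normalization (lowercasing here) is also an assumption.
use uuid::Uuid;

fn tag_id(name: &str) -> Uuid {
    let namespace = Uuid::NAMESPACE_OID;
    Uuid::new_v5(&namespace, name.to_lowercase().as_bytes())
}

// tag_id("Vacation") is identical on Device A and Device B, so the two
// concurrently created "Vacation" tags merge into one record.
```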
@@ -2,7 +2,7 @@
 id: LSYNC-007
 title: Syncable Trait (Device Ownership Aware)
 status: Done
-assignee: unassigned
+assignee: james
 parent: LSYNC-000
 priority: High
 tags: [sync, trait, codegen, macro]
@@ -39,6 +39,7 @@ Create the `Syncable` trait that database models implement to enable automatic s
 ## Example Usage
 
 ### Device-Owned Resource
+
 ```rust
 impl Syncable for locations::Model {
     const SYNC_MODEL: &'static str = "location";
@@ -58,6 +59,7 @@ impl Syncable for locations::Model {
 ```
 
 ### Shared Resource
+
 ```rust
 impl Syncable for tags::Model {
     const SYNC_MODEL: &'static str = "tag";
@@ -2,7 +2,7 @@
 id: LSYNC-008
 title: Sync Log Schema (Per-Device, HLC-Based)
 status: Done
-assignee: unassigned
+assignee: james
 parent: LSYNC-000
 priority: High
 tags: [sync, database, schema, migration, hlc]
@@ -18,13 +18,13 @@ Create the `sync.db` schema - a per-device log of changes to truly shared resour
 
 ## Key Differences from Old Design
 
-| Aspect | Old (sync_log.db) | New (sync.db) |
-|--------|-------------------|-------------------------|
-| **Who has it** | Leader only | Every device |
-| **What's in it** | All changes | Only MY shared changes |
-| **Ordering** | Sequence numbers | HLC timestamps |
-| **Size** | Large (all history) | Small (pruned aggressively) |
-| **Purpose** | Source of truth | Pending changes queue |
+| Aspect           | Old (sync_log.db)   | New (sync.db)               |
+| ---------------- | ------------------- | --------------------------- |
+| **Who has it**   | Leader only         | Every device                |
+| **What's in it** | All changes         | Only MY shared changes      |
+| **Ordering**     | Sequence numbers    | HLC timestamps              |
+| **Size**         | Large (all history) | Small (pruned aggressively) |
+| **Purpose**      | Source of truth     | Pending changes queue       |
 
 ## Implementation Steps
 
@@ -67,6 +67,7 @@ CREATE INDEX idx_peer_acks_hlc ON peer_acks(last_acked_hlc);
 ## Database Location
 
 Each library has:
+
 ```
 Jamie's Library.sdlibrary/
 ├── database.db      ← Shared state (all devices)
@@ -154,11 +155,13 @@ async fn on_ack(peer_id: Uuid, up_to_hlc: HLC) {
 ## Migration from sync_log.db
 
 **Old structure**:
+
 - One `sync_log.db` on leader
 - Sequence-based
 - Never pruned
 
 **New structure**:
+
 - One `sync.db` per device
 - HLC-based
 - Aggressively pruned
@@ -2,7 +2,7 @@
 id: LSYNC-009
 title: Hybrid Logical Clock (HLC) Implementation
 status: Done
-assignee: unassigned
+assignee: james
 parent: LSYNC-000
 priority: High
 tags: [sync, hlc, distributed-systems, leaderless]
@@ -23,6 +23,7 @@ Implement Hybrid Logical Clocks (HLC) for ordering shared resource changes in a
 **New Solution**: Each device generates HLC independently → no bottleneck, works offline
 
 **Key Properties**:
+
 - Total ordering (any two HLCs comparable)
 - Causality tracking (if A→B then HLC(A) < HLC(B))
 - Distributed generation (no coordination needed)
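Reviewer note: a compact sketch of an HLC with exactly those properties. The field layout and merge rule follow the standard HLC algorithm, not necessarily Spacedrive's implementation:

```rust
// Hypothetical sketch: (wall_ms, counter, device) compared lexicographically
// gives a total order; the device id is the tie-breaker.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct Hlc {
    pub wall_ms: u64, // physical time component
    pub counter: u32, // logical counter for same-millisecond events
    pub device: u16,  // tie-breaker so any two HLCs are comparable
}

pub struct HlcGenerator {
    last: Hlc,
}

impl HlcGenerator {
    /// Issue a timestamp for a local event; no coordination required.
    pub fn now(&mut self, wall_ms: u64) -> Hlc {
        if wall_ms > self.last.wall_ms {
            self.last = Hlc { wall_ms, counter: 0, device: self.last.device };
        } else {
            self.last.counter += 1; // clock didn't advance: bump the counter
        }
        self.last
    }

    /// Merge a remote timestamp so causality (A→B ⇒ HLC(A) < HLC(B)) holds.
    pub fn observe(&mut self, remote: Hlc, wall_ms: u64) {
        let max_wall = wall_ms.max(remote.wall_ms).max(self.last.wall_ms);
        let counter = if max_wall == self.last.wall_ms && max_wall == remote.wall_ms {
            self.last.counter.max(remote.counter) + 1
        } else if max_wall == remote.wall_ms {
            remote.counter + 1
        } else if max_wall == self.last.wall_ms {
            self.last.counter + 1
        } else {
            0
        };
        self.last = Hlc { wall_ms: max_wall, counter, device: self.last.device };
    }
}
```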
@@ -111,11 +112,13 @@ impl HLCGenerator {
 ## Migration
 
 **Remove**:
+
 - `sync_leadership` field from devices table
 - `LeadershipManager` struct
 - `is_leader()` checks
 
 **Add**:
+
 - `HLC` type
 - `HLCGenerator` in SyncService
 - HLC column in `shared_changes` table
@@ -8,6 +8,7 @@ priority: High
 tags: [sync, replication, service, peer-to-peer, leaderless]
 depends_on: [LSYNC-006, LSYNC-014, LSYNC-015, LSYNC-016, LSYNC-013]
 design_doc: core/src/infra/sync/NEW_SYNC.md
+last_updated: 2025-10-14
 ---
 
 ## Description
@@ -141,60 +142,307 @@ impl SyncService {
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
### State-Based Sync
|
||||
### Service Lifecycle (BLOCKING)
|
||||
- [ ] PeerSync added to Services struct
|
||||
- [ ] Service starts when library opens
|
||||
- [ ] Service stops gracefully on library close
|
||||
- [ ] Flush pending changes on shutdown
|
||||
- [ ] Service config supports enable/disable
|
||||
|
||||
### State-Based Sync (Core)
|
||||
- [x] State changes broadcast to all peers ✅
|
||||
- [x] Received state applied idempotently ✅
|
||||
- [ ] Batch optimization (100ms window) (pending)
|
||||
- [ ] Incremental sync via timestamps (pending)
|
||||
- [x] No sync log for device-owned data ✅
|
||||
- [x] Parallel sends with timeout ✅
|
||||
- [x] Retry queue for failed sends ✅
|
||||
- [ ] Batch optimization (100ms window)
|
||||
- [ ] Incremental sync via timestamps
|
||||
|
||||
### Log-Based Sync
|
||||
### Log-Based Sync (Core)
|
||||
- [x] Shared changes written to per-device log ✅
|
||||
- [x] HLC generated for each change ✅
|
||||
- [x] Changes broadcast with HLC ✅
|
||||
- [x] Peers apply in HLC order ✅
|
||||
- [x] ACK mechanism works ✅
|
||||
- [ ] Log pruning keeps it small (<1000 entries) (partial - ACK tracking works, pruning implemented)
|
||||
- [x] Periodic log pruning background task ✅
|
||||
- [x] Log pruning keeps it small (<1000 entries) ✅
|
||||
|
||||
### Peer Management
|
||||
- [x] Works with any number of peers (no leader/follower) ✅
|
||||
- [ ] Offline peers handled (changes queue) (TODO comments added)
|
||||
- [ ] Reconnect triggers sync (pending)
|
||||
- [ ] New device backfill works (pending)
|
||||
### Connection Management (BLOCKING)
|
||||
- [ ] Track peer online/offline state
|
||||
- [ ] on_peer_connected() event handler
|
||||
- [ ] on_peer_disconnected() event handler
|
||||
- [ ] Queue changes for offline peers (persistent)
|
||||
- [ ] Detect stale connections
|
||||
|
||||
### Integration
|
||||
- [ ] Service starts when library opens (pending)
|
||||
- [ ] Integration tests validate peer-to-peer sync (pending)
|
||||
- [ ] Multi-peer scenario tested (3+ devices) (pending)
|
||||
- [ ] Conflict resolution via HLC verified (pending)
|
||||
### Startup/Reconnection Sync (BLOCKING)
|
||||
- [ ] Watermark tracking per peer
|
||||
- [ ] Persist watermarks to database
|
||||
- [ ] Compare watermarks on startup
|
||||
- [ ] Trigger catch-up if diverged
|
||||
- [ ] "Sync on reconnect" event handler
|
||||
- [ ] Incremental catch-up (not just full backfill)
|
||||
|
||||
### Backfill Protocol
|
||||
- [x] Backfill state machine (Uninitialized → Backfilling → CatchingUp → Ready) ✅
|
||||
- [x] Buffer queue for updates during backfill ✅
|
||||
- [x] transition_to_ready() processes buffer ✅
|
||||
- [ ] request_state_batch() wired to network
|
||||
- [ ] request_shared_changes() wired to network
|
||||
- [ ] Handle StateResponse messages
|
||||
- [ ] Handle SharedChangeResponse messages
|
||||
- [ ] Checkpoint persistence for crash recovery
|
||||
- [ ] Detect new device and trigger backfill
|
||||
- [ ] Peer selection logic
|
||||
|
||||
### Heartbeat & Monitoring
|
||||
- [x] Heartbeat message handler ✅
|
||||
- [ ] Periodic heartbeat sender
|
||||
- [ ] Health check metrics
|
||||
- [ ] Watermark exchange in heartbeat
|
||||
|
||||
### Integration Testing
|
||||
- [ ] Service lifecycle test
|
||||
- [ ] Two-peer state sync test
|
||||
- [ ] Conflict resolution via HLC test
|
||||
- [ ] Multi-peer scenario (3+ devices)
|
||||
- [ ] Offline peer handling test
|
||||
- [ ] Reconnection sync test
|
||||
- [ ] New device backfill test
|
||||
|
||||
## Implementation Progress (Oct 9, 2025)
|
||||
|
||||
Successfully implemented in `core/src/service/sync/peer.rs`:
|
||||
|
||||
**Broadcast Improvements**:
|
||||
- ✅ Parallel sends using `futures::join_all` (was sequential)
|
||||
- ✅ Proper error propagation (removed `.unwrap_or_default()`)
|
||||
- ✅ 30-second timeouts per send operation
|
||||
- ✅ Structured logging with tracing
|
||||
- ✅ Ready for retry queue integration (TODO comments added)
|
||||
- Parallel sends using `futures::join_all` (was sequential)
|
||||
- Proper error propagation (removed `.unwrap_or_default()`)
|
||||
- 30-second timeouts per send operation
|
||||
- Structured logging with tracing
|
||||
- Ready for retry queue integration (TODO comments added)
|
||||
|
||||
**State-Based Sync**:
|
||||
- ✅ `broadcast_state_change()` sends to all peers in parallel
|
||||
- ✅ `on_state_change_received()` applies via registry
|
||||
- ✅ Buffering during backfill phase
|
||||
- `broadcast_state_change()` sends to all peers in parallel
|
||||
- `on_state_change_received()` applies via registry
|
||||
- Buffering during backfill phase
|
||||
|
||||
**Log-Based Sync**:
|
||||
- ✅ `broadcast_shared_change()` generates HLC and sends to all peers
|
||||
- ✅ `on_shared_change_received()` applies with conflict resolution
|
||||
- ✅ `on_ack_received()` tracks peer ACKs for pruning
|
||||
- ✅ Peer log append before broadcast
|
||||
- `broadcast_shared_change()` generates HLC and sends to all peers
|
||||
- `on_shared_change_received()` applies with conflict resolution
|
||||
- `on_ack_received()` tracks peer ACKs for pruning
|
||||
- Peer log append before broadcast
|
||||
|
||||
**Next Steps**:
|
||||
- [ ] Implement backfill for new devices
|
||||
- [ ] Add retry queue for failed sends
|
||||
- [ ] Connection state tracking
|
||||
- [ ] Integration testing
|
||||
**Completion Estimate**: ~40% (core broadcast works, but lifecycle missing)
|
||||
|
||||
## Missing Lifecycle Components (Oct 14, 2025)
|
||||
|
||||
Detailed gap analysis to ensure nothing gets lost:
|
||||
|
||||
### CRITICAL (Blocking) ️
|
||||
|
||||
**1. Service Lifecycle Integration**
|
||||
- Location: Not in `core/src/service/mod.rs` Services struct
|
||||
- Problem: PeerSync.start() exists but never called during library open
|
||||
- Impact: Sync doesn't work at all - service never runs
|
||||
- Files: core/src/service/mod.rs:29-47, core/src/library/manager.rs
|
||||
|
||||
**2. Connection State Management**
|
||||
- Location: No peer connection tracking anywhere
|
||||
- Problem: Can't detect when peers go online/offline
|
||||
- Missing:
|
||||
- `on_peer_connected()` event handler
|
||||
- `on_peer_disconnected()` event handler (exists in backfill.rs:258 but never called)
|
||||
- Persistent peer state tracking (online/offline/last_seen)
|
||||
- Change queueing for offline peers (TODO comments only)
|
||||
- Impact: Can't handle offline peers or reconnections
|
||||
- Reference: peer.rs:447, 559 (TODO comments for retry queue)
|
||||
|
||||
**3. Startup Sync / Reconnection Logic**
|
||||
- Location: Missing entirely
|
||||
- Problem: No catch-up after device restarts or comes back online
|
||||
- Missing:
|
||||
- Watermark comparison on startup (state_watermark always None: peer.rs:119)
|
||||
- Incremental catch-up mechanism (only full backfill exists)
|
||||
- "Sync on reconnect" trigger
|
||||
- Impact: Devices drift out of sync after being offline
|
||||
- Reference: peer.rs:116-125 (get_watermarks always returns None)
|
||||
|
||||
### MAJOR (Functional Gaps)
|
||||
|
||||
**4. Backfill Network Integration**
|
||||
- Location: core/src/service/sync/backfill.rs
|
||||
- Problem: BackfillManager can't actually request data
|
||||
- Stubs:
|
||||
- `request_state_batch()` (line 220-238) - always returns empty
|
||||
- `request_shared_changes()` (line 240-255) - always returns empty
|
||||
- Missing:
|
||||
- Wire requests through NetworkTransport
|
||||
- Handle StateRequest/SharedChangeRequest responses
|
||||
- Resume from checkpoint on failure
|
||||
- Impact: New devices can't backfill initial state
|
||||
|
||||
**5. Watermark Tracking**
|
||||
- Location: peer.rs:116-125
|
||||
- Problem: Can't determine what needs syncing
|
||||
- Missing:
|
||||
- Track last synced timestamp per model type
|
||||
- Persist watermarks to database
|
||||
- Compare watermarks on reconnect
|
||||
- Impact: Can't do incremental sync, only full state transfer
|
||||
|
||||
**6. Batching Optimization**
|
||||
- Location: peer.rs (broadcast methods)
|
||||
- Problem: State changes sent one-at-a-time
|
||||
- Missing:
|
||||
- 100ms batching window (marked "pending" in task)
|
||||
- Coalescing multiple changes to same record
|
||||
- Batch send with StateBatch/SharedChangeBatch
|
||||
- Impact: High network overhead, chatty protocol
|
||||
|
||||
### MINOR (Nice to Have)
|
||||
|
||||
**7. Checkpoint Persistence**
|
||||
- Location: state.rs:186-195
|
||||
- Problem: Backfill can't resume after crash
|
||||
- Stub: save() and load() are no-ops
|
||||
- Impact: Must restart backfill from beginning if interrupted
|
||||
|
||||
**8. Initial Backfill Trigger**
|
||||
- Location: Missing entirely
|
||||
- Problem: No code to detect new device and start backfill
|
||||
- Questions:
|
||||
- When does device transition Uninitialized → Backfilling?
|
||||
- How are available peers discovered?
|
||||
- Who calls BackfillManager::start_backfill()?
|
||||
|
||||
**9. Heartbeat Health Monitoring**
|
||||
- Location: handler.rs:275-301 (receive only)
|
||||
- Problem: Heartbeat handler exists but no sender
|
||||
- Missing:
|
||||
- Periodic heartbeat background task
|
||||
- Stale connection detection
|
||||
- Health check metrics
|
||||
|
||||
**10. Incremental State Sync**
|
||||
- Location: protocol_handler.rs:116-160
|
||||
- Problem: Only supports full backfill
|
||||
- Note: query_state() supports `since` param but never used with actual timestamps
|
||||
|
||||
## Complete Lifecycle Flow
|
||||
|
||||
Here's the full sync lifecycle with gaps marked:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Phase 1: Library Open │
|
||||
│ PeerSync.start() never called │
|
||||
│ Not in Services struct │
|
||||
│ No integration with library manager │
|
||||
│ → BLOCKS: Everything else │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Phase 2: Initial Backfill (New Device) │
|
||||
│ No trigger to detect new device │
|
||||
│ request_state_batch() is stub │
|
||||
│ request_shared_changes() is stub │
|
||||
│ Checkpoint save/load not implemented │
|
||||
│ PeerSync.transition_to_ready() works │
|
||||
│ Buffer processing works │
|
||||
│ → BLOCKS: New devices joining library │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Phase 3: Ready State (Normal Operation) │
|
||||
│ Broadcast works (parallel sends, timeouts) │
|
||||
│ Receive works (via registry) │
|
||||
│ ACK mechanism works │
|
||||
│ Retry queue works (background processor) │
|
||||
│ Log pruning works (periodic background task) │
|
||||
│ No batching (100ms window) │
|
||||
│ State watermark always None │
|
||||
│ → WORKS: Happy path with 2+ always-online devices │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Phase 4: Peer Disconnection │
|
||||
│ No connection state tracking │
|
||||
│ on_peer_disconnected() exists but never called │
|
||||
│ Changes not queued persistently for offline peers │
|
||||
│ Retry queue handles temporary failures │
|
||||
│ → BLOCKS: Offline peer support │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Phase 5: Reconnection / Startup Sync │
|
||||
│ No watermark comparison │
|
||||
│ No incremental catch-up (only full backfill) │
|
||||
│ No "sync on reconnect" event handler │
|
||||
│ No divergence detection │
|
||||
│ → BLOCKS: Devices staying in sync after offline periods │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Phase 6: Library Close │
|
||||
│ No graceful shutdown in Services.stop_all() │
|
||||
│ No flush of pending changes │
|
||||
│ → MINOR: Might lose in-flight changes on shutdown │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Reality Check**: Implementation is ~75% complete for **Phase 3 only** (always-online happy path), but 0% complete for **Phases 1, 4, 5, 6** (lifecycle management).
|
||||
|
||||
## Updated Next Steps (Prioritized)
|
||||
|
||||
### Priority 1: Service Lifecycle (BLOCKING) ️
|
||||
1. Add PeerSync to Services struct (core/src/service/mod.rs)
|
||||
2. Create init_sync() and start_sync() methods
|
||||
3. Call PeerSync.start() during library open
|
||||
4. Add graceful shutdown to Services.stop_all()
|
||||
5. Add sync service to service config
|
||||
|
||||
**Unblocks**: Everything else - sync can actually run
|
||||
|
||||
### Priority 2: Connection State Management (BLOCKING) ️
|
||||
1. Add peer connection/disconnection event handlers
|
||||
2. Track peer online/offline state in database
|
||||
3. Implement change queueing for offline peers
|
||||
4. Call on_peer_disconnected() on network events
|
||||
|
||||
**Unblocks**: Offline peer support, reconnection

### Priority 3: Startup/Reconnection Sync (BLOCKING)
1. Implement watermark tracking per peer
2. Persist watermarks to database
3. Compare watermarks on startup/reconnect
4. Trigger incremental catch-up if diverged
5. Add "sync on reconnect" handler

**Unblocks**: Devices staying in sync after offline periods (see the watermark sketch below)
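A sketch of Priority 3's watermark comparison on reconnect, using timestamps as watermarks purely for illustration; all types and methods here are stubs, not the real API:

```rust
use chrono::{DateTime, Utc};
use uuid::Uuid;

// Illustrative stubs; the real handles live in the sync service.
pub struct PeerSync;
impl PeerSync {
    pub async fn fetch_remote_watermark(&self, _p: Uuid) -> anyhow::Result<DateTime<Utc>> {
        Ok(Utc::now())
    }
    pub async fn start_backfill(&self, _p: Uuid) -> anyhow::Result<()> { Ok(()) }
    pub async fn request_changes_since(&self, _p: Uuid, _t: DateTime<Utc>) -> anyhow::Result<()> {
        Ok(())
    }
}

pub struct Database;
impl Database {
    pub async fn load_watermark(&self, _p: Uuid) -> anyhow::Result<Option<DateTime<Utc>>> {
        Ok(None)
    }
}

pub async fn sync_on_reconnect(sync: &PeerSync, db: &Database, peer: Uuid) -> anyhow::Result<()> {
    let remote = sync.fetch_remote_watermark(peer).await?;
    match db.load_watermark(peer).await? {
        // Never synced with this peer before: fall back to full backfill.
        None => sync.start_backfill(peer).await?,
        // Behind the peer: request only the missing range (incremental catch-up).
        Some(local) if local < remote => sync.request_changes_since(peer, local).await?,
        // Already caught up.
        Some(_) => {}
    }
    Ok(())
}
```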

### Priority 4: Backfill Network Integration
1. Wire request_state_batch() through NetworkTransport
2. Wire request_shared_changes() through NetworkTransport
3. Handle response messages properly
4. Add checkpoint persistence for crash recovery
5. Implement peer selection logic trigger

**Unblocks**: New devices joining library

### Priority 5: Optimizations
1. Implement 100ms batching window
2. Add state watermark tracking (timestamps)
3. Implement incremental state sync
4. Add heartbeat sender background task
5. Add health check metrics

**Unblocks**: Better performance and monitoring

### Priority 6: Testing
1. Integration test: Service lifecycle
2. Integration test: Two-peer sync
3. Integration test: Offline peer handling
4. Integration test: Reconnection sync
5. Integration test: New device backfill

## Performance Benefits

@@ -2,7 +2,7 @@
id: LSYNC-011
title: Conflict Resolution (HLC-Based)
status: To Do
assignee: unassigned
assignee: james
parent: LSYNC-000
priority: Medium
tags: [sync, conflict-resolution, hlc, merge]
@@ -19,6 +19,7 @@ Implement conflict resolution for shared resources using Hybrid Logical Clock (H
## Conflict Types

### 1. No Conflict (Device-Owned Data)

```
Device A: Creates location "/Users/jamie/Photos"
Device B: Creates location "/home/jamie/Documents"
@@ -28,6 +29,7 @@ Strategy: Both apply (state-based)
```

### 2. Deterministic Merge (Tags)

```
Device A: Creates tag "Vacation" → HLC(1000,A)
Device B: Creates tag "Vacation" → HLC(1001,B)
@@ -39,6 +41,7 @@ Resolution: Deterministic UUID from name
```

### 3. Union Merge (Albums)

```
Device A: Adds entry-1 to album → HLC(1000,A)
Device B: Adds entry-2 to album → HLC(1001,B)
@@ -49,6 +52,7 @@ Resolution: Union merge
```

### 4. Last-Writer-Wins (UserMetadata)

```
Device A: Favorites photo → HLC(1000,A)
Device B: Un-favorites photo → HLC(1001,B)
```
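
A minimal sketch of the last-writer-wins rule above. The `(timestamp, device)` layout mirrors the `HLC(1001,B)` notation in the examples; the real HLC type in the codebase may differ:

```rust
// Derived ordering compares fields in declaration order: logical timestamp
// first, then the device id as a deterministic tie-breaker, so the ordering
// is total and every replica resolves the conflict the same way.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Hlc {
    pub timestamp: u64,
    pub device: uuid::Uuid,
}

/// Keep whichever write carries the greater HLC.
pub fn last_writer_wins<T>(a: (Hlc, T), b: (Hlc, T)) -> T {
    if a.0 >= b.0 { a.1 } else { b.1 }
}
```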

@@ -2,7 +2,7 @@
id: LSYNC-012
title: Bulk Entry Sync Optimization (State-Based)
status: To Do
assignee: unassigned
assignee: james
parent: LSYNC-000
priority: High
tags: [sync, indexing, bulk, performance, state-based]
@@ -19,10 +19,11 @@ Optimize entry (file/folder) synchronization for bulk indexing operations using
Device A indexes 1M files:

**Naive approach**: Send 1M individual `StateChange` messages
- ❌ ~500MB of messages
- ❌ 10+ minutes to broadcast
- ❌ Network congestion
- ❌ Memory pressure on receivers

- ~500MB of messages
- 10+ minutes to broadcast
- Network congestion
- Memory pressure on receivers

**This doesn't scale.**

@@ -49,10 +50,11 @@ for chunk in entries.chunks(1000) {
```

**Benefits**:
- ✅ Compressed batches (gzip)
- ✅ Streaming application on receiver
- ✅ Progress tracking
- ✅ Resumable if interrupted

- Compressed batches (gzip)
- Streaming application on receiver
- Progress tracking
- Resumable if interrupted
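
A sketch of the chunked, compressed broadcast implied by the `entries.chunks(1000)` context above. `StateBatch` and the entry shape are stand-ins, and `flate2`/`serde_json` are assumed dependencies:

```rust
use flate2::{write::GzEncoder, Compression};
use serde::Serialize;
use std::io::Write;

#[derive(Serialize)]
struct EntryState { id: u64, name: String, size: u64 } // illustrative fields

#[derive(Serialize)]
struct StateBatch<'a> {
    seq: usize,              // sequence number enables resume after interruption
    entries: &'a [EntryState],
}

/// Serialize entries in 1K chunks and gzip each chunk into its own frame,
/// so receivers can apply batches as a stream instead of one giant message.
fn encode_batches(entries: &[EntryState]) -> std::io::Result<Vec<Vec<u8>>> {
    let mut batches = Vec::new();
    for (seq, chunk) in entries.chunks(1000).enumerate() {
        let json = serde_json::to_vec(&StateBatch { seq, entries: chunk })?;
        let mut enc = GzEncoder::new(Vec::new(), Compression::default());
        enc.write_all(&json)?;
        batches.push(enc.finish()?);
    }
    Ok(batches)
}
```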

### Strategy 2: Bulk Notification + On-Demand Load

@@ -72,9 +74,10 @@ broadcast_to_peers(BulkIndexComplete {
```

**Benefits**:
- ✅ Tiny notification (~100 bytes)
- ✅ Peers control when to sync (bandwidth-aware)
- ✅ Can trigger local indexing if same filesystem

- Tiny notification (~100 bytes)
- Peers control when to sync (bandwidth-aware)
- Can trigger local indexing if same filesystem

### Strategy 3: Database-Level Replication (Initial Sync)

@@ -91,9 +94,10 @@ import_database_snapshot(snapshot).await?;
```

**Benefits**:
- ✅ Extremely fast (database native format)
- ✅ No serialization overhead
- ✅ Atomic import

- Extremely fast (database native format)
- No serialization overhead
- Atomic import

## Implementation

@@ -214,21 +218,21 @@ pub async fn export_device_snapshot(

## When to Use Each Strategy

| Scenario | Strategy | Reason |
|----------|----------|--------|
| New device joins | Database snapshot | Fast initial sync |
| Incremental sync (few changes) | Individual StateChange | Simple, immediate |
| Large batch (100-10K entries) | Batched StateBatch | Efficient, streaming |
| Massive index (100K+ entries) | Bulk notification + on-demand | Bandwidth-aware |
| Scenario | Strategy | Reason |
| ------------------------------ | ----------------------------- | -------------------- |
| New device joins | Database snapshot | Fast initial sync |
| Incremental sync (few changes) | Individual StateChange | Simple, immediate |
| Large batch (100-10K entries) | Batched StateBatch | Efficient, streaming |
| Massive index (100K+ entries) | Bulk notification + on-demand | Bandwidth-aware |
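
A sketch of dispatching on the table above; the thresholds are the ones the table names, while the enum and function are hypothetical:

```rust
pub enum SyncStrategy {
    Individual,       // simple, immediate
    Batched,          // efficient, streaming
    BulkNotification, // bandwidth-aware, peers pull on demand
    DatabaseSnapshot, // fast initial sync for new devices
}

pub fn choose_strategy(changed_entries: usize, new_device: bool) -> SyncStrategy {
    if new_device {
        return SyncStrategy::DatabaseSnapshot;
    }
    match changed_entries {
        0..=99 => SyncStrategy::Individual,
        100..=10_000 => SyncStrategy::Batched,
        _ => SyncStrategy::BulkNotification,
    }
}
```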

## Performance Comparison

| Method | 1M Entries | Network | Time | Memory |
|--------|------------|---------|------|--------|
| Individual messages | 500MB | High | 10 min | Low |
| Batched (1K chunks) | 50MB (compressed) | Medium | 2 min | Medium |
| Bulk notification + lazy | 1KB notification | Minimal | Async | Low |
| Database snapshot | 150MB | One-time | 30 sec | High |
| Method | 1M Entries | Network | Time | Memory |
| ------------------------ | ----------------- | -------- | ------ | ------ |
| Individual messages | 500MB | High | 10 min | Low |
| Batched (1K chunks) | 50MB (compressed) | Medium | 2 min | Medium |
| Bulk notification + lazy | 1KB notification | Minimal | Async | Low |
| Database snapshot | 150MB | One-time | 30 sec | High |

## Acceptance Criteria

@@ -293,6 +297,7 @@ impl SyncService {
**New approach**: Efficient state batching, no central log

**Changes needed**:

- Remove bulk operation sync log entries
- Add batching to state broadcasts
- Add database snapshot capability

@@ -2,7 +2,7 @@
id: NET-003
title: Spacedrop Protocol
status: To Do
assignee: unassigned
assignee: james
parent: NET-000
priority: High
tags: [networking, spacedrop, sharing, p2p]
@@ -21,6 +21,7 @@ Implement the Spacedrop protocol for ephemeral, secure file sharing between non-
4. Integrate the Spacedrop functionality with the UI/CLI.

## Acceptance Criteria
- [ ] Two non-paired devices can discover each other on a local network.
- [ ] A user can initiate a file transfer to another device using Spacedrop.
- [ ] The file transfer is secure and efficient.

- [ ] Two non-paired devices can discover each other on a local network.
- [ ] A user can initiate a file transfer to another device using Spacedrop.
- [ ] The file transfer is secure and efficient.

@@ -2,7 +2,7 @@
id: PLUG-000
title: "Epic: WASM Extension System"
status: In Progress
assignee: unassigned
assignee: james
priority: High
tags: [epic, plugins, wasm, extensibility, extensions]
whitepaper: Section 6.7
@@ -19,4 +19,4 @@ This epic covers the implementation of the WebAssembly (WASM) based extension sy

**In Progress:** WASM memory interaction helpers, complete host function bridge, and production extensions (Photos, Finance, Email).

**Reference:** See `core/src/infra/extension/README.md` and `extensions/README.md` for implementation details.
**Reference:** See `core/src/infra/extension/README.md` and `extensions/README.md` for implementation details.

@@ -2,7 +2,7 @@
id: PLUG-001
title: Integrate WASM Runtime
status: In Progress
assignee: unassigned
assignee: james
parent: PLUG-000
priority: High
tags: [plugins, wasm, runtime, wasmer]
@@ -33,12 +33,13 @@ Integrate a WebAssembly runtime (e.g., Wasmer or Wasmtime) into the Spacedrive c
- [ ] Add hot-reload support for development

## Acceptance Criteria
- [x] A WASM runtime is successfully integrated into the Spacedrive core.
- [x] The `PluginManager` can load and run a WASM module from a file.
- [x] The "hello world" plugin executes successfully and returns the expected output.

- [x] A WASM runtime is successfully integrated into the Spacedrive core.
- [x] The `PluginManager` can load and run a WASM module from a file.
- [x] The "hello world" plugin executes successfully and returns the expected output.

## Implementation Files

- core/src/infra/extension/manager.rs - PluginManager
- core/src/infra/extension/README.md - Architecture and status
- extensions/test-extension/ - Working test extension
- extensions/test-extension/ - Working test extension

@@ -2,7 +2,7 @@
id: PLUG-002
title: Define and Implement VDFS Plugin API Bridge
status: In Progress
assignee: unassigned
assignee: james
parent: PLUG-000
priority: High
tags: [plugins, wasm, api, vdfs, wire]
@@ -40,13 +40,14 @@ The key architectural insight: expose ONE generic `spacedrive_call()` function t
- [ ] End-to-end integration testing

## Acceptance Criteria
- [x] A clear API definition document is created.
- [ ] A plugin can call a host function to interact with the VDFS (e.g., read a file).
- [x] The API enforces the principle of least privilege.

- [x] A clear API definition document is created.
- [ ] A plugin can call a host function to interact with the VDFS (e.g., read a file).
- [x] The API enforces the principle of least privilege.

## Implementation Files

- core/src/infra/extension/host_functions.rs - Host function skeleton
- core/src/infra/extension/permissions.rs - Capability-based security
- core/src/infra/extension/README.md - Architecture documentation
- extensions/spacedrive-sdk/ - Guest-side SDK (referenced)
- extensions/spacedrive-sdk/ - Guest-side SDK (referenced)

@@ -2,7 +2,7 @@
id: PLUG-003
title: Develop Production Extension (Photos or Email)
status: To Do
assignee: unassigned
assignee: james
parent: PLUG-000
priority: High
tags: [plugins, wasm, extension, production]
@@ -16,6 +16,7 @@ related_tasks: [PLUG-001, PLUG-002]
Develop a production-ready extension as a real-world validation of the WASM extension system. This will serve as the canonical example for third-party developers and demonstrate the full capabilities of the extension platform.

**Candidates:**

- **Photos Extension**: AI-powered photo management (face recognition, places, moments) - Currently "In Progress"
- **Email Archive Extension**: Gmail/Outlook ingestion with OCR and classification - Design complete

@@ -39,20 +40,23 @@ Develop a production-ready extension as a real-world validation of the WASM exte
- Job dispatch and monitoring

## Acceptance Criteria
- [ ] Extension can be loaded and initialized by the `PluginManager`
- [ ] Extension creates and queries its own database tables
- [ ] Extension can dispatch jobs with full progress tracking
- [ ] Extension integrates with AI operations (OCR, classification, embeddings)
- [ ] Extension data is searchable and accessible in the library
- [ ] Extension can be distributed as a standalone .wasm + manifest.json

- [ ] Extension can be loaded and initialized by the `PluginManager`
- [ ] Extension creates and queries its own database tables
- [ ] Extension can dispatch jobs with full progress tracking
- [ ] Extension integrates with AI operations (OCR, classification, embeddings)
- [ ] Extension data is searchable and accessible in the library
- [ ] Extension can be distributed as a standalone .wasm + manifest.json

## Implementation Files

**Extension Code:**

- extensions/photos/ - Photos extension (in progress)
- extensions/finance/ - Finance extension (planned)

**Supporting Infrastructure:**

- core/src/ops/extension_test/ - Test operations
- workbench/core/extensions/ - Design documents

@@ -60,4 +64,4 @@ Develop a production-ready extension as a real-world validation of the WASM exte

- **Supersedes**: Original PLUG-003 (Twitter Archive) is outdated
- **Current Focus**: Photos extension is partially implemented
- **Reference**: See docs/extensions/ for SDK documentation and examples/
- **Reference**: See docs/extensions/ for SDK documentation and examples/

@@ -2,7 +2,7 @@
id: RES-000
title: "Epic: Resource Management & Mobile"
status: To Do
assignee: unassigned
assignee: james
priority: Medium
tags: [epic, core, performance, mobile]
whitepaper: Section 7

@@ -2,7 +2,7 @@
id: RES-001
title: Adaptive Resource Throttling
status: To Do
assignee: unassigned
assignee: james
parent: RES-000
priority: Medium
tags: [performance, mobile, core]

@@ -2,7 +2,7 @@
id: SEARCH-000
title: "Epic: Temporal-Semantic Search"
status: In Progress
assignee: unassigned
assignee: james
priority: High
tags: [epic, search, ai, fts]
whitepaper: Section 4.7

@@ -2,7 +2,7 @@
id: SEARCH-001
title: Asynchronous SearchJob
status: To Do
assignee: unassigned
assignee: james
parent: SEARCH-000
priority: High
tags: [search, jobs, async]
@@ -21,6 +21,7 @@ Implement an asynchronous `SearchJob` that can perform complex search queries in
4. The job should provide progress updates and return the search results upon completion.

## Acceptance Criteria
- [ ] A `SearchJob` can be dispatched to the `JobManager`.
- [ ] The job can execute a search query asynchronously.
- [ ] The job returns the correct search results.

- [ ] A `SearchJob` can be dispatched to the `JobManager`.
- [ ] The job can execute a search query asynchronously.
- [ ] The job returns the correct search results.

@@ -2,7 +2,7 @@
id: SEARCH-002
title: Two-Stage FTS5 + Semantic Re-ranking
status: To Do
assignee: unassigned
assignee: james
parent: SEARCH-000
priority: High
tags: [search, fts, semantic-search, ai]
@@ -21,6 +21,7 @@ Implement the two-stage search process that combines fast FTS5 keyword filtering
4. Develop the logic to combine the results from both stages into a single, relevance-ranked list.

## Acceptance Criteria
- [ ] The system can perform fast keyword searches using FTS5.
- [ ] The system can re-rank search results based on semantic similarity.
- [ ] The two-stage search process is implemented and functional.

- [ ] The system can perform fast keyword searches using FTS5.
- [ ] The system can re-rank search results based on semantic similarity.
- [ ] The two-stage search process is implemented and functional.
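
A sketch of the two-stage flow: FTS5 narrows candidates, then cosine similarity over stored embeddings re-ranks them. Table and column names, and the embedding source, are assumptions; `rusqlite` is an assumed dependency:

```rust
use rusqlite::Connection;

fn cosine(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let na = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let nb = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    if na == 0.0 || nb == 0.0 { 0.0 } else { dot / (na * nb) }
}

fn two_stage_search(
    conn: &Connection,
    query: &str,
    query_embedding: &[f32],
    embedding_for: impl Fn(i64) -> Vec<f32>, // lookup into the vector store
) -> rusqlite::Result<Vec<(i64, f32)>> {
    // Stage 1: fast FTS5 keyword filter, capped to a candidate set.
    let mut stmt = conn.prepare(
        "SELECT rowid FROM entries_fts WHERE entries_fts MATCH ?1 LIMIT 200",
    )?;
    let ids: Vec<i64> = stmt
        .query_map([query], |row| row.get(0))?
        .collect::<Result<_, _>>()?;

    // Stage 2: semantic re-ranking by cosine similarity, best first.
    let mut ranked: Vec<(i64, f32)> = ids
        .into_iter()
        .map(|id| (id, cosine(query_embedding, &embedding_for(id))))
        .collect();
    ranked.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
    Ok(ranked)
}
```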

@@ -2,7 +2,7 @@
id: SEARCH-003
title: Unified Vector Repositories
status: To Do
assignee: unassigned
assignee: james
parent: SEARCH-000
priority: High
tags: [search, vector-search, ai, repositories]
@@ -21,6 +21,7 @@ Implement the Unified Vector Repositories, a system for storing and querying vec
4. Integrate the `VectorRepository` with the `SearchJob` and the semantic re-ranking logic.

## Acceptance Criteria
- [ ] The system can generate and store vector embeddings for files.
- [ ] The `VectorRepository` can perform efficient vector similarity searches.
- [ ] The semantic search capabilities are integrated into the overall search system.

- [ ] The system can generate and store vector embeddings for files.
- [ ] The `VectorRepository` can perform efficient vector similarity searches.
- [ ] The semantic search capabilities are integrated into the overall search system.

@@ -2,7 +2,7 @@
id: SEC-002
title: SQLCipher for At-Rest Library Encryption
status: To Do
assignee: unassigned
assignee: james
parent: SEC-000
priority: High
tags: [security, database, core, encryption]

@@ -1,27 +0,0 @@
---
id: SEC-003
title: Cryptographically Chained Audit Log
status: To Do
assignee: unassigned
parent: SEC-000
priority: Medium
tags: [security, core, actions, audit]
whitepaper: Section 8.7
---

## Description

Enhance the `audit_log` table to be tamper-proof by implementing a cryptographic chain. Each new log entry must include a hash of the previous entry, making it computationally infeasible to alter the history without detection.

## Implementation Steps

1. Create a new database migration to add `previous_hash` and `entry_hash` columns to the `audit_log` table.
2. Modify the `ActionManager`'s audit logging logic to fetch the previous entry's hash before inserting a new record.
3. Implement the hashing function as described in the whitepaper to compute the new `entry_hash`.
4. Develop a background verification job that periodically scans the chain to ensure its integrity.

## Acceptance Criteria

- [ ] New `audit_log` records correctly store a hash of the preceding entry.
- [ ] The chain is verifiable from the first entry to the last.
- [ ] An integrity check function can detect a tampered log entry.
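
A minimal hash-chain sketch for the scheme described above, using SHA-256 via the `sha2` crate; the exact payload encoding hashed by the whitepaper is not shown here, so this layout is an assumption:

```rust
use sha2::{Digest, Sha256};

pub struct AuditEntry {
    pub payload: Vec<u8>,        // serialized action record (encoding assumed)
    pub previous_hash: [u8; 32], // [0u8; 32] by convention for the genesis entry
    pub entry_hash: [u8; 32],
}

/// entry_hash = SHA-256(previous_hash || payload), linking each record to
/// its predecessor.
pub fn chain_entry(previous_hash: [u8; 32], payload: Vec<u8>) -> AuditEntry {
    let mut hasher = Sha256::new();
    hasher.update(previous_hash);
    hasher.update(&payload);
    let entry_hash: [u8; 32] = hasher.finalize().into();
    AuditEntry { payload, previous_hash, entry_hash }
}

/// Walk the chain from the first entry; any tampered payload or broken link
/// changes the recomputed hash and is detected here.
pub fn verify_chain(entries: &[AuditEntry]) -> bool {
    entries.iter().all(|e| {
        chain_entry(e.previous_hash, e.payload.clone()).entry_hash == e.entry_hash
    })
}
```
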
@@ -2,7 +2,7 @@
id: SEC-004
title: Role-Based Access Control (RBAC) System
status: To Do
assignee: unassigned
assignee: james
parent: SEC-000
priority: High
tags: [security, enterprise, collaboration]

@@ -2,7 +2,7 @@
id: SEC-005
title: Secure Credential Vault
status: To Do
assignee: unassigned
assignee: james
parent: SEC-000
priority: High
tags: [security, credentials, vault, cloud]
@@ -21,6 +21,7 @@ Implement a secure credential vault for storing API keys and other secrets for c
4. Integrate the credential vault with the cloud volume system.

## Acceptance Criteria
- [ ] Credentials are encrypted at rest in the database.
- [ ] The master encryption key is stored securely.
- [ ] The system can retrieve credentials to authenticate with cloud services.

- [ ] Credentials are encrypted at rest in the database.
- [ ] The master encryption key is stored securely.
- [ ] The system can retrieve credentials to authenticate with cloud services.

@@ -2,7 +2,7 @@
id: SEC-006
title: Certificate Pinning
status: To Do
assignee: unassigned
assignee: james
parent: SEC-000
priority: Medium
tags: [security, networking, certificate-pinning]
@@ -21,6 +21,7 @@ Implement certificate pinning for all connections to third-party cloud storage p
4. Implement a mechanism for updating the pinned certificates.

## Acceptance Criteria
- [ ] The application rejects connections to servers with untrusted certificates.
- [ ] The application can successfully connect to trusted cloud storage providers.
- [ ] The list of pinned certificates can be updated without requiring a full application update.

- [ ] The application rejects connections to servers with untrusted certificates.
- [ ] The application can successfully connect to trusted cloud storage providers.
- [ ] The list of pinned certificates can be updated without requiring a full application update.

@@ -2,7 +2,7 @@
id: SEC-007
title: Per-Library Encryption Policies for Public Sharing
status: To Do
assignee: unassigned
assignee: james
parent: SEC-000
priority: High
tags: [security, encryption, sharing, policies]
@@ -21,7 +21,8 @@ Implement per-library encryption policies to enable secure public sharing of fil
4. For private libraries, use a user-specific key for encryption.

## Acceptance Criteria
- [ ] A user can create a library with a specific encryption policy.
- [ ] The encryption policy is enforced for all files in the library.
- [ ] Files in a publicly shared library can be decrypted by anyone with the public key.
- [ ] Files in a private library can only be decrypted by the owner.

- [ ] A user can create a library with a specific encryption policy.
- [ ] The encryption policy is enforced for all files in the library.
- [ ] Files in a publicly shared library can be decrypted by anyone with the public key.
- [ ] Files in a private library can only be decrypted by the owner.

@@ -2,7 +2,7 @@
id: VOL-001
title: Volume PhysicalClass and Location LogicalClass
status: To Do
assignee: unassigned
assignee: james
parent: VOL-000
priority: High
tags: [volume, storage-tiering, classification]
@@ -21,6 +21,7 @@ Implement the `PhysicalClass` for Volumes and `LogicalClass` for Locations. This
4. Implement the logic to allow users to assign a `LogicalClass` to each `Location`.

## Acceptance Criteria
- [ ] The `PhysicalClass` and `LogicalClass` enums are defined.
- [ ] The system can correctly identify the `PhysicalClass` of a Volume.
- [ ] A user can assign a `LogicalClass` to a Location.

- [ ] The `PhysicalClass` and `LogicalClass` enums are defined.
- [ ] The system can correctly identify the `PhysicalClass` of a Volume.
- [ ] A user can assign a `LogicalClass` to a Location.

@@ -2,7 +2,7 @@
id: VOL-002
title: Automatic Volume Classification
status: To Do
assignee: unassigned
assignee: james
parent: VOL-000
priority: Medium
tags: [volume, classification, automation]
@@ -21,6 +21,7 @@ Implement the logic for automatic classification of a Volume's `PhysicalClass`.
4. Provide a way for the user to override the automatic classification.

## Acceptance Criteria
- [ ] The system can run performance benchmarks on a Volume.
- [ ] The system can automatically assign a `PhysicalClass` to a Volume based on the benchmark results.
- [ ] The user can manually change the `PhysicalClass` of a Volume.

- [ ] The system can run performance benchmarks on a Volume.
- [ ] The system can automatically assign a `PhysicalClass` to a Volume based on the benchmark results.
- [ ] The user can manually change the `PhysicalClass` of a Volume.
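
A sketch of benchmark-driven classification for VOL-002; the speed thresholds and `PhysicalClass` variants are illustrative guesses, not values from the codebase:

```rust
#[derive(Debug, PartialEq)]
pub enum PhysicalClass {
    Nvme,
    Ssd,
    Hdd,
    Network,
}

/// Map measured read speed (cf. the volumes table's ReadSpeedMbps column)
/// to a physical class; the cutoffs here are placeholders a real
/// implementation would tune.
pub fn classify(read_speed_mbps: u32, is_network_drive: bool) -> PhysicalClass {
    if is_network_drive {
        return PhysicalClass::Network;
    }
    match read_speed_mbps {
        0..=200 => PhysicalClass::Hdd,
        201..=1500 => PhysicalClass::Ssd,
        _ => PhysicalClass::Nvme,
    }
}
```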

@@ -2,7 +2,7 @@
id: VOL-003
title: Intelligent Storage Tiering Warning System
status: To Do
assignee: unassigned
assignee: james
parent: VOL-000
priority: Medium
tags: [volume, storage-tiering, warnings, ai]
@@ -21,6 +21,7 @@ Implement the intelligent warning system that alerts the user when there is a mi
4. Integrate the warning system with the UI to display the warnings to the user.

## Acceptance Criteria
- [ ] The system can detect mismatches between `LogicalClass` and `PhysicalClass`.
- [ ] The system generates a clear and helpful warning message for the user.
- [ ] The user is notified of the warning through the UI.

- [ ] The system can detect mismatches between `LogicalClass` and `PhysicalClass`.
- [ ] The system generates a clear and helpful warning message for the user.
- [ ] The user is notified of the warning through the UI.

@@ -2,7 +2,7 @@
id: VOL-004
title: Remote Volume Indexing with OpenDAL
status: Done
assignee: unassigned
assignee: james
parent: VOL-000
priority: High
tags: [volume, remote-indexing, opendal, cloud]
@@ -38,13 +38,15 @@ Integrate the OpenDAL library to enable indexing of remote storage services like
- Secure credential storage in OS keyring

## Acceptance Criteria
- [x] A user can add a remote storage service as a new location in their library.
- [x] Files on the remote storage can be indexed and browsed like any other location.
- [x] The system can handle authentication and configuration for different remote services.

- [x] A user can add a remote storage service as a new location in their library.
- [x] Files on the remote storage can be indexed and browsed like any other location.
- [x] The system can handle authentication and configuration for different remote services.

## Currently Supported Services

**S3-Compatible (via OpenDAL):**

- Amazon S3
- Cloudflare R2
- MinIO (self-hosted)
@@ -53,6 +55,7 @@ Integrate the OpenDAL library to enable indexing of remote storage services like
- DigitalOcean Spaces

**Planned:**

- Google Drive (OAuth required)
- Dropbox (OAuth required)
- OneDrive (OAuth required)
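
A sketch of listing an S3-compatible bucket with OpenDAL, roughly what remote indexing does per object. Builder method names follow recent opendal releases and may differ in the version this repo pins; the endpoint and credentials are placeholders:

```rust
use opendal::{services::S3, Operator};

#[tokio::main]
async fn main() -> opendal::Result<()> {
    // Placeholder configuration; a real location would pull these from the
    // credential vault rather than hard-coding them.
    let builder = S3::default()
        .bucket("spacedrive-demo")
        .region("auto")
        .endpoint("https://example-endpoint") // e.g. an R2 or MinIO endpoint
        .access_key_id("ACCESS_KEY")
        .secret_access_key("SECRET_KEY");

    let op = Operator::new(builder)?.finish();

    // Enumerate objects; an indexer would turn each entry into an `entries`
    // row instead of printing it.
    for entry in op.list("/").await? {
        let meta = op.stat(entry.path()).await?;
        println!("{} ({} bytes)", entry.path(), meta.content_length());
    }
    Ok(())
}
```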

@@ -2,7 +2,7 @@
id: VOL-005
title: "Treat Connected iPhone as a Virtual Volume for Direct Import"
status: To Do
assignee: unassigned
assignee: james
parent: VOL-000
priority: High
tags: [feature, import, ios, volume, macos]
@@ -27,4 +27,4 @@ Implement a feature for the macOS build that detects a physically connected iPho
- [ ] When an iPhone is connected to a Mac, it appears as a new, browsable volume in Spacedrive.
- [ ] The contents of the iPhone's camera roll (photos and videos) are displayed correctly.
- [ ] A user can select items from the iPhone volume and import them into a standard Spacedrive Location.
- [ ] The import operation shows progress and is resumable, like other Spacedrive jobs.
- [ ] The import operation shows progress and is resumable, like other Spacedrive jobs.

@@ -2,7 +2,7 @@
id: VSS-003
title: "Reference Sidecars for Live Photo Support"
status: To Do
assignee: unassigned
assignee: james
parent: VSS-000
priority: Medium
tags: [vss, feature, photos, indexing]
@@ -25,4 +25,4 @@ whitepaper: "Section 4.1.4"
- [ ] The indexer correctly identifies Live Photo pairs (image + video).
- [ ] The video component is recorded as a "reference" sidecar for the image's content.
- [ ] The video file is NOT moved from its original location during indexing.
- [ ] A user can successfully trigger an action to convert the reference into a managed sidecar, moving the file into the library.
- [ ] A user can successfully trigger an action to convert the reference into a managed sidecar, moving the file into the library.

@@ -203,7 +203,7 @@ async fn check_launchd_status(instance: Option<String>) -> Result<()> {
    if !plist_path.exists() {
        println!("Daemon auto-start: Not installed");
        println!();
        println!("To install: sd daemon install");
        println!("To install: sd-cli daemon install");
        return Ok(());
    }

@@ -240,23 +240,229 @@ async fn check_launchd_status(instance: Option<String>) -> Result<()> {
    Ok(())
}

#[cfg(not(target_os = "macos"))]
#[cfg(target_os = "linux")]
async fn install_launchd_service(data_dir: PathBuf, instance: Option<String>) -> Result<()> {
    use std::fs;
    use std::io::Write;

    let home = dirs::home_dir().ok_or_else(|| anyhow::anyhow!("Could not determine home directory"))?;
    let systemd_user_dir = home.join(".config/systemd/user");

    // Create systemd user directory if it doesn't exist
    fs::create_dir_all(&systemd_user_dir)?;

    // Determine service filename based on instance
    let service_name = if let Some(ref inst) = instance {
        format!("spacedrive-daemon@{}.service", inst)
    } else {
        "spacedrive-daemon.service".to_string()
    };
    let service_path = systemd_user_dir.join(&service_name);

    // Get the current daemon binary path
    let current_exe = std::env::current_exe()?;
    let daemon_path = current_exe
        .parent()
        .ok_or_else(|| anyhow::anyhow!("Could not determine binary directory"))?
        .join("sd-daemon");

    if !daemon_path.exists() {
        return Err(anyhow::anyhow!(
            "Daemon binary not found at {}. Ensure both 'sd-cli' and 'sd-daemon' are in the same directory.",
            daemon_path.display()
        ));
    }

    // Build ExecStart command
    let mut exec_start = format!("{} --data-dir {}", daemon_path.display(), data_dir.display());
    if let Some(ref inst) = instance {
        exec_start.push_str(&format!(" --instance {}", inst));
    }

    // Build the systemd service unit file
    let service_content = format!(
        r#"[Unit]
Description=Spacedrive Daemon{}
After=network.target

[Service]
Type=simple
ExecStart={}
Restart=on-failure
RestartSec=5s
StandardOutput=journal
StandardError=journal

[Install]
WantedBy=default.target
"#,
        if let Some(ref inst) = instance {
            format!(" ({})", inst)
        } else {
            String::new()
        },
        exec_start
    );

    // Write the service file
    let mut file = fs::File::create(&service_path)?;
    file.write_all(service_content.as_bytes())?;

    println!("Created systemd service: {}", service_path.display());

    // Reload systemd daemon
    let _ = std::process::Command::new("systemctl")
        .arg("--user")
        .arg("daemon-reload")
        .output();

    // Enable the service
    let output = std::process::Command::new("systemctl")
        .arg("--user")
        .arg("enable")
        .arg(&service_name)
        .output()?;

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        return Err(anyhow::anyhow!("Failed to enable systemd service: {}", stderr));
    }

    // Start the service
    let output = std::process::Command::new("systemctl")
        .arg("--user")
        .arg("start")
        .arg(&service_name)
        .output()?;

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        return Err(anyhow::anyhow!("Failed to start systemd service: {}", stderr));
    }

    println!("Daemon installed and started successfully!");
    println!("The daemon will start automatically on login.");
    println!();
    println!("Useful commands:");
    println!("  systemctl --user status {}", service_name);
    println!("  journalctl --user -u {} -f", service_name);

    Ok(())
}

#[cfg(target_os = "linux")]
async fn uninstall_launchd_service(instance: Option<String>) -> Result<()> {
    use std::fs;

    let home = dirs::home_dir().ok_or_else(|| anyhow::anyhow!("Could not determine home directory"))?;
    let systemd_user_dir = home.join(".config/systemd/user");

    let service_name = if let Some(ref inst) = instance {
        format!("spacedrive-daemon@{}.service", inst)
    } else {
        "spacedrive-daemon.service".to_string()
    };
    let service_path = systemd_user_dir.join(&service_name);

    if !service_path.exists() {
        println!("Daemon auto-start is not installed.");
        return Ok(());
    }

    // Stop the service
    let _ = std::process::Command::new("systemctl")
        .arg("--user")
        .arg("stop")
        .arg(&service_name)
        .output();

    // Disable the service
    let _ = std::process::Command::new("systemctl")
        .arg("--user")
        .arg("disable")
        .arg(&service_name)
        .output();

    // Remove the service file
    fs::remove_file(&service_path)?;

    // Reload systemd daemon
    let _ = std::process::Command::new("systemctl")
        .arg("--user")
        .arg("daemon-reload")
        .output();

    println!("Daemon auto-start uninstalled successfully!");

    Ok(())
}

#[cfg(target_os = "linux")]
async fn check_launchd_status(instance: Option<String>) -> Result<()> {
    let home = dirs::home_dir().ok_or_else(|| anyhow::anyhow!("Could not determine home directory"))?;
    let systemd_user_dir = home.join(".config/systemd/user");

    let service_name = if let Some(ref inst) = instance {
        format!("spacedrive-daemon@{}.service", inst)
    } else {
        "spacedrive-daemon.service".to_string()
    };
    let service_path = systemd_user_dir.join(&service_name);

    if !service_path.exists() {
        println!("Daemon auto-start: Not installed");
        println!();
        println!("To install: sd-cli daemon install");
        return Ok(());
    }

    println!("Daemon auto-start: Installed");
    println!("Service file: {}", service_path.display());
    println!();

    // Check service status
    let output = std::process::Command::new("systemctl")
        .arg("--user")
        .arg("is-active")
        .arg(&service_name)
        .output()?;

    let status = String::from_utf8_lossy(&output.stdout).trim().to_string();

    match status.as_str() {
        "active" => println!("Service status: ● Active (running)"),
        "inactive" => println!("Service status: ○ Inactive (stopped)"),
        "failed" => println!("Service status: Failed"),
        _ => println!("Service status: {}", status),
    }

    println!();
    println!("Useful commands:");
    println!("  systemctl --user status {}", service_name);
    println!("  systemctl --user start {}", service_name);
    println!("  systemctl --user stop {}", service_name);
    println!("  journalctl --user -u {} -f", service_name);

    Ok(())
}

#[cfg(not(any(target_os = "macos", target_os = "linux")))]
async fn install_launchd_service(_data_dir: PathBuf, _instance: Option<String>) -> Result<()> {
    Err(anyhow::anyhow!(
        "Daemon auto-start is currently only supported on macOS.\nLinux systemd support coming soon."
        "Daemon auto-start is currently only supported on macOS and Linux."
    ))
}

#[cfg(not(target_os = "macos"))]
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
async fn uninstall_launchd_service(_instance: Option<String>) -> Result<()> {
    Err(anyhow::anyhow!(
        "Daemon auto-start is currently only supported on macOS.\nLinux systemd support coming soon."
        "Daemon auto-start is currently only supported on macOS and Linux."
    ))
}

#[cfg(not(target_os = "macos"))]
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
async fn check_launchd_status(_instance: Option<String>) -> Result<()> {
    Err(anyhow::anyhow!(
        "Daemon auto-start is currently only supported on macOS.\nLinux systemd support coming soon."
        "Daemon auto-start is currently only supported on macOS and Linux."
    ))
}

@@ -1,953 +0,0 @@
|
||||
//! Initial database schema for Spacedrive V2
|
||||
//!
|
||||
//! This migration creates all the tables needed for the pure hierarchical
|
||||
//! virtual location model with closure table support.
|
||||
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
// Create libraries table
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(Libraries::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(Libraries::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.auto_increment()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Libraries::Uuid)
|
||||
.uuid()
|
||||
.not_null()
|
||||
.unique_key(),
|
||||
)
|
||||
.col(ColumnDef::new(Libraries::Name).string().not_null())
|
||||
.col(ColumnDef::new(Libraries::DbVersion).integer().not_null())
|
||||
.col(ColumnDef::new(Libraries::SyncId).uuid())
|
||||
.col(
|
||||
ColumnDef::new(Libraries::CreatedAt)
|
||||
.timestamp_with_time_zone()
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Libraries::UpdatedAt)
|
||||
.timestamp_with_time_zone()
|
||||
.not_null(),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Create devices table
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(Devices::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(Devices::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.auto_increment()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(ColumnDef::new(Devices::Uuid).uuid().not_null().unique_key())
|
||||
.col(ColumnDef::new(Devices::Name).string().not_null())
|
||||
.col(ColumnDef::new(Devices::Os).string().not_null())
|
||||
.col(ColumnDef::new(Devices::OsVersion).string())
|
||||
.col(ColumnDef::new(Devices::HardwareModel).string())
|
||||
.col(ColumnDef::new(Devices::NetworkAddresses).json().not_null())
|
||||
.col(ColumnDef::new(Devices::IsOnline).boolean().not_null())
|
||||
.col(
|
||||
ColumnDef::new(Devices::LastSeenAt)
|
||||
.timestamp_with_time_zone()
|
||||
.not_null(),
|
||||
)
|
||||
.col(ColumnDef::new(Devices::Capabilities).json().not_null())
|
||||
.col(
|
||||
ColumnDef::new(Devices::CreatedAt)
|
||||
.timestamp_with_time_zone()
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Devices::UpdatedAt)
|
||||
.timestamp_with_time_zone()
|
||||
.not_null(),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Create user_metadata table (modern schema for semantic tagging)
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(UserMetadata::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(UserMetadata::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.auto_increment()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(UserMetadata::Uuid)
|
||||
.uuid()
|
||||
.not_null()
|
||||
.unique_key(),
|
||||
)
|
||||
// Exactly one of these is set - defines the scope
|
||||
.col(ColumnDef::new(UserMetadata::EntryUuid).uuid()) // File-specific metadata (higher priority)
|
||||
.col(ColumnDef::new(UserMetadata::ContentIdentityUuid).uuid()) // Content-universal metadata (lower priority)
|
||||
// All metadata types benefit from scope flexibility
|
||||
.col(ColumnDef::new(UserMetadata::Notes).text())
|
||||
.col(
|
||||
ColumnDef::new(UserMetadata::Favorite)
|
||||
.boolean()
|
||||
.default(false),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(UserMetadata::Hidden)
|
||||
.boolean()
|
||||
.default(false),
|
||||
)
|
||||
.col(ColumnDef::new(UserMetadata::CustomData).json().not_null()) // Arbitrary JSON data
|
||||
.col(
|
||||
ColumnDef::new(UserMetadata::CreatedAt)
|
||||
.timestamp_with_time_zone()
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(UserMetadata::UpdatedAt)
|
||||
.timestamp_with_time_zone()
|
||||
.not_null(),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Create mime_types table (lookup table)
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(MimeTypes::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(MimeTypes::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.auto_increment()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(ColumnDef::new(MimeTypes::Uuid).uuid().not_null())
|
||||
.col(
|
||||
ColumnDef::new(MimeTypes::MimeType)
|
||||
.string()
|
||||
.not_null()
|
||||
.unique_key(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(MimeTypes::CreatedAt)
|
||||
.timestamp_with_time_zone()
|
||||
.not_null(),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Create content_kinds table (lookup table)
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(ContentKinds::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(ContentKinds::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(ColumnDef::new(ContentKinds::Name).string().not_null())
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Create content_identities table
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(ContentIdentities::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(ContentIdentities::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.auto_increment()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(ContentIdentities::Uuid)
|
||||
.uuid()
|
||||
.not_null()
|
||||
.unique_key(),
|
||||
)
|
||||
.col(ColumnDef::new(ContentIdentities::IntegrityHash).string())
|
||||
.col(
|
||||
ColumnDef::new(ContentIdentities::ContentHash)
|
||||
.string()
|
||||
.not_null()
|
||||
.unique_key(),
|
||||
)
|
||||
.col(ColumnDef::new(ContentIdentities::MimeTypeId).integer())
|
||||
.col(
|
||||
ColumnDef::new(ContentIdentities::KindId)
|
||||
.integer()
|
||||
.not_null(),
|
||||
)
|
||||
.col(ColumnDef::new(ContentIdentities::TextContent).text())
|
||||
.col(
|
||||
ColumnDef::new(ContentIdentities::TotalSize)
|
||||
.big_integer()
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(ContentIdentities::EntryCount)
|
||||
.integer()
|
||||
.not_null()
|
||||
.default(1),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(ContentIdentities::FirstSeenAt)
|
||||
.timestamp_with_time_zone()
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(ContentIdentities::LastVerifiedAt)
|
||||
.timestamp_with_time_zone()
|
||||
.not_null(),
|
||||
)
|
||||
.foreign_key(
|
||||
ForeignKey::create()
|
||||
.from(ContentIdentities::Table, ContentIdentities::MimeTypeId)
|
||||
.to(MimeTypes::Table, MimeTypes::Id)
|
||||
.on_delete(ForeignKeyAction::SetNull),
|
||||
)
|
||||
.foreign_key(
|
||||
ForeignKey::create()
|
||||
.from(ContentIdentities::Table, ContentIdentities::KindId)
|
||||
.to(ContentKinds::Table, ContentKinds::Id)
|
||||
.on_delete(ForeignKeyAction::Restrict),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
        // Create entries table - This is the core of our hierarchical model
        manager
            .create_table(
                Table::create()
                    .table(Entries::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Entries::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Entries::Uuid).uuid())
                    .col(ColumnDef::new(Entries::Name).string().not_null())
                    .col(ColumnDef::new(Entries::Kind).integer().not_null())
                    .col(ColumnDef::new(Entries::Extension).string())
                    .col(ColumnDef::new(Entries::MetadataId).integer())
                    .col(ColumnDef::new(Entries::ContentId).integer())
                    .col(ColumnDef::new(Entries::Size).big_integer().not_null())
                    .col(
                        ColumnDef::new(Entries::AggregateSize)
                            .big_integer()
                            .not_null(),
                    )
                    .col(ColumnDef::new(Entries::ChildCount).integer().not_null())
                    .col(ColumnDef::new(Entries::FileCount).integer().not_null())
                    .col(
                        ColumnDef::new(Entries::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Entries::ModifiedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(ColumnDef::new(Entries::AccessedAt).timestamp_with_time_zone())
                    .col(ColumnDef::new(Entries::Permissions).string())
                    .col(ColumnDef::new(Entries::Inode).big_integer())
                    .col(ColumnDef::new(Entries::ParentId).integer())
                    .foreign_key(
                        ForeignKey::create()
                            .from(Entries::Table, Entries::MetadataId)
                            .to(UserMetadata::Table, UserMetadata::Id)
                            .on_delete(ForeignKeyAction::SetNull),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(Entries::Table, Entries::ContentId)
                            .to(ContentIdentities::Table, ContentIdentities::Id)
                            .on_delete(ForeignKeyAction::SetNull),
                    )
                    .to_owned(),
            )
            .await?;

        // Create entry_closure table for efficient hierarchical queries
        manager
            .create_table(
                Table::create()
                    .table(EntryClosure::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(EntryClosure::AncestorId)
                            .integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(EntryClosure::DescendantId)
                            .integer()
                            .not_null(),
                    )
                    .col(ColumnDef::new(EntryClosure::Depth).integer().not_null())
                    .primary_key(
                        Index::create()
                            .col(EntryClosure::AncestorId)
                            .col(EntryClosure::DescendantId),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(EntryClosure::Table, EntryClosure::AncestorId)
                            .to(Entries::Table, Entries::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(EntryClosure::Table, EntryClosure::DescendantId)
                            .to(Entries::Table, Entries::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create directory_paths table for caching directory paths
        manager
            .create_table(
                Table::create()
                    .table(DirectoryPaths::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(DirectoryPaths::EntryId)
                            .integer()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(DirectoryPaths::Path).text().not_null())
                    .foreign_key(
                        ForeignKey::create()
                            .from(DirectoryPaths::Table, DirectoryPaths::EntryId)
                            .to(Entries::Table, Entries::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create locations table - Now points to entries instead of storing paths
        manager
            .create_table(
                Table::create()
                    .table(Locations::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Locations::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(Locations::Uuid)
                            .uuid()
                            .not_null()
                            .unique_key(),
                    )
                    .col(ColumnDef::new(Locations::DeviceId).integer().not_null())
                    .col(ColumnDef::new(Locations::EntryId).integer().not_null())
                    .col(ColumnDef::new(Locations::Name).string())
                    .col(ColumnDef::new(Locations::IndexMode).string().not_null())
                    .col(ColumnDef::new(Locations::ScanState).string().not_null())
                    .col(ColumnDef::new(Locations::LastScanAt).timestamp_with_time_zone())
                    .col(ColumnDef::new(Locations::ErrorMessage).text())
                    .col(
                        ColumnDef::new(Locations::TotalFileCount)
                            .integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Locations::TotalByteSize)
                            .big_integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Locations::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Locations::UpdatedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(Locations::Table, Locations::DeviceId)
                            .to(Devices::Table, Devices::Id)
                            .on_delete(ForeignKeyAction::Restrict),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(Locations::Table, Locations::EntryId)
                            .to(Entries::Table, Entries::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create volumes table
        manager
            .create_table(
                Table::create()
                    .table(Volumes::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Volumes::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Volumes::Uuid).uuid().not_null())
                    .col(ColumnDef::new(Volumes::DeviceId).uuid().not_null())
                    .col(ColumnDef::new(Volumes::Fingerprint).string().not_null())
                    .col(ColumnDef::new(Volumes::MountPoint).string())
                    .col(ColumnDef::new(Volumes::TotalCapacity).big_integer())
                    .col(ColumnDef::new(Volumes::AvailableCapacity).big_integer())
                    .col(ColumnDef::new(Volumes::IsRemovable).boolean())
                    .col(ColumnDef::new(Volumes::IsEjectable).boolean())
                    .col(ColumnDef::new(Volumes::FileSystem).string())
                    .col(ColumnDef::new(Volumes::DisplayName).string())
                    .col(
                        ColumnDef::new(Volumes::TrackedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(
                        ColumnDef::new(Volumes::LastSeenAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(
                        ColumnDef::new(Volumes::IsOnline)
                            .boolean()
                            .not_null()
                            .default(true),
                    )
                    .col(ColumnDef::new(Volumes::ReadSpeedMbps).integer())
                    .col(ColumnDef::new(Volumes::WriteSpeedMbps).integer())
                    .col(ColumnDef::new(Volumes::LastSpeedTestAt).timestamp_with_time_zone())
                    .col(ColumnDef::new(Volumes::IsNetworkDrive).boolean())
                    .col(ColumnDef::new(Volumes::DeviceModel).string())
                    .col(ColumnDef::new(Volumes::VolumeType).string())
                    .col(ColumnDef::new(Volumes::IsUserVisible).boolean())
                    .col(ColumnDef::new(Volumes::AutoTrackEligible).boolean())
                    .col(
                        ColumnDef::new(Volumes::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(
                        ColumnDef::new(Volumes::UpdatedAt)
                            .timestamp_with_time_zone()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(Volumes::Table, Volumes::DeviceId)
                            .to(Devices::Table, Devices::Uuid)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create audit_log table
        manager
            .create_table(
                Table::create()
                    .table(AuditLog::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(AuditLog::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(AuditLog::Uuid)
                            .string()
                            .not_null()
                            .unique_key(),
                    )
                    .col(ColumnDef::new(AuditLog::ActionType).string().not_null())
                    .col(ColumnDef::new(AuditLog::ActorDeviceId).string().not_null())
                    .col(ColumnDef::new(AuditLog::Targets).string().not_null())
                    .col(ColumnDef::new(AuditLog::Status).string().not_null())
                    .col(ColumnDef::new(AuditLog::JobId).string())
                    .col(
                        ColumnDef::new(AuditLog::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(ColumnDef::new(AuditLog::CompletedAt).timestamp_with_time_zone())
                    .col(ColumnDef::new(AuditLog::ErrorMessage).string())
                    .col(ColumnDef::new(AuditLog::ResultPayload).string())
                    .to_owned(),
            )
            .await?;

        // Create sync_checkpoints table
        manager
            .create_table(
                Table::create()
                    .table(SyncCheckpoints::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(SyncCheckpoints::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(SyncCheckpoints::DeviceId)
                            .integer()
                            .not_null()
                            .unique_key(),
                    )
                    .col(
                        ColumnDef::new(SyncCheckpoints::LastSync)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(ColumnDef::new(SyncCheckpoints::SyncData).json())
                    .col(
                        ColumnDef::new(SyncCheckpoints::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(SyncCheckpoints::UpdatedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(SyncCheckpoints::Table, SyncCheckpoints::DeviceId)
                            .to(Devices::Table, Devices::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create indices for better query performance

        // Entry indices
        manager
            .create_index(
                Index::create()
                    .name("idx_entries_uuid")
                    .table(Entries::Table)
                    .col(Entries::Uuid)
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_entries_parent_id")
                    .table(Entries::Table)
                    .col(Entries::ParentId)
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_entries_kind")
                    .table(Entries::Table)
                    .col(Entries::Kind)
                    .to_owned(),
            )
            .await?;

        // Entry closure indices for efficient queries
        manager
            .create_index(
                Index::create()
                    .name("idx_entry_closure_descendant")
                    .table(EntryClosure::Table)
                    .col(EntryClosure::DescendantId)
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_entry_closure_ancestor_depth")
                    .table(EntryClosure::Table)
                    .col(EntryClosure::AncestorId)
                    .col(EntryClosure::Depth)
                    .to_owned(),
            )
            .await?;

        // Location indices
        manager
            .create_index(
                Index::create()
                    .name("idx_locations_entry_id")
                    .table(Locations::Table)
                    .col(Locations::EntryId)
                    .to_owned(),
            )
            .await?;

        // Content identity index
        manager
            .create_index(
                Index::create()
                    .name("idx_content_identities_content_hash")
                    .table(ContentIdentities::Table)
                    .col(ContentIdentities::ContentHash)
                    .to_owned(),
            )
            .await?;

        // Volume indices
        manager
            .create_index(
                Index::create()
                    .name("idx_volumes_device_fingerprint")
                    .table(Volumes::Table)
                    .col(Volumes::DeviceId)
                    .col(Volumes::Fingerprint)
                    .unique()
                    .to_owned(),
            )
            .await?;

        // Audit log indices
        manager
            .create_index(
                Index::create()
                    .name("idx_audit_log_action_type")
                    .table(AuditLog::Table)
                    .col(AuditLog::ActionType)
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_audit_log_actor_device")
                    .table(AuditLog::Table)
                    .col(AuditLog::ActorDeviceId)
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_audit_log_status")
                    .table(AuditLog::Table)
                    .col(AuditLog::Status)
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_audit_log_job_id")
                    .table(AuditLog::Table)
                    .col(AuditLog::JobId)
                    .to_owned(),
            )
            .await?;

        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Drop tables in reverse order of creation
        manager
            .drop_table(Table::drop().table(SyncCheckpoints::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(AuditLog::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Volumes::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Locations::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(DirectoryPaths::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(EntryClosure::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Entries::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(ContentIdentities::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(ContentKinds::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(MimeTypes::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(UserMetadata::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Devices::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Libraries::Table).to_owned())
            .await?;

        Ok(())
    }
}

// Table identifiers
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum Libraries {
|
||||
Table,
|
||||
Id,
|
||||
Uuid,
|
||||
Name,
|
||||
DbVersion,
|
||||
SyncId,
|
||||
CreatedAt,
|
||||
UpdatedAt,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum Devices {
|
||||
Table,
|
||||
Id,
|
||||
Uuid,
|
||||
Name,
|
||||
Os,
|
||||
OsVersion,
|
||||
HardwareModel,
|
||||
NetworkAddresses,
|
||||
IsOnline,
|
||||
LastSeenAt,
|
||||
Capabilities,
|
||||
CreatedAt,
|
||||
UpdatedAt,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum MimeTypes {
|
||||
Table,
|
||||
Id,
|
||||
Uuid,
|
||||
MimeType,
|
||||
CreatedAt,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum ContentKinds {
|
||||
Table,
|
||||
Id,
|
||||
Name,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum UserMetadata {
|
||||
Table,
|
||||
Id,
|
||||
Uuid,
|
||||
EntryUuid,
|
||||
ContentIdentityUuid,
|
||||
Notes,
|
||||
Favorite,
|
||||
Hidden,
|
||||
CustomData,
|
||||
CreatedAt,
|
||||
UpdatedAt,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum ContentIdentities {
|
||||
Table,
|
||||
Id,
|
||||
Uuid,
|
||||
IntegrityHash,
|
||||
ContentHash,
|
||||
MimeTypeId,
|
||||
KindId,
|
||||
TextContent,
|
||||
TotalSize,
|
||||
EntryCount,
|
||||
FirstSeenAt,
|
||||
LastVerifiedAt,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum Entries {
|
||||
Table,
|
||||
Id,
|
||||
Uuid,
|
||||
Name,
|
||||
Kind,
|
||||
Extension,
|
||||
MetadataId,
|
||||
ContentId,
|
||||
Size,
|
||||
AggregateSize,
|
||||
ChildCount,
|
||||
FileCount,
|
||||
CreatedAt,
|
||||
ModifiedAt,
|
||||
AccessedAt,
|
||||
Permissions,
|
||||
Inode,
|
||||
ParentId,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum EntryClosure {
|
||||
Table,
|
||||
AncestorId,
|
||||
DescendantId,
|
||||
Depth,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum DirectoryPaths {
|
||||
Table,
|
||||
EntryId,
|
||||
Path,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum Locations {
|
||||
Table,
|
||||
Id,
|
||||
Uuid,
|
||||
DeviceId,
|
||||
EntryId,
|
||||
Name,
|
||||
IndexMode,
|
||||
ScanState,
|
||||
LastScanAt,
|
||||
ErrorMessage,
|
||||
TotalFileCount,
|
||||
TotalByteSize,
|
||||
CreatedAt,
|
||||
UpdatedAt,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum Volumes {
|
||||
Table,
|
||||
Id,
|
||||
Uuid,
|
||||
DeviceId,
|
||||
Fingerprint,
|
||||
DisplayName,
|
||||
MountPoint,
|
||||
TotalCapacity,
|
||||
AvailableCapacity,
|
||||
IsRemovable,
|
||||
IsEjectable,
|
||||
FileSystem,
|
||||
TrackedAt,
|
||||
LastSeenAt,
|
||||
IsOnline,
|
||||
ReadSpeedMbps,
|
||||
WriteSpeedMbps,
|
||||
LastSpeedTestAt,
|
||||
IsNetworkDrive,
|
||||
DeviceModel,
|
||||
VolumeType,
|
||||
IsUserVisible,
|
||||
AutoTrackEligible,
|
||||
CreatedAt,
|
||||
UpdatedAt,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum AuditLog {
|
||||
Table,
|
||||
Id,
|
||||
Uuid,
|
||||
ActionType,
|
||||
ActorDeviceId,
|
||||
Targets,
|
||||
Status,
|
||||
JobId,
|
||||
CreatedAt,
|
||||
CompletedAt,
|
||||
ErrorMessage,
|
||||
ResultPayload,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum SyncCheckpoints {
|
||||
Table,
|
||||
Id,
|
||||
DeviceId,
|
||||
LastSync,
|
||||
SyncData,
|
||||
CreatedAt,
|
||||
UpdatedAt,
|
||||
}
|
||||
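
As a sketch of what the entry_closure table and its two indices buy (the connection handle, backend choice, and function name here are illustrative, not part of this commit):

use sea_orm::{ConnectionTrait, DatabaseBackend, DbErr, Statement};

// Fetch every descendant of a directory entry in one indexed lookup,
// instead of walking parent_id links row by row.
async fn subtree_entry_ids(
    db: &sea_orm::DatabaseConnection,
    root_id: i32,
) -> Result<Vec<i32>, DbErr> {
    let rows = db
        .query_all(Statement::from_sql_and_values(
            DatabaseBackend::Sqlite,
            "SELECT descendant_id FROM entry_closure WHERE ancestor_id = ? AND depth > 0",
            [root_id.into()],
        ))
        .await?;
    rows.iter().map(|row| row.try_get("", "descendant_id")).collect()
}
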
@@ -1,53 +0,0 @@
//! Populate lookup tables with initial data

use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Populate content_kinds table
        let insert_kinds = Query::insert()
            .into_table(ContentKinds::Table)
            .columns([ContentKinds::Id, ContentKinds::Name])
            .values_panic([0.into(), "unknown".into()])
            .values_panic([1.into(), "image".into()])
            .values_panic([2.into(), "video".into()])
            .values_panic([3.into(), "audio".into()])
            .values_panic([4.into(), "document".into()])
            .values_panic([5.into(), "archive".into()])
            .values_panic([6.into(), "code".into()])
            .values_panic([7.into(), "text".into()])
            .values_panic([8.into(), "database".into()])
            .values_panic([9.into(), "book".into()])
            .values_panic([10.into(), "font".into()])
            .values_panic([11.into(), "mesh".into()])
            .values_panic([12.into(), "config".into()])
            .values_panic([13.into(), "encrypted".into()])
            .values_panic([14.into(), "key".into()])
            .values_panic([15.into(), "executable".into()])
            .values_panic([16.into(), "binary".into()])
            .to_owned();

        manager.exec_stmt(insert_kinds).await?;

        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Delete all content kinds
        let delete = Query::delete().from_table(ContentKinds::Table).to_owned();
        manager.exec_stmt(delete).await?;

        Ok(())
    }
}

#[derive(DeriveIden)]
enum ContentKinds {
    Table,
    Id,
    Name,
}
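
The numeric ids written above act as a stable enum; resolving them back to names is a plain join. A minimal sketch, assuming a `db` connection handle:

use sea_orm::{ConnectionTrait, DatabaseBackend, Statement};

// List each content identity together with its human-readable kind.
let rows = db
    .query_all(Statement::from_string(
        DatabaseBackend::Sqlite,
        "SELECT ci.uuid, ck.name AS kind
         FROM content_identities ci
         JOIN content_kinds ck ON ck.id = ci.kind_id"
            .to_owned(),
    ))
    .await?;
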
@@ -1,156 +0,0 @@
use sea_orm_migration::prelude::*;

pub struct Migration;

impl MigrationName for Migration {
    fn name(&self) -> &str {
        "m20240107_000001_create_collections"
    }
}

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Create collections table
        manager
            .create_table(
                Table::create()
                    .table(Collection::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Collection::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(Collection::Uuid)
                            .uuid()
                            .not_null()
                            .unique_key(),
                    )
                    .col(ColumnDef::new(Collection::Name).string().not_null())
                    .col(ColumnDef::new(Collection::Description).text().null())
                    .col(
                        ColumnDef::new(Collection::CreatedAt)
                            .timestamp()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(
                        ColumnDef::new(Collection::UpdatedAt)
                            .timestamp()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .to_owned(),
            )
            .await?;

        // Create collection_entries junction table
        manager
            .create_table(
                Table::create()
                    .table(CollectionEntry::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(CollectionEntry::CollectionId)
                            .integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(CollectionEntry::EntryId)
                            .integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(CollectionEntry::AddedAt)
                            .timestamp()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .primary_key(
                        Index::create()
                            .col(CollectionEntry::CollectionId)
                            .col(CollectionEntry::EntryId),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk_collection_entry_collection")
                            .from(CollectionEntry::Table, CollectionEntry::CollectionId)
                            .to(Collection::Table, Collection::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk_collection_entry_entry")
                            .from(CollectionEntry::Table, CollectionEntry::EntryId)
                            .to(Entry::Table, Entry::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create indexes
        manager
            .create_index(
                Index::create()
                    .name("idx_collection_name")
                    .table(Collection::Table)
                    .col(Collection::Name)
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_collection_entry_entry_id")
                    .table(CollectionEntry::Table)
                    .col(CollectionEntry::EntryId)
                    .to_owned(),
            )
            .await?;

        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(CollectionEntry::Table).to_owned())
            .await?;

        manager
            .drop_table(Table::drop().table(Collection::Table).to_owned())
            .await?;

        Ok(())
    }
}

#[derive(Iden)]
enum Collection {
    Table,
    Id,
    Uuid,
    Name,
    Description,
    CreatedAt,
    UpdatedAt,
}

#[derive(Iden)]
enum CollectionEntry {
    Table,
    CollectionId,
    EntryId,
    AddedAt,
}

#[derive(Iden)]
enum Entry {
    Table,
    Id,
}
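
Membership in a collection is one row in the junction table, and the composite primary key makes duplicates impossible. Inside a migration-style write path it would look roughly like this (collection_id and entry_id are placeholders; AddedAt fills in via its default):

let add_entry = Query::insert()
    .into_table(CollectionEntry::Table)
    .columns([CollectionEntry::CollectionId, CollectionEntry::EntryId])
    .values_panic([collection_id.into(), entry_id.into()])
    .to_owned();
manager.exec_stmt(add_entry).await?;
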
@@ -1,248 +0,0 @@
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Create sidecars table
        manager
            .create_table(
                Table::create()
                    .table(Sidecar::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Sidecar::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Sidecar::ContentUuid).uuid().not_null())
                    .col(ColumnDef::new(Sidecar::Kind).string().not_null())
                    .col(ColumnDef::new(Sidecar::Variant).string().not_null())
                    .col(ColumnDef::new(Sidecar::Format).string().not_null())
                    .col(ColumnDef::new(Sidecar::RelPath).string().not_null())
                    .col(ColumnDef::new(Sidecar::SourceEntryId).integer().null())
                    .col(ColumnDef::new(Sidecar::Size).big_integer().not_null())
                    .col(ColumnDef::new(Sidecar::Checksum).string().null())
                    .col(
                        ColumnDef::new(Sidecar::Status)
                            .string()
                            .not_null()
                            .default("pending"),
                    )
                    .col(ColumnDef::new(Sidecar::Source).string().null())
                    .col(
                        ColumnDef::new(Sidecar::Version)
                            .integer()
                            .not_null()
                            .default(1),
                    )
                    .col(
                        ColumnDef::new(Sidecar::CreatedAt)
                            .timestamp()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .col(
                        ColumnDef::new(Sidecar::UpdatedAt)
                            .timestamp()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk_sidecar_content")
                            .from(Sidecar::Table, Sidecar::ContentUuid)
                            .to(ContentIdentities::Table, ContentIdentities::Uuid)
                            .on_delete(ForeignKeyAction::Cascade)
                            .on_update(ForeignKeyAction::Cascade),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk_sidecar_source_entry")
                            .from(Sidecar::Table, Sidecar::SourceEntryId)
                            .to(Entries::Table, Entries::Id)
                            .on_delete(ForeignKeyAction::SetNull)
                            .on_update(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create unique index on (content_uuid, kind, variant)
        manager
            .create_index(
                Index::create()
                    .if_not_exists()
                    .name("idx_sidecar_unique")
                    .table(Sidecar::Table)
                    .col(Sidecar::ContentUuid)
                    .col(Sidecar::Kind)
                    .col(Sidecar::Variant)
                    .unique()
                    .to_owned(),
            )
            .await?;

        // Create sidecar_availability table
        manager
            .create_table(
                Table::create()
                    .table(SidecarAvailability::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(SidecarAvailability::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(SidecarAvailability::ContentUuid)
                            .uuid()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(SidecarAvailability::Kind)
                            .string()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(SidecarAvailability::Variant)
                            .string()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(SidecarAvailability::DeviceUuid)
                            .uuid()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(SidecarAvailability::Has)
                            .boolean()
                            .not_null()
                            .default(false),
                    )
                    .col(
                        ColumnDef::new(SidecarAvailability::Size)
                            .big_integer()
                            .null(),
                    )
                    .col(
                        ColumnDef::new(SidecarAvailability::Checksum)
                            .string()
                            .null(),
                    )
                    .col(
                        ColumnDef::new(SidecarAvailability::LastSeenAt)
                            .timestamp()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk_sidecar_availability_content")
                            .from(SidecarAvailability::Table, SidecarAvailability::ContentUuid)
                            .to(ContentIdentities::Table, ContentIdentities::Uuid)
                            .on_delete(ForeignKeyAction::Cascade)
                            .on_update(ForeignKeyAction::Cascade),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk_sidecar_availability_device")
                            .from(SidecarAvailability::Table, SidecarAvailability::DeviceUuid)
                            .to(Devices::Table, Devices::Uuid)
                            .on_delete(ForeignKeyAction::Cascade)
                            .on_update(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create unique index on (content_uuid, kind, variant, device_uuid)
        manager
            .create_index(
                Index::create()
                    .if_not_exists()
                    .name("idx_sidecar_availability_unique")
                    .table(SidecarAvailability::Table)
                    .col(SidecarAvailability::ContentUuid)
                    .col(SidecarAvailability::Kind)
                    .col(SidecarAvailability::Variant)
                    .col(SidecarAvailability::DeviceUuid)
                    .unique()
                    .to_owned(),
            )
            .await?;

        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Drop sidecar_availability table
        manager
            .drop_table(Table::drop().table(SidecarAvailability::Table).to_owned())
            .await?;

        // Drop sidecars table
        manager
            .drop_table(Table::drop().table(Sidecar::Table).to_owned())
            .await?;

        Ok(())
    }
}

#[derive(Iden)]
enum Sidecar {
    Table,
    Id,
    ContentUuid,
    Kind,
    Variant,
    Format,
    RelPath,
    SourceEntryId,
    Size,
    Checksum,
    Status,
    Source,
    Version,
    CreatedAt,
    UpdatedAt,
}

#[derive(Iden)]
enum SidecarAvailability {
    Table,
    Id,
    ContentUuid,
    Kind,
    Variant,
    DeviceUuid,
    Has,
    Size,
    Checksum,
    LastSeenAt,
}

#[derive(Iden)]
enum ContentIdentities {
    Table,
    Uuid,
}

#[derive(Iden)]
enum Devices {
    Table,
    Uuid,
}

#[derive(Iden)]
enum Entries {
    Table,
    Id,
}
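
sidecar_availability is effectively a per-device presence map. Recording that the local device now holds a generated thumbnail is a single insert; the identifiers below are placeholders, and a real write path would likely upsert against idx_sidecar_availability_unique rather than insert blindly:

let mark_available = Query::insert()
    .into_table(SidecarAvailability::Table)
    .columns([
        SidecarAvailability::ContentUuid,
        SidecarAvailability::Kind,
        SidecarAvailability::Variant,
        SidecarAvailability::DeviceUuid,
        SidecarAvailability::Has,
    ])
    .values_panic([
        content_uuid.into(),
        "thumbnail".into(),
        "grid@2x".into(),
        device_uuid.into(),
        true.into(),
    ])
    .to_owned();
manager.exec_stmt(mark_available).await?;
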
@@ -1,194 +0,0 @@
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // For SQLite, we can't easily alter columns, so we'll just add the UUID column
        // if the table exists with the old schema

        // Try to add UUID column to existing table
        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(
                        ColumnDef::new(Volumes::Uuid)
                            .string() // SQLite doesn't have native UUID type
                            .not_null()
                            .default(""), // Will be populated later
                    )
                    .to_owned(),
            )
            .await;

        // Add other missing columns one by one (SQLite limitation)
        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(ColumnDef::new(Volumes::Fingerprint).string())
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(ColumnDef::new(Volumes::DisplayName).string())
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(
                        ColumnDef::new(Volumes::TrackedAt)
                            .timestamp()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(ColumnDef::new(Volumes::LastSpeedTestAt).timestamp())
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(ColumnDef::new(Volumes::ReadSpeedMbps).integer())
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(ColumnDef::new(Volumes::WriteSpeedMbps).integer())
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(
                        ColumnDef::new(Volumes::IsOnline).boolean().default(true),
                    )
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(ColumnDef::new(Volumes::IsNetworkDrive).boolean())
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(ColumnDef::new(Volumes::DeviceModel).string())
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(ColumnDef::new(Volumes::VolumeType).string())
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(ColumnDef::new(Volumes::IsUserVisible).boolean())
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(ColumnDef::new(Volumes::AutoTrackEligible).boolean())
                    .to_owned(),
            )
            .await;

        let _ = manager
            .alter_table(
                Table::alter()
                    .table(Volumes::Table)
                    .add_column_if_not_exists(
                        ColumnDef::new(Volumes::LastSeenAt)
                            .timestamp()
                            .not_null()
                            .default(Expr::current_timestamp()),
                    )
                    .to_owned(),
            )
            .await;

        Ok(())
    }

    async fn down(&self, _manager: &SchemaManager) -> Result<(), DbErr> {
        // Remove added columns
        // Note: SQLite doesn't support dropping columns easily
        Ok(())
    }
}

#[derive(DeriveIden)]
enum Volumes {
    Table,
    Id,
    Uuid,
    DeviceId,
    Fingerprint,
    DisplayName,
    MountPoint,
    TotalCapacity,
    AvailableCapacity,
    ReadSpeedMbps,
    WriteSpeedMbps,
    IsRemovable,
    IsEjectable,
    IsOnline,
    IsNetworkDrive,
    FileSystemType,
    DeviceModel,
    VolumeType,
    IsUserVisible,
    AutoTrackEligible,
    TrackedAt,
    LastSeenAt,
    LastSpeedTestAt,
    CreatedAt,
    UpdatedAt,
}
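
The `let _ = …` pattern above swallows every error, not only "column already exists". A more surgical variant probes the schema first; a sketch under the assumption of a SQLite backend (the helper name is invented for illustration):

use sea_orm::{ConnectionTrait, DatabaseBackend, DbErr, Statement};

// True if `table` already has `column`, via SQLite's table_info pragma.
async fn has_column(
    db: &impl ConnectionTrait,
    table: &str,
    column: &str,
) -> Result<bool, DbErr> {
    let rows = db
        .query_all(Statement::from_string(
            DatabaseBackend::Sqlite,
            format!("PRAGMA table_info({table})"),
        ))
        .await?;
    Ok(rows.iter().any(|row| {
        row.try_get::<String>("", "name")
            .map(|name| name == column)
            .unwrap_or(false)
    }))
}
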
@@ -1,63 +0,0 @@
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(IndexerRules::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(IndexerRules::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(IndexerRules::Name)
                            .string()
                            .not_null()
                            .unique_key(),
                    )
                    .col(ColumnDef::new(IndexerRules::Default).boolean().not_null())
                    .col(ColumnDef::new(IndexerRules::RulesBlob).binary().not_null())
                    .col(
                        ColumnDef::new(IndexerRules::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(IndexerRules::UpdatedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .to_owned(),
            )
            .await?;

        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(IndexerRules::Table).to_owned())
            .await?;
        Ok(())
    }
}

#[derive(DeriveIden)]
enum IndexerRules {
    Table,
    Id,
    Name,
    Default,
    RulesBlob,
    CreatedAt,
    UpdatedAt,
}
@@ -1,587 +0,0 @@
//! Migration: Create semantic tagging system
//!
//! This migration creates the complete semantic tagging infrastructure:
//! - Enhanced tag table with polymorphic naming
//! - Hierarchical relationships with closure table
//! - Context-aware tag applications
//! - Usage pattern tracking for intelligent suggestions
//! - Full-text search across all tag variants

use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Create the enhanced tag table
        manager
            .create_table(
                Table::create()
                    .table(Alias::new("tag"))
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Alias::new("id"))
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(Alias::new("uuid"))
                            .uuid()
                            .not_null()
                            .unique_key(),
                    )
                    .col(
                        ColumnDef::new(Alias::new("canonical_name"))
                            .string()
                            .not_null(),
                    )
                    .col(ColumnDef::new(Alias::new("display_name")).string())
                    .col(ColumnDef::new(Alias::new("formal_name")).string())
                    .col(ColumnDef::new(Alias::new("abbreviation")).string())
                    .col(ColumnDef::new(Alias::new("aliases")).json())
                    .col(ColumnDef::new(Alias::new("namespace")).string())
                    .col(
                        ColumnDef::new(Alias::new("tag_type"))
                            .string()
                            .not_null()
                            .default("standard"),
                    )
                    .col(ColumnDef::new(Alias::new("color")).string())
                    .col(ColumnDef::new(Alias::new("icon")).string())
                    .col(ColumnDef::new(Alias::new("description")).text())
                    .col(
                        ColumnDef::new(Alias::new("is_organizational_anchor"))
                            .boolean()
                            .default(false),
                    )
                    .col(
                        ColumnDef::new(Alias::new("privacy_level"))
                            .string()
                            .default("normal"),
                    )
                    .col(
                        ColumnDef::new(Alias::new("search_weight"))
                            .integer()
                            .default(100),
                    )
                    .col(ColumnDef::new(Alias::new("attributes")).json())
                    .col(ColumnDef::new(Alias::new("composition_rules")).json())
                    .col(
                        ColumnDef::new(Alias::new("created_at"))
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Alias::new("updated_at"))
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(ColumnDef::new(Alias::new("created_by_device")).uuid())
                    .to_owned(),
            )
            .await?;

        // Create indexes for the tag table
        manager
            .create_index(
                Index::create()
                    .name("idx_tag_canonical_name")
                    .table(Alias::new("tag"))
                    .col(Alias::new("canonical_name"))
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_tag_namespace")
                    .table(Alias::new("tag"))
                    .col(Alias::new("namespace"))
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_tag_type")
                    .table(Alias::new("tag"))
                    .col(Alias::new("tag_type"))
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_tag_privacy_level")
                    .table(Alias::new("tag"))
                    .col(Alias::new("privacy_level"))
                    .to_owned(),
            )
            .await?;

        // Create the tag_relationship table
        manager
            .create_table(
                Table::create()
                    .table(Alias::new("tag_relationship"))
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Alias::new("id"))
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(Alias::new("parent_tag_id"))
                            .integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Alias::new("child_tag_id"))
                            .integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Alias::new("relationship_type"))
                            .string()
                            .not_null()
                            .default("parent_child"),
                    )
                    .col(ColumnDef::new(Alias::new("strength")).float().default(1.0))
                    .col(
                        ColumnDef::new(Alias::new("created_at"))
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .foreign_key(
                        &mut ForeignKey::create()
                            .name("fk_tag_relationship_parent")
                            .from(Alias::new("tag_relationship"), Alias::new("parent_tag_id"))
                            .to(Alias::new("tag"), Alias::new("id"))
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .foreign_key(
                        &mut ForeignKey::create()
                            .name("fk_tag_relationship_child")
                            .from(Alias::new("tag_relationship"), Alias::new("child_tag_id"))
                            .to(Alias::new("tag"), Alias::new("id"))
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create indexes for tag_relationship
        manager
            .create_index(
                Index::create()
                    .name("idx_tag_relationship_parent")
                    .table(Alias::new("tag_relationship"))
                    .col(Alias::new("parent_tag_id"))
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_tag_relationship_child")
                    .table(Alias::new("tag_relationship"))
                    .col(Alias::new("child_tag_id"))
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_tag_relationship_type")
                    .table(Alias::new("tag_relationship"))
                    .col(Alias::new("relationship_type"))
                    .to_owned(),
            )
            .await?;

        // Create the tag_closure table for efficient hierarchical queries
        manager
            .create_table(
                Table::create()
                    .table(Alias::new("tag_closure"))
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Alias::new("ancestor_id"))
                            .integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Alias::new("descendant_id"))
                            .integer()
                            .not_null(),
                    )
                    .col(ColumnDef::new(Alias::new("depth")).integer().not_null())
                    .col(
                        ColumnDef::new(Alias::new("path_strength"))
                            .float()
                            .not_null(),
                    )
                    .primary_key(
                        Index::create()
                            .col(Alias::new("ancestor_id"))
                            .col(Alias::new("descendant_id")),
                    )
                    .foreign_key(
                        &mut ForeignKey::create()
                            .name("fk_tag_closure_ancestor")
                            .from(Alias::new("tag_closure"), Alias::new("ancestor_id"))
                            .to(Alias::new("tag"), Alias::new("id"))
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .foreign_key(
                        &mut ForeignKey::create()
                            .name("fk_tag_closure_descendant")
                            .from(Alias::new("tag_closure"), Alias::new("descendant_id"))
                            .to(Alias::new("tag"), Alias::new("id"))
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create indexes for tag_closure
        manager
            .create_index(
                Index::create()
                    .name("idx_tag_closure_ancestor")
                    .table(Alias::new("tag_closure"))
                    .col(Alias::new("ancestor_id"))
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_tag_closure_descendant")
                    .table(Alias::new("tag_closure"))
                    .col(Alias::new("descendant_id"))
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_tag_closure_depth")
                    .table(Alias::new("tag_closure"))
                    .col(Alias::new("depth"))
                    .to_owned(),
            )
            .await?;

        // Create the user_metadata_tag table
        manager
            .create_table(
                Table::create()
                    .table(Alias::new("user_metadata_tag"))
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Alias::new("id"))
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(Alias::new("user_metadata_id"))
                            .integer()
                            .not_null(),
                    )
                    .col(ColumnDef::new(Alias::new("tag_id")).integer().not_null())
                    .col(ColumnDef::new(Alias::new("applied_context")).string())
                    .col(ColumnDef::new(Alias::new("applied_variant")).string())
                    .col(
                        ColumnDef::new(Alias::new("confidence"))
                            .float()
                            .default(1.0),
                    )
                    .col(
                        ColumnDef::new(Alias::new("source"))
                            .string()
                            .default("user"),
                    )
                    .col(ColumnDef::new(Alias::new("instance_attributes")).json())
                    .col(
                        ColumnDef::new(Alias::new("created_at"))
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Alias::new("updated_at"))
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(ColumnDef::new(Alias::new("device_uuid")).uuid().not_null())
                    .foreign_key(
                        &mut ForeignKey::create()
                            .name("fk_user_metadata_tag_metadata")
                            .from(
                                Alias::new("user_metadata_tag"),
                                Alias::new("user_metadata_id"),
                            )
                            .to(Alias::new("user_metadata"), Alias::new("id"))
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .foreign_key(
                        &mut ForeignKey::create()
                            .name("fk_user_metadata_tag_tag")
                            .from(Alias::new("user_metadata_tag"), Alias::new("tag_id"))
                            .to(Alias::new("tag"), Alias::new("id"))
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create indexes for user_metadata_tag
        manager
            .create_index(
                Index::create()
                    .name("idx_user_metadata_tag_metadata")
                    .table(Alias::new("user_metadata_tag"))
                    .col(Alias::new("user_metadata_id"))
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_user_metadata_tag_tag")
                    .table(Alias::new("user_metadata_tag"))
                    .col(Alias::new("tag_id"))
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_user_metadata_tag_source")
                    .table(Alias::new("user_metadata_tag"))
                    .col(Alias::new("source"))
                    .to_owned(),
            )
            .await?;

        // Create the tag_usage_pattern table
        manager
            .create_table(
                Table::create()
                    .table(Alias::new("tag_usage_pattern"))
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Alias::new("id"))
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Alias::new("tag_id")).integer().not_null())
                    .col(
                        ColumnDef::new(Alias::new("co_occurrence_tag_id"))
                            .integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Alias::new("occurrence_count"))
                            .integer()
                            .default(1),
                    )
                    .col(
                        ColumnDef::new(Alias::new("last_used_together"))
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .foreign_key(
                        &mut ForeignKey::create()
                            .name("fk_tag_usage_pattern_tag")
                            .from(Alias::new("tag_usage_pattern"), Alias::new("tag_id"))
                            .to(Alias::new("tag"), Alias::new("id"))
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .foreign_key(
                        &mut ForeignKey::create()
                            .name("fk_tag_usage_pattern_co_occurrence")
                            .from(
                                Alias::new("tag_usage_pattern"),
                                Alias::new("co_occurrence_tag_id"),
                            )
                            .to(Alias::new("tag"), Alias::new("id"))
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // Create indexes for tag_usage_pattern
        manager
            .create_index(
                Index::create()
                    .name("idx_tag_usage_pattern_tag")
                    .table(Alias::new("tag_usage_pattern"))
                    .col(Alias::new("tag_id"))
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx_tag_usage_pattern_co_occurrence")
                    .table(Alias::new("tag_usage_pattern"))
                    .col(Alias::new("co_occurrence_tag_id"))
                    .to_owned(),
            )
            .await?;

        // Create full-text search indexes
        manager
            .create_index(
                Index::create()
                    .name("idx_tag_fulltext")
                    .table(Alias::new("tag"))
                    .col(Alias::new("canonical_name"))
                    .col(Alias::new("display_name"))
                    .col(Alias::new("formal_name"))
                    .col(Alias::new("abbreviation"))
                    .col(Alias::new("aliases"))
                    .col(Alias::new("description"))
                    .to_owned(),
            )
            .await?;

        // Create FTS5 virtual table for full-text search
        manager
            .get_connection()
            .execute_unprepared(
                "CREATE VIRTUAL TABLE IF NOT EXISTS tag_search_fts USING fts5(
                    tag_id UNINDEXED,
                    canonical_name,
                    display_name,
                    formal_name,
                    abbreviation,
                    aliases,
                    description,
                    content='tag',
                    content_rowid='id'
                )",
            )
            .await?;

        // Create triggers to maintain FTS5 table
        manager
            .get_connection()
            .execute_unprepared(
                "CREATE TRIGGER IF NOT EXISTS tag_ai AFTER INSERT ON tag BEGIN
                    INSERT INTO tag_search_fts(
                        tag_id, canonical_name, display_name, formal_name,
                        abbreviation, aliases, description
                    ) VALUES (
                        NEW.id, NEW.canonical_name, NEW.display_name, NEW.formal_name,
                        NEW.abbreviation, NEW.aliases, NEW.description
                    );
                END",
            )
            .await?;

        manager
            .get_connection()
            .execute_unprepared(
                "CREATE TRIGGER IF NOT EXISTS tag_au AFTER UPDATE ON tag BEGIN
                    UPDATE tag_search_fts SET
                        canonical_name = NEW.canonical_name,
                        display_name = NEW.display_name,
                        formal_name = NEW.formal_name,
                        abbreviation = NEW.abbreviation,
                        aliases = NEW.aliases,
                        description = NEW.description
                    WHERE tag_id = NEW.id;
                END",
            )
            .await?;

        manager
            .get_connection()
            .execute_unprepared(
                "CREATE TRIGGER IF NOT EXISTS tag_ad AFTER DELETE ON tag BEGIN
                    DELETE FROM tag_search_fts WHERE tag_id = OLD.id;
                END",
            )
            .await?;

        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Drop FTS5 table and triggers first
        manager
            .get_connection()
            .execute_unprepared("DROP TRIGGER IF EXISTS tag_ad")
            .await?;
        manager
            .get_connection()
            .execute_unprepared("DROP TRIGGER IF EXISTS tag_au")
            .await?;
        manager
            .get_connection()
            .execute_unprepared("DROP TRIGGER IF EXISTS tag_ai")
            .await?;
        manager
            .get_connection()
            .execute_unprepared("DROP TABLE IF EXISTS tag_search_fts")
            .await?;

        // Drop tables in reverse order
        manager
            .drop_table(
                Table::drop()
                    .table(Alias::new("tag_usage_pattern"))
                    .to_owned(),
            )
            .await?;

        manager
            .drop_table(
                Table::drop()
                    .table(Alias::new("user_metadata_tag"))
                    .to_owned(),
            )
            .await?;

        manager
            .drop_table(Table::drop().table(Alias::new("tag_closure")).to_owned())
            .await?;

        manager
            .drop_table(
                Table::drop()
                    .table(Alias::new("tag_relationship"))
                    .to_owned(),
            )
            .await?;

        manager
            .drop_table(Table::drop().table(Alias::new("tag")).to_owned())
            .await?;

        Ok(())
    }
}
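
One query shape this schema is built for, sketched with an assumed `db` connection handle and tag id: find every tag under a given tag, however deep, via the closure table.

use sea_orm::{ConnectionTrait, DatabaseBackend, Statement};

// All descendants of `tag_id`, nearest first; served by idx_tag_closure_ancestor.
let descendants = db
    .query_all(Statement::from_sql_and_values(
        DatabaseBackend::Sqlite,
        "SELECT descendant_id, depth FROM tag_closure
         WHERE ancestor_id = ? AND depth > 0
         ORDER BY depth",
        [tag_id.into()],
    ))
    .await?;

Note that the migration only creates tag_closure; keeping it consistent (inserting the transitive ancestor rows whenever a tag_relationship row is added) is left to the application layer, unlike tag_search_fts, which is trigger-maintained.
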
@@ -1,160 +0,0 @@
//! FTS5 Search Index Migration
//!
//! Creates FTS5 virtual table for high-performance full-text search
//! and associated triggers for real-time index updates.

use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Create FTS5 virtual table for search indexing
        manager
            .get_connection()
            .execute_unprepared(
                r#"
                CREATE VIRTUAL TABLE search_index USING fts5(
                    content='entries',
                    content_rowid='id',
                    name,
                    extension,
                    tokenize="unicode61 remove_diacritics 2 tokenchars '.@-_'",
                    prefix='2,3'
                );
                "#,
            )
            .await?;

        // Create trigger for INSERT operations
        manager
            .get_connection()
            .execute_unprepared(
                r#"
                CREATE TRIGGER IF NOT EXISTS entries_search_insert
                AFTER INSERT ON entries WHEN new.kind = 0
                BEGIN
                    INSERT INTO search_index(rowid, name, extension)
                    VALUES (new.id, new.name, new.extension);
                END;
                "#,
            )
            .await?;

        // Create trigger for UPDATE operations
        manager
            .get_connection()
            .execute_unprepared(
                r#"
                CREATE TRIGGER IF NOT EXISTS entries_search_update
                AFTER UPDATE ON entries WHEN new.kind = 0
                BEGIN
                    UPDATE search_index SET
                        name = new.name,
                        extension = new.extension
                    WHERE rowid = new.id;
                END;
                "#,
            )
            .await?;

        // Create trigger for DELETE operations
        manager
            .get_connection()
            .execute_unprepared(
                r#"
                CREATE TRIGGER IF NOT EXISTS entries_search_delete
                AFTER DELETE ON entries WHEN old.kind = 0
                BEGIN
                    DELETE FROM search_index WHERE rowid = old.id;
                END;
                "#,
            )
            .await?;

        // Populate FTS5 index with existing file entries
        manager
            .get_connection()
            .execute_unprepared(
                r#"
                INSERT INTO search_index(rowid, name, extension)
                SELECT id, name, extension FROM entries WHERE kind = 0;
                "#,
            )
            .await?;

        // Create search analytics table for query optimization
        manager
            .get_connection()
            .execute_unprepared(
                r#"
                CREATE TABLE search_analytics (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    query_text TEXT NOT NULL,
                    query_hash TEXT NOT NULL,
                    search_mode TEXT NOT NULL,
                    execution_time_ms INTEGER NOT NULL,
                    result_count INTEGER NOT NULL,
                    fts5_used BOOLEAN DEFAULT TRUE,
                    semantic_used BOOLEAN DEFAULT FALSE,
                    user_clicked_result BOOLEAN DEFAULT FALSE,
                    clicked_result_position INTEGER,
                    created_at TEXT NOT NULL DEFAULT (datetime('now'))
                );
                "#,
            )
            .await?;

        // Create index on query_hash for performance analytics
        manager
            .get_connection()
            .execute_unprepared(
                r#"
                CREATE INDEX idx_search_analytics_query_hash
                ON search_analytics(query_hash);
                "#,
            )
            .await?;

        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Drop analytics table and index
        manager
            .get_connection()
            .execute_unprepared("DROP INDEX IF EXISTS idx_search_analytics_query_hash;")
            .await?;

        manager
            .get_connection()
            .execute_unprepared("DROP TABLE IF EXISTS search_analytics;")
            .await?;

        // Drop triggers
        manager
            .get_connection()
            .execute_unprepared("DROP TRIGGER IF EXISTS entries_search_delete;")
            .await?;

        manager
            .get_connection()
            .execute_unprepared("DROP TRIGGER IF EXISTS entries_search_update;")
            .await?;

        manager
            .get_connection()
            .execute_unprepared("DROP TRIGGER IF EXISTS entries_search_insert;")
            .await?;

        // Drop FTS5 virtual table
        manager
            .get_connection()
            .execute_unprepared("DROP TABLE IF EXISTS search_index;")
            .await?;

        Ok(())
    }
}
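
The tokenizer and prefix settings above define the query shape this index serves; a minimal sketch of the lookup side, assuming a `db` connection handle and an illustrative search string:

use sea_orm::{ConnectionTrait, DatabaseBackend, Statement};

// prefix='2,3' makes short prefix queries such as "rep*" index-backed, and
// content_rowid='id' means rowid here is the entries.id to join back on.
let hits = db
    .query_all(Statement::from_sql_and_values(
        DatabaseBackend::Sqlite,
        "SELECT rowid, name, extension FROM search_index WHERE search_index MATCH ? LIMIT 50",
        ["rep*".into()],
    ))
    .await?;
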
@@ -1,71 +0,0 @@
//! Migration to add sync fields to devices table
//!
//! Extends the devices table with sync coordination fields.
//! This eliminates the need for a separate sync_partners table - if a device
//! is registered in a library, it's a sync partner.

use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Add sync_enabled column (defaults to true - all registered devices sync by default)
        manager
            .alter_table(
                Table::alter()
                    .table(Devices::Table)
                    .add_column(
                        ColumnDef::new(Devices::SyncEnabled)
                            .boolean()
                            .not_null()
                            .default(true),
                    )
                    .to_owned(),
            )
            .await?;

        // Add last_sync_at column to track last successful sync
        manager
            .alter_table(
                Table::alter()
                    .table(Devices::Table)
                    .add_column(ColumnDef::new(Devices::LastSyncAt).timestamp_with_time_zone())
                    .to_owned(),
            )
            .await?;

        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .alter_table(
                Table::alter()
                    .table(Devices::Table)
                    .drop_column(Devices::SyncEnabled)
                    .to_owned(),
            )
            .await?;

        manager
            .alter_table(
                Table::alter()
                    .table(Devices::Table)
                    .drop_column(Devices::LastSyncAt)
                    .to_owned(),
            )
            .await?;

        Ok(())
    }
}

#[derive(DeriveIden)]
enum Devices {
    Table,
    SyncEnabled,
    LastSyncAt,
}
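
With these two columns, enumerating sync partners becomes a filter over devices rather than a join against a dedicated table. Roughly, assuming a `db` connection handle and SQLite's 0/1 boolean storage:

use sea_orm::{ConnectionTrait, DatabaseBackend, Statement};

let partners = db
    .query_all(Statement::from_string(
        DatabaseBackend::Sqlite,
        "SELECT uuid, name, last_sync_at FROM devices WHERE sync_enabled = 1".to_owned(),
    ))
    .await?;
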
@@ -2,31 +2,13 @@
 use sea_orm_migration::prelude::*;

-mod m20240101_000001_initial_schema;
-mod m20240102_000001_populate_lookups;
-mod m20240107_000001_create_collections;
-mod m20250109_000001_create_sidecars;
-mod m20250110_000001_refactor_volumes_table;
-mod m20250112_000001_create_indexer_rules;
-mod m20250115_000001_semantic_tags;
-mod m20250120_000001_create_fts5_search_index;
-mod m20251009_000001_add_sync_to_devices;
+mod m20240101_000001_unified_schema;

 pub struct Migrator;

 #[async_trait::async_trait]
 impl MigratorTrait for Migrator {
     fn migrations() -> Vec<Box<dyn MigrationTrait>> {
-        vec![
-            Box::new(m20240101_000001_initial_schema::Migration),
-            Box::new(m20240102_000001_populate_lookups::Migration),
-            Box::new(m20240107_000001_create_collections::Migration),
-            Box::new(m20250109_000001_create_sidecars::Migration),
-            Box::new(m20250110_000001_refactor_volumes_table::Migration),
-            Box::new(m20250112_000001_create_indexer_rules::Migration),
-            Box::new(m20250115_000001_semantic_tags::Migration),
-            Box::new(m20250120_000001_create_fts5_search_index::Migration),
-            Box::new(m20251009_000001_add_sync_to_devices::Migration),
-        ]
+        vec![Box::new(m20240101_000001_unified_schema::Migration)]
     }
 }
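
Driving the consolidated migrator is unchanged from the standard sea-orm-migration flow; only the list it applies shrank to the single unified schema migration. A sketch (the database URL is an assumption):

use sea_orm_migration::MigratorTrait;

let db = sea_orm::Database::connect("sqlite://library.db?mode=rwc").await?;
Migrator::up(&db, None).await?; // applies m20240101_000001_unified_schema
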
Submodule docs updated: 7031f4c394...ffcd1266ec